Diffstat (limited to 'src/cmd/go/internal')
-rw-r--r--  src/cmd/go/internal/auth/auth.go | 28
-rw-r--r--  src/cmd/go/internal/auth/netrc.go | 110
-rw-r--r--  src/cmd/go/internal/auth/netrc_test.go | 58
-rw-r--r--  src/cmd/go/internal/base/base.go | 223
-rw-r--r--  src/cmd/go/internal/base/env.go | 46
-rw-r--r--  src/cmd/go/internal/base/flag.go | 85
-rw-r--r--  src/cmd/go/internal/base/goflags.go | 162
-rw-r--r--  src/cmd/go/internal/base/limit.go | 84
-rw-r--r--  src/cmd/go/internal/base/path.go | 79
-rw-r--r--  src/cmd/go/internal/base/signal.go | 31
-rw-r--r--  src/cmd/go/internal/base/signal_notunix.go | 17
-rw-r--r--  src/cmd/go/internal/base/signal_unix.go | 18
-rw-r--r--  src/cmd/go/internal/base/tool.go | 41
-rw-r--r--  src/cmd/go/internal/bug/bug.go | 224
-rw-r--r--  src/cmd/go/internal/cache/cache.go | 627
-rw-r--r--  src/cmd/go/internal/cache/cache_test.go | 285
-rw-r--r--  src/cmd/go/internal/cache/default.go | 105
-rw-r--r--  src/cmd/go/internal/cache/hash.go | 190
-rw-r--r--  src/cmd/go/internal/cache/hash_test.go | 51
-rw-r--r--  src/cmd/go/internal/cache/prog.go | 427
-rw-r--r--  src/cmd/go/internal/cfg/cfg.go | 619
-rw-r--r--  src/cmd/go/internal/clean/clean.go | 428
-rw-r--r--  src/cmd/go/internal/cmdflag/flag.go | 122
-rw-r--r--  src/cmd/go/internal/doc/doc.go | 134
-rw-r--r--  src/cmd/go/internal/envcmd/env.go | 691
-rw-r--r--  src/cmd/go/internal/envcmd/env_test.go | 93
-rw-r--r--  src/cmd/go/internal/fix/fix.go | 85
-rw-r--r--  src/cmd/go/internal/fmtcmd/fmt.go | 115
-rw-r--r--  src/cmd/go/internal/fsys/fsys.go | 784
-rw-r--r--  src/cmd/go/internal/fsys/fsys_test.go | 1139
-rw-r--r--  src/cmd/go/internal/generate/generate.go | 502
-rw-r--r--  src/cmd/go/internal/generate/generate_test.go | 259
-rw-r--r--  src/cmd/go/internal/get/get.go | 640
-rw-r--r--  src/cmd/go/internal/get/tag_test.go | 100
-rw-r--r--  src/cmd/go/internal/gover/gomod.go | 43
-rw-r--r--  src/cmd/go/internal/gover/gover.go | 254
-rw-r--r--  src/cmd/go/internal/gover/gover_test.go | 160
-rw-r--r--  src/cmd/go/internal/gover/local.go | 42
-rw-r--r--  src/cmd/go/internal/gover/mod.go | 127
-rw-r--r--  src/cmd/go/internal/gover/mod_test.go | 72
-rw-r--r--  src/cmd/go/internal/gover/toolchain.go | 98
-rw-r--r--  src/cmd/go/internal/gover/toolchain_test.go | 19
-rw-r--r--  src/cmd/go/internal/gover/version.go | 74
-rw-r--r--  src/cmd/go/internal/help/help.go | 195
-rw-r--r--  src/cmd/go/internal/help/helpdoc.go | 945
-rw-r--r--  src/cmd/go/internal/imports/build.go | 374
-rw-r--r--  src/cmd/go/internal/imports/read.go | 263
-rw-r--r--  src/cmd/go/internal/imports/read_test.go | 254
-rw-r--r--  src/cmd/go/internal/imports/scan.go | 107
-rw-r--r--  src/cmd/go/internal/imports/scan_test.go | 93
-rw-r--r--  src/cmd/go/internal/imports/tags.go | 61
-rw-r--r--  src/cmd/go/internal/imports/testdata/android/.h.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/android/a_android.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/android/b_android_arm64.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/android/c_linux.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/android/d_linux_arm64.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/android/e.go | 6
-rw-r--r--  src/cmd/go/internal/imports/testdata/android/f.go | 6
-rw-r--r--  src/cmd/go/internal/imports/testdata/android/g.go | 6
-rw-r--r--  src/cmd/go/internal/imports/testdata/android/tags.txt | 1
-rw-r--r--  src/cmd/go/internal/imports/testdata/android/want.txt | 6
-rw-r--r--  src/cmd/go/internal/imports/testdata/illumos/.h.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/illumos/a_illumos.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/illumos/b_illumos_amd64.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/illumos/c_solaris.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/illumos/d_solaris_amd64.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/illumos/e.go | 6
-rw-r--r--  src/cmd/go/internal/imports/testdata/illumos/f.go | 6
-rw-r--r--  src/cmd/go/internal/imports/testdata/illumos/g.go | 6
-rw-r--r--  src/cmd/go/internal/imports/testdata/illumos/tags.txt | 1
-rw-r--r--  src/cmd/go/internal/imports/testdata/illumos/want.txt | 6
-rw-r--r--  src/cmd/go/internal/imports/testdata/star/tags.txt | 1
-rw-r--r--  src/cmd/go/internal/imports/testdata/star/want.txt | 4
-rw-r--r--  src/cmd/go/internal/imports/testdata/star/x.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/star/x1.go | 6
-rw-r--r--  src/cmd/go/internal/imports/testdata/star/x_darwin.go | 3
-rw-r--r--  src/cmd/go/internal/imports/testdata/star/x_windows.go | 3
-rw-r--r--  src/cmd/go/internal/list/context.go | 39
-rw-r--r--  src/cmd/go/internal/list/list.go | 1001
-rw-r--r--  src/cmd/go/internal/load/flag.go | 96
-rw-r--r--  src/cmd/go/internal/load/flag_test.go | 135
-rw-r--r--  src/cmd/go/internal/load/godebug.go | 126
-rw-r--r--  src/cmd/go/internal/load/path.go | 18
-rw-r--r--  src/cmd/go/internal/load/pkg.go | 3554
-rw-r--r--  src/cmd/go/internal/load/pkg_test.go | 82
-rw-r--r--  src/cmd/go/internal/load/search.go | 57
-rw-r--r--  src/cmd/go/internal/load/test.go | 991
-rw-r--r--  src/cmd/go/internal/lockedfile/internal/filelock/filelock.go | 83
-rw-r--r--  src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go | 210
-rw-r--r--  src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go | 35
-rw-r--r--  src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go | 210
-rw-r--r--  src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go | 40
-rw-r--r--  src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go | 57
-rw-r--r--  src/cmd/go/internal/lockedfile/lockedfile.go | 187
-rw-r--r--  src/cmd/go/internal/lockedfile/lockedfile_filelock.go | 65
-rw-r--r--  src/cmd/go/internal/lockedfile/lockedfile_plan9.go | 94
-rw-r--r--  src/cmd/go/internal/lockedfile/lockedfile_test.go | 286
-rw-r--r--  src/cmd/go/internal/lockedfile/mutex.go | 67
-rw-r--r--  src/cmd/go/internal/lockedfile/transform_test.go | 105
-rw-r--r--  src/cmd/go/internal/mmap/mmap.go | 31
-rw-r--r--  src/cmd/go/internal/mmap/mmap_other.go | 21
-rw-r--r--  src/cmd/go/internal/mmap/mmap_unix.go | 36
-rw-r--r--  src/cmd/go/internal/mmap/mmap_windows.go | 41
-rw-r--r--  src/cmd/go/internal/modcmd/download.go | 389
-rw-r--r--  src/cmd/go/internal/modcmd/edit.go | 545
-rw-r--r--  src/cmd/go/internal/modcmd/graph.go | 96
-rw-r--r--  src/cmd/go/internal/modcmd/init.go | 52
-rw-r--r--  src/cmd/go/internal/modcmd/mod.go | 33
-rw-r--r--  src/cmd/go/internal/modcmd/tidy.go | 139
-rw-r--r--  src/cmd/go/internal/modcmd/vendor.go | 431
-rw-r--r--  src/cmd/go/internal/modcmd/verify.go | 143
-rw-r--r--  src/cmd/go/internal/modcmd/why.go | 143
-rw-r--r--  src/cmd/go/internal/modconv/convert.go | 105
-rw-r--r--  src/cmd/go/internal/modconv/dep.go | 132
-rw-r--r--  src/cmd/go/internal/modconv/glide.go | 41
-rw-r--r--  src/cmd/go/internal/modconv/glock.go | 23
-rw-r--r--  src/cmd/go/internal/modconv/godeps.go | 30
-rw-r--r--  src/cmd/go/internal/modconv/modconv.go | 19
-rw-r--r--  src/cmd/go/internal/modconv/modconv_test.go | 69
-rw-r--r--  src/cmd/go/internal/modconv/testdata/cockroach.glock | 41
-rw-r--r--  src/cmd/go/internal/modconv/testdata/cockroach.out | 31
-rw-r--r--  src/cmd/go/internal/modconv/testdata/dockermachine.godeps | 159
-rw-r--r--  src/cmd/go/internal/modconv/testdata/dockermachine.out | 33
-rw-r--r--  src/cmd/go/internal/modconv/testdata/dockerman.glide | 52
-rw-r--r--  src/cmd/go/internal/modconv/testdata/dockerman.out | 16
-rw-r--r--  src/cmd/go/internal/modconv/testdata/govmomi.out | 5
-rw-r--r--  src/cmd/go/internal/modconv/testdata/govmomi.vmanifest | 46
-rw-r--r--  src/cmd/go/internal/modconv/testdata/juju.out | 106
-rw-r--r--  src/cmd/go/internal/modconv/testdata/juju.tsv | 106
-rw-r--r--  src/cmd/go/internal/modconv/testdata/moby.out | 105
-rw-r--r--  src/cmd/go/internal/modconv/testdata/moby.vconf | 149
-rw-r--r--  src/cmd/go/internal/modconv/testdata/panicparse.out | 8
-rw-r--r--  src/cmd/go/internal/modconv/testdata/panicparse.vyml | 17
-rw-r--r--  src/cmd/go/internal/modconv/testdata/prometheus.out | 258
-rw-r--r--  src/cmd/go/internal/modconv/testdata/prometheus.vjson | 1605
-rw-r--r--  src/cmd/go/internal/modconv/testdata/traefik.dep | 79
-rw-r--r--  src/cmd/go/internal/modconv/testdata/traefik.out | 14
-rw-r--r--  src/cmd/go/internal/modconv/testdata/upspin.dep | 57
-rw-r--r--  src/cmd/go/internal/modconv/testdata/upspin.out | 8
-rw-r--r--  src/cmd/go/internal/modconv/tsv.go | 23
-rw-r--r--  src/cmd/go/internal/modconv/vconf.go | 26
-rw-r--r--  src/cmd/go/internal/modconv/vjson.go | 29
-rw-r--r--  src/cmd/go/internal/modconv/vmanifest.go | 29
-rw-r--r--  src/cmd/go/internal/modconv/vyml.go | 41
-rw-r--r--  src/cmd/go/internal/modfetch/bootstrap.go | 17
-rw-r--r--  src/cmd/go/internal/modfetch/cache.go | 815
-rw-r--r--  src/cmd/go/internal/modfetch/cache_test.go | 27
-rw-r--r--  src/cmd/go/internal/modfetch/codehost/codehost.go | 390
-rw-r--r--  src/cmd/go/internal/modfetch/codehost/git.go | 915
-rw-r--r--  src/cmd/go/internal/modfetch/codehost/git_test.go | 801
-rw-r--r--  src/cmd/go/internal/modfetch/codehost/shell.go | 141
-rw-r--r--  src/cmd/go/internal/modfetch/codehost/svn.go | 168
-rw-r--r--  src/cmd/go/internal/modfetch/codehost/vcs.go | 644
-rw-r--r--  src/cmd/go/internal/modfetch/coderepo.go | 1189
-rw-r--r--  src/cmd/go/internal/modfetch/coderepo_test.go | 965
-rw-r--r--  src/cmd/go/internal/modfetch/fetch.go | 998
-rw-r--r--  src/cmd/go/internal/modfetch/key.go | 9
-rw-r--r--  src/cmd/go/internal/modfetch/proxy.go | 449
-rw-r--r--  src/cmd/go/internal/modfetch/repo.go | 411
-rw-r--r--  src/cmd/go/internal/modfetch/sumdb.go | 315
-rw-r--r--  src/cmd/go/internal/modfetch/toolchain.go | 181
-rw-r--r--  src/cmd/go/internal/modfetch/zip_sum_test/testdata/zip_sums.csv | 2119
-rw-r--r--  src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go | 232
-rw-r--r--  src/cmd/go/internal/modget/get.go | 1946
-rw-r--r--  src/cmd/go/internal/modget/query.go | 358
-rw-r--r--  src/cmd/go/internal/modindex/build.go | 950
-rw-r--r--  src/cmd/go/internal/modindex/build_read.go | 594
-rw-r--r--  src/cmd/go/internal/modindex/index_format.txt | 63
-rw-r--r--  src/cmd/go/internal/modindex/index_test.go | 104
-rw-r--r--  src/cmd/go/internal/modindex/read.go | 1037
-rw-r--r--  src/cmd/go/internal/modindex/scan.go | 290
-rw-r--r--  src/cmd/go/internal/modindex/syslist.go | 78
-rw-r--r--  src/cmd/go/internal/modindex/syslist_test.go | 65
-rw-r--r--  src/cmd/go/internal/modindex/testdata/ignore_non_source/a.syso | 1
-rw-r--r--  src/cmd/go/internal/modindex/testdata/ignore_non_source/b.go | 0
-rw-r--r--  src/cmd/go/internal/modindex/testdata/ignore_non_source/bar.json | 0
-rw-r--r--  src/cmd/go/internal/modindex/testdata/ignore_non_source/baz.log | 0
-rw-r--r--  src/cmd/go/internal/modindex/testdata/ignore_non_source/c.c | 0
-rw-r--r--  src/cmd/go/internal/modindex/write.go | 164
-rw-r--r--  src/cmd/go/internal/modinfo/info.go | 85
-rw-r--r--  src/cmd/go/internal/modload/build.go | 449
-rw-r--r--  src/cmd/go/internal/modload/buildlist.go | 1497
-rw-r--r--  src/cmd/go/internal/modload/edit.go | 855
-rw-r--r--  src/cmd/go/internal/modload/help.go | 64
-rw-r--r--  src/cmd/go/internal/modload/import.go | 773
-rw-r--r--  src/cmd/go/internal/modload/import_test.go | 97
-rw-r--r--  src/cmd/go/internal/modload/init.go | 1979
-rw-r--r--  src/cmd/go/internal/modload/list.go | 311
-rw-r--r--  src/cmd/go/internal/modload/load.go | 2343
-rw-r--r--  src/cmd/go/internal/modload/modfile.go | 806
-rw-r--r--  src/cmd/go/internal/modload/mvs.go | 136
-rw-r--r--  src/cmd/go/internal/modload/mvs_test.go | 31
-rw-r--r--  src/cmd/go/internal/modload/query.go | 1265
-rw-r--r--  src/cmd/go/internal/modload/query_test.go | 202
-rw-r--r--  src/cmd/go/internal/modload/search.go | 304
-rw-r--r--  src/cmd/go/internal/modload/stat_openfile.go | 28
-rw-r--r--  src/cmd/go/internal/modload/stat_unix.go | 32
-rw-r--r--  src/cmd/go/internal/modload/stat_windows.go | 21
-rw-r--r--  src/cmd/go/internal/modload/vendor.go | 231
-rw-r--r--  src/cmd/go/internal/mvs/errors.go | 105
-rw-r--r--  src/cmd/go/internal/mvs/graph.go | 226
-rw-r--r--  src/cmd/go/internal/mvs/mvs.go | 488
-rw-r--r--  src/cmd/go/internal/mvs/mvs_test.go | 635
-rw-r--r--  src/cmd/go/internal/par/queue.go | 88
-rw-r--r--  src/cmd/go/internal/par/queue_test.go | 79
-rw-r--r--  src/cmd/go/internal/par/work.go | 223
-rw-r--r--  src/cmd/go/internal/par/work_test.go | 76
-rw-r--r--  src/cmd/go/internal/robustio/robustio.go | 53
-rw-r--r--  src/cmd/go/internal/robustio/robustio_darwin.go | 21
-rw-r--r--  src/cmd/go/internal/robustio/robustio_flaky.go | 91
-rw-r--r--  src/cmd/go/internal/robustio/robustio_other.go | 27
-rw-r--r--  src/cmd/go/internal/robustio/robustio_windows.go | 27
-rw-r--r--  src/cmd/go/internal/run/run.go | 219
-rw-r--r--  src/cmd/go/internal/script/cmds.go | 1125
-rw-r--r--  src/cmd/go/internal/script/cmds_other.go | 11
-rw-r--r--  src/cmd/go/internal/script/cmds_posix.go | 16
-rw-r--r--  src/cmd/go/internal/script/conds.go | 205
-rw-r--r--  src/cmd/go/internal/script/engine.go | 788
-rw-r--r--  src/cmd/go/internal/script/errors.go | 64
-rw-r--r--  src/cmd/go/internal/script/scripttest/scripttest.go | 143
-rw-r--r--  src/cmd/go/internal/script/state.go | 236
-rw-r--r--  src/cmd/go/internal/search/search.go | 512
-rw-r--r--  src/cmd/go/internal/str/path.go | 133
-rw-r--r--  src/cmd/go/internal/str/str.go | 113
-rw-r--r--  src/cmd/go/internal/str/str_test.go | 185
-rw-r--r--  src/cmd/go/internal/test/cover.go | 85
-rw-r--r--  src/cmd/go/internal/test/flagdefs.go | 77
-rw-r--r--  src/cmd/go/internal/test/flagdefs_test.go | 76
-rw-r--r--  src/cmd/go/internal/test/genflags.go | 84
-rw-r--r--  src/cmd/go/internal/test/internal/genflags/testflag.go | 35
-rw-r--r--  src/cmd/go/internal/test/internal/genflags/vetflag.go | 68
-rw-r--r--  src/cmd/go/internal/test/test.go | 1942
-rw-r--r--  src/cmd/go/internal/test/test_nonunix.go | 12
-rw-r--r--  src/cmd/go/internal/test/test_unix.go | 16
-rw-r--r--  src/cmd/go/internal/test/testflag.go | 416
-rw-r--r--  src/cmd/go/internal/tool/tool.go | 224
-rw-r--r--  src/cmd/go/internal/toolchain/exec.go | 55
-rw-r--r--  src/cmd/go/internal/toolchain/exec_stub.go | 13
-rw-r--r--  src/cmd/go/internal/toolchain/path_none.go | 21
-rw-r--r--  src/cmd/go/internal/toolchain/path_plan9.go | 29
-rw-r--r--  src/cmd/go/internal/toolchain/path_unix.go | 46
-rw-r--r--  src/cmd/go/internal/toolchain/path_windows.go | 78
-rw-r--r--  src/cmd/go/internal/toolchain/select.go | 649
-rw-r--r--  src/cmd/go/internal/toolchain/switch.go | 231
-rw-r--r--  src/cmd/go/internal/toolchain/toolchain_test.go | 66
-rw-r--r--  src/cmd/go/internal/toolchain/umask_none.go | 13
-rw-r--r--  src/cmd/go/internal/toolchain/umask_unix.go | 28
-rw-r--r--  src/cmd/go/internal/trace/trace.go | 206
-rw-r--r--  src/cmd/go/internal/vcs/discovery.go | 97
-rw-r--r--  src/cmd/go/internal/vcs/discovery_test.go | 110
-rw-r--r--  src/cmd/go/internal/vcs/vcs.go | 1688
-rw-r--r--  src/cmd/go/internal/vcs/vcs_test.go | 581
-rw-r--r--  src/cmd/go/internal/vcweb/auth.go | 108
-rw-r--r--  src/cmd/go/internal/vcweb/bzr.go | 18
-rw-r--r--  src/cmd/go/internal/vcweb/dir.go | 19
-rw-r--r--  src/cmd/go/internal/vcweb/fossil.go | 61
-rw-r--r--  src/cmd/go/internal/vcweb/git.go | 52
-rw-r--r--  src/cmd/go/internal/vcweb/hg.go | 123
-rw-r--r--  src/cmd/go/internal/vcweb/insecure.go | 42
-rw-r--r--  src/cmd/go/internal/vcweb/script.go | 345
-rw-r--r--  src/cmd/go/internal/vcweb/svn.go | 199
-rw-r--r--  src/cmd/go/internal/vcweb/vcstest/vcstest.go | 169
-rw-r--r--  src/cmd/go/internal/vcweb/vcstest/vcstest_test.go | 170
-rw-r--r--  src/cmd/go/internal/vcweb/vcweb.go | 425
-rw-r--r--  src/cmd/go/internal/vcweb/vcweb_test.go | 63
-rw-r--r--  src/cmd/go/internal/version/version.go | 173
-rw-r--r--  src/cmd/go/internal/vet/vet.go | 120
-rw-r--r--  src/cmd/go/internal/vet/vetflag.go | 191
-rw-r--r--  src/cmd/go/internal/web/api.go | 246
-rw-r--r--  src/cmd/go/internal/web/bootstrap.go | 25
-rw-r--r--  src/cmd/go/internal/web/file_test.go | 60
-rw-r--r--  src/cmd/go/internal/web/http.go | 395
-rw-r--r--  src/cmd/go/internal/web/url.go | 95
-rw-r--r--  src/cmd/go/internal/web/url_other.go | 21
-rw-r--r--  src/cmd/go/internal/web/url_other_test.go | 36
-rw-r--r--  src/cmd/go/internal/web/url_test.go | 77
-rw-r--r--  src/cmd/go/internal/web/url_windows.go | 43
-rw-r--r--  src/cmd/go/internal/web/url_windows_test.go | 94
-rw-r--r--  src/cmd/go/internal/work/action.go | 917
-rw-r--r--  src/cmd/go/internal/work/build.go | 956
-rw-r--r--  src/cmd/go/internal/work/build_test.go | 283
-rw-r--r--  src/cmd/go/internal/work/buildid.go | 703
-rw-r--r--  src/cmd/go/internal/work/exec.go | 3936
-rw-r--r--  src/cmd/go/internal/work/exec_test.go | 87
-rw-r--r--  src/cmd/go/internal/work/gc.go | 728
-rw-r--r--  src/cmd/go/internal/work/gccgo.go | 677
-rw-r--r--  src/cmd/go/internal/work/init.go | 424
-rw-r--r--  src/cmd/go/internal/work/security.go | 334
-rw-r--r--  src/cmd/go/internal/work/security_test.go | 318
-rw-r--r--  src/cmd/go/internal/work/shell_test.go | 139
-rw-r--r--  src/cmd/go/internal/workcmd/edit.go | 340
-rw-r--r--  src/cmd/go/internal/workcmd/init.go | 66
-rw-r--r--  src/cmd/go/internal/workcmd/sync.go | 146
-rw-r--r--  src/cmd/go/internal/workcmd/use.go | 254
-rw-r--r--  src/cmd/go/internal/workcmd/work.go | 78
295 files changed, 79519 insertions, 0 deletions
diff --git a/src/cmd/go/internal/auth/auth.go b/src/cmd/go/internal/auth/auth.go
new file mode 100644
index 0000000..77edeb8
--- /dev/null
+++ b/src/cmd/go/internal/auth/auth.go
@@ -0,0 +1,28 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package auth provides access to user-provided authentication credentials.
+package auth
+
+import "net/http"
+
+// AddCredentials fills in the user's credentials for req, if any.
+// The return value reports whether any matching credentials were found.
+func AddCredentials(req *http.Request) (added bool) {
+ host := req.Host
+ if host == "" {
+ host = req.URL.Hostname()
+ }
+
+ // TODO(golang.org/issue/26232): Support arbitrary user-provided credentials.
+ netrcOnce.Do(readNetrc)
+ for _, l := range netrc {
+ if l.machine == host {
+ req.SetBasicAuth(l.login, l.password)
+ return true
+ }
+ }
+
+ return false
+}
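
For illustration, a minimal standalone sketch of the lookup AddCredentials performs: match the request host against parsed credentials and attach HTTP basic auth. The host and credential values here are made up.

package main

import (
	"fmt"
	"net/http"
)

// netrcLine mirrors the struct defined in netrc.go below.
type netrcLine struct {
	machine, login, password string
}

func main() {
	creds := []netrcLine{{machine: "example.com", login: "gopher", password: "secret"}}

	req, err := http.NewRequest("GET", "https://example.com/module.zip", nil)
	if err != nil {
		panic(err)
	}
	host := req.Host
	if host == "" {
		host = req.URL.Hostname()
	}
	for _, l := range creds {
		if l.machine == host {
			// SetBasicAuth writes the Authorization header:
			// "Basic " + base64("gopher:secret").
			req.SetBasicAuth(l.login, l.password)
			break
		}
	}
	fmt.Println(req.Header.Get("Authorization"))
}
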
diff --git a/src/cmd/go/internal/auth/netrc.go b/src/cmd/go/internal/auth/netrc.go
new file mode 100644
index 0000000..0107f20
--- /dev/null
+++ b/src/cmd/go/internal/auth/netrc.go
@@ -0,0 +1,110 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package auth
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+type netrcLine struct {
+ machine string
+ login string
+ password string
+}
+
+var (
+ netrcOnce sync.Once
+ netrc []netrcLine
+ netrcErr error
+)
+
+func parseNetrc(data string) []netrcLine {
+ // See https://www.gnu.org/software/inetutils/manual/html_node/The-_002enetrc-file.html
+ // for documentation on the .netrc format.
+ var nrc []netrcLine
+ var l netrcLine
+ inMacro := false
+ for _, line := range strings.Split(data, "\n") {
+ if inMacro {
+ if line == "" {
+ inMacro = false
+ }
+ continue
+ }
+
+ f := strings.Fields(line)
+ i := 0
+ for ; i < len(f)-1; i += 2 {
+ // Reset at each "machine" token.
+ // “The auto-login process searches the .netrc file for a machine token
+ // that matches […]. Once a match is made, the subsequent .netrc tokens
+ // are processed, stopping when the end of file is reached or another
+ // machine or a default token is encountered.”
+ switch f[i] {
+ case "machine":
+ l = netrcLine{machine: f[i+1]}
+ case "default":
+ break
+ case "login":
+ l.login = f[i+1]
+ case "password":
+ l.password = f[i+1]
+ case "macdef":
+ // “A macro is defined with the specified name; its contents begin with
+ // the next .netrc line and continue until a null line (consecutive
+ // new-line characters) is encountered.”
+ inMacro = true
+ }
+ if l.machine != "" && l.login != "" && l.password != "" {
+ nrc = append(nrc, l)
+ l = netrcLine{}
+ }
+ }
+
+ if i < len(f) && f[i] == "default" {
+ // “There can be only one default token, and it must be after all machine tokens.”
+ break
+ }
+ }
+
+ return nrc
+}
+
+func netrcPath() (string, error) {
+ if env := os.Getenv("NETRC"); env != "" {
+ return env, nil
+ }
+ dir, err := os.UserHomeDir()
+ if err != nil {
+ return "", err
+ }
+ base := ".netrc"
+ if runtime.GOOS == "windows" {
+ base = "_netrc"
+ }
+ return filepath.Join(dir, base), nil
+}
+
+func readNetrc() {
+ path, err := netrcPath()
+ if err != nil {
+ netrcErr = err
+ return
+ }
+
+ data, err := os.ReadFile(path)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ netrcErr = err
+ }
+ return
+ }
+
+ netrc = parseNetrc(string(data))
+}
diff --git a/src/cmd/go/internal/auth/netrc_test.go b/src/cmd/go/internal/auth/netrc_test.go
new file mode 100644
index 0000000..e06c545
--- /dev/null
+++ b/src/cmd/go/internal/auth/netrc_test.go
@@ -0,0 +1,58 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package auth
+
+import (
+ "reflect"
+ "testing"
+)
+
+var testNetrc = `
+machine incomplete
+password none
+
+machine api.github.com
+ login user
+ password pwd
+
+machine incomlete.host
+ login justlogin
+
+machine test.host
+login user2
+password pwd2
+
+machine oneline login user3 password pwd3
+
+machine ignore.host macdef ignore
+ login nobody
+ password nothing
+
+machine hasmacro.too macdef ignore-next-lines login user4 password pwd4
+ login nobody
+ password nothing
+
+default
+login anonymous
+password gopher@golang.org
+
+machine after.default
+login oops
+password too-late-in-file
+`
+
+func TestParseNetrc(t *testing.T) {
+ lines := parseNetrc(testNetrc)
+ want := []netrcLine{
+ {"api.github.com", "user", "pwd"},
+ {"test.host", "user2", "pwd2"},
+ {"oneline", "user3", "pwd3"},
+ {"hasmacro.too", "user4", "pwd4"},
+ }
+
+ if !reflect.DeepEqual(lines, want) {
+ t.Errorf("parseNetrc:\nhave %q\nwant %q", lines, want)
+ }
+}
diff --git a/src/cmd/go/internal/base/base.go b/src/cmd/go/internal/base/base.go
new file mode 100644
index 0000000..2171d13
--- /dev/null
+++ b/src/cmd/go/internal/base/base.go
@@ -0,0 +1,223 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package base defines shared basic pieces of the go command,
+// in particular logging and the Command structure.
+package base
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "reflect"
+ "strings"
+ "sync"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/str"
+)
+
+// A Command is an implementation of a go command
+// like go build or go fix.
+type Command struct {
+ // Run runs the command.
+ // The args are the arguments after the command name.
+ Run func(ctx context.Context, cmd *Command, args []string)
+
+ // UsageLine is the one-line usage message.
+ // The words between "go" and the first flag or argument in the line are taken to be the command name.
+ UsageLine string
+
+ // Short is the short description shown in the 'go help' output.
+ Short string
+
+ // Long is the long message shown in the 'go help <this-command>' output.
+ Long string
+
+ // Flag is a set of flags specific to this command.
+ Flag flag.FlagSet
+
+ // CustomFlags indicates that the command will do its own
+ // flag parsing.
+ CustomFlags bool
+
+ // Commands lists the available commands and help topics.
+ // The order here is the order in which they are printed by 'go help'.
+ // Note that subcommands are in general best avoided.
+ Commands []*Command
+}
+
+var Go = &Command{
+ UsageLine: "go",
+ Long: `Go is a tool for managing Go source code.`,
+ // Commands initialized in package main
+}
+
+// Lookup returns the subcommand with the given name, if any.
+// Otherwise it returns nil.
+//
+// Lookup ignores subcommands that have len(c.Commands) == 0 and c.Run == nil.
+// Such subcommands are only for use as arguments to "help".
+func (c *Command) Lookup(name string) *Command {
+ for _, sub := range c.Commands {
+ if sub.Name() == name && (len(c.Commands) > 0 || c.Runnable()) {
+ return sub
+ }
+ }
+ return nil
+}
+
+// hasFlag reports whether a command or any of its subcommands contain the given
+// flag.
+func hasFlag(c *Command, name string) bool {
+ if f := c.Flag.Lookup(name); f != nil {
+ return true
+ }
+ for _, sub := range c.Commands {
+ if hasFlag(sub, name) {
+ return true
+ }
+ }
+ return false
+}
+
+// LongName returns the command's long name: all the words in the usage line between "go" and a flag or argument.
+func (c *Command) LongName() string {
+ name := c.UsageLine
+ if i := strings.Index(name, " ["); i >= 0 {
+ name = name[:i]
+ }
+ if name == "go" {
+ return ""
+ }
+ return strings.TrimPrefix(name, "go ")
+}
+
+// Name returns the command's short name: the last word in the usage line before a flag or argument.
+func (c *Command) Name() string {
+ name := c.LongName()
+ if i := strings.LastIndex(name, " "); i >= 0 {
+ name = name[i+1:]
+ }
+ return name
+}
+
+func (c *Command) Usage() {
+ fmt.Fprintf(os.Stderr, "usage: %s\n", c.UsageLine)
+ fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", c.LongName())
+ SetExitStatus(2)
+ Exit()
+}
+
+// Runnable reports whether the command can be run; otherwise
+// it is a documentation pseudo-command such as importpath.
+func (c *Command) Runnable() bool {
+ return c.Run != nil
+}
+
+var atExitFuncs []func()
+
+func AtExit(f func()) {
+ atExitFuncs = append(atExitFuncs, f)
+}
+
+func Exit() {
+ for _, f := range atExitFuncs {
+ f()
+ }
+ os.Exit(exitStatus)
+}
+
+func Fatalf(format string, args ...any) {
+ Errorf(format, args...)
+ Exit()
+}
+
+func Errorf(format string, args ...any) {
+ log.Printf(format, args...)
+ SetExitStatus(1)
+}
+
+func ExitIfErrors() {
+ if exitStatus != 0 {
+ Exit()
+ }
+}
+
+func Error(err error) {
+ // We use errors.Join to return multiple errors from various routines.
+ // If we receive multiple errors joined with a basic errors.Join,
+ // handle each one separately so that they all have the leading "go: " prefix.
+ // A plain interface check is not good enough because there might be
+ // other kinds of structured errors that are logically one unit and that
+ // add other context: only handling the wrapped errors would lose
+ // that context.
+ if err != nil && reflect.TypeOf(err).String() == "*errors.joinError" {
+ for _, e := range err.(interface{ Unwrap() []error }).Unwrap() {
+ Error(e)
+ }
+ return
+ }
+ Errorf("go: %v", err)
+}
+
+func Fatal(err error) {
+ Error(err)
+ Exit()
+}
+
+var exitStatus = 0
+var exitMu sync.Mutex
+
+func SetExitStatus(n int) {
+ exitMu.Lock()
+ if exitStatus < n {
+ exitStatus = n
+ }
+ exitMu.Unlock()
+}
+
+func GetExitStatus() int {
+ return exitStatus
+}
+
+// Run runs the command, with stdout and stderr
+// connected to the go command's own stdout and stderr.
+// If the command fails, Run reports the error using Errorf.
+func Run(cmdargs ...any) {
+ cmdline := str.StringList(cmdargs...)
+ if cfg.BuildN || cfg.BuildX {
+ fmt.Printf("%s\n", strings.Join(cmdline, " "))
+ if cfg.BuildN {
+ return
+ }
+ }
+
+ cmd := exec.Command(cmdline[0], cmdline[1:]...)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ Errorf("%v", err)
+ }
+}
+
+// RunStdin is like run but connects Stdin.
+func RunStdin(cmdline []string) {
+ cmd := exec.Command(cmdline[0], cmdline[1:]...)
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ cmd.Env = cfg.OrigEnv
+ StartSigHandlers()
+ if err := cmd.Run(); err != nil {
+ Errorf("%v", err)
+ }
+}
+
+// Usage is the usage-reporting function, filled in by package main
+// but here for reference by other packages.
+var Usage func()
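
As an illustration of how UsageLine drives LongName and Name, a small self-contained sketch of the same string handling; the usage line shown is a hypothetical example.

package main

import (
	"fmt"
	"strings"
)

// longName and name reproduce the slicing done by Command.LongName
// and Command.Name above.
func longName(usageLine string) string {
	name := usageLine
	if i := strings.Index(name, " ["); i >= 0 {
		name = name[:i]
	}
	if name == "go" {
		return ""
	}
	return strings.TrimPrefix(name, "go ")
}

func name(usageLine string) string {
	n := longName(usageLine)
	if i := strings.LastIndex(n, " "); i >= 0 {
		n = n[i+1:]
	}
	return n
}

func main() {
	usage := "go mod tidy [-e] [-v] [-x]"
	fmt.Println(longName(usage)) // "mod tidy"
	fmt.Println(name(usage))     // "tidy"
}
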
diff --git a/src/cmd/go/internal/base/env.go b/src/cmd/go/internal/base/env.go
new file mode 100644
index 0000000..20ae06d
--- /dev/null
+++ b/src/cmd/go/internal/base/env.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "cmd/go/internal/cfg"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// AppendPWD returns the result of appending PWD=dir to the environment base.
+//
+// The resulting environment makes os.Getwd more efficient for a subprocess
+// running in dir, and also improves the accuracy of paths relative to dir
+// if one or more elements of dir is a symlink.
+func AppendPWD(base []string, dir string) []string {
+ // POSIX requires PWD to be absolute.
+ // Internally we only use absolute paths, so dir should already be absolute.
+ if !filepath.IsAbs(dir) {
+ panic(fmt.Sprintf("AppendPWD with relative path %q", dir))
+ }
+ return append(base, "PWD="+dir)
+}
+
+// AppendPATH returns the result of appending PATH=$GOROOT/bin:$PATH
+// (or the platform equivalent) to the environment base.
+func AppendPATH(base []string) []string {
+ if cfg.GOROOTbin == "" {
+ return base
+ }
+
+ pathVar := "PATH"
+ if runtime.GOOS == "plan9" {
+ pathVar = "path"
+ }
+
+ path := os.Getenv(pathVar)
+ if path == "" {
+ return append(base, pathVar+"="+cfg.GOROOTbin)
+ }
+ return append(base, pathVar+"="+cfg.GOROOTbin+string(os.PathListSeparator)+path)
+}
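
A rough sketch of the environment these helpers produce, with a hypothetical GOROOT value standing in for cfg.GOROOTbin; the real code also handles plan9's lowercase "path" variable.

package main

import (
	"fmt"
	"os"
)

func main() {
	gorootBin := "/usr/local/go/bin" // hypothetical stand-in for cfg.GOROOTbin
	env := []string{"HOME=/home/gopher"}

	// Like AppendPWD: record the (absolute) working directory for a subprocess.
	env = append(env, "PWD="+"/home/gopher/src/demo")

	// Like AppendPATH: put $GOROOT/bin ahead of the existing PATH.
	path := os.Getenv("PATH")
	if path == "" {
		env = append(env, "PATH="+gorootBin)
	} else {
		env = append(env, "PATH="+gorootBin+string(os.PathListSeparator)+path)
	}

	for _, kv := range env {
		fmt.Println(kv)
	}
}
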
diff --git a/src/cmd/go/internal/base/flag.go b/src/cmd/go/internal/base/flag.go
new file mode 100644
index 0000000..74e1275
--- /dev/null
+++ b/src/cmd/go/internal/base/flag.go
@@ -0,0 +1,85 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "flag"
+ "fmt"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/internal/quoted"
+)
+
+// A StringsFlag is a command-line flag that interprets its argument
+// as a space-separated list of possibly-quoted strings.
+type StringsFlag []string
+
+func (v *StringsFlag) Set(s string) error {
+ var err error
+ *v, err = quoted.Split(s)
+ if *v == nil {
+ *v = []string{}
+ }
+ return err
+}
+
+func (v *StringsFlag) String() string {
+ return "<StringsFlag>"
+}
+
+// explicitStringFlag is like a regular string flag, but it also tracks whether
+// the string was set explicitly to a non-empty value.
+type explicitStringFlag struct {
+ value *string
+ explicit *bool
+}
+
+func (f explicitStringFlag) String() string {
+ if f.value == nil {
+ return ""
+ }
+ return *f.value
+}
+
+func (f explicitStringFlag) Set(v string) error {
+ *f.value = v
+ if v != "" {
+ *f.explicit = true
+ }
+ return nil
+}
+
+// AddBuildFlagsNX adds the -n and -x build flags to the flag set.
+func AddBuildFlagsNX(flags *flag.FlagSet) {
+ flags.BoolVar(&cfg.BuildN, "n", false, "")
+ flags.BoolVar(&cfg.BuildX, "x", false, "")
+}
+
+// AddChdirFlag adds the -C flag to the flag set.
+func AddChdirFlag(flags *flag.FlagSet) {
+ // The usage message is never printed, but it's used in chdir_test.go
+ // to identify that the -C flag is from AddChdirFlag.
+ flags.Func("C", "AddChdirFlag", ChdirFlag)
+}
+
+// AddModFlag adds the -mod build flag to the flag set.
+func AddModFlag(flags *flag.FlagSet) {
+ flags.Var(explicitStringFlag{value: &cfg.BuildMod, explicit: &cfg.BuildModExplicit}, "mod", "")
+}
+
+// AddModCommonFlags adds the module-related flags common to build commands
+// and 'go mod' subcommands.
+func AddModCommonFlags(flags *flag.FlagSet) {
+ flags.BoolVar(&cfg.ModCacheRW, "modcacherw", false, "")
+ flags.StringVar(&cfg.ModFile, "modfile", "", "")
+ flags.StringVar(&fsys.OverlayFile, "overlay", "", "")
+}
+
+func ChdirFlag(s string) error {
+ // main handles -C by removing it from the command line.
+ // If we see one during flag parsing, that's an error.
+ return fmt.Errorf("-C flag must be first flag on command line")
+}
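
StringsFlag and explicitStringFlag both satisfy the standard flag.Value interface. A simplified standalone sketch of how such a value is registered and parsed; it splits on spaces instead of using cmd/internal/quoted, and the flag name is made up.

package main

import (
	"flag"
	"fmt"
	"strings"
)

// stringsFlag is a simplified stand-in for StringsFlag.
type stringsFlag []string

func (v *stringsFlag) Set(s string) error {
	*v = strings.Fields(s)
	if *v == nil {
		*v = []string{}
	}
	return nil
}

func (v *stringsFlag) String() string { return "<stringsFlag>" }

func main() {
	var tags stringsFlag
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Var(&tags, "tags", "space-separated list")

	if err := fs.Parse([]string{"-tags", "linux amd64 netgo"}); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", []string(tags)) // ["linux" "amd64" "netgo"]
}
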
diff --git a/src/cmd/go/internal/base/goflags.go b/src/cmd/go/internal/base/goflags.go
new file mode 100644
index 0000000..eced2c5
--- /dev/null
+++ b/src/cmd/go/internal/base/goflags.go
@@ -0,0 +1,162 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "flag"
+ "fmt"
+ "runtime"
+ "strings"
+
+ "cmd/go/internal/cfg"
+ "cmd/internal/quoted"
+)
+
+var goflags []string // cached $GOFLAGS list; can be -x or --x form
+
+// GOFLAGS returns the flags from $GOFLAGS.
+// The list can be assumed to contain one string per flag,
+// with each string either beginning with -name or --name.
+func GOFLAGS() []string {
+ InitGOFLAGS()
+ return goflags
+}
+
+// InitGOFLAGS initializes the goflags list from $GOFLAGS.
+// If goflags is already initialized, it does nothing.
+func InitGOFLAGS() {
+ if goflags != nil { // already initialized
+ return
+ }
+
+ // Ignore bad flag in go env and go bug, because
+ // they are what people reach for when debugging
+ // a problem, and maybe they're debugging GOFLAGS.
+ // (Both will show the GOFLAGS setting if allowed to succeed.)
+ hideErrors := cfg.CmdName == "env" || cfg.CmdName == "bug"
+
+ var err error
+ goflags, err = quoted.Split(cfg.Getenv("GOFLAGS"))
+ if err != nil {
+ if hideErrors {
+ return
+ }
+ Fatalf("go: parsing $GOFLAGS: %v", err)
+ }
+
+ if len(goflags) == 0 {
+ // nothing to do; avoid work on later InitGOFLAGS call
+ goflags = []string{}
+ return
+ }
+
+ // Each of the words returned by quoted.Split must be its own flag.
+ // To set flag arguments use -x=value instead of -x value.
+ // For boolean flags, -x is fine instead of -x=true.
+ for _, f := range goflags {
+ // Check that every flag looks like -x --x -x=value or --x=value.
+ if !strings.HasPrefix(f, "-") || f == "-" || f == "--" || strings.HasPrefix(f, "---") || strings.HasPrefix(f, "-=") || strings.HasPrefix(f, "--=") {
+ if hideErrors {
+ continue
+ }
+ Fatalf("go: parsing $GOFLAGS: non-flag %q", f)
+ }
+
+ name := f[1:]
+ if name[0] == '-' {
+ name = name[1:]
+ }
+ if i := strings.Index(name, "="); i >= 0 {
+ name = name[:i]
+ }
+ if !hasFlag(Go, name) {
+ if hideErrors {
+ continue
+ }
+ Fatalf("go: parsing $GOFLAGS: unknown flag -%s", name)
+ }
+ }
+}
+
+// boolFlag is the optional interface for flag.Value known to the flag package.
+// (It is not clear why package flag does not export this interface.)
+type boolFlag interface {
+ flag.Value
+ IsBoolFlag() bool
+}
+
+// SetFromGOFLAGS sets the flags in the given flag set using settings in $GOFLAGS.
+func SetFromGOFLAGS(flags *flag.FlagSet) {
+ InitGOFLAGS()
+
+ // This loop is similar to flag.Parse except that it ignores
+ // unknown flags found in goflags, so that setting, say, GOFLAGS=-ldflags=-w
+ // does not break commands that don't have a -ldflags.
+ // It also adjusts the output to be clear that the reported problem is from $GOFLAGS.
+ where := "$GOFLAGS"
+ if runtime.GOOS == "windows" {
+ where = "%GOFLAGS%"
+ }
+ for _, goflag := range goflags {
+ name, value, hasValue := goflag, "", false
+ // Ignore invalid flags like '=' or '=value'.
+ // If it was not reported in InitGOFLAGS, we don't want to report it here either.
+ if i := strings.Index(goflag, "="); i == 0 {
+ continue
+ } else if i > 0 {
+ name, value, hasValue = goflag[:i], goflag[i+1:], true
+ }
+ if strings.HasPrefix(name, "--") {
+ name = name[1:]
+ }
+ f := flags.Lookup(name[1:])
+ if f == nil {
+ continue
+ }
+
+ // Use flags.Set consistently (instead of f.Value.Set) so that a subsequent
+ // call to flags.Visit will correctly visit the flags that have been set.
+
+ if fb, ok := f.Value.(boolFlag); ok && fb.IsBoolFlag() {
+ if hasValue {
+ if err := flags.Set(f.Name, value); err != nil {
+ fmt.Fprintf(flags.Output(), "go: invalid boolean value %q for flag %s (from %s): %v\n", value, name, where, err)
+ flags.Usage()
+ }
+ } else {
+ if err := flags.Set(f.Name, "true"); err != nil {
+ fmt.Fprintf(flags.Output(), "go: invalid boolean flag %s (from %s): %v\n", name, where, err)
+ flags.Usage()
+ }
+ }
+ } else {
+ if !hasValue {
+ fmt.Fprintf(flags.Output(), "go: flag needs an argument: %s (from %s)\n", name, where)
+ flags.Usage()
+ }
+ if err := flags.Set(f.Name, value); err != nil {
+ fmt.Fprintf(flags.Output(), "go: invalid value %q for flag %s (from %s): %v\n", value, name, where, err)
+ flags.Usage()
+ }
+ }
+ }
+}
+
+// InGOFLAGS returns whether GOFLAGS contains the given flag, such as "-mod".
+func InGOFLAGS(flag string) bool {
+ for _, goflag := range GOFLAGS() {
+ name := goflag
+ if strings.HasPrefix(name, "--") {
+ name = name[1:]
+ }
+ if i := strings.Index(name, "="); i >= 0 {
+ name = name[:i]
+ }
+ if name == flag {
+ return true
+ }
+ }
+ return false
+}
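
A condensed sketch of the $GOFLAGS handling above: split the variable into words, then apply each -name=value word to a FlagSet, silently skipping flags the command does not define. The flag names and values here are hypothetical.

package main

import (
	"flag"
	"fmt"
	"strings"
)

func main() {
	goflags := strings.Fields("-count=3 -ldflags=-w") // stand-in for $GOFLAGS

	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	count := fs.Int("count", 1, "")
	// This FlagSet defines no -ldflags, so that word is skipped below.

	for _, f := range goflags {
		name, value, hasValue := f, "", false
		if i := strings.Index(f, "="); i > 0 {
			name, value, hasValue = f[:i], f[i+1:], true
		}
		name = strings.TrimLeft(name, "-")
		known := fs.Lookup(name)
		if known == nil {
			continue // unknown flag in GOFLAGS is not an error
		}
		if hasValue {
			// Use fs.Set (not known.Value.Set) so a later fs.Visit sees it.
			if err := fs.Set(known.Name, value); err != nil {
				panic(err)
			}
		}
	}
	fmt.Println(*count) // 3
}
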
diff --git a/src/cmd/go/internal/base/limit.go b/src/cmd/go/internal/base/limit.go
new file mode 100644
index 0000000..b4160bd
--- /dev/null
+++ b/src/cmd/go/internal/base/limit.go
@@ -0,0 +1,84 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "fmt"
+ "internal/godebug"
+ "runtime"
+ "strconv"
+ "sync"
+)
+
+var NetLimitGodebug = godebug.New("#cmdgonetlimit")
+
+// NetLimit returns the limit on concurrent network operations
+// configured by GODEBUG=cmdgonetlimit, if any.
+//
+// A limit of 0 (indicated by 0, true) means that network operations should not
+// be allowed.
+func NetLimit() (int, bool) {
+ netLimitOnce.Do(func() {
+ s := NetLimitGodebug.Value()
+ if s == "" {
+ return
+ }
+
+ n, err := strconv.Atoi(s)
+ if err != nil {
+ Fatalf("invalid %s: %v", NetLimitGodebug.Name(), err)
+ }
+ if n < 0 {
+ // Treat negative values as unlimited.
+ return
+ }
+ netLimitSem = make(chan struct{}, n)
+ })
+
+ return cap(netLimitSem), netLimitSem != nil
+}
+
+// AcquireNet acquires a semaphore token for a network operation.
+func AcquireNet() (release func(), err error) {
+ hasToken := false
+ if n, ok := NetLimit(); ok {
+ if n == 0 {
+ return nil, fmt.Errorf("network disabled by %v=%v", NetLimitGodebug.Name(), NetLimitGodebug.Value())
+ }
+ netLimitSem <- struct{}{}
+ hasToken = true
+ }
+
+ checker := new(netTokenChecker)
+ runtime.SetFinalizer(checker, (*netTokenChecker).panicUnreleased)
+
+ return func() {
+ if checker.released {
+ panic("internal error: net token released twice")
+ }
+ checker.released = true
+ if hasToken {
+ <-netLimitSem
+ }
+ runtime.SetFinalizer(checker, nil)
+ }, nil
+}
+
+var (
+ netLimitOnce sync.Once
+ netLimitSem chan struct{}
+)
+
+type netTokenChecker struct {
+ released bool
+ // We want to use a finalizer to check that all acquired tokens are returned,
+ // so we arbitrarily pad the tokens with a string to defeat the runtime's
+ // “tiny allocator”.
+ unusedAvoidTinyAllocator string
+}
+
+func (c *netTokenChecker) panicUnreleased() {
+ panic("internal error: net token acquired but not released")
+}
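
The limiter above is a buffered channel used as a counting semaphore, with a finalizer to catch leaked tokens. A minimal sketch of the channel part alone, with an arbitrarily chosen limit:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const limit = 2
	sem := make(chan struct{}, limit) // plays the role of netLimitSem

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire: blocks once 'limit' tokens are held
			defer func() { <-sem }() // release
			fmt.Println("network op", i)
		}(i)
	}
	wg.Wait()
}
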
diff --git a/src/cmd/go/internal/base/path.go b/src/cmd/go/internal/base/path.go
new file mode 100644
index 0000000..64f213b
--- /dev/null
+++ b/src/cmd/go/internal/base/path.go
@@ -0,0 +1,79 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+var cwd string
+var cwdOnce sync.Once
+
+// UncachedCwd returns the current working directory.
+// Most callers should use Cwd, which caches the result for future use.
+// UncachedCwd is appropriate to call early in program startup before flag parsing,
+// because the -C flag may change the current directory.
+func UncachedCwd() string {
+ wd, err := os.Getwd()
+ if err != nil {
+ Fatalf("cannot determine current directory: %v", err)
+ }
+ return wd
+}
+
+// Cwd returns the current working directory at the time of the first call.
+func Cwd() string {
+ cwdOnce.Do(func() {
+ cwd = UncachedCwd()
+ })
+ return cwd
+}
+
+// ShortPath returns an absolute or relative name for path, whatever is shorter.
+func ShortPath(path string) string {
+ if rel, err := filepath.Rel(Cwd(), path); err == nil && len(rel) < len(path) {
+ return rel
+ }
+ return path
+}
+
+// RelPaths returns a copy of paths with absolute paths
+// made relative to the current directory if they would be shorter.
+func RelPaths(paths []string) []string {
+ var out []string
+ for _, p := range paths {
+ rel, err := filepath.Rel(Cwd(), p)
+ if err == nil && len(rel) < len(p) {
+ p = rel
+ }
+ out = append(out, p)
+ }
+ return out
+}
+
+// IsTestFile reports whether the source file is a set of tests and should therefore
+// be excluded from coverage analysis.
+func IsTestFile(file string) bool {
+ // We don't cover tests, only the code they test.
+ return strings.HasSuffix(file, "_test.go")
+}
+
+// IsNull reports whether the path is a common name for the null device.
+// It returns true for /dev/null on Unix, or NUL (case-insensitive) on Windows.
+func IsNull(path string) bool {
+ if path == os.DevNull {
+ return true
+ }
+ if runtime.GOOS == "windows" {
+ if strings.EqualFold(path, "NUL") {
+ return true
+ }
+ }
+ return false
+}
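
A quick illustration of the filepath.Rel comparison that ShortPath relies on, using made-up directories and passing the working directory as a parameter instead of calling Cwd.

package main

import (
	"fmt"
	"path/filepath"
)

// shortPath mirrors ShortPath above, but takes the current directory
// explicitly.
func shortPath(cwd, path string) string {
	if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {
		return rel
	}
	return path
}

func main() {
	cwd := "/home/gopher/project"
	// Relative form is shorter, so it is returned.
	fmt.Println(shortPath(cwd, "/home/gopher/project/internal/app/main.go"))
	// "../../../etc/passwd" is longer than the absolute path, so it stays absolute.
	fmt.Println(shortPath(cwd, "/etc/passwd"))
}
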
diff --git a/src/cmd/go/internal/base/signal.go b/src/cmd/go/internal/base/signal.go
new file mode 100644
index 0000000..05befcf
--- /dev/null
+++ b/src/cmd/go/internal/base/signal.go
@@ -0,0 +1,31 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "os"
+ "os/signal"
+ "sync"
+)
+
+// Interrupted is closed when the go command receives an interrupt signal.
+var Interrupted = make(chan struct{})
+
+// processSignals sets up the signal handler.
+func processSignals() {
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, signalsToIgnore...)
+ go func() {
+ <-sig
+ close(Interrupted)
+ }()
+}
+
+var onceProcessSignals sync.Once
+
+// StartSigHandlers starts the signal handlers.
+func StartSigHandlers() {
+ onceProcessSignals.Do(processSignals)
+}
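
Closing Interrupted acts as a broadcast: every goroutine blocked receiving from the channel unblocks once it is closed. A standalone sketch of the pattern, using a plain close call in place of a real signal:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	interrupted := make(chan struct{}) // plays the role of base.Interrupted

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			<-interrupted // receiving from a closed channel returns immediately
			fmt.Println("worker", i, "shutting down")
		}(i)
	}

	time.Sleep(10 * time.Millisecond) // let the workers start
	close(interrupted)                // stands in for the signal handler
	wg.Wait()
}
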
diff --git a/src/cmd/go/internal/base/signal_notunix.go b/src/cmd/go/internal/base/signal_notunix.go
new file mode 100644
index 0000000..682705f
--- /dev/null
+++ b/src/cmd/go/internal/base/signal_notunix.go
@@ -0,0 +1,17 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9 || windows
+
+package base
+
+import (
+ "os"
+)
+
+var signalsToIgnore = []os.Signal{os.Interrupt}
+
+// SignalTrace is the signal to send to make a Go program
+// crash with a stack trace (no such signal in this case).
+var SignalTrace os.Signal = nil
diff --git a/src/cmd/go/internal/base/signal_unix.go b/src/cmd/go/internal/base/signal_unix.go
new file mode 100644
index 0000000..0905971
--- /dev/null
+++ b/src/cmd/go/internal/base/signal_unix.go
@@ -0,0 +1,18 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || js || wasip1
+
+package base
+
+import (
+ "os"
+ "syscall"
+)
+
+var signalsToIgnore = []os.Signal{os.Interrupt, syscall.SIGQUIT}
+
+// SignalTrace is the signal to send to make a Go program
+// crash with a stack trace.
+var SignalTrace os.Signal = syscall.SIGQUIT
diff --git a/src/cmd/go/internal/base/tool.go b/src/cmd/go/internal/base/tool.go
new file mode 100644
index 0000000..ab623da
--- /dev/null
+++ b/src/cmd/go/internal/base/tool.go
@@ -0,0 +1,41 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "fmt"
+ "go/build"
+ "os"
+ "path/filepath"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/par"
+)
+
+// Tool returns the path to the named tool (for example, "vet").
+// If the tool cannot be found, Tool exits the process.
+func Tool(toolName string) string {
+ toolPath, err := ToolPath(toolName)
+ if err != nil && len(cfg.BuildToolexec) == 0 {
+ // Give a nice message if there is no tool with that name.
+ fmt.Fprintf(os.Stderr, "go: no such tool %q\n", toolName)
+ SetExitStatus(2)
+ Exit()
+ }
+ return toolPath
+}
+
+// ToolPath returns the path at which we expect to find the named tool
+// (for example, "vet"), and the error (if any) from statting that path.
+func ToolPath(toolName string) (string, error) {
+ toolPath := filepath.Join(build.ToolDir, toolName) + cfg.ToolExeSuffix()
+ err := toolStatCache.Do(toolPath, func() error {
+ _, err := os.Stat(toolPath)
+ return err
+ })
+ return toolPath, err
+}
+
+var toolStatCache par.Cache[string, error]
diff --git a/src/cmd/go/internal/bug/bug.go b/src/cmd/go/internal/bug/bug.go
new file mode 100644
index 0000000..ed18136
--- /dev/null
+++ b/src/cmd/go/internal/bug/bug.go
@@ -0,0 +1,224 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bug implements the “go bug” command.
+package bug
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ urlpkg "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/envcmd"
+ "cmd/go/internal/web"
+ "cmd/go/internal/work"
+)
+
+var CmdBug = &base.Command{
+ Run: runBug,
+ UsageLine: "go bug",
+ Short: "start a bug report",
+ Long: `
+Bug opens the default browser and starts a new bug report.
+The report includes useful system information.
+ `,
+}
+
+func init() {
+ CmdBug.Flag.BoolVar(&cfg.BuildV, "v", false, "")
+ base.AddChdirFlag(&CmdBug.Flag)
+}
+
+func runBug(ctx context.Context, cmd *base.Command, args []string) {
+ if len(args) > 0 {
+ base.Fatalf("go: bug takes no arguments")
+ }
+ work.BuildInit()
+
+ var buf strings.Builder
+ buf.WriteString(bugHeader)
+ printGoVersion(&buf)
+ buf.WriteString("### Does this issue reproduce with the latest release?\n\n\n")
+ printEnvDetails(&buf)
+ buf.WriteString(bugFooter)
+
+ body := buf.String()
+ url := "https://github.com/golang/go/issues/new?body=" + urlpkg.QueryEscape(body)
+ if !web.OpenBrowser(url) {
+ fmt.Print("Please file a new issue at golang.org/issue/new using this template:\n\n")
+ fmt.Print(body)
+ }
+}
+
+const bugHeader = `<!-- Please answer these questions before submitting your issue. Thanks! -->
+
+`
+const bugFooter = `### What did you do?
+
+<!--
+If possible, provide a recipe for reproducing the error.
+A complete runnable program is good.
+A link on play.golang.org is best.
+-->
+
+
+
+### What did you expect to see?
+
+
+
+### What did you see instead?
+
+`
+
+func printGoVersion(w io.Writer) {
+ fmt.Fprintf(w, "### What version of Go are you using (`go version`)?\n\n")
+ fmt.Fprintf(w, "<pre>\n")
+ fmt.Fprintf(w, "$ go version\n")
+ fmt.Fprintf(w, "go version %s %s/%s\n", runtime.Version(), runtime.GOOS, runtime.GOARCH)
+ fmt.Fprintf(w, "</pre>\n")
+ fmt.Fprintf(w, "\n")
+}
+
+func printEnvDetails(w io.Writer) {
+ fmt.Fprintf(w, "### What operating system and processor architecture are you using (`go env`)?\n\n")
+ fmt.Fprintf(w, "<details><summary><code>go env</code> Output</summary><br><pre>\n")
+ fmt.Fprintf(w, "$ go env\n")
+ printGoEnv(w)
+ printGoDetails(w)
+ printOSDetails(w)
+ printCDetails(w)
+ fmt.Fprintf(w, "</pre></details>\n\n")
+}
+
+func printGoEnv(w io.Writer) {
+ env := envcmd.MkEnv()
+ env = append(env, envcmd.ExtraEnvVars()...)
+ env = append(env, envcmd.ExtraEnvVarsCostly()...)
+ envcmd.PrintEnv(w, env)
+}
+
+func printGoDetails(w io.Writer) {
+ gocmd := filepath.Join(runtime.GOROOT(), "bin/go")
+ printCmdOut(w, "GOROOT/bin/go version: ", gocmd, "version")
+ printCmdOut(w, "GOROOT/bin/go tool compile -V: ", gocmd, "tool", "compile", "-V")
+}
+
+func printOSDetails(w io.Writer) {
+ switch runtime.GOOS {
+ case "darwin", "ios":
+ printCmdOut(w, "uname -v: ", "uname", "-v")
+ printCmdOut(w, "", "sw_vers")
+ case "linux":
+ printCmdOut(w, "uname -sr: ", "uname", "-sr")
+ printCmdOut(w, "", "lsb_release", "-a")
+ printGlibcVersion(w)
+ case "openbsd", "netbsd", "freebsd", "dragonfly":
+ printCmdOut(w, "uname -v: ", "uname", "-v")
+ case "illumos", "solaris":
+ // Be sure to use the OS-supplied uname, in "/usr/bin":
+ printCmdOut(w, "uname -srv: ", "/usr/bin/uname", "-srv")
+ out, err := os.ReadFile("/etc/release")
+ if err == nil {
+ fmt.Fprintf(w, "/etc/release: %s\n", out)
+ } else {
+ if cfg.BuildV {
+ fmt.Printf("failed to read /etc/release: %v\n", err)
+ }
+ }
+ }
+}
+
+func printCDetails(w io.Writer) {
+ printCmdOut(w, "lldb --version: ", "lldb", "--version")
+ cmd := exec.Command("gdb", "--version")
+ out, err := cmd.Output()
+ if err == nil {
+ // There's apparently no combination of command line flags
+ // to get gdb to spit out its version without the license and warranty.
+ // Print up to the first newline.
+ fmt.Fprintf(w, "gdb --version: %s\n", firstLine(out))
+ } else {
+ if cfg.BuildV {
+ fmt.Printf("failed to run gdb --version: %v\n", err)
+ }
+ }
+}
+
+// printCmdOut prints the output of running the given command.
+// It ignores failures; 'go bug' is best effort.
+func printCmdOut(w io.Writer, prefix, path string, args ...string) {
+ cmd := exec.Command(path, args...)
+ out, err := cmd.Output()
+ if err != nil {
+ if cfg.BuildV {
+ fmt.Printf("%s %s: %v\n", path, strings.Join(args, " "), err)
+ }
+ return
+ }
+ fmt.Fprintf(w, "%s%s\n", prefix, bytes.TrimSpace(out))
+}
+
+// firstLine returns the first line of a given byte slice.
+func firstLine(buf []byte) []byte {
+ idx := bytes.IndexByte(buf, '\n')
+ if idx > 0 {
+ buf = buf[:idx]
+ }
+ return bytes.TrimSpace(buf)
+}
+
+// printGlibcVersion prints information about the glibc version.
+// It ignores failures.
+func printGlibcVersion(w io.Writer) {
+ tempdir := os.TempDir()
+ if tempdir == "" {
+ return
+ }
+ src := []byte(`int main() {}`)
+ srcfile := filepath.Join(tempdir, "go-bug.c")
+ outfile := filepath.Join(tempdir, "go-bug")
+ err := os.WriteFile(srcfile, src, 0644)
+ if err != nil {
+ return
+ }
+ defer os.Remove(srcfile)
+ cmd := exec.Command("gcc", "-o", outfile, srcfile)
+ if _, err = cmd.CombinedOutput(); err != nil {
+ return
+ }
+ defer os.Remove(outfile)
+
+ cmd = exec.Command("ldd", outfile)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return
+ }
+ re := regexp.MustCompile(`libc\.so[^ ]* => ([^ ]+)`)
+ m := re.FindStringSubmatch(string(out))
+ if m == nil {
+ return
+ }
+ cmd = exec.Command(m[1])
+ out, err = cmd.Output()
+ if err != nil {
+ return
+ }
+ fmt.Fprintf(w, "%s: %s\n", m[1], firstLine(out))
+
+ // print another line (the one containing version string) in case of musl libc
+ if idx := bytes.IndexByte(out, '\n'); bytes.Contains(out, []byte("musl")) && idx > -1 {
+ fmt.Fprintf(w, "%s\n", firstLine(out[idx+1:]))
+ }
+}
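
printCmdOut and firstLine together implement "run a command, keep only the first line of its output, and stay quiet on failure". A minimal standalone version; the command chosen is arbitrary and may not exist on every system.

package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// firstLine trims the output to its first line, as in bug.go above.
func firstLine(buf []byte) []byte {
	if idx := bytes.IndexByte(buf, '\n'); idx > 0 {
		buf = buf[:idx]
	}
	return bytes.TrimSpace(buf)
}

func main() {
	out, err := exec.Command("uname", "-v").Output()
	if err != nil {
		return // best effort: say nothing if the tool is missing
	}
	fmt.Printf("uname -v: %s\n", firstLine(out))
}
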
diff --git a/src/cmd/go/internal/cache/cache.go b/src/cmd/go/internal/cache/cache.go
new file mode 100644
index 0000000..4a82d27
--- /dev/null
+++ b/src/cmd/go/internal/cache/cache.go
@@ -0,0 +1,627 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cache implements a build artifact cache.
+package cache
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "internal/godebug"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/mmap"
+)
+
+// An ActionID is a cache action key, the hash of a complete description of a
+// repeatable computation (command line, environment variables,
+// input file contents, executable contents).
+type ActionID [HashSize]byte
+
+// An OutputID is a cache output key, the hash of an output of a computation.
+type OutputID [HashSize]byte
+
+// Cache is the interface as used by the cmd/go.
+type Cache interface {
+ // Get returns the cache entry for the provided ActionID.
+ // On miss, the error type should be of type *entryNotFoundError.
+ //
+ // After a successful call to Get, OutputFile(Entry.OutputID) must
+ // exist on disk until Close is called (at the end of the process).
+ Get(ActionID) (Entry, error)
+
+ // Put adds an item to the cache.
+ //
+ // The seeker is only used to seek to the beginning. After a call to Put,
+ // the seek position is not guaranteed to be in any particular state.
+ //
+ // As a special case, if the ReadSeeker is of type noVerifyReadSeeker,
+ // the verification from GODEBUG=goverifycache=1 is skipped.
+ //
+ // After a successful call to Get, OutputFile(Entry.OutputID) must
+ // exist on disk until Close is called (at the end of the process).
+ Put(ActionID, io.ReadSeeker) (_ OutputID, size int64, _ error)
+
+ // Close is called at the end of the go process. Implementations can do
+ // cache cleanup work at this phase, or wait for and report any errors from
+ // background cleanup work started earlier. Cache trimming in one process
+ // should not cause the invariants of this interface to be violated in
+ // another process. Namely, a cache trim from one process should not delete
+ // an OutputID from disk that was recently Get or Put from another process.
+ // As a rule of thumb, don't trim things used in the last day.
+ Close() error
+
+ // OutputFile returns the path on disk where OutputID is stored.
+ //
+ // It's only called after a successful get or put call so it doesn't need
+ // to return an error; it's assumed that if the previous get or put succeeded,
+ // it's already on disk.
+ OutputFile(OutputID) string
+
+ // FuzzDir returns where fuzz files are stored.
+ FuzzDir() string
+}
+
+// A DiskCache is a package cache, backed by a file system directory tree.
+type DiskCache struct {
+ dir string
+ now func() time.Time
+}
+
+// Open opens and returns the cache in the given directory.
+//
+// It is safe for multiple processes on a single machine to use the
+// same cache directory in a local file system simultaneously.
+// They will coordinate using operating system file locks and may
+// duplicate effort but will not corrupt the cache.
+//
+// However, it is NOT safe for multiple processes on different machines
+// to share a cache directory (for example, if the directory were stored
+// in a network file system). File locking is notoriously unreliable in
+// network file systems and may not suffice to protect the cache.
+func Open(dir string) (*DiskCache, error) {
+ info, err := os.Stat(dir)
+ if err != nil {
+ return nil, err
+ }
+ if !info.IsDir() {
+ return nil, &fs.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")}
+ }
+ for i := 0; i < 256; i++ {
+ name := filepath.Join(dir, fmt.Sprintf("%02x", i))
+ if err := os.MkdirAll(name, 0777); err != nil {
+ return nil, err
+ }
+ }
+ c := &DiskCache{
+ dir: dir,
+ now: time.Now,
+ }
+ return c, nil
+}
+
+// fileName returns the name of the file corresponding to the given id.
+func (c *DiskCache) fileName(id [HashSize]byte, key string) string {
+ return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key)
+}
+
+// An entryNotFoundError indicates that a cache entry was not found, with an
+// optional underlying reason.
+type entryNotFoundError struct {
+ Err error
+}
+
+func (e *entryNotFoundError) Error() string {
+ if e.Err == nil {
+ return "cache entry not found"
+ }
+ return fmt.Sprintf("cache entry not found: %v", e.Err)
+}
+
+func (e *entryNotFoundError) Unwrap() error {
+ return e.Err
+}
+
+const (
+ // action entry file is "v1 <hex id> <hex out> <decimal size space-padded to 20 bytes> <unixnano space-padded to 20 bytes>\n"
+ hexSize = HashSize * 2
+ entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1
+)
+
+// verify controls whether to run the cache in verify mode.
+// In verify mode, the cache always returns errMissing from Get
+// but then double-checks in Put that the data being written
+// exactly matches any existing entry. This provides an easy
+// way to detect program behavior that would have been different
+// had the cache entry been returned from Get.
+//
+// verify is enabled by setting the environment variable
+// GODEBUG=gocacheverify=1.
+var verify = false
+
+var errVerifyMode = errors.New("gocacheverify=1")
+
+// DebugTest is set when GODEBUG=gocachetest=1 is in the environment.
+var DebugTest = false
+
+func init() { initEnv() }
+
+var (
+ goCacheVerify = godebug.New("gocacheverify")
+ goDebugHash = godebug.New("gocachehash")
+ goCacheTest = godebug.New("gocachetest")
+)
+
+func initEnv() {
+ if goCacheVerify.Value() == "1" {
+ goCacheVerify.IncNonDefault()
+ verify = true
+ }
+ if goDebugHash.Value() == "1" {
+ goDebugHash.IncNonDefault()
+ debugHash = true
+ }
+ if goCacheTest.Value() == "1" {
+ goCacheTest.IncNonDefault()
+ DebugTest = true
+ }
+}
+
+// Get looks up the action ID in the cache,
+// returning the corresponding output ID and file size, if any.
+// Note that finding an output ID does not guarantee that the
+// saved file for that output ID is still available.
+func (c *DiskCache) Get(id ActionID) (Entry, error) {
+ if verify {
+ return Entry{}, &entryNotFoundError{Err: errVerifyMode}
+ }
+ return c.get(id)
+}
+
+type Entry struct {
+ OutputID OutputID
+ Size int64
+ Time time.Time // when added to cache
+}
+
+// get is Get but does not respect verify mode, so that Put can use it.
+func (c *DiskCache) get(id ActionID) (Entry, error) {
+ missing := func(reason error) (Entry, error) {
+ return Entry{}, &entryNotFoundError{Err: reason}
+ }
+ f, err := os.Open(c.fileName(id, "a"))
+ if err != nil {
+ return missing(err)
+ }
+ defer f.Close()
+ entry := make([]byte, entrySize+1) // +1 to detect whether f is too long
+ if n, err := io.ReadFull(f, entry); n > entrySize {
+ return missing(errors.New("too long"))
+ } else if err != io.ErrUnexpectedEOF {
+ if err == io.EOF {
+ return missing(errors.New("file is empty"))
+ }
+ return missing(err)
+ } else if n < entrySize {
+ return missing(errors.New("entry file incomplete"))
+ }
+ if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' {
+ return missing(errors.New("invalid header"))
+ }
+ eid, entry := entry[3:3+hexSize], entry[3+hexSize:]
+ eout, entry := entry[1:1+hexSize], entry[1+hexSize:]
+ esize, entry := entry[1:1+20], entry[1+20:]
+ etime, entry := entry[1:1+20], entry[1+20:]
+ var buf [HashSize]byte
+ if _, err := hex.Decode(buf[:], eid); err != nil {
+ return missing(fmt.Errorf("decoding ID: %v", err))
+ } else if buf != id {
+ return missing(errors.New("mismatched ID"))
+ }
+ if _, err := hex.Decode(buf[:], eout); err != nil {
+ return missing(fmt.Errorf("decoding output ID: %v", err))
+ }
+ i := 0
+ for i < len(esize) && esize[i] == ' ' {
+ i++
+ }
+ size, err := strconv.ParseInt(string(esize[i:]), 10, 64)
+ if err != nil {
+ return missing(fmt.Errorf("parsing size: %v", err))
+ } else if size < 0 {
+ return missing(errors.New("negative size"))
+ }
+ i = 0
+ for i < len(etime) && etime[i] == ' ' {
+ i++
+ }
+ tm, err := strconv.ParseInt(string(etime[i:]), 10, 64)
+ if err != nil {
+ return missing(fmt.Errorf("parsing timestamp: %v", err))
+ } else if tm < 0 {
+ return missing(errors.New("negative timestamp"))
+ }
+
+ c.used(c.fileName(id, "a"))
+
+ return Entry{buf, size, time.Unix(0, tm)}, nil
+}
+
+// GetFile looks up the action ID in the cache and returns
+// the name of the corresponding data file.
+func GetFile(c Cache, id ActionID) (file string, entry Entry, err error) {
+ entry, err = c.Get(id)
+ if err != nil {
+ return "", Entry{}, err
+ }
+ file = c.OutputFile(entry.OutputID)
+ info, err := os.Stat(file)
+ if err != nil {
+ return "", Entry{}, &entryNotFoundError{Err: err}
+ }
+ if info.Size() != entry.Size {
+ return "", Entry{}, &entryNotFoundError{Err: errors.New("file incomplete")}
+ }
+ return file, entry, nil
+}
+
+// GetBytes looks up the action ID in the cache and returns
+// the corresponding output bytes.
+// GetBytes should only be used for data that can be expected to fit in memory.
+func GetBytes(c Cache, id ActionID) ([]byte, Entry, error) {
+ entry, err := c.Get(id)
+ if err != nil {
+ return nil, entry, err
+ }
+ data, _ := os.ReadFile(c.OutputFile(entry.OutputID))
+ if sha256.Sum256(data) != entry.OutputID {
+ return nil, entry, &entryNotFoundError{Err: errors.New("bad checksum")}
+ }
+ return data, entry, nil
+}
+
+// GetMmap looks up the action ID in the cache and returns
+// the corresponding output bytes.
+// GetMmap should only be used for data that can be expected to fit in memory.
+func GetMmap(c Cache, id ActionID) ([]byte, Entry, error) {
+ entry, err := c.Get(id)
+ if err != nil {
+ return nil, entry, err
+ }
+ md, err := mmap.Mmap(c.OutputFile(entry.OutputID))
+ if err != nil {
+ return nil, Entry{}, err
+ }
+ if int64(len(md.Data)) != entry.Size {
+ return nil, Entry{}, &entryNotFoundError{Err: errors.New("file incomplete")}
+ }
+ return md.Data, entry, nil
+}
+
+// OutputFile returns the name of the cache file storing output with the given OutputID.
+func (c *DiskCache) OutputFile(out OutputID) string {
+ file := c.fileName(out, "d")
+ c.used(file)
+ return file
+}
+
+// Time constants for cache expiration.
+//
+// We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour),
+// to avoid causing many unnecessary inode updates. The mtimes therefore
+// roughly reflect "time of last use" but may in fact be older by at most an hour.
+//
+// We scan the cache for entries to delete at most once per trimInterval (1 day).
+//
+// When we do scan the cache, we delete entries that have not been used for
+// at least trimLimit (5 days). Statistics gathered from a month of usage by
+// Go developers found that essentially all reuse of cached entries happened
+// within 5 days of the previous reuse. See golang.org/issue/22990.
+const (
+ mtimeInterval = 1 * time.Hour
+ trimInterval = 24 * time.Hour
+ trimLimit = 5 * 24 * time.Hour
+)
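+
+// Net effect: Trim does a full scan at most once per trimInterval, and when it
+// does, it removes entries whose mtime is older than trimLimit+mtimeInterval
+// (5 days + 1 hour) before now, the extra hour covering mtimes that lag actual use.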
+
+// used makes a best-effort attempt to update mtime on file,
+// so that mtime reflects cache access time.
+//
+// Because the reflection only needs to be approximate,
+// and to reduce the amount of disk activity caused by using
+// cache entries, used only updates the mtime if the current
+// mtime is more than an hour old. This heuristic eliminates
+// nearly all of the mtime updates that would otherwise happen,
+// while still keeping the mtimes useful for cache trimming.
+func (c *DiskCache) used(file string) {
+ info, err := os.Stat(file)
+ if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval {
+ return
+ }
+ os.Chtimes(file, c.now(), c.now())
+}
+
+func (c *DiskCache) Close() error { return c.Trim() }
+
+// Trim removes old cache entries that are likely not to be reused.
+func (c *DiskCache) Trim() error {
+ now := c.now()
+
+ // We maintain in dir/trim.txt the time of the last completed cache trim.
+ // If the cache has been trimmed recently enough, do nothing.
+ // This is the common case.
+ // If the trim file is corrupt (detected when the file can't be parsed) or the
+ // trim time is too far in the future, attempt the trim anyway. It's possible that
+ // the cache was full when the corruption happened. Attempting a trim on
+ // an empty cache is cheap, so there wouldn't be a big performance hit in that case.
+ if data, err := lockedfile.Read(filepath.Join(c.dir, "trim.txt")); err == nil {
+ if t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64); err == nil {
+ lastTrim := time.Unix(t, 0)
+ if d := now.Sub(lastTrim); d < trimInterval && d > -mtimeInterval {
+ return nil
+ }
+ }
+ }
+
+ // Trim each of the 256 subdirectories.
+ // We subtract an additional mtimeInterval
+ // to account for the imprecision of our "last used" mtimes.
+ cutoff := now.Add(-trimLimit - mtimeInterval)
+ for i := 0; i < 256; i++ {
+ subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i))
+ c.trimSubdir(subdir, cutoff)
+ }
+
+ // Ignore errors from here: if we don't write the complete timestamp, the
+ // cache will appear older than it is, and we'll trim it again next time.
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "%d", now.Unix())
+ if err := lockedfile.Write(filepath.Join(c.dir, "trim.txt"), &b, 0666); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// trimSubdir trims a single cache subdirectory.
+func (c *DiskCache) trimSubdir(subdir string, cutoff time.Time) {
+ // Read all directory entries from subdir before removing
+ // any files, in case removing files invalidates the file offset
+ // in the directory scan. Also, ignore error from f.Readdirnames,
+ // because we don't care about reporting the error and we still
+ // want to process any entries found before the error.
+ f, err := os.Open(subdir)
+ if err != nil {
+ return
+ }
+ names, _ := f.Readdirnames(-1)
+ f.Close()
+
+ for _, name := range names {
+ // Remove only cache entries (xxxx-a and xxxx-d).
+ if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") {
+ continue
+ }
+ entry := filepath.Join(subdir, name)
+ info, err := os.Stat(entry)
+ if err == nil && info.ModTime().Before(cutoff) {
+ os.Remove(entry)
+ }
+ }
+}
+
+// putIndexEntry adds an entry to the cache recording that executing the action
+// with the given id produces an output with the given output id (hash) and size.
+func (c *DiskCache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error {
+ // Note: We expect that for one reason or another it may happen
+ // that repeating an action produces a different output hash
+ // (for example, if the output contains a time stamp or temp dir name).
+ // While not ideal, this is also not a correctness problem, so we
+ // don't make a big deal about it. In particular, we leave the action
+ // cache entries writable specifically so that they can be overwritten.
+ //
+ // Setting GODEBUG=gocacheverify=1 does make a big deal:
+ // in verify mode we are double-checking that the cache entries
+ // are entirely reproducible. As just noted, this may be unrealistic
+ // in some cases but the check is also useful for shaking out real bugs.
+ entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())
+ if verify && allowVerify {
+ old, err := c.get(id)
+ if err == nil && (old.OutputID != out || old.Size != size) {
+ // panic to show stack trace, so we can see what code is generating this cache entry.
+ msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), old.OutputID, old.Size, out, size)
+ panic(msg)
+ }
+ }
+ file := c.fileName(id, "a")
+
+ // Copy file to cache directory.
+ mode := os.O_WRONLY | os.O_CREATE
+ f, err := os.OpenFile(file, mode, 0666)
+ if err != nil {
+ return err
+ }
+ _, err = f.WriteString(entry)
+ if err == nil {
+ // Truncate the file only *after* writing it.
+ // (This should be a no-op, but truncate just in case of previous corruption.)
+ //
+ // This differs from os.WriteFile, which truncates to 0 *before* writing
+ // via os.O_TRUNC. Truncating only after writing ensures that a second write
+ // of the same content to the same file is idempotent, and does not — even
+ // temporarily! — undo the effect of the first write.
+ err = f.Truncate(int64(len(entry)))
+ }
+ if closeErr := f.Close(); err == nil {
+ err = closeErr
+ }
+ if err != nil {
+ // TODO(bcmills): This Remove potentially races with another go command writing to file.
+ // Can we eliminate it?
+ os.Remove(file)
+ return err
+ }
+ os.Chtimes(file, c.now(), c.now()) // mainly for tests
+
+ return nil
+}
+
+// noVerifyReadSeeker is an io.ReadSeeker wrapper sentinel type
+// that says that Cache.Put should skip the verify check
+// (from GODEBUG=gocacheverify=1).
+type noVerifyReadSeeker struct {
+ io.ReadSeeker
+}
+
+// Put stores the given output in the cache as the output for the action ID.
+// It may read file twice. The content of file must not change between the two passes.
+func (c *DiskCache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
+ wrapper, isNoVerify := file.(noVerifyReadSeeker)
+ if isNoVerify {
+ file = wrapper.ReadSeeker
+ }
+ return c.put(id, file, !isNoVerify)
+}
+
+// PutNoVerify is like Put but disables the verify check
+// when GODEBUG=gocacheverify=1 is set.
+// It is meant for data that is OK to cache but that we expect to vary slightly from run to run,
+// like test output containing times and the like.
+func PutNoVerify(c Cache, id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
+ return c.Put(id, noVerifyReadSeeker{file})
+}
+
+func (c *DiskCache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) {
+ // Compute output ID.
+ h := sha256.New()
+ if _, err := file.Seek(0, 0); err != nil {
+ return OutputID{}, 0, err
+ }
+ size, err := io.Copy(h, file)
+ if err != nil {
+ return OutputID{}, 0, err
+ }
+ var out OutputID
+ h.Sum(out[:0])
+
+ // Copy to cached output file (if not already present).
+ if err := c.copyFile(file, out, size); err != nil {
+ return out, size, err
+ }
+
+ // Add to cache index.
+ return out, size, c.putIndexEntry(id, out, size, allowVerify)
+}
+
+// PutBytes stores the given bytes in the cache as the output for the action ID.
+func PutBytes(c Cache, id ActionID, data []byte) error {
+ _, _, err := c.Put(id, bytes.NewReader(data))
+ return err
+}
+
+// copyFile copies file into the cache, expecting it to have the given
+// output ID and size, if that file is not present already.
+func (c *DiskCache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
+ name := c.fileName(out, "d")
+ info, err := os.Stat(name)
+ if err == nil && info.Size() == size {
+ // Check hash.
+ if f, err := os.Open(name); err == nil {
+ h := sha256.New()
+ io.Copy(h, f)
+ f.Close()
+ var out2 OutputID
+ h.Sum(out2[:0])
+ if out == out2 {
+ return nil
+ }
+ }
+ // Hash did not match. Fall through and rewrite file.
+ }
+
+ // Copy file to cache directory.
+ mode := os.O_RDWR | os.O_CREATE
+ if err == nil && info.Size() > size { // shouldn't happen but fix in case
+ mode |= os.O_TRUNC
+ }
+ f, err := os.OpenFile(name, mode, 0666)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if size == 0 {
+ // File now exists with correct size.
+ // Only one possible zero-length file, so contents are OK too.
+ // Early return here makes sure there's a "last byte" for code below.
+ return nil
+ }
+
+ // From here on, if any of the I/O writing the file fails,
+ // we make a best-effort attempt to truncate the file f
+ // before returning, to avoid leaving bad bytes in the file.
+
+ // Copy file to f, but also into h to double-check hash.
+ if _, err := file.Seek(0, 0); err != nil {
+ f.Truncate(0)
+ return err
+ }
+ h := sha256.New()
+ w := io.MultiWriter(f, h)
+ if _, err := io.CopyN(w, file, size-1); err != nil {
+ f.Truncate(0)
+ return err
+ }
+ // Check last byte before writing it; writing it will make the size match
+ // what other processes expect to find and might cause them to start
+ // using the file.
+ buf := make([]byte, 1)
+ if _, err := file.Read(buf); err != nil {
+ f.Truncate(0)
+ return err
+ }
+ h.Write(buf)
+ sum := h.Sum(nil)
+ if !bytes.Equal(sum, out[:]) {
+ f.Truncate(0)
+ return fmt.Errorf("file content changed underfoot")
+ }
+
+ // Commit cache file entry.
+ if _, err := f.Write(buf); err != nil {
+ f.Truncate(0)
+ return err
+ }
+ if err := f.Close(); err != nil {
+ // Data might not have been written,
+ // but file may look like it is the right size.
+ // To be extra careful, remove cached file.
+ os.Remove(name)
+ return err
+ }
+ os.Chtimes(name, c.now(), c.now()) // mainly for tests
+
+ return nil
+}
+
+// FuzzDir returns a subdirectory within the cache for storing fuzzing data.
+// The subdirectory may not exist.
+//
+// This directory is managed by the internal/fuzz package. Files in this
+// directory aren't removed by the 'go clean -cache' command or by Trim.
+// They may be removed with 'go clean -fuzzcache'.
+//
+// TODO(#48526): make Trim remove unused files from this directory.
+func (c *DiskCache) FuzzDir() string {
+ return filepath.Join(c.dir, "fuzz")
+}
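
Taken together, the functions above form a small content-addressed store: compute an
ActionID from the inputs, Put the output bytes, and Get them back on a later run. A
minimal sketch of a caller, assuming the cache directory already exists (Open does not
create it); the path is illustrative, and the package is importable only from within
cmd/go since it lives under an internal directory:

	package main

	import (
		"crypto/sha256"
		"fmt"
		"log"

		"cmd/go/internal/cache"
	)

	func main() {
		c, err := cache.Open("/tmp/example-gocache") // directory must already exist
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close() // for the disk cache, Close runs Trim

		// Any [HashSize]byte value is a usable ActionID; real callers derive it
		// from everything that determines the output (see hash.go below).
		action := cache.ActionID(sha256.Sum256([]byte("illustrative action key")))

		// Store a small output and read it back.
		if err := cache.PutBytes(c, action, []byte("output bytes")); err != nil {
			log.Fatal(err)
		}
		data, entry, err := cache.GetBytes(c, action)
		if err != nil {
			log.Fatal(err) // a miss is reported as an entryNotFoundError
		}
		fmt.Printf("hit: %d bytes, stored %v: %q\n", entry.Size, entry.Time, data)
	}
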
diff --git a/src/cmd/go/internal/cache/cache_test.go b/src/cmd/go/internal/cache/cache_test.go
new file mode 100644
index 0000000..a12f1d2
--- /dev/null
+++ b/src/cmd/go/internal/cache/cache_test.go
@@ -0,0 +1,285 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+)
+
+func init() {
+ verify = false // even if GODEBUG is set
+}
+
+func TestBasic(t *testing.T) {
+ dir, err := os.MkdirTemp("", "cachetest-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+ _, err = Open(filepath.Join(dir, "notexist"))
+ if err == nil {
+ t.Fatal(`Open("tmp/notexist") succeeded, want failure`)
+ }
+
+ cdir := filepath.Join(dir, "c1")
+ if err := os.Mkdir(cdir, 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ c1, err := Open(cdir)
+ if err != nil {
+ t.Fatalf("Open(c1) (create): %v", err)
+ }
+ if err := c1.putIndexEntry(dummyID(1), dummyID(12), 13, true); err != nil {
+ t.Fatalf("addIndexEntry: %v", err)
+ }
+ if err := c1.putIndexEntry(dummyID(1), dummyID(2), 3, true); err != nil { // overwrite entry
+ t.Fatalf("addIndexEntry: %v", err)
+ }
+ if entry, err := c1.Get(dummyID(1)); err != nil || entry.OutputID != dummyID(2) || entry.Size != 3 {
+ t.Fatalf("c1.Get(1) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(2), 3)
+ }
+
+ c2, err := Open(cdir)
+ if err != nil {
+ t.Fatalf("Open(c2) (reuse): %v", err)
+ }
+ if entry, err := c2.Get(dummyID(1)); err != nil || entry.OutputID != dummyID(2) || entry.Size != 3 {
+ t.Fatalf("c2.Get(1) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(2), 3)
+ }
+ if err := c2.putIndexEntry(dummyID(2), dummyID(3), 4, true); err != nil {
+ t.Fatalf("addIndexEntry: %v", err)
+ }
+ if entry, err := c1.Get(dummyID(2)); err != nil || entry.OutputID != dummyID(3) || entry.Size != 4 {
+ t.Fatalf("c1.Get(2) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(3), 4)
+ }
+}
+
+func TestGrowth(t *testing.T) {
+ dir, err := os.MkdirTemp("", "cachetest-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ c, err := Open(dir)
+ if err != nil {
+ t.Fatalf("Open: %v", err)
+ }
+
+ n := 10000
+ if testing.Short() {
+ n = 10
+ }
+
+ for i := 0; i < n; i++ {
+ if err := c.putIndexEntry(dummyID(i), dummyID(i*99), int64(i)*101, true); err != nil {
+ t.Fatalf("addIndexEntry: %v", err)
+ }
+ id := ActionID(dummyID(i))
+ entry, err := c.Get(id)
+ if err != nil {
+ t.Fatalf("Get(%x): %v", id, err)
+ }
+ if entry.OutputID != dummyID(i*99) || entry.Size != int64(i)*101 {
+ t.Errorf("Get(%x) = %x, %d, want %x, %d", id, entry.OutputID, entry.Size, dummyID(i*99), int64(i)*101)
+ }
+ }
+ for i := 0; i < n; i++ {
+ id := ActionID(dummyID(i))
+ entry, err := c.Get(id)
+ if err != nil {
+ t.Fatalf("Get2(%x): %v", id, err)
+ }
+ if entry.OutputID != dummyID(i*99) || entry.Size != int64(i)*101 {
+ t.Errorf("Get2(%x) = %x, %d, want %x, %d", id, entry.OutputID, entry.Size, dummyID(i*99), int64(i)*101)
+ }
+ }
+}
+
+func TestVerifyPanic(t *testing.T) {
+ os.Setenv("GODEBUG", "gocacheverify=1")
+ initEnv()
+ defer func() {
+ os.Unsetenv("GODEBUG")
+ verify = false
+ }()
+
+ if !verify {
+ t.Fatal("initEnv did not set verify")
+ }
+
+ dir, err := os.MkdirTemp("", "cachetest-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ c, err := Open(dir)
+ if err != nil {
+ t.Fatalf("Open: %v", err)
+ }
+
+ id := ActionID(dummyID(1))
+ if err := PutBytes(c, id, []byte("abc")); err != nil {
+ t.Fatal(err)
+ }
+
+ defer func() {
+ if err := recover(); err != nil {
+ t.Log(err)
+ return
+ }
+ }()
+ PutBytes(c, id, []byte("def"))
+ t.Fatal("mismatched Put did not panic in verify mode")
+}
+
+func dummyID(x int) [HashSize]byte {
+ var out [HashSize]byte
+ binary.LittleEndian.PutUint64(out[:], uint64(x))
+ return out
+}
+
+func TestCacheTrim(t *testing.T) {
+ dir, err := os.MkdirTemp("", "cachetest-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ c, err := Open(dir)
+ if err != nil {
+ t.Fatalf("Open: %v", err)
+ }
+ const start = 1000000000
+ now := int64(start)
+ c.now = func() time.Time { return time.Unix(now, 0) }
+
+ checkTime := func(name string, mtime int64) {
+ t.Helper()
+ file := filepath.Join(c.dir, name[:2], name)
+ info, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if info.ModTime().Unix() != mtime {
+ t.Fatalf("%s mtime = %d, want %d", name, info.ModTime().Unix(), mtime)
+ }
+ }
+
+ id := ActionID(dummyID(1))
+ PutBytes(c, id, []byte("abc"))
+ entry, _ := c.Get(id)
+ PutBytes(c, ActionID(dummyID(2)), []byte("def"))
+ mtime := now
+ checkTime(fmt.Sprintf("%x-a", id), mtime)
+ checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime)
+
+ // Get should not change recent mtimes.
+ now = start + 10
+ c.Get(id)
+ checkTime(fmt.Sprintf("%x-a", id), mtime)
+ checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime)
+
+ // Get should change distant mtimes.
+ now = start + 5000
+ mtime2 := now
+ if _, err := c.Get(id); err != nil {
+ t.Fatal(err)
+ }
+ c.OutputFile(entry.OutputID)
+ checkTime(fmt.Sprintf("%x-a", id), mtime2)
+ checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime2)
+
+ // Trim should leave everything alone: it's all too new.
+ if err := c.Trim(); err != nil {
+ if testenv.SyscallIsNotSupported(err) {
+ t.Skipf("skipping: Trim is unsupported (%v)", err)
+ }
+ t.Fatal(err)
+ }
+ if _, err := c.Get(id); err != nil {
+ t.Fatal(err)
+ }
+ c.OutputFile(entry.OutputID)
+ data, err := os.ReadFile(filepath.Join(dir, "trim.txt"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkTime(fmt.Sprintf("%x-a", dummyID(2)), start)
+
+ // Trim less than a day later should not do any work at all.
+ now = start + 80000
+ if err := c.Trim(); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := c.Get(id); err != nil {
+ t.Fatal(err)
+ }
+ c.OutputFile(entry.OutputID)
+ data2, err := os.ReadFile(filepath.Join(dir, "trim.txt"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(data, data2) {
+ t.Fatalf("second trim did work: %q -> %q", data, data2)
+ }
+
+ // Fast forward and do another trim just before the 5 day cutoff.
+ // Note that because of mtimeInterval the cutoff is actually 5 days + 1 hour.
+ // We used c.Get(id) just now, so 5 days later it should still be kept.
+ // On the other hand almost a full day has gone by since we wrote dummyID(2)
+ // and we haven't looked at it since, so 5 days later it should be gone.
+ now += 5 * 86400
+ checkTime(fmt.Sprintf("%x-a", dummyID(2)), start)
+ if err := c.Trim(); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := c.Get(id); err != nil {
+ t.Fatal(err)
+ }
+ c.OutputFile(entry.OutputID)
+ mtime3 := now
+ if _, err := c.Get(dummyID(2)); err == nil { // haven't done a Get for this since original write above
+ t.Fatalf("Trim did not remove dummyID(2)")
+ }
+
+ // The c.Get(id) refreshed id's mtime again.
+ // Check that another 5 days later it is still not gone,
+ // but check by using checkTime, which doesn't bring mtime forward.
+ now += 5 * 86400
+ if err := c.Trim(); err != nil {
+ t.Fatal(err)
+ }
+ checkTime(fmt.Sprintf("%x-a", id), mtime3)
+ checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime3)
+
+ // Half a day later Trim should still be a no-op, because there was a Trim recently.
+ // Even though the entry for id is now old enough to be trimmed,
+ // it gets a reprieve until the time comes for a new Trim scan.
+ now += 86400 / 2
+ if err := c.Trim(); err != nil {
+ t.Fatal(err)
+ }
+ checkTime(fmt.Sprintf("%x-a", id), mtime3)
+ checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime3)
+
+ // Another half a day later, Trim should actually run, and it should remove id.
+ now += 86400/2 + 1
+ if err := c.Trim(); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := c.Get(dummyID(1)); err == nil {
+ t.Fatal("Trim did not remove dummyID(1)")
+ }
+}
diff --git a/src/cmd/go/internal/cache/default.go b/src/cmd/go/internal/cache/default.go
new file mode 100644
index 0000000..b5650ea
--- /dev/null
+++ b/src/cmd/go/internal/cache/default.go
@@ -0,0 +1,105 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "internal/goexperiment"
+)
+
+// Default returns the default cache to use.
+// It never returns nil.
+func Default() Cache {
+ defaultOnce.Do(initDefaultCache)
+ return defaultCache
+}
+
+var (
+ defaultOnce sync.Once
+ defaultCache Cache
+)
+
+// cacheREADME is a message stored in a README in the cache directory.
+// Because the cache lives outside the normal Go trees, we leave the
+// README as a courtesy to explain where it came from.
+const cacheREADME = `This directory holds cached build artifacts from the Go build system.
+Run "go clean -cache" if the directory is getting too large.
+Run "go clean -fuzzcache" to delete the fuzz cache.
+See golang.org to learn more about Go.
+`
+
+// initDefaultCache does the work of finding the default cache
+// the first time Default is called.
+func initDefaultCache() {
+ dir := DefaultDir()
+ if dir == "off" {
+ if defaultDirErr != nil {
+ base.Fatalf("build cache is required, but could not be located: %v", defaultDirErr)
+ }
+ base.Fatalf("build cache is disabled by GOCACHE=off, but required as of Go 1.12")
+ }
+ if err := os.MkdirAll(dir, 0777); err != nil {
+ base.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
+ }
+ if _, err := os.Stat(filepath.Join(dir, "README")); err != nil {
+ // Best effort.
+ os.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666)
+ }
+
+ diskCache, err := Open(dir)
+ if err != nil {
+ base.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
+ }
+
+ if v := cfg.Getenv("GOCACHEPROG"); v != "" && goexperiment.CacheProg {
+ defaultCache = startCacheProg(v, diskCache)
+ } else {
+ defaultCache = diskCache
+ }
+}
+
+var (
+ defaultDirOnce sync.Once
+ defaultDir string
+ defaultDirErr error
+)
+
+// DefaultDir returns the effective GOCACHE setting.
+// It returns "off" if the cache is disabled.
+func DefaultDir() string {
+ // Save the result of the first call to DefaultDir for later use in
+ // initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that
+ // subprocesses will inherit it, but that means initDefaultCache can't
+ // otherwise distinguish between an explicit "off" and a UserCacheDir error.
+
+ defaultDirOnce.Do(func() {
+ defaultDir = cfg.Getenv("GOCACHE")
+ if filepath.IsAbs(defaultDir) || defaultDir == "off" {
+ return
+ }
+ if defaultDir != "" {
+ defaultDir = "off"
+ defaultDirErr = fmt.Errorf("GOCACHE is not an absolute path")
+ return
+ }
+
+ // Compute default location.
+ dir, err := os.UserCacheDir()
+ if err != nil {
+ defaultDir = "off"
+ defaultDirErr = fmt.Errorf("GOCACHE is not defined and %v", err)
+ return
+ }
+ defaultDir = filepath.Join(dir, "go-build")
+ })
+
+ return defaultDir
+}
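
Within cmd/go, callers normally reach the cache through Default rather than calling Open
directly; a small fragment as a sketch (assuming the cache and fmt packages are imported;
misconfiguration is handled inside the package via base.Fatalf):

	c := cache.Default() // never nil; exits via base.Fatalf if the cache cannot be located
	defer c.Close()      // for the disk-based cache, Close runs Trim

	// DefaultDir reports the effective GOCACHE setting, or "off" when disabled.
	fmt.Println("GOCACHE:", cache.DefaultDir())
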
diff --git a/src/cmd/go/internal/cache/hash.go b/src/cmd/go/internal/cache/hash.go
new file mode 100644
index 0000000..4f79c31
--- /dev/null
+++ b/src/cmd/go/internal/cache/hash.go
@@ -0,0 +1,190 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "fmt"
+ "hash"
+ "io"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+var debugHash = false // set when GODEBUG=gocachehash=1
+
+// HashSize is the number of bytes in a hash.
+const HashSize = 32
+
+// A Hash provides access to the canonical hash function used to index the cache.
+// The current implementation uses salted SHA256, but clients must not assume this.
+type Hash struct {
+ h hash.Hash
+ name string // for debugging
+ buf *bytes.Buffer // for verify
+}
+
+// hashSalt is a salt string added to the beginning of every hash
+// created by NewHash. Using the Go version makes sure that different
+// versions of the go command (or even different Git commits during
+// work on the development branch) do not address the same cache
+// entries, so that a bug in one version does not affect the execution
+// of other versions. This salt will result in additional ActionID files
+// in the cache, but not additional copies of the large output files,
+// which are still addressed by unsalted SHA256.
+//
+// We strip any GOEXPERIMENTs the go tool was built with from this
+// version string on the assumption that they shouldn't affect go tool
+// execution. This allows bootstrapping to converge faster: dist builds
+// go_bootstrap without any experiments, so by stripping experiments
+// go_bootstrap and the final go binary will use the same salt.
+var hashSalt = []byte(stripExperiment(runtime.Version()))
+
+// stripExperiment strips any GOEXPERIMENT configuration from the Go
+// version string.
+func stripExperiment(version string) string {
+ if i := strings.Index(version, " X:"); i >= 0 {
+ return version[:i]
+ }
+ return version
+}
+
+// Subkey returns an action ID corresponding to mixing a parent
+// action ID with a string description of the subkey.
+func Subkey(parent ActionID, desc string) ActionID {
+ h := sha256.New()
+ h.Write([]byte("subkey:"))
+ h.Write(parent[:])
+ h.Write([]byte(desc))
+ var out ActionID
+ h.Sum(out[:0])
+ if debugHash {
+ fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out)
+ }
+ if verify {
+ hashDebug.Lock()
+ hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc)
+ hashDebug.Unlock()
+ }
+ return out
+}
+
+// NewHash returns a new Hash.
+// The caller is expected to Write data to it and then call Sum.
+func NewHash(name string) *Hash {
+ h := &Hash{h: sha256.New(), name: name}
+ if debugHash {
+ fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name)
+ }
+ h.Write(hashSalt)
+ if verify {
+ h.buf = new(bytes.Buffer)
+ }
+ return h
+}
+
+// Write writes data to the running hash.
+func (h *Hash) Write(b []byte) (int, error) {
+ if debugHash {
+ fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b)
+ }
+ if h.buf != nil {
+ h.buf.Write(b)
+ }
+ return h.h.Write(b)
+}
+
+// Sum returns the hash of the data written previously.
+func (h *Hash) Sum() [HashSize]byte {
+ var out [HashSize]byte
+ h.h.Sum(out[:0])
+ if debugHash {
+ fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out)
+ }
+ if h.buf != nil {
+ hashDebug.Lock()
+ if hashDebug.m == nil {
+ hashDebug.m = make(map[[HashSize]byte]string)
+ }
+ hashDebug.m[out] = h.buf.String()
+ hashDebug.Unlock()
+ }
+ return out
+}
+
+// In GODEBUG=gocacheverify=1 mode,
+// hashDebug holds the input to every computed hash ID,
+// so that we can work backward from the ID involved in a
+// cache entry mismatch to a description of what should be there.
+var hashDebug struct {
+ sync.Mutex
+ m map[[HashSize]byte]string
+}
+
+// reverseHash returns the input used to compute the hash id.
+func reverseHash(id [HashSize]byte) string {
+ hashDebug.Lock()
+ s := hashDebug.m[id]
+ hashDebug.Unlock()
+ return s
+}
+
+var hashFileCache struct {
+ sync.Mutex
+ m map[string][HashSize]byte
+}
+
+// FileHash returns the hash of the named file.
+// It caches repeated lookups for a given file,
+// and the cache entry for a file can be initialized
+// using SetFileHash.
+// The hash used by FileHash is not the same as
+// the hash used by NewHash.
+func FileHash(file string) ([HashSize]byte, error) {
+ hashFileCache.Lock()
+ out, ok := hashFileCache.m[file]
+ hashFileCache.Unlock()
+
+ if ok {
+ return out, nil
+ }
+
+ h := sha256.New()
+ f, err := os.Open(file)
+ if err != nil {
+ if debugHash {
+ fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
+ }
+ return [HashSize]byte{}, err
+ }
+ _, err = io.Copy(h, f)
+ f.Close()
+ if err != nil {
+ if debugHash {
+ fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
+ }
+ return [HashSize]byte{}, err
+ }
+ h.Sum(out[:0])
+ if debugHash {
+ fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out)
+ }
+
+ SetFileHash(file, out)
+ return out, nil
+}
+
+// SetFileHash sets the hash returned by FileHash for file.
+func SetFileHash(file string, sum [HashSize]byte) {
+ hashFileCache.Lock()
+ if hashFileCache.m == nil {
+ hashFileCache.m = make(map[string][HashSize]byte)
+ }
+ hashFileCache.m[file] = sum
+ hashFileCache.Unlock()
+}
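
A sketch of how the hashing pieces above combine when constructing an action ID; the
inputs here are placeholders, while real callers in cmd/go mix in things like the
compiler ID, build flags, and the hashes of dependencies:

	h := cache.NewHash("build example")   // salted with the go version
	sum, err := cache.FileHash("main.go") // per-file content hash, memoized in hashFileCache
	if err != nil {
		log.Fatal(err)
	}
	fmt.Fprintf(h, "file main.go %x\n", sum) // Hash implements io.Writer
	buildID := cache.ActionID(h.Sum())

	// Derive a related key from an existing one without rehashing all the inputs.
	linkID := cache.Subkey(buildID, "link")
	_ = linkID
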
diff --git a/src/cmd/go/internal/cache/hash_test.go b/src/cmd/go/internal/cache/hash_test.go
new file mode 100644
index 0000000..a035677
--- /dev/null
+++ b/src/cmd/go/internal/cache/hash_test.go
@@ -0,0 +1,51 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "fmt"
+ "os"
+ "testing"
+)
+
+func TestHash(t *testing.T) {
+ oldSalt := hashSalt
+ hashSalt = nil
+ defer func() {
+ hashSalt = oldSalt
+ }()
+
+ h := NewHash("alice")
+ h.Write([]byte("hello world"))
+ sum := fmt.Sprintf("%x", h.Sum())
+ want := "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"
+ if sum != want {
+ t.Errorf("hash(hello world) = %v, want %v", sum, want)
+ }
+}
+
+func TestHashFile(t *testing.T) {
+ f, err := os.CreateTemp("", "cmd-go-test-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ name := f.Name()
+ fmt.Fprintf(f, "hello world")
+ defer os.Remove(name)
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ var h ActionID // make sure hash result is assignable to ActionID
+ h, err = FileHash(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ sum := fmt.Sprintf("%x", h)
+ want := "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"
+ if sum != want {
+ t.Errorf("hash(hello world) = %v, want %v", sum, want)
+ }
+}
diff --git a/src/cmd/go/internal/cache/prog.go b/src/cmd/go/internal/cache/prog.go
new file mode 100644
index 0000000..30f69b3
--- /dev/null
+++ b/src/cmd/go/internal/cache/prog.go
@@ -0,0 +1,427 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "bufio"
+ "cmd/go/internal/base"
+ "cmd/internal/quoted"
+ "context"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ProgCache implements Cache via JSON messages over stdin/stdout to a child
+// helper process which can then implement whatever caching policy/mechanism it
+// wants.
+//
+// See https://github.com/golang/go/issues/59719
+type ProgCache struct {
+ cmd *exec.Cmd
+ stdout io.ReadCloser // from the child process
+ stdin io.WriteCloser // to the child process
+ bw *bufio.Writer // to stdin
+ jenc *json.Encoder // to bw
+
+ // can is the set of commands that the child process declared it supports.
+ // This is effectively the versioning mechanism.
+ can map[ProgCmd]bool
+
+ // fuzzDirCache is another Cache implementation to use for the FuzzDir
+ // method. In practice this is the default GOCACHE disk-based
+ // implementation.
+ //
+ // TODO(bradfitz): maybe this isn't ideal. But we'd need to extend the Cache
+ // interface and the fuzzing callers to be less disk-y to do more here.
+ fuzzDirCache Cache
+
+ closing atomic.Bool
+ ctx context.Context // valid until Close via ctxCancel
+ ctxCancel context.CancelFunc // called on Close
+ readLoopDone chan struct{} // closed when readLoop returns
+
+ mu sync.Mutex // guards following fields
+ nextID int64
+ inFlight map[int64]chan<- *ProgResponse
+ outputFile map[OutputID]string // object => abs path on disk
+
+ // writeMu serializes writing to the child process.
+ // It must never be held at the same time as mu.
+ writeMu sync.Mutex
+}
+
+// ProgCmd is a command that can be issued to a child process.
+//
+// If the interface needs to grow, we can add new commands or new versioned
+// commands like "get2".
+type ProgCmd string
+
+const (
+ cmdGet = ProgCmd("get")
+ cmdPut = ProgCmd("put")
+ cmdClose = ProgCmd("close")
+)
+
+// ProgRequest is the JSON-encoded message that's sent from cmd/go to
+// the GOCACHEPROG child process over stdin. Each JSON object is on its
+// own line. A ProgRequest of Type "put" with BodySize > 0 will be followed
+// by a line containing a base64-encoded JSON string literal of the body.
+type ProgRequest struct {
+ // ID is a unique number per process across all requests.
+ // It must be echoed in the ProgResponse from the child.
+ ID int64
+
+ // Command is the type of request.
+ // The cmd/go tool will only send commands that were declared
+ // as supported by the child.
+ Command ProgCmd
+
+ // ActionID is non-nil for gets and puts.
+ ActionID []byte `json:",omitempty"` // or nil if not used
+
+ // ObjectID is set for Type "put" and "output-file".
+ ObjectID []byte `json:",omitempty"` // or nil if not used
+
+ // Body is the body for "put" requests. It's sent after the JSON object
+ // as a base64-encoded JSON string when BodySize is non-zero.
+ // It's sent as a separate JSON value instead of being a struct field
+ // sent in this JSON object so that large values can be streamed in both directions.
+ // The base64 string body of a ProgRequest will always be written
+ // immediately after the JSON object and a newline.
+ Body io.Reader `json:"-"`
+
+ // BodySize is the number of bytes of Body. If zero, the body isn't written.
+ BodySize int64 `json:",omitempty"`
+}
+
+// ProgResponse is the JSON response from the child process to cmd/go.
+//
+// With the exception of the first protocol message that the child writes to its
+// stdout with ID==0 and KnownCommands populated, these are only sent in
+// response to a ProgRequest from cmd/go.
+//
+// ProgResponses can be sent in any order. The ID must match the request they're
+// replying to.
+type ProgResponse struct {
+ ID int64 // that corresponds to ProgRequest; they can be answered out of order
+ Err string `json:",omitempty"` // if non-empty, the error
+
+ // KnownCommands is included in the first message that cache helper program
+ // writes to stdout on startup (with ID==0). It includes the
+ // ProgRequest.Command types that are supported by the program.
+ //
+ // This lets us extend the protocol gracefully over time (adding "get2",
+ // etc), or fail gracefully when needed. It also lets us verify the program
+ // wants to be a cache helper.
+ KnownCommands []ProgCmd `json:",omitempty"`
+
+ // For Get requests.
+
+ Miss bool `json:",omitempty"` // cache miss
+ OutputID []byte `json:",omitempty"`
+ Size int64 `json:",omitempty"` // in bytes
+ Time *time.Time `json:",omitempty"` // an Entry.Time; when the object was added to the cache
+
+ // DiskPath is the absolute path on disk of the ObjectID corresponding
+ // a "get" request's ActionID (on cache hit) or a "put" request's
+ // provided ObjectID.
+ DiskPath string `json:",omitempty"`
+}
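+
+// As an illustration (not a transcript of real traffic), a short session might
+// look like this on the wire, with hashes abbreviated; the 3-byte put body
+// "hey" travels as the base64 JSON string "aGV5" on its own line:
+//
+//	child -> go: {"ID":0,"KnownCommands":["get","put","close"]}
+//	go -> child: {"ID":1,"Command":"put","ActionID":"qqq...","ObjectID":"uuu...","BodySize":3}
+//	go -> child: "aGV5"
+//	child -> go: {"ID":1,"DiskPath":"/abs/path/of/stored/object"}
+//	go -> child: {"ID":2,"Command":"get","ActionID":"qqq..."}
+//	child -> go: {"ID":2,"Miss":true}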
+
+// startCacheProg starts the prog binary (with optional space-separated flags)
+// and returns a Cache implementation that talks to it.
+//
+// It blocks a few seconds to wait for the child process to successfully start
+// and advertise its capabilities.
+func startCacheProg(progAndArgs string, fuzzDirCache Cache) Cache {
+ if fuzzDirCache == nil {
+ panic("missing fuzzDirCache")
+ }
+ args, err := quoted.Split(progAndArgs)
+ if err != nil {
+ base.Fatalf("GOCACHEPROG args: %v", err)
+ }
+ var prog string
+ if len(args) > 0 {
+ prog = args[0]
+ args = args[1:]
+ }
+
+ ctx, ctxCancel := context.WithCancel(context.Background())
+
+ cmd := exec.CommandContext(ctx, prog, args...)
+ out, err := cmd.StdoutPipe()
+ if err != nil {
+ base.Fatalf("StdoutPipe to GOCACHEPROG: %v", err)
+ }
+ in, err := cmd.StdinPipe()
+ if err != nil {
+ base.Fatalf("StdinPipe to GOCACHEPROG: %v", err)
+ }
+ cmd.Stderr = os.Stderr
+ cmd.Cancel = in.Close
+
+ if err := cmd.Start(); err != nil {
+ base.Fatalf("error starting GOCACHEPROG program %q: %v", prog, err)
+ }
+
+ pc := &ProgCache{
+ ctx: ctx,
+ ctxCancel: ctxCancel,
+ fuzzDirCache: fuzzDirCache,
+ cmd: cmd,
+ stdout: out,
+ stdin: in,
+ bw: bufio.NewWriter(in),
+ inFlight: make(map[int64]chan<- *ProgResponse),
+ outputFile: make(map[OutputID]string),
+ readLoopDone: make(chan struct{}),
+ }
+
+ // Register our interest in the initial protocol message from the child to
+ // us, saying what it can do.
+ capResc := make(chan *ProgResponse, 1)
+ pc.inFlight[0] = capResc
+
+ pc.jenc = json.NewEncoder(pc.bw)
+ go pc.readLoop(pc.readLoopDone)
+
+ // Give the child process a few seconds to report its capabilities. This
+ // should be instant and not require any slow work by the program.
+ timer := time.NewTicker(5 * time.Second)
+ defer timer.Stop()
+ for {
+ select {
+ case <-timer.C:
+ log.Printf("# still waiting for GOCACHEPROG %v ...", prog)
+ case capRes := <-capResc:
+ can := map[ProgCmd]bool{}
+ for _, cmd := range capRes.KnownCommands {
+ can[cmd] = true
+ }
+ if len(can) == 0 {
+ base.Fatalf("GOCACHEPROG %v declared no supported commands", prog)
+ }
+ pc.can = can
+ return pc
+ }
+ }
+}
+
+func (c *ProgCache) readLoop(readLoopDone chan<- struct{}) {
+ defer close(readLoopDone)
+ jd := json.NewDecoder(c.stdout)
+ for {
+ res := new(ProgResponse)
+ if err := jd.Decode(res); err != nil {
+ if c.closing.Load() {
+ return // quietly
+ }
+ if errors.Is(err, io.EOF) {
+ c.mu.Lock()
+ inFlight := len(c.inFlight)
+ c.mu.Unlock()
+ base.Fatalf("GOCACHEPROG exited pre-Close with %v pending requests", inFlight)
+ }
+ base.Fatalf("error reading JSON from GOCACHEPROG: %v", err)
+ }
+ c.mu.Lock()
+ ch, ok := c.inFlight[res.ID]
+ delete(c.inFlight, res.ID)
+ c.mu.Unlock()
+ if ok {
+ ch <- res
+ } else {
+ base.Fatalf("GOCACHEPROG sent response for unknown request ID %v", res.ID)
+ }
+ }
+}
+
+func (c *ProgCache) send(ctx context.Context, req *ProgRequest) (*ProgResponse, error) {
+ resc := make(chan *ProgResponse, 1)
+ if err := c.writeToChild(req, resc); err != nil {
+ return nil, err
+ }
+ select {
+ case res := <-resc:
+ if res.Err != "" {
+ return nil, errors.New(res.Err)
+ }
+ return res, nil
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+}
+
+func (c *ProgCache) writeToChild(req *ProgRequest, resc chan<- *ProgResponse) (err error) {
+ c.mu.Lock()
+ c.nextID++
+ req.ID = c.nextID
+ c.inFlight[req.ID] = resc
+ c.mu.Unlock()
+
+ defer func() {
+ if err != nil {
+ c.mu.Lock()
+ delete(c.inFlight, req.ID)
+ c.mu.Unlock()
+ }
+ }()
+
+ c.writeMu.Lock()
+ defer c.writeMu.Unlock()
+
+ if err := c.jenc.Encode(req); err != nil {
+ return err
+ }
+ if err := c.bw.WriteByte('\n'); err != nil {
+ return err
+ }
+ if req.Body != nil && req.BodySize > 0 {
+ if err := c.bw.WriteByte('"'); err != nil {
+ return err
+ }
+ e := base64.NewEncoder(base64.StdEncoding, c.bw)
+ wrote, err := io.Copy(e, req.Body)
+ if err != nil {
+ return err
+ }
+ if err := e.Close(); err != nil {
+ return err
+ }
+ if wrote != req.BodySize {
+ return fmt.Errorf("short write writing body to GOCACHEPROG for action %x, object %x: wrote %v; expected %v",
+ req.ActionID, req.ObjectID, wrote, req.BodySize)
+ }
+ if _, err := c.bw.WriteString("\"\n"); err != nil {
+ return err
+ }
+ }
+ if err := c.bw.Flush(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *ProgCache) Get(a ActionID) (Entry, error) {
+ if !c.can[cmdGet] {
+ // They can't do a "get". Maybe they're a write-only cache.
+ //
+ // TODO(bradfitz,bcmills): figure out the proper error type here. Maybe
+ // errors.ErrUnsupported? Is entryNotFoundError even appropriate? There
+ // might be places where we rely on the fact that a recent Put can be
+ // read through a corresponding Get. Audit callers and check, and document
+ // error types on the Cache interface.
+ return Entry{}, &entryNotFoundError{}
+ }
+ res, err := c.send(c.ctx, &ProgRequest{
+ Command: cmdGet,
+ ActionID: a[:],
+ })
+ if err != nil {
+ return Entry{}, err // TODO(bradfitz): or entryNotFoundError? Audit callers.
+ }
+ if res.Miss {
+ return Entry{}, &entryNotFoundError{}
+ }
+ e := Entry{
+ Size: res.Size,
+ }
+ if res.Time != nil {
+ e.Time = *res.Time
+ } else {
+ e.Time = time.Now()
+ }
+ if res.DiskPath == "" {
+ return Entry{}, &entryNotFoundError{errors.New("GOCACHEPROG didn't populate DiskPath on get hit")}
+ }
+ if copy(e.OutputID[:], res.OutputID) != len(res.OutputID) {
+ return Entry{}, &entryNotFoundError{errors.New("incomplete ProgResponse OutputID")}
+ }
+ c.noteOutputFile(e.OutputID, res.DiskPath)
+ return e, nil
+}
+
+func (c *ProgCache) noteOutputFile(o OutputID, diskPath string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.outputFile[o] = diskPath
+}
+
+func (c *ProgCache) OutputFile(o OutputID) string {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.outputFile[o]
+}
+
+func (c *ProgCache) Put(a ActionID, file io.ReadSeeker) (_ OutputID, size int64, _ error) {
+ // Compute output ID.
+ h := sha256.New()
+ if _, err := file.Seek(0, 0); err != nil {
+ return OutputID{}, 0, err
+ }
+ size, err := io.Copy(h, file)
+ if err != nil {
+ return OutputID{}, 0, err
+ }
+ var out OutputID
+ h.Sum(out[:0])
+
+ if _, err := file.Seek(0, 0); err != nil {
+ return OutputID{}, 0, err
+ }
+
+ if !c.can[cmdPut] {
+ // Child is a read-only cache. Do nothing.
+ return out, size, nil
+ }
+
+ res, err := c.send(c.ctx, &ProgRequest{
+ Command: cmdPut,
+ ActionID: a[:],
+ ObjectID: out[:],
+ Body: file,
+ BodySize: size,
+ })
+ if err != nil {
+ return OutputID{}, 0, err
+ }
+ if res.DiskPath == "" {
+ return OutputID{}, 0, errors.New("GOCACHEPROG didn't return DiskPath in put response")
+ }
+ c.noteOutputFile(out, res.DiskPath)
+ return out, size, err
+}
+
+func (c *ProgCache) Close() error {
+ c.closing.Store(true)
+ var err error
+
+ // First write a "close" message to the child so it can exit nicely
+ // and clean up if it wants. Only after that exchange do we cancel
+ // the context that kills the process.
+ if c.can[cmdClose] {
+ _, err = c.send(c.ctx, &ProgRequest{Command: cmdClose})
+ }
+ c.ctxCancel()
+ <-c.readLoopDone
+ return err
+}
+
+func (c *ProgCache) FuzzDir() string {
+ // TODO(bradfitz): figure out what to do here. For now just use the
+ // disk-based default.
+ return c.fuzzDirCache.FuzzDir()
+}
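
Seen from the other end of the pipe, a GOCACHEPROG helper is any program that speaks the
protocol above on its stdin/stdout. The following is a deliberately minimal sketch rather
than a usable cache: it advertises its commands, answers every get with a miss, and
discards put bodies, whereas a real helper must store each object and report its absolute
path in DiskPath:

	package main

	import (
		"encoding/base64"
		"encoding/json"
		"os"
	)

	// Local mirrors of the wire shapes above; only the fields this sketch uses.
	type request struct {
		ID       int64
		Command  string
		ActionID []byte
		ObjectID []byte
		BodySize int64
	}
	type response struct {
		ID            int64
		KnownCommands []string `json:",omitempty"`
		Miss          bool     `json:",omitempty"`
		DiskPath      string   `json:",omitempty"`
	}

	func main() {
		in := json.NewDecoder(os.Stdin)
		out := json.NewEncoder(os.Stdout)

		// First message (ID 0): declare which commands we understand.
		out.Encode(&response{KnownCommands: []string{"get", "put", "close"}})

		for {
			var req request
			if err := in.Decode(&req); err != nil {
				return // stdin closed: the go command has gone away
			}
			if req.Command == "put" && req.BodySize > 0 {
				// The body follows the request as a base64-encoded JSON string.
				var body string
				if err := in.Decode(&body); err != nil {
					return
				}
				if _, err := base64.StdEncoding.DecodeString(body); err != nil {
					return
				}
				// A real helper would write the decoded bytes to disk here,
				// keyed by req.ObjectID.
			}
			switch req.Command {
			case "close":
				out.Encode(&response{ID: req.ID})
				return
			case "get":
				out.Encode(&response{ID: req.ID, Miss: true})
			case "put":
				// DiskPath must be the absolute path of the stored object;
				// this placeholder would not satisfy a real build.
				out.Encode(&response{ID: req.ID, DiskPath: "/tmp/gocacheprog/placeholder"})
			}
		}
	}
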
diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go
new file mode 100644
index 0000000..8caa22a
--- /dev/null
+++ b/src/cmd/go/internal/cfg/cfg.go
@@ -0,0 +1,619 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cfg holds configuration shared by multiple parts
+// of the go command.
+package cfg
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/build"
+ "internal/buildcfg"
+ "internal/cfg"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+
+ "cmd/go/internal/fsys"
+)
+
+// Global build parameters (used during package load)
+var (
+ Goos = envOr("GOOS", build.Default.GOOS)
+ Goarch = envOr("GOARCH", build.Default.GOARCH)
+
+ ExeSuffix = exeSuffix()
+
+ // ModulesEnabled specifies whether the go command is running
+ // in module-aware mode (as opposed to GOPATH mode).
+ // It is equal to modload.Enabled, but not all packages can import modload.
+ ModulesEnabled bool
+)
+
+func exeSuffix() string {
+ if Goos == "windows" {
+ return ".exe"
+ }
+ return ""
+}
+
+// Configuration for tools installed to GOROOT/bin.
+// Normally these match runtime.GOOS and runtime.GOARCH,
+// but when testing a cross-compiled cmd/go they will
+// indicate the GOOS and GOARCH of the installed cmd/go
+// rather than the test binary.
+var (
+ installedGOOS string
+ installedGOARCH string
+)
+
+// ToolExeSuffix returns the suffix for executables installed
+// in build.ToolDir.
+func ToolExeSuffix() string {
+ if installedGOOS == "windows" {
+ return ".exe"
+ }
+ return ""
+}
+
+// These are general "build flags" used by build and other commands.
+var (
+ BuildA bool // -a flag
+ BuildBuildmode string // -buildmode flag
+ BuildBuildvcs = "auto" // -buildvcs flag: "true", "false", or "auto"
+ BuildContext = defaultContext()
+ BuildMod string // -mod flag
+ BuildModExplicit bool // whether -mod was set explicitly
+ BuildModReason string // reason -mod was set, if set by default
+ BuildLinkshared bool // -linkshared flag
+ BuildMSan bool // -msan flag
+ BuildASan bool // -asan flag
+ BuildCover bool // -cover flag
+ BuildCoverMode string // -covermode flag
+ BuildCoverPkg []string // -coverpkg flag
+ BuildN bool // -n flag
+ BuildO string // -o flag
+ BuildP = runtime.GOMAXPROCS(0) // -p flag
+ BuildPGO string // -pgo flag
+ BuildPkgdir string // -pkgdir flag
+ BuildRace bool // -race flag
+ BuildToolexec []string // -toolexec flag
+ BuildToolchainName string
+ BuildToolchainCompiler func() string
+ BuildToolchainLinker func() string
+ BuildTrimpath bool // -trimpath flag
+ BuildV bool // -v flag
+ BuildWork bool // -work flag
+ BuildX bool // -x flag
+
+ ModCacheRW bool // -modcacherw flag
+ ModFile string // -modfile flag
+
+ CmdName string // "build", "install", "list", "mod tidy", etc.
+
+ DebugActiongraph string // -debug-actiongraph flag (undocumented, unstable)
+ DebugTrace string // -debug-trace flag
+ DebugRuntimeTrace string // -debug-runtime-trace flag (undocumented, unstable)
+
+ // GoPathError is set when GOPATH is not set. It contains an
+ // explanation of why GOPATH is unset.
+ GoPathError string
+)
+
+func defaultContext() build.Context {
+ ctxt := build.Default
+
+ ctxt.JoinPath = filepath.Join // back door to say "do not use go command"
+
+ // Override defaults computed in go/build with defaults
+ // from go environment configuration file, if known.
+ ctxt.GOPATH = envOr("GOPATH", gopath(ctxt))
+ ctxt.GOOS = Goos
+ ctxt.GOARCH = Goarch
+
+ // Clear the GOEXPERIMENT-based tool tags, which we will recompute later.
+ var save []string
+ for _, tag := range ctxt.ToolTags {
+ if !strings.HasPrefix(tag, "goexperiment.") {
+ save = append(save, tag)
+ }
+ }
+ ctxt.ToolTags = save
+
+ // The go/build rule for whether cgo is enabled is:
+ // 1. If $CGO_ENABLED is set, respect it.
+ // 2. Otherwise, if this is a cross-compile, disable cgo.
+ // 3. Otherwise, use built-in default for GOOS/GOARCH.
+ // Recreate that logic here with the new GOOS/GOARCH setting.
+ if v := Getenv("CGO_ENABLED"); v == "0" || v == "1" {
+ ctxt.CgoEnabled = v[0] == '1'
+ } else if ctxt.GOOS != runtime.GOOS || ctxt.GOARCH != runtime.GOARCH {
+ ctxt.CgoEnabled = false
+ } else {
+ // Use built-in default cgo setting for GOOS/GOARCH.
+ // Note that ctxt.GOOS/GOARCH are derived from the preference list
+ // (1) environment, (2) go/env file, (3) runtime constants,
+ // while go/build.Default.GOOS/GOARCH are derived from the preference list
+ // (1) environment, (2) runtime constants.
+ //
+ // We know ctxt.GOOS/GOARCH == runtime.GOOS/GOARCH;
+ // no matter how that happened, go/build.Default will make the
+ // same decision (either the environment variables are set explicitly
+ // to match the runtime constants, or else they are unset, in which
+ // case go/build falls back to the runtime constants), so
+ // go/build.Default.GOOS/GOARCH == runtime.GOOS/GOARCH.
+ // So ctxt.CgoEnabled (== go/build.Default.CgoEnabled) is correct
+ // as is and can be left unmodified.
+ //
+ // All that said, starting in Go 1.20 we layer one more rule
+ // on top of the go/build decision: if CC is unset and
+ // the default C compiler we'd look for is not in the PATH,
+ // we automatically default cgo to off.
+ // This makes go builds work automatically on systems
+ // without a C compiler installed.
+ if ctxt.CgoEnabled {
+ if os.Getenv("CC") == "" {
+ cc := DefaultCC(ctxt.GOOS, ctxt.GOARCH)
+ if _, err := exec.LookPath(cc); err != nil {
+ ctxt.CgoEnabled = false
+ }
+ }
+ }
+ }
+
+ ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
+ return fsys.Open(path)
+ }
+ ctxt.ReadDir = fsys.ReadDir
+ ctxt.IsDir = func(path string) bool {
+ isDir, err := fsys.IsDir(path)
+ return err == nil && isDir
+ }
+
+ return ctxt
+}
+
+func init() {
+ SetGOROOT(Getenv("GOROOT"), false)
+ BuildToolchainCompiler = func() string { return "missing-compiler" }
+ BuildToolchainLinker = func() string { return "missing-linker" }
+}
+
+// SetGOROOT sets GOROOT and associated variables to the given values.
+//
+// If isTestGo is true, build.ToolDir is set based on the TESTGO_GOHOSTOS and
+// TESTGO_GOHOSTARCH environment variables instead of runtime.GOOS and
+// runtime.GOARCH.
+func SetGOROOT(goroot string, isTestGo bool) {
+ BuildContext.GOROOT = goroot
+
+ GOROOT = goroot
+ if goroot == "" {
+ GOROOTbin = ""
+ GOROOTpkg = ""
+ GOROOTsrc = ""
+ } else {
+ GOROOTbin = filepath.Join(goroot, "bin")
+ GOROOTpkg = filepath.Join(goroot, "pkg")
+ GOROOTsrc = filepath.Join(goroot, "src")
+ }
+ GOROOT_FINAL = findGOROOT_FINAL(goroot)
+
+ installedGOOS = runtime.GOOS
+ installedGOARCH = runtime.GOARCH
+ if isTestGo {
+ if testOS := os.Getenv("TESTGO_GOHOSTOS"); testOS != "" {
+ installedGOOS = testOS
+ }
+ if testArch := os.Getenv("TESTGO_GOHOSTARCH"); testArch != "" {
+ installedGOARCH = testArch
+ }
+ }
+
+ if runtime.Compiler != "gccgo" {
+ if goroot == "" {
+ build.ToolDir = ""
+ } else {
+ // Note that we must use the installed OS and arch here: the tool
+ // directory does not move based on environment variables, and even if we
+ // are testing a cross-compiled cmd/go all of the installed packages and
+ // tools would have been built using the native compiler and linker (and
+ // would spuriously appear stale if we used a cross-compiled compiler and
+ // linker).
+ //
+ // This matches the initialization of ToolDir in go/build, except for
+ // using ctxt.GOROOT and the installed GOOS and GOARCH rather than the
+ // GOROOT, GOOS, and GOARCH reported by the runtime package.
+ build.ToolDir = filepath.Join(GOROOTpkg, "tool", installedGOOS+"_"+installedGOARCH)
+ }
+ }
+}
+
+// Experiment configuration.
+var (
+ // RawGOEXPERIMENT is the GOEXPERIMENT value set by the user.
+ RawGOEXPERIMENT = envOr("GOEXPERIMENT", buildcfg.DefaultGOEXPERIMENT)
+ // CleanGOEXPERIMENT is the minimal GOEXPERIMENT value needed to reproduce the
+ // experiments enabled by RawGOEXPERIMENT.
+ CleanGOEXPERIMENT = RawGOEXPERIMENT
+
+ Experiment *buildcfg.ExperimentFlags
+ ExperimentErr error
+)
+
+func init() {
+ Experiment, ExperimentErr = buildcfg.ParseGOEXPERIMENT(Goos, Goarch, RawGOEXPERIMENT)
+ if ExperimentErr != nil {
+ return
+ }
+
+ // GOEXPERIMENT is valid, so convert it to canonical form.
+ CleanGOEXPERIMENT = Experiment.String()
+
+ // Add build tags based on the experiments in effect.
+ exps := Experiment.Enabled()
+ expTags := make([]string, 0, len(exps)+len(BuildContext.ToolTags))
+ for _, exp := range exps {
+ expTags = append(expTags, "goexperiment."+exp)
+ }
+ BuildContext.ToolTags = append(expTags, BuildContext.ToolTags...)
+}
+
+// An EnvVar is an environment variable Name=Value.
+type EnvVar struct {
+ Name string
+ Value string
+}
+
+// OrigEnv is the original environment of the program at startup.
+var OrigEnv []string
+
+// CmdEnv is the new environment for running go tool commands.
+// User binaries (during go test or go run) are run with OrigEnv,
+// not CmdEnv.
+var CmdEnv []EnvVar
+
+var envCache struct {
+ once sync.Once
+ m map[string]string
+}
+
+// EnvFile returns the name of the Go environment configuration file.
+func EnvFile() (string, error) {
+ if file := os.Getenv("GOENV"); file != "" {
+ if file == "off" {
+ return "", fmt.Errorf("GOENV=off")
+ }
+ return file, nil
+ }
+ dir, err := os.UserConfigDir()
+ if err != nil {
+ return "", err
+ }
+ if dir == "" {
+ return "", fmt.Errorf("missing user-config dir")
+ }
+ return filepath.Join(dir, "go/env"), nil
+}
+
+func initEnvCache() {
+ envCache.m = make(map[string]string)
+ if file, _ := EnvFile(); file != "" {
+ readEnvFile(file, "user")
+ }
+ goroot := findGOROOT(envCache.m["GOROOT"])
+ if goroot != "" {
+ readEnvFile(filepath.Join(goroot, "go.env"), "GOROOT")
+ }
+
+ // Save the goroot for func init calling SetGOROOT,
+ // and also overwrite anything that might have been in go.env.
+ // It makes no sense for GOROOT/go.env to specify
+ // a different GOROOT.
+ envCache.m["GOROOT"] = goroot
+}
+
+func readEnvFile(file string, source string) {
+ if file == "" {
+ return
+ }
+ data, err := os.ReadFile(file)
+ if err != nil {
+ return
+ }
+
+ for len(data) > 0 {
+ // Get next line.
+ line := data
+ i := bytes.IndexByte(data, '\n')
+ if i >= 0 {
+ line, data = line[:i], data[i+1:]
+ } else {
+ data = nil
+ }
+
+ i = bytes.IndexByte(line, '=')
+ if i < 0 || line[0] < 'A' || 'Z' < line[0] {
+ // Line is missing '=' (or is empty), is a comment, or is not a valid env name. Ignore it.
+ // This should not happen in the user file, since the file should be maintained almost
+ // exclusively by "go env -w", but better to silently ignore than to make
+ // the go command unusable just because somehow the env file has
+ // gotten corrupted.
+ // In the GOROOT/go.env file, we expect comments.
+ continue
+ }
+ key, val := line[:i], line[i+1:]
+
+ if source == "GOROOT" {
+ // In the GOROOT/go.env file, do not overwrite fields loaded from the user's go/env file.
+ if _, ok := envCache.m[string(key)]; ok {
+ continue
+ }
+ }
+ envCache.m[string(key)] = string(val)
+ }
+}
+
+// Getenv gets the value for the configuration key.
+// It consults the operating system environment
+// and then the go/env file.
+// If Getenv is called for a key that cannot be set
+// in the go/env file (for example GODEBUG), it panics.
+// This ensures that CanGetenv is accurate, so that
+// 'go env -w' stays in sync with what Getenv can retrieve.
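+//
+// For example, Getenv("GOPROXY") returns $GOPROXY if it is set in the process
+// environment and otherwise falls back to any GOPROXY line recorded in the
+// go/env file by 'go env -w'.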
+func Getenv(key string) string {
+ if !CanGetenv(key) {
+ switch key {
+ case "CGO_TEST_ALLOW", "CGO_TEST_DISALLOW", "CGO_test_ALLOW", "CGO_test_DISALLOW":
+ // used by internal/work/security_test.go; allow
+ default:
+ panic("internal error: invalid Getenv " + key)
+ }
+ }
+ val := os.Getenv(key)
+ if val != "" {
+ return val
+ }
+ envCache.once.Do(initEnvCache)
+ return envCache.m[key]
+}
+
+// CanGetenv reports whether key is a valid go/env configuration key.
+func CanGetenv(key string) bool {
+ envCache.once.Do(initEnvCache)
+ if _, ok := envCache.m[key]; ok {
+ // Assume anything in the user file or go.env file is valid.
+ return true
+ }
+ return strings.Contains(cfg.KnownEnv, "\t"+key+"\n")
+}
+
+var (
+ GOROOT string
+
+ // Either empty or produced by filepath.Join(GOROOT, …).
+ GOROOTbin string
+ GOROOTpkg string
+ GOROOTsrc string
+
+ GOROOT_FINAL string
+
+ GOBIN = Getenv("GOBIN")
+ GOMODCACHE = envOr("GOMODCACHE", gopathDir("pkg/mod"))
+
+ // Used in envcmd.MkEnv and build ID computations.
+ GOARM = envOr("GOARM", fmt.Sprint(buildcfg.GOARM))
+ GO386 = envOr("GO386", buildcfg.GO386)
+ GOAMD64 = envOr("GOAMD64", fmt.Sprintf("%s%d", "v", buildcfg.GOAMD64))
+ GOMIPS = envOr("GOMIPS", buildcfg.GOMIPS)
+ GOMIPS64 = envOr("GOMIPS64", buildcfg.GOMIPS64)
+ GOPPC64 = envOr("GOPPC64", fmt.Sprintf("%s%d", "power", buildcfg.GOPPC64))
+ GOWASM = envOr("GOWASM", fmt.Sprint(buildcfg.GOWASM))
+
+ GOPROXY = envOr("GOPROXY", "")
+ GOSUMDB = envOr("GOSUMDB", "")
+ GOPRIVATE = Getenv("GOPRIVATE")
+ GONOPROXY = envOr("GONOPROXY", GOPRIVATE)
+ GONOSUMDB = envOr("GONOSUMDB", GOPRIVATE)
+ GOINSECURE = Getenv("GOINSECURE")
+ GOVCS = Getenv("GOVCS")
+)
+
+var SumdbDir = gopathDir("pkg/sumdb")
+
+// GetArchEnv returns the name and setting of the
+// GOARCH-specific architecture environment variable.
+// If the current architecture has no GOARCH-specific variable,
+// GetArchEnv returns empty key and value.
+func GetArchEnv() (key, val string) {
+ switch Goarch {
+ case "arm":
+ return "GOARM", GOARM
+ case "386":
+ return "GO386", GO386
+ case "amd64":
+ return "GOAMD64", GOAMD64
+ case "mips", "mipsle":
+ return "GOMIPS", GOMIPS
+ case "mips64", "mips64le":
+ return "GOMIPS64", GOMIPS64
+ case "ppc64", "ppc64le":
+ return "GOPPC64", GOPPC64
+ case "wasm":
+ return "GOWASM", GOWASM
+ }
+ return "", ""
+}
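+
+// For example, when Goarch is "amd64" the pair is ("GOAMD64", GOAMD64), while
+// architectures not listed in the switch above yield ("", ""). Callers such as
+// envcmd.MkEnv append the pair only when the key is non-empty:
+//
+//	if key, val := GetArchEnv(); key != "" {
+//		env = append(env, EnvVar{Name: key, Value: val})
+//	}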
+
+// envOr returns Getenv(key) if set, or else def.
+func envOr(key, def string) string {
+ val := Getenv(key)
+ if val == "" {
+ val = def
+ }
+ return val
+}
+
+// There is a copy of findGOROOT, isSameDir, and isGOROOT in
+// x/tools/cmd/godoc/goroot.go.
+// Try to keep them in sync for now.
+
+// findGOROOT returns the GOROOT value, using either an explicitly
+// provided environment variable, a GOROOT that contains the current
+// os.Executable value, or else the GOROOT that the binary was built
+// with, as reported by runtime.GOROOT().
+//
+// There is a copy of this code in x/tools/cmd/godoc/goroot.go.
+func findGOROOT(env string) string {
+ if env == "" {
+ // Not using Getenv because findGOROOT is called
+ // to find the GOROOT/go.env file. initEnvCache
+ // has passed in the setting from the user go/env file.
+ env = os.Getenv("GOROOT")
+ }
+ if env != "" {
+ return filepath.Clean(env)
+ }
+ def := ""
+ if r := runtime.GOROOT(); r != "" {
+ def = filepath.Clean(r)
+ }
+ if runtime.Compiler == "gccgo" {
+ // gccgo has no real GOROOT, and it certainly doesn't
+ // depend on the executable's location.
+ return def
+ }
+
+ // canonical returns a directory path that represents
+ // the same directory as dir,
+ // preferring the spelling in def if the two are the same.
+ canonical := func(dir string) string {
+ if isSameDir(def, dir) {
+ return def
+ }
+ return dir
+ }
+
+ exe, err := os.Executable()
+ if err == nil {
+ exe, err = filepath.Abs(exe)
+ if err == nil {
+ // cmd/go may be installed in GOROOT/bin or GOROOT/bin/GOOS_GOARCH,
+ // depending on whether it was cross-compiled with a different
+ // GOHOSTOS (see https://go.dev/issue/62119). Try both.
+ if dir := filepath.Join(exe, "../.."); isGOROOT(dir) {
+ return canonical(dir)
+ }
+ if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) {
+ return canonical(dir)
+ }
+
+ // Depending on what was passed on the command line, it is possible
+ // that os.Executable is a symlink (like /usr/local/bin/go) referring
+ // to a binary installed in a real GOROOT elsewhere
+ // (like /usr/lib/go/bin/go).
+ // Try to find that GOROOT by resolving the symlinks.
+ exe, err = filepath.EvalSymlinks(exe)
+ if err == nil {
+ if dir := filepath.Join(exe, "../.."); isGOROOT(dir) {
+ return canonical(dir)
+ }
+ if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) {
+ return canonical(dir)
+ }
+ }
+ }
+ }
+ return def
+}
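+
+// For example (illustrative paths), all of the following resolve to the same
+// GOROOT of /usr/lib/go:
+//
+//	/usr/lib/go/bin/go               exe/../..    is a GOROOT
+//	/usr/lib/go/bin/linux_amd64/go   exe/../../.. is a GOROOT
+//	/usr/local/bin/go                a symlink to the first, found via EvalSymlinks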
+
+func findGOROOT_FINAL(goroot string) string {
+ // $GOROOT_FINAL is for use only during make.bash,
+ // so it is not settable using go/env; read it with os.Getenv instead.
+ def := goroot
+ if env := os.Getenv("GOROOT_FINAL"); env != "" {
+ def = filepath.Clean(env)
+ }
+ return def
+}
+
+// isSameDir reports whether dir1 and dir2 are the same directory.
+func isSameDir(dir1, dir2 string) bool {
+ if dir1 == dir2 {
+ return true
+ }
+ info1, err1 := os.Stat(dir1)
+ info2, err2 := os.Stat(dir2)
+ return err1 == nil && err2 == nil && os.SameFile(info1, info2)
+}
+
+// isGOROOT reports whether path looks like a GOROOT.
+//
+// It does this by looking for the path/pkg/tool directory,
+// which is necessary for useful operation of the cmd/go tool,
+// and is not typically present in a GOPATH.
+//
+// There is a copy of this code in x/tools/cmd/godoc/goroot.go.
+func isGOROOT(path string) bool {
+ stat, err := os.Stat(filepath.Join(path, "pkg", "tool"))
+ if err != nil {
+ return false
+ }
+ return stat.IsDir()
+}
+
+func gopathDir(rel string) string {
+ list := filepath.SplitList(BuildContext.GOPATH)
+ if len(list) == 0 || list[0] == "" {
+ return ""
+ }
+ return filepath.Join(list[0], rel)
+}
+
+func gopath(ctxt build.Context) string {
+ if len(ctxt.GOPATH) > 0 {
+ return ctxt.GOPATH
+ }
+ env := "HOME"
+ if runtime.GOOS == "windows" {
+ env = "USERPROFILE"
+ } else if runtime.GOOS == "plan9" {
+ env = "home"
+ }
+ if home := os.Getenv(env); home != "" {
+ def := filepath.Join(home, "go")
+ if filepath.Clean(def) == filepath.Clean(runtime.GOROOT()) {
+ GoPathError = "cannot set GOROOT as GOPATH"
+ }
+ return def
+ }
+ GoPathError = fmt.Sprintf("%s is not set", env)
+ return ""
+}
+
+// WithBuildXWriter returns a Context in which BuildX output is written
+// to given io.Writer.
+func WithBuildXWriter(ctx context.Context, xLog io.Writer) context.Context {
+ return context.WithValue(ctx, buildXContextKey{}, xLog)
+}
+
+type buildXContextKey struct{}
+
+// BuildXWriter returns nil if BuildX is false, or
+// the writer to which BuildX output should be written otherwise.
+func BuildXWriter(ctx context.Context) (io.Writer, bool) {
+ if !BuildX {
+ return nil, false
+ }
+ if v := ctx.Value(buildXContextKey{}); v != nil {
+ return v.(io.Writer), true
+ }
+ return os.Stderr, true
+}
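+
+// A minimal usage sketch (names here are illustrative): a caller that wants -x
+// output captured in a buffer instead of standard error can do
+//
+//	var buf bytes.Buffer
+//	ctx = WithBuildXWriter(ctx, &buf)
+//	if w, ok := BuildXWriter(ctx); ok {
+//		fmt.Fprintln(w, "mkdir -p $WORK/b001/")
+//	}
+//
+// BuildXWriter still reports false when the -x flag (BuildX) is not set.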
diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go
new file mode 100644
index 0000000..e011237
--- /dev/null
+++ b/src/cmd/go/internal/clean/clean.go
@@ -0,0 +1,428 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package clean implements the “go clean” command.
+package clean
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cache"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/str"
+ "cmd/go/internal/work"
+)
+
+var CmdClean = &base.Command{
+ UsageLine: "go clean [clean flags] [build flags] [packages]",
+ Short: "remove object files and cached files",
+ Long: `
+Clean removes object files from package source directories.
+The go command builds most objects in a temporary directory,
+so go clean is mainly concerned with object files left by other
+tools or by manual invocations of go build.
+
+If a package argument is given or the -i or -r flag is set,
+clean removes the following files from each of the
+source directories corresponding to the import paths:
+
+ _obj/ old object directory, left from Makefiles
+ _test/ old test directory, left from Makefiles
+ _testmain.go old gotest file, left from Makefiles
+ test.out old test log, left from Makefiles
+ build.out old test log, left from Makefiles
+ *.[568ao] object files, left from Makefiles
+
+ DIR(.exe) from go build
+ DIR.test(.exe) from go test -c
+ MAINFILE(.exe) from go build MAINFILE.go
+ *.so from SWIG
+
+In the list, DIR represents the final path element of the
+directory, and MAINFILE is the base name of any Go source
+file in the directory that is not included when building
+the package.
+
+The -i flag causes clean to remove the corresponding installed
+archive or binary (what 'go install' would create).
+
+The -n flag causes clean to print the remove commands it would execute,
+but not run them.
+
+The -r flag causes clean to be applied recursively to all the
+dependencies of the packages named by the import paths.
+
+The -x flag causes clean to print remove commands as it executes them.
+
+The -cache flag causes clean to remove the entire go build cache.
+
+The -testcache flag causes clean to expire all test results in the
+go build cache.
+
+The -modcache flag causes clean to remove the entire module
+download cache, including unpacked source code of versioned
+dependencies.
+
+The -fuzzcache flag causes clean to remove files stored in the Go build
+cache for fuzz testing. The fuzzing engine caches files that expand
+code coverage, so removing them may make fuzzing less effective until
+new inputs are found that provide the same coverage. These files are
+distinct from those stored in the testdata directory; clean does not remove
+those files.
+
+For more about build flags, see 'go help build'.
+
+For more about specifying packages, see 'go help packages'.
+ `,
+}
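+
+// Typical invocations (illustrative):
+//
+//	go clean                remove object files and executables left in the current package's directory
+//	go clean -i -r ./...    also remove installed targets, recursively through dependencies
+//	go clean -cache         remove the entire build cache
+//	go clean -testcache     expire all cached test results
+//	go clean -modcache      remove the module download cache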
+
+var (
+ cleanI bool // clean -i flag
+ cleanR bool // clean -r flag
+ cleanCache bool // clean -cache flag
+ cleanFuzzcache bool // clean -fuzzcache flag
+ cleanModcache bool // clean -modcache flag
+ cleanTestcache bool // clean -testcache flag
+)
+
+func init() {
+ // break init cycle
+ CmdClean.Run = runClean
+
+ CmdClean.Flag.BoolVar(&cleanI, "i", false, "")
+ CmdClean.Flag.BoolVar(&cleanR, "r", false, "")
+ CmdClean.Flag.BoolVar(&cleanCache, "cache", false, "")
+ CmdClean.Flag.BoolVar(&cleanFuzzcache, "fuzzcache", false, "")
+ CmdClean.Flag.BoolVar(&cleanModcache, "modcache", false, "")
+ CmdClean.Flag.BoolVar(&cleanTestcache, "testcache", false, "")
+
+ // -n and -x are important enough to be
+ // mentioned explicitly in the docs but they
+ // are part of the build flags.
+
+ work.AddBuildFlags(CmdClean, work.DefaultBuildFlags)
+}
+
+func runClean(ctx context.Context, cmd *base.Command, args []string) {
+ if len(args) > 0 {
+ cacheFlag := ""
+ switch {
+ case cleanCache:
+ cacheFlag = "-cache"
+ case cleanTestcache:
+ cacheFlag = "-testcache"
+ case cleanFuzzcache:
+ cacheFlag = "-fuzzcache"
+ case cleanModcache:
+ cacheFlag = "-modcache"
+ }
+ if cacheFlag != "" {
+ base.Fatalf("go: clean %s cannot be used with package arguments", cacheFlag)
+ }
+ }
+
+ // golang.org/issue/29925: only load packages before cleaning if
+ // either the flags and arguments explicitly imply a package,
+ // or no other target (such as a cache) was requested to be cleaned.
+ cleanPkg := len(args) > 0 || cleanI || cleanR
+ if (!modload.Enabled() || modload.HasModRoot()) &&
+ !cleanCache && !cleanModcache && !cleanTestcache && !cleanFuzzcache {
+ cleanPkg = true
+ }
+
+ if cleanPkg {
+ for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) {
+ clean(pkg)
+ }
+ }
+
+ var b work.Builder
+ b.Print = fmt.Print
+
+ if cleanCache {
+ dir := cache.DefaultDir()
+ if dir != "off" {
+ // Remove the cache subdirectories but not the top cache directory.
+ // The top cache directory may have been created with special permissions
+ // and not something that we want to remove. Also, we'd like to preserve
+ // the access log for future analysis, even if the cache is cleared.
+ subdirs, _ := filepath.Glob(filepath.Join(str.QuoteGlob(dir), "[0-9a-f][0-9a-f]"))
+ printedErrors := false
+ if len(subdirs) > 0 {
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "rm -r %s", strings.Join(subdirs, " "))
+ }
+ if !cfg.BuildN {
+ for _, d := range subdirs {
+ // Only print the first error - there may be many.
+ // This also mimics what os.RemoveAll(dir) would do.
+ if err := os.RemoveAll(d); err != nil && !printedErrors {
+ printedErrors = true
+ base.Error(err)
+ }
+ }
+ }
+ }
+
+ logFile := filepath.Join(dir, "log.txt")
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "rm -f %s", logFile)
+ }
+ if !cfg.BuildN {
+ if err := os.RemoveAll(logFile); err != nil && !printedErrors {
+ printedErrors = true
+ base.Error(err)
+ }
+ }
+ }
+ }
+
+ if cleanTestcache && !cleanCache {
+ // Instead of walking through the entire cache looking for test results,
+ // we write a file to the cache indicating that all test results from before
+ // right now are to be ignored.
+ dir := cache.DefaultDir()
+ if dir != "off" {
+ f, err := lockedfile.Edit(filepath.Join(dir, "testexpire.txt"))
+ if err == nil {
+ now := time.Now().UnixNano()
+ buf, _ := io.ReadAll(f)
+ prev, _ := strconv.ParseInt(strings.TrimSpace(string(buf)), 10, 64)
+ if now > prev {
+ if err = f.Truncate(0); err == nil {
+ if _, err = f.Seek(0, 0); err == nil {
+ _, err = fmt.Fprintf(f, "%d\n", now)
+ }
+ }
+ }
+ if closeErr := f.Close(); err == nil {
+ err = closeErr
+ }
+ }
+ if err != nil {
+ if _, statErr := os.Stat(dir); !os.IsNotExist(statErr) {
+ base.Error(err)
+ }
+ }
+ }
+ }
+
+ if cleanModcache {
+ if cfg.GOMODCACHE == "" {
+ base.Fatalf("go: cannot clean -modcache without a module cache")
+ }
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "rm -rf %s", cfg.GOMODCACHE)
+ }
+ if !cfg.BuildN {
+ if err := modfetch.RemoveAll(cfg.GOMODCACHE); err != nil {
+ base.Error(err)
+ }
+ }
+ }
+
+ if cleanFuzzcache {
+ fuzzDir := cache.Default().FuzzDir()
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "rm -rf %s", fuzzDir)
+ }
+ if !cfg.BuildN {
+ if err := os.RemoveAll(fuzzDir); err != nil {
+ base.Error(err)
+ }
+ }
+ }
+}
+
+var cleaned = map[*load.Package]bool{}
+
+// TODO: These are dregs left by Makefile-based builds.
+// Eventually, can stop deleting these.
+var cleanDir = map[string]bool{
+ "_test": true,
+ "_obj": true,
+}
+
+var cleanFile = map[string]bool{
+ "_testmain.go": true,
+ "test.out": true,
+ "build.out": true,
+ "a.out": true,
+}
+
+var cleanExt = map[string]bool{
+ ".5": true,
+ ".6": true,
+ ".8": true,
+ ".a": true,
+ ".o": true,
+ ".so": true,
+}
+
+func clean(p *load.Package) {
+ if cleaned[p] {
+ return
+ }
+ cleaned[p] = true
+
+ if p.Dir == "" {
+ base.Errorf("%v", p.Error)
+ return
+ }
+ dirs, err := os.ReadDir(p.Dir)
+ if err != nil {
+ base.Errorf("go: %s: %v", p.Dir, err)
+ return
+ }
+
+ var b work.Builder
+ b.Print = fmt.Print
+
+ packageFile := map[string]bool{}
+ if p.Name != "main" {
+ // Record which files are not in package main.
+ // The others are.
+ keep := func(list []string) {
+ for _, f := range list {
+ packageFile[f] = true
+ }
+ }
+ keep(p.GoFiles)
+ keep(p.CgoFiles)
+ keep(p.TestGoFiles)
+ keep(p.XTestGoFiles)
+ }
+
+ _, elem := filepath.Split(p.Dir)
+ var allRemove []string
+
+ // Remove dir-named executable only if this is package main.
+ if p.Name == "main" {
+ allRemove = append(allRemove,
+ elem,
+ elem+".exe",
+ p.DefaultExecName(),
+ p.DefaultExecName()+".exe",
+ )
+ }
+
+ // Remove package test executables.
+ allRemove = append(allRemove,
+ elem+".test",
+ elem+".test.exe",
+ p.DefaultExecName()+".test",
+ p.DefaultExecName()+".test.exe",
+ )
+
+ // Remove a potential executable and test executable for each .go file in the
+ // directory that is not part of the directory's package.
+ for _, dir := range dirs {
+ name := dir.Name()
+ if packageFile[name] {
+ continue
+ }
+
+ if dir.IsDir() {
+ continue
+ }
+
+ if base, found := strings.CutSuffix(name, "_test.go"); found {
+ allRemove = append(allRemove, base+".test", base+".test.exe")
+ }
+
+ if base, found := strings.CutSuffix(name, ".go"); found {
+ // TODO(adg,rsc): check that this .go file is actually
+ // in "package main", and therefore capable of building
+ // to an executable file.
+ allRemove = append(allRemove, base, base+".exe")
+ }
+ }
+
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd(p.Dir, "rm -f %s", strings.Join(allRemove, " "))
+ }
+
+ toRemove := map[string]bool{}
+ for _, name := range allRemove {
+ toRemove[name] = true
+ }
+ for _, dir := range dirs {
+ name := dir.Name()
+ if dir.IsDir() {
+ // TODO: Remove once Makefiles are forgotten.
+ if cleanDir[name] {
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd(p.Dir, "rm -r %s", name)
+ if cfg.BuildN {
+ continue
+ }
+ }
+ if err := os.RemoveAll(filepath.Join(p.Dir, name)); err != nil {
+ base.Error(err)
+ }
+ }
+ continue
+ }
+
+ if cfg.BuildN {
+ continue
+ }
+
+ if cleanFile[name] || cleanExt[filepath.Ext(name)] || toRemove[name] {
+ removeFile(filepath.Join(p.Dir, name))
+ }
+ }
+
+ if cleanI && p.Target != "" {
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "rm -f %s", p.Target)
+ }
+ if !cfg.BuildN {
+ removeFile(p.Target)
+ }
+ }
+
+ if cleanR {
+ for _, p1 := range p.Internal.Imports {
+ clean(p1)
+ }
+ }
+}
+
+// removeFile tries to remove file f. If an error other than
+// "file does not exist" occurs, it reports the error.
+func removeFile(f string) {
+ err := os.Remove(f)
+ if err == nil || os.IsNotExist(err) {
+ return
+ }
+ // Windows does not allow deletion of a binary file while it is executing.
+ if runtime.GOOS == "windows" {
+ // Remove lingering ~ file from last attempt.
+ if _, err2 := os.Stat(f + "~"); err2 == nil {
+ os.Remove(f + "~")
+ }
+ // Try to move it out of the way. If the move fails,
+ // which is likely, we'll try again the
+ // next time we do an install of this binary.
+ if err2 := os.Rename(f, f+"~"); err2 == nil {
+ os.Remove(f + "~")
+ return
+ }
+ }
+ base.Error(err)
+}
diff --git a/src/cmd/go/internal/cmdflag/flag.go b/src/cmd/go/internal/cmdflag/flag.go
new file mode 100644
index 0000000..86e33ea
--- /dev/null
+++ b/src/cmd/go/internal/cmdflag/flag.go
@@ -0,0 +1,122 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cmdflag handles flag processing common to several go tools.
+package cmdflag
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "strings"
+)
+
+// The flag handling part of go commands such as test is large and distracting.
+// We can't use the standard flag package because some of the flags from
+// our command line are for us, and some are for the binary we're running,
+// and some are for both.
+
+// ErrFlagTerminator indicates the distinguished token "--", which causes the
+// flag package to treat all subsequent arguments as non-flags.
+var ErrFlagTerminator = errors.New("flag terminator")
+
+// A FlagNotDefinedError indicates a flag-like argument that does not correspond
+// to any registered flag in a FlagSet.
+type FlagNotDefinedError struct {
+ RawArg string // the original argument, like --foo or -foo=value
+ Name string
+ HasValue bool // is this the -foo=value or --foo=value form?
+ Value string // only provided if HasValue is true
+}
+
+func (e FlagNotDefinedError) Error() string {
+ return fmt.Sprintf("flag provided but not defined: -%s", e.Name)
+}
+
+// A NonFlagError indicates an argument that is not a syntactically-valid flag.
+type NonFlagError struct {
+ RawArg string
+}
+
+func (e NonFlagError) Error() string {
+ return fmt.Sprintf("not a flag: %q", e.RawArg)
+}
+
+// ParseOne sees if args[0] is present in the given flag set and if so,
+// sets its value and returns the flag along with the remaining (unused) arguments.
+//
+// ParseOne always returns either a non-nil Flag or a non-nil error,
+// and always consumes at least one argument (even on error).
+//
+// Unlike (*flag.FlagSet).Parse, ParseOne does not log its own errors.
+func ParseOne(fs *flag.FlagSet, args []string) (f *flag.Flag, remainingArgs []string, err error) {
+ // This function is loosely derived from (*flag.FlagSet).parseOne.
+
+ raw, args := args[0], args[1:]
+ arg := raw
+ if strings.HasPrefix(arg, "--") {
+ if arg == "--" {
+ return nil, args, ErrFlagTerminator
+ }
+ arg = arg[1:] // reduce two minuses to one
+ }
+
+ switch arg {
+ case "-?", "-h", "-help":
+ return nil, args, flag.ErrHelp
+ }
+ if len(arg) < 2 || arg[0] != '-' || arg[1] == '-' || arg[1] == '=' {
+ return nil, args, NonFlagError{RawArg: raw}
+ }
+
+ name, value, hasValue := strings.Cut(arg[1:], "=")
+
+ f = fs.Lookup(name)
+ if f == nil {
+ return nil, args, FlagNotDefinedError{
+ RawArg: raw,
+ Name: name,
+ HasValue: hasValue,
+ Value: value,
+ }
+ }
+
+ // Use fs.Set instead of f.Value.Set below so that any subsequent call to
+ // fs.Visit will correctly visit the flags that have been set.
+
+ failf := func(format string, a ...any) (*flag.Flag, []string, error) {
+ return f, args, fmt.Errorf(format, a...)
+ }
+
+ if fv, ok := f.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
+ if hasValue {
+ if err := fs.Set(name, value); err != nil {
+ return failf("invalid boolean value %q for -%s: %v", value, name, err)
+ }
+ } else {
+ if err := fs.Set(name, "true"); err != nil {
+ return failf("invalid boolean flag %s: %v", name, err)
+ }
+ }
+ } else {
+ // It must have a value, which might be the next argument.
+ if !hasValue && len(args) > 0 {
+ // value is the next arg
+ hasValue = true
+ value, args = args[0], args[1:]
+ }
+ if !hasValue {
+ return failf("flag needs an argument: -%s", name)
+ }
+ if err := fs.Set(name, value); err != nil {
+ return failf("invalid value %q for flag -%s: %v", value, name, err)
+ }
+ }
+
+ return f, args, nil
+}
+
+type boolFlag interface {
+ IsBoolFlag() bool
+}
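+
+// A minimal usage sketch (flag names and arguments are illustrative):
+//
+//	fs := flag.NewFlagSet("example", flag.ContinueOnError)
+//	verbose := fs.Bool("v", false, "")
+//	count := fs.Int("count", 1, "")
+//	args := []string{"-v", "-count=2", "./pkg"}
+//	for len(args) > 0 {
+//		f, rest, err := ParseOne(fs, args)
+//		if err != nil {
+//			break // NonFlagError for "./pkg"; ErrFlagTerminator for "--"
+//		}
+//		_ = f // the flag that was just set
+//		args = rest
+//	}
+//	// Now *verbose == true and *count == 2.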
diff --git a/src/cmd/go/internal/doc/doc.go b/src/cmd/go/internal/doc/doc.go
new file mode 100644
index 0000000..3b6cd94
--- /dev/null
+++ b/src/cmd/go/internal/doc/doc.go
@@ -0,0 +1,134 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package doc implements the “go doc” command.
+package doc
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "context"
+)
+
+var CmdDoc = &base.Command{
+ Run: runDoc,
+ UsageLine: "go doc [doc flags] [package|[package.]symbol[.methodOrField]]",
+ CustomFlags: true,
+ Short: "show documentation for package or symbol",
+ Long: `
+Doc prints the documentation comments associated with the item identified by its
+arguments (a package, const, func, type, var, method, or struct field)
+followed by a one-line summary of each of the first-level items "under"
+that item (package-level declarations for a package, methods for a type,
+etc.).
+
+Doc accepts zero, one, or two arguments.
+
+Given no arguments, that is, when run as
+
+ go doc
+
+it prints the package documentation for the package in the current directory.
+If the package is a command (package main), the exported symbols of the package
+are elided from the presentation unless the -cmd flag is provided.
+
+When run with one argument, the argument is treated as a Go-syntax-like
+representation of the item to be documented. What the argument selects depends
+on what is installed in GOROOT and GOPATH, as well as the form of the argument,
+which is schematically one of these:
+
+ go doc <pkg>
+ go doc <sym>[.<methodOrField>]
+ go doc [<pkg>.]<sym>[.<methodOrField>]
+ go doc [<pkg>.][<sym>.]<methodOrField>
+
+The first item in this list matched by the argument is the one whose documentation
+is printed. (See the examples below.) However, if the argument starts with a capital
+letter it is assumed to identify a symbol or method in the current directory.
+
+For packages, the order of scanning is determined lexically in breadth-first order.
+That is, the package presented is the one that matches the search and is nearest
+the root and lexically first at its level of the hierarchy. The GOROOT tree is
+always scanned in its entirety before GOPATH.
+
+If there is no package specified or matched, the package in the current
+directory is selected, so "go doc Foo" shows the documentation for symbol Foo in
+the current package.
+
+The package path must be either a qualified path or a proper suffix of a
+path. The go tool's usual package mechanism does not apply: package path
+elements like . and ... are not implemented by go doc.
+
+When run with two arguments, the first is a package path (full path or suffix),
+and the second is a symbol, or symbol with method or struct field:
+
+ go doc <pkg> <sym>[.<methodOrField>]
+
+In all forms, when matching symbols, lower-case letters in the argument match
+either case but upper-case letters match exactly. This means that there may be
+multiple matches of a lower-case argument in a package if different symbols have
+different cases. If this occurs, documentation for all matches is printed.
+
+Examples:
+ go doc
+ Show documentation for current package.
+ go doc Foo
+ Show documentation for Foo in the current package.
+ (Foo starts with a capital letter so it cannot match
+ a package path.)
+ go doc encoding/json
+ Show documentation for the encoding/json package.
+ go doc json
+ Shorthand for encoding/json.
+ go doc json.Number (or go doc json.number)
+ Show documentation and method summary for json.Number.
+ go doc json.Number.Int64 (or go doc json.number.int64)
+ Show documentation for json.Number's Int64 method.
+ go doc cmd/doc
+ Show package docs for the doc command.
+ go doc -cmd cmd/doc
+ Show package docs and exported symbols within the doc command.
+ go doc template.new
+ Show documentation for html/template's New function.
+ (html/template is lexically before text/template)
+ go doc text/template.new # One argument
+ Show documentation for text/template's New function.
+ go doc text/template new # Two arguments
+ Show documentation for text/template's New function.
+
+ At least in the current tree, these invocations all print the
+ documentation for json.Decoder's Decode method:
+
+ go doc json.Decoder.Decode
+ go doc json.decoder.decode
+ go doc json.decode
+ cd go/src/encoding/json; go doc decode
+
+Flags:
+ -all
+ Show all the documentation for the package.
+ -c
+ Respect case when matching symbols.
+ -cmd
+ Treat a command (package main) like a regular package.
+ Otherwise package main's exported symbols are hidden
+ when showing the package's top-level documentation.
+ -short
+ One-line representation for each symbol.
+ -src
+ Show the full source code for the symbol. This will
+ display the full Go source of its declaration and
+ definition, such as a function definition (including
+ the body), type declaration or enclosing const
+ block. The output may therefore include unexported
+ details.
+ -u
+ Show documentation for unexported as well as exported
+ symbols, methods, and fields.
+`,
+}
+
+func runDoc(ctx context.Context, cmd *base.Command, args []string) {
+ base.Run(cfg.BuildToolexec, base.Tool("doc"), args)
+}
diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go
new file mode 100644
index 0000000..c7c2e83
--- /dev/null
+++ b/src/cmd/go/internal/envcmd/env.go
@@ -0,0 +1,691 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package envcmd implements the “go env” command.
+package envcmd
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "go/build"
+ "internal/buildcfg"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cache"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/load"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/work"
+ "cmd/internal/quoted"
+)
+
+var CmdEnv = &base.Command{
+ UsageLine: "go env [-json] [-u] [-w] [var ...]",
+ Short: "print Go environment information",
+ Long: `
+Env prints Go environment information.
+
+By default env prints information as a shell script
+(on Windows, a batch file). If one or more variable
+names is given as arguments, env prints the value of
+each named variable on its own line.
+
+The -json flag prints the environment in JSON format
+instead of as a shell script.
+
+The -u flag requires one or more arguments and unsets
+the default setting for the named environment variables,
+if one has been set with 'go env -w'.
+
+The -w flag requires one or more arguments of the
+form NAME=VALUE and changes the default settings
+of the named environment variables to the given values.
+
+For more about environment variables, see 'go help environment'.
+ `,
+}
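+
+// Typical invocations (values are illustrative):
+//
+//	go env                      print the full environment as a script
+//	go env GOOS GOARCH          print only the named variables
+//	go env -json                print the environment as JSON
+//	go env -w GOBIN=/tmp/bin    record a default in the go/env file
+//	go env -u GOBIN             undo a previous 'go env -w'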
+
+func init() {
+ CmdEnv.Run = runEnv // break init cycle
+ base.AddChdirFlag(&CmdEnv.Flag)
+ base.AddBuildFlagsNX(&CmdEnv.Flag)
+}
+
+var (
+ envJson = CmdEnv.Flag.Bool("json", false, "")
+ envU = CmdEnv.Flag.Bool("u", false, "")
+ envW = CmdEnv.Flag.Bool("w", false, "")
+)
+
+func MkEnv() []cfg.EnvVar {
+ envFile, _ := cfg.EnvFile()
+ env := []cfg.EnvVar{
+ {Name: "GO111MODULE", Value: cfg.Getenv("GO111MODULE")},
+ {Name: "GOARCH", Value: cfg.Goarch},
+ {Name: "GOBIN", Value: cfg.GOBIN},
+ {Name: "GOCACHE", Value: cache.DefaultDir()},
+ {Name: "GOENV", Value: envFile},
+ {Name: "GOEXE", Value: cfg.ExeSuffix},
+
+ // List the raw value of GOEXPERIMENT, not the cleaned one.
+ // The set of default experiments may change from one release
+ // to the next, so a GOEXPERIMENT setting that is redundant
+ // with the current toolchain might actually be relevant with
+ // a different version (for example, when bisecting a regression).
+ {Name: "GOEXPERIMENT", Value: cfg.RawGOEXPERIMENT},
+
+ {Name: "GOFLAGS", Value: cfg.Getenv("GOFLAGS")},
+ {Name: "GOHOSTARCH", Value: runtime.GOARCH},
+ {Name: "GOHOSTOS", Value: runtime.GOOS},
+ {Name: "GOINSECURE", Value: cfg.GOINSECURE},
+ {Name: "GOMODCACHE", Value: cfg.GOMODCACHE},
+ {Name: "GONOPROXY", Value: cfg.GONOPROXY},
+ {Name: "GONOSUMDB", Value: cfg.GONOSUMDB},
+ {Name: "GOOS", Value: cfg.Goos},
+ {Name: "GOPATH", Value: cfg.BuildContext.GOPATH},
+ {Name: "GOPRIVATE", Value: cfg.GOPRIVATE},
+ {Name: "GOPROXY", Value: cfg.GOPROXY},
+ {Name: "GOROOT", Value: cfg.GOROOT},
+ {Name: "GOSUMDB", Value: cfg.GOSUMDB},
+ {Name: "GOTMPDIR", Value: cfg.Getenv("GOTMPDIR")},
+ {Name: "GOTOOLCHAIN", Value: cfg.Getenv("GOTOOLCHAIN")},
+ {Name: "GOTOOLDIR", Value: build.ToolDir},
+ {Name: "GOVCS", Value: cfg.GOVCS},
+ {Name: "GOVERSION", Value: runtime.Version()},
+ }
+
+ if work.GccgoBin != "" {
+ env = append(env, cfg.EnvVar{Name: "GCCGO", Value: work.GccgoBin})
+ } else {
+ env = append(env, cfg.EnvVar{Name: "GCCGO", Value: work.GccgoName})
+ }
+
+ key, val := cfg.GetArchEnv()
+ if key != "" {
+ env = append(env, cfg.EnvVar{Name: key, Value: val})
+ }
+
+ cc := cfg.Getenv("CC")
+ if cc == "" {
+ cc = cfg.DefaultCC(cfg.Goos, cfg.Goarch)
+ }
+ cxx := cfg.Getenv("CXX")
+ if cxx == "" {
+ cxx = cfg.DefaultCXX(cfg.Goos, cfg.Goarch)
+ }
+ env = append(env, cfg.EnvVar{Name: "AR", Value: envOr("AR", "ar")})
+ env = append(env, cfg.EnvVar{Name: "CC", Value: cc})
+ env = append(env, cfg.EnvVar{Name: "CXX", Value: cxx})
+
+ if cfg.BuildContext.CgoEnabled {
+ env = append(env, cfg.EnvVar{Name: "CGO_ENABLED", Value: "1"})
+ } else {
+ env = append(env, cfg.EnvVar{Name: "CGO_ENABLED", Value: "0"})
+ }
+
+ return env
+}
+
+func envOr(name, def string) string {
+ val := cfg.Getenv(name)
+ if val != "" {
+ return val
+ }
+ return def
+}
+
+func findEnv(env []cfg.EnvVar, name string) string {
+ for _, e := range env {
+ if e.Name == name {
+ return e.Value
+ }
+ }
+ if cfg.CanGetenv(name) {
+ return cfg.Getenv(name)
+ }
+ return ""
+}
+
+// ExtraEnvVars returns environment variables that should not leak into child processes.
+func ExtraEnvVars() []cfg.EnvVar {
+ gomod := ""
+ modload.Init()
+ if modload.HasModRoot() {
+ gomod = modload.ModFilePath()
+ } else if modload.Enabled() {
+ gomod = os.DevNull
+ }
+ modload.InitWorkfile()
+ gowork := modload.WorkFilePath()
+ // As a special case, if the user explicitly set GOWORK=off, report that in GOWORK.
+ if cfg.Getenv("GOWORK") == "off" {
+ gowork = "off"
+ }
+ return []cfg.EnvVar{
+ {Name: "GOMOD", Value: gomod},
+ {Name: "GOWORK", Value: gowork},
+ }
+}
+
+// ExtraEnvVarsCostly returns environment variables that should not leak into child processes
+// but are costly to evaluate.
+func ExtraEnvVarsCostly() []cfg.EnvVar {
+ b := work.NewBuilder("")
+ defer func() {
+ if err := b.Close(); err != nil {
+ base.Fatal(err)
+ }
+ }()
+
+ cppflags, cflags, cxxflags, fflags, ldflags, err := b.CFlags(&load.Package{})
+ if err != nil {
+ // Should not happen - b.CFlags was given an empty package.
+ fmt.Fprintf(os.Stderr, "go: invalid cflags: %v\n", err)
+ return nil
+ }
+ cmd := b.GccCmd(".", "")
+
+ join := func(s []string) string {
+ q, err := quoted.Join(s)
+ if err != nil {
+ return strings.Join(s, " ")
+ }
+ return q
+ }
+
+ return []cfg.EnvVar{
+ // Note: Update the switch in runEnv below when adding to this list.
+ {Name: "CGO_CFLAGS", Value: join(cflags)},
+ {Name: "CGO_CPPFLAGS", Value: join(cppflags)},
+ {Name: "CGO_CXXFLAGS", Value: join(cxxflags)},
+ {Name: "CGO_FFLAGS", Value: join(fflags)},
+ {Name: "CGO_LDFLAGS", Value: join(ldflags)},
+ {Name: "PKG_CONFIG", Value: b.PkgconfigCmd()},
+ {Name: "GOGCCFLAGS", Value: join(cmd[3:])},
+ }
+}
+
+// argKey returns the KEY part of the arg KEY=VAL, or else arg itself.
+func argKey(arg string) string {
+ i := strings.Index(arg, "=")
+ if i < 0 {
+ return arg
+ }
+ return arg[:i]
+}
+
+func runEnv(ctx context.Context, cmd *base.Command, args []string) {
+ if *envJson && *envU {
+ base.Fatalf("go: cannot use -json with -u")
+ }
+ if *envJson && *envW {
+ base.Fatalf("go: cannot use -json with -w")
+ }
+ if *envU && *envW {
+ base.Fatalf("go: cannot use -u with -w")
+ }
+
+ // Handle 'go env -w' and 'go env -u' before calling buildcfg.Check,
+ // so they can be used to recover from an invalid configuration.
+ if *envW {
+ runEnvW(args)
+ return
+ }
+
+ if *envU {
+ runEnvU(args)
+ return
+ }
+
+ buildcfg.Check()
+ if cfg.ExperimentErr != nil {
+ base.Fatal(cfg.ExperimentErr)
+ }
+
+ for _, arg := range args {
+ if strings.Contains(arg, "=") {
+ base.Fatalf("go: invalid variable name %q (use -w to set variable)", arg)
+ }
+ }
+
+ env := cfg.CmdEnv
+ env = append(env, ExtraEnvVars()...)
+
+ if err := fsys.Init(base.Cwd()); err != nil {
+ base.Fatal(err)
+ }
+
+ // Do we need to call ExtraEnvVarsCostly, which is a bit expensive?
+ needCostly := false
+ if len(args) == 0 {
+ // We're listing all environment variables ("go env"),
+ // including the expensive ones.
+ needCostly = true
+ } else {
+ needCostly = false
+ checkCostly:
+ for _, arg := range args {
+ switch argKey(arg) {
+ case "CGO_CFLAGS",
+ "CGO_CPPFLAGS",
+ "CGO_CXXFLAGS",
+ "CGO_FFLAGS",
+ "CGO_LDFLAGS",
+ "PKG_CONFIG",
+ "GOGCCFLAGS":
+ needCostly = true
+ break checkCostly
+ }
+ }
+ }
+ if needCostly {
+ work.BuildInit()
+ env = append(env, ExtraEnvVarsCostly()...)
+ }
+
+ if len(args) > 0 {
+ if *envJson {
+ var es []cfg.EnvVar
+ for _, name := range args {
+ e := cfg.EnvVar{Name: name, Value: findEnv(env, name)}
+ es = append(es, e)
+ }
+ printEnvAsJSON(es)
+ } else {
+ for _, name := range args {
+ fmt.Printf("%s\n", findEnv(env, name))
+ }
+ }
+ return
+ }
+
+ if *envJson {
+ printEnvAsJSON(env)
+ return
+ }
+
+ PrintEnv(os.Stdout, env)
+}
+
+func runEnvW(args []string) {
+ // Process and sanity-check command line.
+ if len(args) == 0 {
+ base.Fatalf("go: no KEY=VALUE arguments given")
+ }
+ osEnv := make(map[string]string)
+ for _, e := range cfg.OrigEnv {
+ if i := strings.Index(e, "="); i >= 0 {
+ osEnv[e[:i]] = e[i+1:]
+ }
+ }
+ add := make(map[string]string)
+ for _, arg := range args {
+ key, val, found := strings.Cut(arg, "=")
+ if !found {
+ base.Fatalf("go: arguments must be KEY=VALUE: invalid argument: %s", arg)
+ }
+ if err := checkEnvWrite(key, val); err != nil {
+ base.Fatal(err)
+ }
+ if _, ok := add[key]; ok {
+ base.Fatalf("go: multiple values for key: %s", key)
+ }
+ add[key] = val
+ if osVal := osEnv[key]; osVal != "" && osVal != val {
+ fmt.Fprintf(os.Stderr, "warning: go env -w %s=... does not override conflicting OS environment variable\n", key)
+ }
+ }
+
+ if err := checkBuildConfig(add, nil); err != nil {
+ base.Fatal(err)
+ }
+
+ gotmp, okGOTMP := add["GOTMPDIR"]
+ if okGOTMP {
+ if !filepath.IsAbs(gotmp) && gotmp != "" {
+ base.Fatalf("go: GOTMPDIR must be an absolute path")
+ }
+ }
+
+ updateEnvFile(add, nil)
+}
+
+func runEnvU(args []string) {
+ // Process and sanity-check command line.
+ if len(args) == 0 {
+ base.Fatalf("go: 'go env -u' requires an argument")
+ }
+ del := make(map[string]bool)
+ for _, arg := range args {
+ if err := checkEnvWrite(arg, ""); err != nil {
+ base.Fatal(err)
+ }
+ del[arg] = true
+ }
+
+ if err := checkBuildConfig(nil, del); err != nil {
+ base.Fatal(err)
+ }
+
+ updateEnvFile(nil, del)
+}
+
+// checkBuildConfig checks whether the build configuration is valid
+// after the specified configuration environment changes are applied.
+func checkBuildConfig(add map[string]string, del map[string]bool) error {
+ // get returns the value for key after applying add and del and
+ // reports whether it changed. cur should be the current value
+ // (i.e., before applying changes) and def should be the default
+ // value (i.e., when no environment variables are provided at all).
+ get := func(key, cur, def string) (string, bool) {
+ if val, ok := add[key]; ok {
+ return val, true
+ }
+ if del[key] {
+ val := getOrigEnv(key)
+ if val == "" {
+ val = def
+ }
+ return val, true
+ }
+ return cur, false
+ }
+
+ goos, okGOOS := get("GOOS", cfg.Goos, build.Default.GOOS)
+ goarch, okGOARCH := get("GOARCH", cfg.Goarch, build.Default.GOARCH)
+ if okGOOS || okGOARCH {
+ if err := work.CheckGOOSARCHPair(goos, goarch); err != nil {
+ return err
+ }
+ }
+
+ goexperiment, okGOEXPERIMENT := get("GOEXPERIMENT", cfg.RawGOEXPERIMENT, buildcfg.DefaultGOEXPERIMENT)
+ if okGOEXPERIMENT {
+ if _, err := buildcfg.ParseGOEXPERIMENT(goos, goarch, goexperiment); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// PrintEnv prints the environment variables to w.
+func PrintEnv(w io.Writer, env []cfg.EnvVar) {
+ for _, e := range env {
+ if e.Name != "TERM" {
+ if runtime.GOOS != "plan9" && bytes.Contains([]byte(e.Value), []byte{0}) {
+ base.Fatalf("go: internal error: encountered null byte in environment variable %s on non-plan9 platform", e.Name)
+ }
+ switch runtime.GOOS {
+ default:
+ fmt.Fprintf(w, "%s=%s\n", e.Name, shellQuote(e.Value))
+ case "plan9":
+ if strings.IndexByte(e.Value, '\x00') < 0 {
+ fmt.Fprintf(w, "%s='%s'\n", e.Name, strings.ReplaceAll(e.Value, "'", "''"))
+ } else {
+ v := strings.Split(e.Value, "\x00")
+ fmt.Fprintf(w, "%s=(", e.Name)
+ for x, s := range v {
+ if x > 0 {
+ fmt.Fprintf(w, " ")
+ }
+ fmt.Fprintf(w, "'%s'", strings.ReplaceAll(s, "'", "''"))
+ }
+ fmt.Fprintf(w, ")\n")
+ }
+ case "windows":
+ if hasNonGraphic(e.Value) {
+ base.Errorf("go: stripping unprintable or unescapable characters from %%%q%%", e.Name)
+ }
+ fmt.Fprintf(w, "set %s=%s\n", e.Name, batchEscape(e.Value))
+ }
+ }
+ }
+}
+
+func hasNonGraphic(s string) bool {
+ for _, c := range []byte(s) {
+ if c == '\r' || c == '\n' || (!unicode.IsGraphic(rune(c)) && !unicode.IsSpace(rune(c))) {
+ return true
+ }
+ }
+ return false
+}
+
+func shellQuote(s string) string {
+ var b bytes.Buffer
+ b.WriteByte('\'')
+ for _, x := range []byte(s) {
+ if x == '\'' {
+ // Close the single quoted string, add an escaped single quote,
+ // and start another single quoted string.
+ b.WriteString(`'\''`)
+ } else {
+ b.WriteByte(x)
+ }
+ }
+ b.WriteByte('\'')
+ return b.String()
+}
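+
+// For example (sketch), a value containing a single quote is emitted as a
+// run of single-quoted segments with an escaped quote between them:
+//
+//	shellQuote(`don't`) // `'don'\''t'`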
+
+func batchEscape(s string) string {
+ var b bytes.Buffer
+ for _, x := range []byte(s) {
+ if x == '\r' || x == '\n' || (!unicode.IsGraphic(rune(x)) && !unicode.IsSpace(rune(x))) {
+ b.WriteRune(unicode.ReplacementChar)
+ continue
+ }
+ switch x {
+ case '%':
+ b.WriteString("%%")
+ case '<', '>', '|', '&', '^':
+ // These are special characters that need to be escaped with ^. See
+ // https://learn.microsoft.com/en-us/windows-server/administration/windows-commands/set_1.
+ b.WriteByte('^')
+ b.WriteByte(x)
+ default:
+ b.WriteByte(x)
+ }
+ }
+ return b.String()
+}
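+
+// For example (sketch), '%' is doubled and cmd.exe metacharacters are escaped
+// with '^':
+//
+//	batchEscape("50% <ok>") // "50%% ^<ok^>"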
+
+func printEnvAsJSON(env []cfg.EnvVar) {
+ m := make(map[string]string)
+ for _, e := range env {
+ if e.Name == "TERM" {
+ continue
+ }
+ m[e.Name] = e.Value
+ }
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", "\t")
+ if err := enc.Encode(m); err != nil {
+ base.Fatalf("go: %s", err)
+ }
+}
+
+func getOrigEnv(key string) string {
+ for _, v := range cfg.OrigEnv {
+ if v, found := strings.CutPrefix(v, key+"="); found {
+ return v
+ }
+ }
+ return ""
+}
+
+func checkEnvWrite(key, val string) error {
+ switch key {
+ case "GOEXE", "GOGCCFLAGS", "GOHOSTARCH", "GOHOSTOS", "GOMOD", "GOWORK", "GOTOOLDIR", "GOVERSION":
+ return fmt.Errorf("%s cannot be modified", key)
+ case "GOENV":
+ return fmt.Errorf("%s can only be set using the OS environment", key)
+ }
+
+ // To catch typos and the like, check that we know the variable.
+ // If it's already in the env file, we assume it's known.
+ if !cfg.CanGetenv(key) {
+ return fmt.Errorf("unknown go command variable %s", key)
+ }
+
+ // Some variables can only have one of a few valid values. If set to an
+ // invalid value, the next cmd/go invocation might fail immediately,
+ // even 'go env -w' itself.
+ switch key {
+ case "GO111MODULE":
+ switch val {
+ case "", "auto", "on", "off":
+ default:
+ return fmt.Errorf("invalid %s value %q", key, val)
+ }
+ case "GOPATH":
+ if strings.HasPrefix(val, "~") {
+ return fmt.Errorf("GOPATH entry cannot start with shell metacharacter '~': %q", val)
+ }
+ if !filepath.IsAbs(val) && val != "" {
+ return fmt.Errorf("GOPATH entry is relative; must be absolute path: %q", val)
+ }
+ case "GOMODCACHE":
+ if !filepath.IsAbs(val) && val != "" {
+ return fmt.Errorf("GOMODCACHE entry is relative; must be absolute path: %q", val)
+ }
+ case "CC", "CXX":
+ if val == "" {
+ break
+ }
+ args, err := quoted.Split(val)
+ if err != nil {
+ return fmt.Errorf("invalid %s: %v", key, err)
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("%s entry cannot contain only space", key)
+ }
+ if !filepath.IsAbs(args[0]) && args[0] != filepath.Base(args[0]) {
+ return fmt.Errorf("%s entry is relative; must be absolute path: %q", key, args[0])
+ }
+ }
+
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("invalid UTF-8 in %s=... value", key)
+ }
+ if strings.Contains(val, "\x00") {
+ return fmt.Errorf("invalid NUL in %s=... value", key)
+ }
+ if strings.ContainsAny(val, "\v\r\n") {
+ return fmt.Errorf("invalid newline in %s=... value", key)
+ }
+ return nil
+}
+
+func readEnvFileLines(mustExist bool) []string {
+ file, err := cfg.EnvFile()
+ if file == "" {
+ if mustExist {
+ base.Fatalf("go: cannot find go env config: %v", err)
+ }
+ return nil
+ }
+ data, err := os.ReadFile(file)
+ if err != nil && (!os.IsNotExist(err) || mustExist) {
+ base.Fatalf("go: reading go env config: %v", err)
+ }
+ lines := strings.SplitAfter(string(data), "\n")
+ if lines[len(lines)-1] == "" {
+ lines = lines[:len(lines)-1]
+ } else {
+ lines[len(lines)-1] += "\n"
+ }
+ return lines
+}
+
+func updateEnvFile(add map[string]string, del map[string]bool) {
+ lines := readEnvFileLines(len(add) == 0)
+
+ // Delete all but last copy of any duplicated variables,
+ // since the last copy is the one that takes effect.
+ prev := make(map[string]int)
+ for l, line := range lines {
+ if key := lineToKey(line); key != "" {
+ if p, ok := prev[key]; ok {
+ lines[p] = ""
+ }
+ prev[key] = l
+ }
+ }
+
+ // Add variables (go env -w). Update existing lines in file if present, add to end otherwise.
+ for key, val := range add {
+ if p, ok := prev[key]; ok {
+ lines[p] = key + "=" + val + "\n"
+ delete(add, key)
+ }
+ }
+ for key, val := range add {
+ lines = append(lines, key+"="+val+"\n")
+ }
+
+ // Delete requested variables (go env -u).
+ for key := range del {
+ if p, ok := prev[key]; ok {
+ lines[p] = ""
+ }
+ }
+
+ // Sort runs of KEY=VALUE lines
+ // (that is, blocks of lines where blocks are separated
+ // by comments, blank lines, or invalid lines).
+ start := 0
+ for i := 0; i <= len(lines); i++ {
+ if i == len(lines) || lineToKey(lines[i]) == "" {
+ sortKeyValues(lines[start:i])
+ start = i + 1
+ }
+ }
+
+ file, err := cfg.EnvFile()
+ if file == "" {
+ base.Fatalf("go: cannot find go env config: %v", err)
+ }
+ data := []byte(strings.Join(lines, ""))
+ err = os.WriteFile(file, data, 0666)
+ if err != nil {
+ // Try creating directory.
+ os.MkdirAll(filepath.Dir(file), 0777)
+ err = os.WriteFile(file, data, 0666)
+ if err != nil {
+ base.Fatalf("go: writing go env config: %v", err)
+ }
+ }
+}
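+
+// Illustrative effect (paths are hypothetical) of 'go env -w GOBIN=/new/bin'
+// on an existing go/env file:
+//
+//	before:                  after:
+//	GOPATH=/home/user/go     GOBIN=/new/bin
+//	GOBIN=/old/bin           GOPATH=/home/user/go
+//
+// The stale GOBIN line is rewritten in place and the block is then sorted by key.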
+
+// lineToKey returns the KEY part of the line KEY=VALUE or else an empty string.
+func lineToKey(line string) string {
+ i := strings.Index(line, "=")
+ if i < 0 || strings.Contains(line[:i], "#") {
+ return ""
+ }
+ return line[:i]
+}
+
+// sortKeyValues sorts a sequence of lines by key.
+// It differs from sort.Strings in that keys which are GOx where x is an ASCII
+// character smaller than = sort after GO=.
+// (There are no such keys currently. It used to matter for GO386 which was
+// removed in Go 1.16.)
+func sortKeyValues(lines []string) {
+ sort.Slice(lines, func(i, j int) bool {
+ return lineToKey(lines[i]) < lineToKey(lines[j])
+ })
+}
diff --git a/src/cmd/go/internal/envcmd/env_test.go b/src/cmd/go/internal/envcmd/env_test.go
new file mode 100644
index 0000000..7419cf3
--- /dev/null
+++ b/src/cmd/go/internal/envcmd/env_test.go
@@ -0,0 +1,93 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package envcmd
+
+import (
+ "bytes"
+ "cmd/go/internal/cfg"
+ "fmt"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "unicode"
+)
+
+func FuzzPrintEnvEscape(f *testing.F) {
+ f.Add(`$(echo 'cc"'; echo 'OOPS="oops')`)
+ f.Add("$(echo shell expansion 1>&2)")
+ f.Add("''")
+ f.Add(`C:\"Program Files"\`)
+ f.Add(`\\"Quoted Host"\\share`)
+ f.Add("\xfb")
+ f.Add("0")
+ f.Add("")
+ f.Add("''''''''")
+ f.Add("\r")
+ f.Add("\n")
+ f.Add("E,%")
+ f.Fuzz(func(t *testing.T, s string) {
+ t.Parallel()
+
+ for _, c := range []byte(s) {
+ if c == 0 {
+ t.Skipf("skipping %q: contains a null byte. Null bytes can't occur in the environment"+
+ " outside of Plan 9, which has a different code path than Windows and Unix and is"+
+ " not covered by this test.", s)
+ }
+ if c > unicode.MaxASCII {
+ t.Skipf("skipping %#q: contains a non-ASCII character %q", s, c)
+ }
+ if !unicode.IsGraphic(rune(c)) && !unicode.IsSpace(rune(c)) {
+ t.Skipf("skipping %#q: contains non-graphic character %q", s, c)
+ }
+ if runtime.GOOS == "windows" && c == '\r' || c == '\n' {
+ t.Skipf("skipping %#q on Windows: contains unescapable character %q", s, c)
+ }
+ }
+
+ var b bytes.Buffer
+ if runtime.GOOS == "windows" {
+ b.WriteString("@echo off\n")
+ }
+ PrintEnv(&b, []cfg.EnvVar{{Name: "var", Value: s}})
+ var want string
+ if runtime.GOOS == "windows" {
+ fmt.Fprintf(&b, "echo \"%%var%%\"\n")
+ want += "\"" + s + "\"\r\n"
+ } else {
+ fmt.Fprintf(&b, "printf '%%s\\n' \"$var\"\n")
+ want += s + "\n"
+ }
+ scriptfilename := "script.sh"
+ if runtime.GOOS == "windows" {
+ scriptfilename = "script.bat"
+ }
+ var cmd *exec.Cmd
+ if runtime.GOOS == "windows" {
+ scriptfile := filepath.Join(t.TempDir(), scriptfilename)
+ if err := os.WriteFile(scriptfile, b.Bytes(), 0777); err != nil {
+ t.Fatal(err)
+ }
+ cmd = testenv.Command(t, "cmd.exe", "/C", scriptfile)
+ } else {
+ cmd = testenv.Command(t, "sh", "-c", b.String())
+ }
+ out, err := cmd.Output()
+ t.Log(string(out))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(out) != want {
+ t.Fatalf("output of running PrintEnv script and echoing variable: got: %q, want: %q",
+ string(out), want)
+ }
+ })
+}
diff --git a/src/cmd/go/internal/fix/fix.go b/src/cmd/go/internal/fix/fix.go
new file mode 100644
index 0000000..3705b30
--- /dev/null
+++ b/src/cmd/go/internal/fix/fix.go
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fix implements the “go fix” command.
+package fix
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/str"
+ "cmd/go/internal/work"
+ "context"
+ "fmt"
+ "go/build"
+ "os"
+)
+
+var CmdFix = &base.Command{
+ UsageLine: "go fix [-fix list] [packages]",
+ Short: "update packages to use new APIs",
+ Long: `
+Fix runs the Go fix command on the packages named by the import paths.
+
+The -fix flag sets a comma-separated list of fixes to run.
+The default is all known fixes.
+(Its value is passed to 'go tool fix -r'.)
+
+For more about fix, see 'go doc cmd/fix'.
+For more about specifying packages, see 'go help packages'.
+
+To run fix with other options, run 'go tool fix'.
+
+See also: go fmt, go vet.
+ `,
+}
+
+var fixes = CmdFix.Flag.String("fix", "", "comma-separated list of fixes to apply")
+
+func init() {
+ work.AddBuildFlags(CmdFix, work.DefaultBuildFlags)
+ CmdFix.Run = runFix // fix cycle
+}
+
+func runFix(ctx context.Context, cmd *base.Command, args []string) {
+ pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args)
+ w := 0
+ for _, pkg := range pkgs {
+ if pkg.Error != nil {
+ base.Errorf("%v", pkg.Error)
+ continue
+ }
+ pkgs[w] = pkg
+ w++
+ }
+ pkgs = pkgs[:w]
+
+ printed := false
+ for _, pkg := range pkgs {
+ if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main {
+ if !printed {
+ fmt.Fprintf(os.Stderr, "go: not fixing packages in dependency modules\n")
+ printed = true
+ }
+ continue
+ }
+ // Use pkg.gofiles instead of pkg.Dir so that
+ // the command only applies to this package,
+ // not to packages in subdirectories.
+ files := base.RelPaths(pkg.InternalAllGoFiles())
+ goVersion := ""
+ if pkg.Module != nil {
+ goVersion = "go" + pkg.Module.GoVersion
+ } else if pkg.Standard {
+ goVersion = build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1]
+ }
+ var fixArg []string
+ if *fixes != "" {
+ fixArg = []string{"-r=" + *fixes}
+ }
+ base.Run(str.StringList(cfg.BuildToolexec, base.Tool("fix"), "-go="+goVersion, fixArg, files))
+ }
+}
diff --git a/src/cmd/go/internal/fmtcmd/fmt.go b/src/cmd/go/internal/fmtcmd/fmt.go
new file mode 100644
index 0000000..62b22f6
--- /dev/null
+++ b/src/cmd/go/internal/fmtcmd/fmt.go
@@ -0,0 +1,115 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fmtcmd implements the “go fmt” command.
+package fmtcmd
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+ "cmd/go/internal/modload"
+ "cmd/internal/sys"
+)
+
+func init() {
+ base.AddBuildFlagsNX(&CmdFmt.Flag)
+ base.AddChdirFlag(&CmdFmt.Flag)
+ base.AddModFlag(&CmdFmt.Flag)
+ base.AddModCommonFlags(&CmdFmt.Flag)
+}
+
+var CmdFmt = &base.Command{
+ Run: runFmt,
+ UsageLine: "go fmt [-n] [-x] [packages]",
+ Short: "gofmt (reformat) package sources",
+ Long: `
+Fmt runs the command 'gofmt -l -w' on the packages named
+by the import paths. It prints the names of the files that are modified.
+
+For more about gofmt, see 'go doc cmd/gofmt'.
+For more about specifying packages, see 'go help packages'.
+
+The -n flag prints commands that would be executed.
+The -x flag prints commands as they are executed.
+
+The -mod flag's value sets which module download mode
+to use: readonly or vendor. See 'go help modules' for more.
+
+To run gofmt with specific options, run gofmt itself.
+
+See also: go fix, go vet.
+ `,
+}
+
+func runFmt(ctx context.Context, cmd *base.Command, args []string) {
+ printed := false
+ gofmt := gofmtPath()
+
+ gofmtArgs := []string{gofmt, "-l", "-w"}
+ gofmtArgLen := len(gofmt) + len(" -l -w")
+
+ baseGofmtArgs := len(gofmtArgs)
+ baseGofmtArgLen := gofmtArgLen
+
+ for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) {
+ if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main {
+ if !printed {
+ fmt.Fprintf(os.Stderr, "go: not formatting packages in dependency modules\n")
+ printed = true
+ }
+ continue
+ }
+ if pkg.Error != nil {
+ var nogo *load.NoGoError
+ var embed *load.EmbedError
+ if (errors.As(pkg.Error, &nogo) || errors.As(pkg.Error, &embed)) && len(pkg.InternalAllGoFiles()) > 0 {
+ // Skip this error, as we will format
+ // all files regardless.
+ } else {
+ base.Errorf("%v", pkg.Error)
+ continue
+ }
+ }
+ // Use pkg.gofiles instead of pkg.Dir so that
+ // the command only applies to this package,
+ // not to packages in subdirectories.
+ files := base.RelPaths(pkg.InternalAllGoFiles())
+ for _, file := range files {
+ gofmtArgs = append(gofmtArgs, file)
+ gofmtArgLen += 1 + len(file) // plus separator
+ if gofmtArgLen >= sys.ExecArgLengthLimit {
+ base.Run(gofmtArgs)
+ gofmtArgs = gofmtArgs[:baseGofmtArgs]
+ gofmtArgLen = baseGofmtArgLen
+ }
+ }
+ }
+ if len(gofmtArgs) > baseGofmtArgs {
+ base.Run(gofmtArgs)
+ }
+}
+
+func gofmtPath() string {
+ gofmt := "gofmt" + cfg.ToolExeSuffix()
+
+ gofmtPath := filepath.Join(cfg.GOBIN, gofmt)
+ if _, err := os.Stat(gofmtPath); err == nil {
+ return gofmtPath
+ }
+
+ gofmtPath = filepath.Join(cfg.GOROOT, "bin", gofmt)
+ if _, err := os.Stat(gofmtPath); err == nil {
+ return gofmtPath
+ }
+
+ // fallback to looking for gofmt in $PATH
+ return "gofmt"
+}
diff --git a/src/cmd/go/internal/fsys/fsys.go b/src/cmd/go/internal/fsys/fsys.go
new file mode 100644
index 0000000..b83c5a3
--- /dev/null
+++ b/src/cmd/go/internal/fsys/fsys.go
@@ -0,0 +1,784 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fsys is an abstraction for reading files that
+// allows for virtual overlays on top of the files on disk.
+package fsys
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "internal/godebug"
+ "io/fs"
+ "log"
+ "os"
+ pathpkg "path"
+ "path/filepath"
+ "runtime"
+ "runtime/debug"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Trace emits a trace event for the operation and file path to the trace log,
+// but only when $GODEBUG contains gofsystrace=1.
+// The traces are appended to the file named by the $GODEBUG setting gofsystracelog, or else standard error.
+// For debugging, if the $GODEBUG setting gofsystracestack is non-empty, then trace events for paths
+// matching that glob pattern (using path.Match) will be followed by a full stack trace.
+func Trace(op, path string) {
+ if !doTrace {
+ return
+ }
+ traceMu.Lock()
+ defer traceMu.Unlock()
+ fmt.Fprintf(traceFile, "%d gofsystrace %s %s\n", os.Getpid(), op, path)
+ if pattern := gofsystracestack.Value(); pattern != "" {
+ if match, _ := pathpkg.Match(pattern, path); match {
+ traceFile.Write(debug.Stack())
+ }
+ }
+}
+
+var (
+ doTrace bool
+ traceFile *os.File
+ traceMu sync.Mutex
+
+ gofsystrace = godebug.New("#gofsystrace")
+ gofsystracelog = godebug.New("#gofsystracelog")
+ gofsystracestack = godebug.New("#gofsystracestack")
+)
+
+func init() {
+ if gofsystrace.Value() != "1" {
+ return
+ }
+ doTrace = true
+ if f := gofsystracelog.Value(); f != "" {
+ // Note: No buffering on writes to this file, so no need to worry about closing it at exit.
+ var err error
+ traceFile, err = os.OpenFile(f, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ traceFile = os.Stderr
+ }
+}
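+
+// For example (illustrative paths), to trace all file operations to a log file
+// and dump stacks for accesses under one directory:
+//
+//	GODEBUG=gofsystrace=1,gofsystracelog=/tmp/fsys.log,gofsystracestack=/src/mymod/* go build .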
+
+// OverlayFile is the path to a text file in the OverlayJSON format.
+// It is the value of the -overlay flag.
+var OverlayFile string
+
+// OverlayJSON is the format overlay files are expected to be in.
+// The Replace map maps from overlaid paths to replacement paths:
+// the Go command will forward all reads trying to open
+// each overlaid path to its replacement path, or consider the overlaid
+// path not to exist if the replacement path is empty.
+type OverlayJSON struct {
+ Replace map[string]string
+}
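+
+// An illustrative overlay file (paths are hypothetical):
+//
+//	{
+//		"Replace": {
+//			"/src/app/config.go": "/tmp/generated/config.go",
+//			"/src/app/removed.go": ""
+//		}
+//	}
+//
+// The first entry redirects reads of config.go to the generated file; the
+// empty replacement makes removed.go appear deleted.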
+
+type node struct {
+ actualFilePath string // empty if a directory
+ children map[string]*node // path element → file or directory
+}
+
+func (n *node) isDir() bool {
+ return n.actualFilePath == "" && n.children != nil
+}
+
+func (n *node) isDeleted() bool {
+ return n.actualFilePath == "" && n.children == nil
+}
+
+// TODO(matloob): encapsulate these in an io/fs-like interface
+var overlay map[string]*node // path -> file or directory node
+var cwd string // copy of base.Cwd() to avoid dependency
+
+// canonicalize a path for looking it up in the overlay.
+// Important: filepath.Join(cwd, path) doesn't always produce
+// the correct absolute path if path is relative, because on
+// Windows producing the correct absolute path requires making
+// a syscall. So this should only be used when looking up paths
+// in the overlay, or canonicalizing the paths in the overlay.
+func canonicalize(path string) string {
+ if path == "" {
+ return ""
+ }
+ if filepath.IsAbs(path) {
+ return filepath.Clean(path)
+ }
+
+ if v := filepath.VolumeName(cwd); v != "" && path[0] == filepath.Separator {
+ // On Windows filepath.Join(cwd, path) doesn't always work. In general
+ // filepath.Abs needs to make a syscall on Windows. Elsewhere in cmd/go
+ // use filepath.Join(cwd, path), but cmd/go specifically supports Windows
+ // paths that start with "\" which implies the path is relative to the
+ // volume of the working directory. See golang.org/issue/8130.
+ return filepath.Join(v, path)
+ }
+
+ // Make the path absolute.
+ return filepath.Join(cwd, path)
+}
+
+// Init initializes the overlay, if one is being used.
+func Init(wd string) error {
+ if overlay != nil {
+ // already initialized
+ return nil
+ }
+
+ cwd = wd
+
+ if OverlayFile == "" {
+ return nil
+ }
+
+ Trace("ReadFile", OverlayFile)
+ b, err := os.ReadFile(OverlayFile)
+ if err != nil {
+ return fmt.Errorf("reading overlay file: %v", err)
+ }
+
+ var overlayJSON OverlayJSON
+ if err := json.Unmarshal(b, &overlayJSON); err != nil {
+ return fmt.Errorf("parsing overlay JSON: %v", err)
+ }
+
+ return initFromJSON(overlayJSON)
+}
+
+func initFromJSON(overlayJSON OverlayJSON) error {
+ // Canonicalize the paths in the overlay map.
+ // Use reverseCanonicalized to check for collisions:
+ // no two 'from' paths should canonicalize to the same path.
+ overlay = make(map[string]*node)
+ reverseCanonicalized := make(map[string]string) // inverse of canonicalize operation, to check for duplicates
+ // Build a table of file and directory nodes from the replacement map.
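+ // For example (illustrative entry), a Replace entry {"a/b.go": "/tmp/b.go"}
+ // yields a file node for a/b.go plus directory nodes for "a" and each of its
+ // ancestors, with each parent's children map pointing at the node below it.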
+
+ // Remove any potential non-determinism from iterating over map by sorting it.
+ replaceFrom := make([]string, 0, len(overlayJSON.Replace))
+ for k := range overlayJSON.Replace {
+ replaceFrom = append(replaceFrom, k)
+ }
+ sort.Strings(replaceFrom)
+
+ for _, from := range replaceFrom {
+ to := overlayJSON.Replace[from]
+ // Canonicalize paths and check for a collision.
+ if from == "" {
+ return fmt.Errorf("empty string key in overlay file Replace map")
+ }
+ cfrom := canonicalize(from)
+ if to != "" {
+ // Don't canonicalize "", meaning to delete a file, because then it will turn into ".".
+ to = canonicalize(to)
+ }
+ if otherFrom, seen := reverseCanonicalized[cfrom]; seen {
+ return fmt.Errorf(
+ "paths %q and %q both canonicalize to %q in overlay file Replace map", otherFrom, from, cfrom)
+ }
+ reverseCanonicalized[cfrom] = from
+ from = cfrom
+
+ // Create node for overlaid file.
+ dir, base := filepath.Dir(from), filepath.Base(from)
+ if n, ok := overlay[from]; ok {
+ // All 'from' paths in the overlay are file paths, and since they are
+ // map keys they are unique. So if a node for this path already exists,
+ // it was added below as a parent directory node for some other entry.
+ // In other words, the Replace map contains both this path and a path
+ // nested underneath it, using it as both a file and a directory.
+ //
+ // This only applies if the overlay directory has any files or directories
+ // in it: placeholder directories that only contain deleted files don't
+ // count. They are safe to be overwritten with actual files.
+ for _, f := range n.children {
+ if !f.isDeleted() {
+ return fmt.Errorf("invalid overlay: path %v is used as both file and directory", from)
+ }
+ }
+ }
+ overlay[from] = &node{actualFilePath: to}
+
+ // Add parent directory nodes to overlay structure.
+ childNode := overlay[from]
+ for {
+ dirNode := overlay[dir]
+ if dirNode == nil || dirNode.isDeleted() {
+ dirNode = &node{children: make(map[string]*node)}
+ overlay[dir] = dirNode
+ }
+ if childNode.isDeleted() {
+ // For a deleted file, record it in its immediate parent directory
+ // node (if that node is a directory) and stop there: the parent only
+ // conditionally exists, when it has non-deleted children, so a
+ // deletion must not force its ancestor directories into existence.
+ if dirNode.isDir() {
+ dirNode.children[base] = childNode
+ }
+ break
+ }
+ if !dirNode.isDir() {
+ // This path already exists as a file, so it can't be a parent
+ // directory. See comment at error above.
+ return fmt.Errorf("invalid overlay: path %v is used as both file and directory", dir)
+ }
+ dirNode.children[base] = childNode
+ parent := filepath.Dir(dir)
+ if parent == dir {
+ break // reached the top; there is no parent
+ }
+ dir, base = parent, filepath.Base(dir)
+ childNode = dirNode
+ }
+ }
+
+ return nil
+}
+
+// IsDir returns true if path is a directory on disk or in the
+// overlay.
+func IsDir(path string) (bool, error) {
+ Trace("IsDir", path)
+ path = canonicalize(path)
+
+ if _, ok := parentIsOverlayFile(path); ok {
+ return false, nil
+ }
+
+ if n, ok := overlay[path]; ok {
+ return n.isDir(), nil
+ }
+
+ fi, err := os.Stat(path)
+ if err != nil {
+ return false, err
+ }
+
+ return fi.IsDir(), nil
+}
+
+// parentIsOverlayFile reports whether name or any of its parent directories
+// is overlaid with a file, and if so also returns the first such path found,
+// starting with name itself.
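+// For example (illustrative entries), if the canonicalized overlay maps "a/b"
+// to a regular file, then parentIsOverlayFile("a/b/c.go") returns ("a/b", true),
+// since a/b cannot also be a directory.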
+func parentIsOverlayFile(name string) (string, bool) {
+ if overlay != nil {
+ // Check if name can't possibly be a directory because
+ // it or one of its parents is overlaid with a file.
+ // TODO(matloob): Maybe save this to avoid doing it every time?
+ prefix := name
+ for {
+ node := overlay[prefix]
+ if node != nil && !node.isDir() {
+ return prefix, true
+ }
+ parent := filepath.Dir(prefix)
+ if parent == prefix {
+ break
+ }
+ prefix = parent
+ }
+ }
+
+ return "", false
+}
+
+// errNotDir is used to communicate from ReadDir to IsDirWithGoFiles
+// that the argument is not a directory, so that IsDirWithGoFiles doesn't
+// return an error.
+var errNotDir = errors.New("not a directory")
+
+func nonFileInOverlayError(overlayPath string) error {
+ return fmt.Errorf("replacement path %q is a directory, not a file", overlayPath)
+}
+
+// readDir reads a dir on disk, returning an error that is errNotDir if the dir is not a directory.
+// Unfortunately, the error returned by os.ReadDir if dir is not a directory
+// can vary depending on the OS (Linux, Mac, Windows return ENOTDIR; BSD returns EINVAL).
+func readDir(dir string) ([]fs.FileInfo, error) {
+ entries, err := os.ReadDir(dir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, err
+ }
+ if dirfi, staterr := os.Stat(dir); staterr == nil && !dirfi.IsDir() {
+ return nil, &fs.PathError{Op: "ReadDir", Path: dir, Err: errNotDir}
+ }
+ return nil, err
+ }
+
+ fis := make([]fs.FileInfo, 0, len(entries))
+ for _, entry := range entries {
+ info, err := entry.Info()
+ if err != nil {
+ continue
+ }
+ fis = append(fis, info)
+ }
+ return fis, nil
+}
+
+// ReadDir provides a slice of fs.FileInfo entries corresponding
+// to the overlaid files in the directory.
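+// For example (illustrative entries), if the overlay replaces dir/a.go and
+// deletes dir/b.go, ReadDir(dir) lists a.go under its original name but with
+// the replacement file's metadata, and omits b.go entirely.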
+func ReadDir(dir string) ([]fs.FileInfo, error) {
+ Trace("ReadDir", dir)
+ dir = canonicalize(dir)
+ if _, ok := parentIsOverlayFile(dir); ok {
+ return nil, &fs.PathError{Op: "ReadDir", Path: dir, Err: errNotDir}
+ }
+
+ dirNode := overlay[dir]
+ if dirNode == nil {
+ return readDir(dir)
+ }
+ if dirNode.isDeleted() {
+ return nil, &fs.PathError{Op: "ReadDir", Path: dir, Err: fs.ErrNotExist}
+ }
+ diskfis, err := readDir(dir)
+ if err != nil && !os.IsNotExist(err) && !errors.Is(err, errNotDir) {
+ return nil, err
+ }
+
+ // Stat files in overlay to make composite list of fileinfos
+ files := make(map[string]fs.FileInfo)
+ for _, f := range diskfis {
+ files[f.Name()] = f
+ }
+ for name, to := range dirNode.children {
+ switch {
+ case to.isDir():
+ files[name] = fakeDir(name)
+ case to.isDeleted():
+ delete(files, name)
+ default:
+ // To keep the data model simple, if the overlay contains a symlink we
+ // always stat through it (using Stat, not Lstat). That way we don't need
+ // to worry about the interaction between Lstat and directories: if a
+ // symlink in the overlay points to a directory, we reject it like an
+ // ordinary directory.
+ fi, err := os.Stat(to.actualFilePath)
+ if err != nil {
+ files[name] = missingFile(name)
+ continue
+ } else if fi.IsDir() {
+ return nil, &fs.PathError{Op: "Stat", Path: filepath.Join(dir, name), Err: nonFileInOverlayError(to.actualFilePath)}
+ }
+ // Add a fileinfo for the overlaid file, so that it has
+ // the original file's name, but the overlaid file's metadata.
+ files[name] = fakeFile{name, fi}
+ }
+ }
+ sortedFiles := diskfis[:0]
+ for _, f := range files {
+ sortedFiles = append(sortedFiles, f)
+ }
+ sort.Slice(sortedFiles, func(i, j int) bool { return sortedFiles[i].Name() < sortedFiles[j].Name() })
+ return sortedFiles, nil
+}
+
+// OverlayPath returns the path to the overlaid contents of the
+// file, the empty string if the overlay deletes the file, or path
+// itself if the file is not in the overlay, the file is a directory
+// in the overlay, or there is no overlay.
+// It returns true if the path is overlaid with a regular file
+// or deleted, and false otherwise.
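+// For example (illustrative entries), with Replace mapping "p/f.go" to
+// "/tmp/f.go" and "p/gone.go" to "", OverlayPath reports ("/tmp/f.go", true)
+// for p/f.go, ("", true) for p/gone.go, and (path itself, false) for any
+// other path.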
+func OverlayPath(path string) (string, bool) {
+ if p, ok := overlay[canonicalize(path)]; ok && !p.isDir() {
+ return p.actualFilePath, ok
+ }
+
+ return path, false
+}
+
+// Open opens the file at or overlaid on the given path.
+func Open(path string) (*os.File, error) {
+ Trace("Open", path)
+ return openFile(path, os.O_RDONLY, 0)
+}
+
+// OpenFile opens the file at or overlaid on the given path with the flag and perm.
+func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
+ Trace("OpenFile", path)
+ return openFile(path, flag, perm)
+}
+
+func openFile(path string, flag int, perm os.FileMode) (*os.File, error) {
+ cpath := canonicalize(path)
+ if node, ok := overlay[cpath]; ok {
+ // Opening a file in the overlay.
+ if node.isDir() {
+ return nil, &fs.PathError{Op: "OpenFile", Path: path, Err: errors.New("fsys.OpenFile doesn't support opening directories yet")}
+ }
+ // We can't open overlaid paths for write.
+ if perm != os.FileMode(os.O_RDONLY) {
+ return nil, &fs.PathError{Op: "OpenFile", Path: path, Err: errors.New("overlaid files can't be opened for write")}
+ }
+ return os.OpenFile(node.actualFilePath, flag, perm)
+ }
+ if parent, ok := parentIsOverlayFile(filepath.Dir(cpath)); ok {
+ // The file is deleted explicitly in the Replace map,
+ // or implicitly because one of its parent directories was
+ // replaced by a file.
+ return nil, &fs.PathError{
+ Op: "Open",
+ Path: path,
+ Err: fmt.Errorf("file %s does not exist: parent directory %s is replaced by a file in overlay", path, parent),
+ }
+ }
+ return os.OpenFile(cpath, flag, perm)
+}
+
+// IsDirWithGoFiles reports whether dir is a directory containing Go files
+// either on disk or in the overlay.
+func IsDirWithGoFiles(dir string) (bool, error) {
+ Trace("IsDirWithGoFiles", dir)
+ fis, err := ReadDir(dir)
+ if os.IsNotExist(err) || errors.Is(err, errNotDir) {
+ return false, nil
+ }
+ if err != nil {
+ return false, err
+ }
+
+ var firstErr error
+ for _, fi := range fis {
+ if fi.IsDir() {
+ continue
+ }
+
+ // TODO(matloob): this enforces that the "from" in the map
+ // has a .go suffix, but the actual destination file
+ // doesn't need to have a .go suffix. Is this okay with the
+ // compiler?
+ if !strings.HasSuffix(fi.Name(), ".go") {
+ continue
+ }
+ if fi.Mode().IsRegular() {
+ return true, nil
+ }
+
+ // fi is the result of an Lstat, so it doesn't follow symlinks.
+ // But it's okay if the file is a symlink pointing to a regular
+ // file, so use os.Stat to follow symlinks and check that.
+ actualFilePath, _ := OverlayPath(filepath.Join(dir, fi.Name()))
+ fi, err := os.Stat(actualFilePath)
+ if err == nil && fi.Mode().IsRegular() {
+ return true, nil
+ }
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ // No go files found in directory.
+ return false, firstErr
+}
+
+// walk recursively descends path, calling walkFn. Copied, with some
+// modifications from path/filepath.walk.
+func walk(path string, info fs.FileInfo, walkFn filepath.WalkFunc) error {
+ if err := walkFn(path, info, nil); err != nil || !info.IsDir() {
+ return err
+ }
+
+ fis, err := ReadDir(path)
+ if err != nil {
+ return walkFn(path, info, err)
+ }
+
+ for _, fi := range fis {
+ filename := filepath.Join(path, fi.Name())
+ if err := walk(filename, fi, walkFn); err != nil {
+ if !fi.IsDir() || err != filepath.SkipDir {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Walk walks the file tree rooted at root, calling walkFn for each file or
+// directory in the tree, including root.
+func Walk(root string, walkFn filepath.WalkFunc) error {
+ Trace("Walk", root)
+ info, err := Lstat(root)
+ if err != nil {
+ err = walkFn(root, nil, err)
+ } else {
+ err = walk(root, info, walkFn)
+ }
+ if err == filepath.SkipDir {
+ return nil
+ }
+ return err
+}
+
+// Lstat implements a version of os.Lstat that operates on the overlay filesystem.
+func Lstat(path string) (fs.FileInfo, error) {
+ Trace("Lstat", path)
+ return overlayStat(path, os.Lstat, "lstat")
+}
+
+// Stat implements a version of os.Stat that operates on the overlay filesystem.
+func Stat(path string) (fs.FileInfo, error) {
+ Trace("Stat", path)
+ return overlayStat(path, os.Stat, "stat")
+}
+
+// overlayStat implements Lstat or Stat (depending on whether os.Lstat or os.Stat is passed in).
+func overlayStat(path string, osStat func(string) (fs.FileInfo, error), opName string) (fs.FileInfo, error) {
+ cpath := canonicalize(path)
+
+ if _, ok := parentIsOverlayFile(filepath.Dir(cpath)); ok {
+ return nil, &fs.PathError{Op: opName, Path: cpath, Err: fs.ErrNotExist}
+ }
+
+ node, ok := overlay[cpath]
+ if !ok {
+ // The file or directory is not overlaid.
+ return osStat(path)
+ }
+
+ switch {
+ case node.isDeleted():
+ return nil, &fs.PathError{Op: opName, Path: cpath, Err: fs.ErrNotExist}
+ case node.isDir():
+ return fakeDir(filepath.Base(path)), nil
+ default:
+ // To keep the data model simple, if the overlay contains a symlink we
+ // always stat through it (using Stat, not Lstat). That way we don't need to
+ // worry about the interaction between Lstat and directories: if a symlink
+ // in the overlay points to a directory, we reject it like an ordinary
+ // directory.
+ fi, err := os.Stat(node.actualFilePath)
+ if err != nil {
+ return nil, err
+ }
+ if fi.IsDir() {
+ return nil, &fs.PathError{Op: opName, Path: cpath, Err: nonFileInOverlayError(node.actualFilePath)}
+ }
+ return fakeFile{name: filepath.Base(path), real: fi}, nil
+ }
+}
+
+// fakeFile provides an fs.FileInfo implementation for an overlaid file,
+// so that the file has the name of the overlaid file, but takes all
+// other characteristics of the replacement file.
+type fakeFile struct {
+ name string
+ real fs.FileInfo
+}
+
+func (f fakeFile) Name() string { return f.name }
+func (f fakeFile) Size() int64 { return f.real.Size() }
+func (f fakeFile) Mode() fs.FileMode { return f.real.Mode() }
+func (f fakeFile) ModTime() time.Time { return f.real.ModTime() }
+func (f fakeFile) IsDir() bool { return f.real.IsDir() }
+func (f fakeFile) Sys() any { return f.real.Sys() }
+
+func (f fakeFile) String() string {
+ return fs.FormatFileInfo(f)
+}
+
+// missingFile provides an fs.FileInfo for an overlaid file whose replacement
+// file doesn't exist on disk. Name returns the file's name, Mode returns
+// fs.ModeIrregular, and the remaining methods return zero values.
+type missingFile string
+
+func (f missingFile) Name() string { return string(f) }
+func (f missingFile) Size() int64 { return 0 }
+func (f missingFile) Mode() fs.FileMode { return fs.ModeIrregular }
+func (f missingFile) ModTime() time.Time { return time.Unix(0, 0) }
+func (f missingFile) IsDir() bool { return false }
+func (f missingFile) Sys() any { return nil }
+
+func (f missingFile) String() string {
+ return fs.FormatFileInfo(f)
+}
+
+// fakeDir provides an fs.FileInfo implementation for directories that are
+// implicitly created by overlaid files. Each directory in the
+// path of an overlaid file is considered to exist in the overlay filesystem.
+type fakeDir string
+
+func (f fakeDir) Name() string { return string(f) }
+func (f fakeDir) Size() int64 { return 0 }
+func (f fakeDir) Mode() fs.FileMode { return fs.ModeDir | 0500 }
+func (f fakeDir) ModTime() time.Time { return time.Unix(0, 0) }
+func (f fakeDir) IsDir() bool { return true }
+func (f fakeDir) Sys() any { return nil }
+
+func (f fakeDir) String() string {
+ return fs.FormatFileInfo(f)
+}
+
+// Glob is like filepath.Glob but uses the overlay file system.
+func Glob(pattern string) (matches []string, err error) {
+ Trace("Glob", pattern)
+ // Check pattern is well-formed.
+ if _, err := filepath.Match(pattern, ""); err != nil {
+ return nil, err
+ }
+ if !hasMeta(pattern) {
+ if _, err = Lstat(pattern); err != nil {
+ return nil, nil
+ }
+ return []string{pattern}, nil
+ }
+
+ dir, file := filepath.Split(pattern)
+ volumeLen := 0
+ if runtime.GOOS == "windows" {
+ volumeLen, dir = cleanGlobPathWindows(dir)
+ } else {
+ dir = cleanGlobPath(dir)
+ }
+
+ if !hasMeta(dir[volumeLen:]) {
+ return glob(dir, file, nil)
+ }
+
+ // Prevent infinite recursion. See issue 15879.
+ if dir == pattern {
+ return nil, filepath.ErrBadPattern
+ }
+
+ var m []string
+ m, err = Glob(dir)
+ if err != nil {
+ return
+ }
+ for _, d := range m {
+ matches, err = glob(d, file, matches)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// cleanGlobPath prepares path for glob matching.
+func cleanGlobPath(path string) string {
+ switch path {
+ case "":
+ return "."
+ case string(filepath.Separator):
+ // do nothing to the path
+ return path
+ default:
+ return path[0 : len(path)-1] // chop off trailing separator
+ }
+}
+
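+// volumeNameLen returns the length of the leading Windows volume name in path:
+// 2 for a drive letter such as "C:", the length of `\\server\share` for a UNC
+// path, and 0 otherwise.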
+func volumeNameLen(path string) int {
+ isSlash := func(c uint8) bool {
+ return c == '\\' || c == '/'
+ }
+ if len(path) < 2 {
+ return 0
+ }
+ // with drive letter
+ c := path[0]
+ if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
+ return 2
+ }
+ // is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
+ if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
+ !isSlash(path[2]) && path[2] != '.' {
+ // first, a leading `\\` whose next byte is not `\`; this is the server name.
+ for n := 3; n < l-1; n++ {
+ // second, next '\' shouldn't be repeated.
+ if isSlash(path[n]) {
+ n++
+ // third, there must be at least one more non-separator character; this is the share name.
+ if !isSlash(path[n]) {
+ if path[n] == '.' {
+ break
+ }
+ for ; n < l; n++ {
+ if isSlash(path[n]) {
+ break
+ }
+ }
+ return n
+ }
+ break
+ }
+ }
+ }
+ return 0
+}
+
+// cleanGlobPathWindows is windows version of cleanGlobPath.
+func cleanGlobPathWindows(path string) (prefixLen int, cleaned string) {
+ vollen := volumeNameLen(path)
+ switch {
+ case path == "":
+ return 0, "."
+ case vollen+1 == len(path) && os.IsPathSeparator(path[len(path)-1]): // /, \, C:\ and C:/
+ // do nothing to the path
+ return vollen + 1, path
+ case vollen == len(path) && len(path) == 2: // C:
+ return vollen, path + "." // convert C: into C:.
+ default:
+ if vollen >= len(path) {
+ vollen = len(path) - 1
+ }
+ return vollen, path[0 : len(path)-1] // chop off trailing separator
+ }
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(dir, pattern string, matches []string) (m []string, e error) {
+ m = matches
+ fi, err := Stat(dir)
+ if err != nil {
+ return // ignore I/O error
+ }
+ if !fi.IsDir() {
+ return // ignore I/O error
+ }
+
+ list, err := ReadDir(dir)
+ if err != nil {
+ return // ignore I/O error
+ }
+
+ var names []string
+ for _, info := range list {
+ names = append(names, info.Name())
+ }
+ sort.Strings(names)
+
+ for _, n := range names {
+ matched, err := filepath.Match(pattern, n)
+ if err != nil {
+ return m, err
+ }
+ if matched {
+ m = append(m, filepath.Join(dir, n))
+ }
+ }
+ return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by filepath.Match.
+func hasMeta(path string) bool {
+ magicChars := `*?[`
+ if runtime.GOOS != "windows" {
+ magicChars = `*?[\`
+ }
+ return strings.ContainsAny(path, magicChars)
+}
diff --git a/src/cmd/go/internal/fsys/fsys_test.go b/src/cmd/go/internal/fsys/fsys_test.go
new file mode 100644
index 0000000..2ab2bb2
--- /dev/null
+++ b/src/cmd/go/internal/fsys/fsys_test.go
@@ -0,0 +1,1139 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fsys
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "internal/testenv"
+ "internal/txtar"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+)
+
+// initOverlay resets the overlay state to reflect the config.
+// config should be a txtar archive string: its comment is the overlay
+// configuration JSON, and its files are laid out in a temporary directory
+// that cwd is set to.
+func initOverlay(t *testing.T, config string) {
+ t.Helper()
+
+ // Create a temporary directory and chdir to it.
+ prevwd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ cwd = filepath.Join(t.TempDir(), "root")
+ if err := os.Mkdir(cwd, 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Chdir(cwd); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(func() {
+ overlay = nil
+ if err := os.Chdir(prevwd); err != nil {
+ t.Fatal(err)
+ }
+ })
+
+ a := txtar.Parse([]byte(config))
+ for _, f := range a.Files {
+ name := filepath.Join(cwd, f.Name)
+ if err := os.MkdirAll(filepath.Dir(name), 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.WriteFile(name, f.Data, 0666); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ var overlayJSON OverlayJSON
+ if err := json.Unmarshal(a.Comment, &overlayJSON); err != nil {
+ t.Fatal(fmt.Errorf("parsing overlay JSON: %v", err))
+ }
+
+ initFromJSON(overlayJSON)
+}
+
+func TestIsDir(t *testing.T) {
+ initOverlay(t, `
+{
+ "Replace": {
+ "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt",
+ "subdir4": "overlayfiles/subdir4",
+ "subdir3/file3b.txt": "overlayfiles/subdir3_file3b.txt",
+ "subdir5": "",
+ "subdir6": ""
+ }
+}
+-- subdir1/file1.txt --
+
+-- subdir3/file3a.txt --
+33
+-- subdir4/file4.txt --
+444
+-- overlayfiles/subdir2_file2.txt --
+2
+-- overlayfiles/subdir3_file3b.txt --
+66666
+-- overlayfiles/subdir4 --
+x
+-- subdir6/file6.txt --
+six
+`)
+
+ testCases := []struct {
+ path string
+ want, wantErr bool
+ }{
+ {"", true, true},
+ {".", true, false},
+ {cwd, true, false},
+ {cwd + string(filepath.Separator), true, false},
+ // subdir1 is only on disk
+ {filepath.Join(cwd, "subdir1"), true, false},
+ {"subdir1", true, false},
+ {"subdir1" + string(filepath.Separator), true, false},
+ {"subdir1/file1.txt", false, false},
+ {"subdir1/doesntexist.txt", false, true},
+ {"doesntexist", false, true},
+ // subdir2 is only in overlay
+ {filepath.Join(cwd, "subdir2"), true, false},
+ {"subdir2", true, false},
+ {"subdir2" + string(filepath.Separator), true, false},
+ {"subdir2/file2.txt", false, false},
+ {"subdir2/doesntexist.txt", false, true},
+ // subdir3 has files on disk and in overlay
+ {filepath.Join(cwd, "subdir3"), true, false},
+ {"subdir3", true, false},
+ {"subdir3" + string(filepath.Separator), true, false},
+ {"subdir3/file3a.txt", false, false},
+ {"subdir3/file3b.txt", false, false},
+ {"subdir3/doesntexist.txt", false, true},
+ // subdir4 is overlaid with a file
+ {filepath.Join(cwd, "subdir4"), false, false},
+ {"subdir4", false, false},
+ {"subdir4" + string(filepath.Separator), false, false},
+ {"subdir4/file4.txt", false, false},
+ {"subdir4/doesntexist.txt", false, false},
+ // subdir5 doesn't exist, and is overlaid with a "delete" entry
+ {filepath.Join(cwd, "subdir5"), false, false},
+ {"subdir5", false, false},
+ {"subdir5" + string(filepath.Separator), false, false},
+ {"subdir5/file5.txt", false, false},
+ {"subdir5/doesntexist.txt", false, false},
+ // subdir6 does exist, and is overlaid with a "delete" entry
+ {filepath.Join(cwd, "subdir6"), false, false},
+ {"subdir6", false, false},
+ {"subdir6" + string(filepath.Separator), false, false},
+ {"subdir6/file6.txt", false, false},
+ {"subdir6/doesntexist.txt", false, false},
+ }
+
+ for _, tc := range testCases {
+ got, err := IsDir(tc.path)
+ if err != nil {
+ if !tc.wantErr {
+ t.Errorf("IsDir(%q): got error with string %q, want no error", tc.path, err.Error())
+ }
+ continue
+ }
+ if tc.wantErr {
+ t.Errorf("IsDir(%q): got no error, want error", tc.path)
+ }
+ if tc.want != got {
+ t.Errorf("IsDir(%q) = %v, want %v", tc.path, got, tc.want)
+ }
+ }
+}
+
+const readDirOverlay = `
+{
+ "Replace": {
+ "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt",
+ "subdir4": "overlayfiles/subdir4",
+ "subdir3/file3b.txt": "overlayfiles/subdir3_file3b.txt",
+ "subdir5": "",
+ "subdir6/asubsubdir/afile.txt": "overlayfiles/subdir6_asubsubdir_afile.txt",
+ "subdir6/asubsubdir/zfile.txt": "overlayfiles/subdir6_asubsubdir_zfile.txt",
+ "subdir6/zsubsubdir/file.txt": "overlayfiles/subdir6_zsubsubdir_file.txt",
+ "subdir7/asubsubdir/file.txt": "overlayfiles/subdir7_asubsubdir_file.txt",
+ "subdir7/zsubsubdir/file.txt": "overlayfiles/subdir7_zsubsubdir_file.txt",
+ "subdir8/doesntexist": "this_file_doesnt_exist_anywhere",
+ "other/pointstodir": "overlayfiles/this_is_a_directory",
+ "parentoverwritten/subdir1": "overlayfiles/parentoverwritten_subdir1",
+ "subdir9/this_file_is_overlaid.txt": "overlayfiles/subdir9_this_file_is_overlaid.txt",
+ "subdir10/only_deleted_file.txt": "",
+ "subdir11/deleted.txt": "",
+ "subdir11": "overlayfiles/subdir11",
+ "textfile.txt/file.go": "overlayfiles/textfile_txt_file.go"
+ }
+}
+-- subdir1/file1.txt --
+
+-- subdir3/file3a.txt --
+33
+-- subdir4/file4.txt --
+444
+-- subdir6/file.txt --
+-- subdir6/asubsubdir/file.txt --
+-- subdir6/anothersubsubdir/file.txt --
+-- subdir9/this_file_is_overlaid.txt --
+-- subdir10/only_deleted_file.txt --
+this will be deleted in overlay
+-- subdir11/deleted.txt --
+-- parentoverwritten/subdir1/subdir2/subdir3/file.txt --
+-- textfile.txt --
+this will be overridden by textfile.txt/file.go
+-- overlayfiles/subdir2_file2.txt --
+2
+-- overlayfiles/subdir3_file3b.txt --
+66666
+-- overlayfiles/subdir4 --
+x
+-- overlayfiles/subdir6_asubsubdir_afile.txt --
+-- overlayfiles/subdir6_asubsubdir_zfile.txt --
+-- overlayfiles/subdir6_zsubsubdir_file.txt --
+-- overlayfiles/subdir7_asubsubdir_file.txt --
+-- overlayfiles/subdir7_zsubsubdir_file.txt --
+-- overlayfiles/parentoverwritten_subdir1 --
+x
+-- overlayfiles/subdir9_this_file_is_overlaid.txt --
+99999999
+-- overlayfiles/subdir11 --
+-- overlayfiles/this_is_a_directory/file.txt --
+-- overlayfiles/textfile_txt_file.go --
+x
+`
+
+func TestReadDir(t *testing.T) {
+ initOverlay(t, readDirOverlay)
+
+ type entry struct {
+ name string
+ size int64
+ isDir bool
+ }
+
+ testCases := []struct {
+ dir string
+ want []entry
+ }{
+ {
+ ".", []entry{
+ {"other", 0, true},
+ {"overlayfiles", 0, true},
+ {"parentoverwritten", 0, true},
+ {"subdir1", 0, true},
+ {"subdir10", 0, true},
+ {"subdir11", 0, false},
+ {"subdir2", 0, true},
+ {"subdir3", 0, true},
+ {"subdir4", 2, false},
+ // no subdir5.
+ {"subdir6", 0, true},
+ {"subdir7", 0, true},
+ {"subdir8", 0, true},
+ {"subdir9", 0, true},
+ {"textfile.txt", 0, true},
+ },
+ },
+ {
+ "subdir1", []entry{
+ {"file1.txt", 1, false},
+ },
+ },
+ {
+ "subdir2", []entry{
+ {"file2.txt", 2, false},
+ },
+ },
+ {
+ "subdir3", []entry{
+ {"file3a.txt", 3, false},
+ {"file3b.txt", 6, false},
+ },
+ },
+ {
+ "subdir6", []entry{
+ {"anothersubsubdir", 0, true},
+ {"asubsubdir", 0, true},
+ {"file.txt", 0, false},
+ {"zsubsubdir", 0, true},
+ },
+ },
+ {
+ "subdir6/asubsubdir", []entry{
+ {"afile.txt", 0, false},
+ {"file.txt", 0, false},
+ {"zfile.txt", 0, false},
+ },
+ },
+ {
+ "subdir8", []entry{
+ {"doesntexist", 0, false}, // entry is returned even if destination file doesn't exist
+ },
+ },
+ {
+ // Check that ReadDir actually redirects files that already exist on disk:
+ // the original this_file_is_overlaid.txt is empty, the replacement is not.
+ "subdir9", []entry{
+ {"this_file_is_overlaid.txt", 9, false},
+ },
+ },
+ {
+ "subdir10", []entry{},
+ },
+ {
+ "parentoverwritten", []entry{
+ {"subdir1", 2, false},
+ },
+ },
+ {
+ "textfile.txt", []entry{
+ {"file.go", 2, false},
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ dir, want := tc.dir, tc.want
+ infos, err := ReadDir(dir)
+ if err != nil {
+ t.Errorf("ReadDir(%q): %v", dir, err)
+ continue
+ }
+ // Sorted diff of want and infos.
+ for len(infos) > 0 || len(want) > 0 {
+ switch {
+ case len(want) == 0 || len(infos) > 0 && infos[0].Name() < want[0].name:
+ t.Errorf("ReadDir(%q): unexpected entry: %s IsDir=%v Size=%v", dir, infos[0].Name(), infos[0].IsDir(), infos[0].Size())
+ infos = infos[1:]
+ case len(infos) == 0 || len(want) > 0 && want[0].name < infos[0].Name():
+ t.Errorf("ReadDir(%q): missing entry: %s IsDir=%v Size=%v", dir, want[0].name, want[0].isDir, want[0].size)
+ want = want[1:]
+ default:
+ infoSize := infos[0].Size()
+ if want[0].isDir {
+ infoSize = 0
+ }
+ if infos[0].IsDir() != want[0].isDir || want[0].isDir && infoSize != want[0].size {
+ t.Errorf("ReadDir(%q): %s: IsDir=%v Size=%v, want IsDir=%v Size=%v", dir, want[0].name, infos[0].IsDir(), infoSize, want[0].isDir, want[0].size)
+ }
+ infos = infos[1:]
+ want = want[1:]
+ }
+ }
+ }
+
+ errCases := []string{
+ "subdir1/file1.txt", // regular file on disk
+ "subdir2/file2.txt", // regular file in overlay
+ "subdir4", // directory overlaid with regular file
+ "subdir5", // directory deleted in overlay
+ "parentoverwritten/subdir1/subdir2/subdir3", // parentoverwritten/subdir1 overlaid with regular file
+ "parentoverwritten/subdir1/subdir2", // parentoverwritten/subdir1 overlaid with regular file
+ "subdir11", // directory with deleted child, overlaid with regular file
+ "other/pointstodir",
+ }
+
+ for _, dir := range errCases {
+ _, err := ReadDir(dir)
+ if _, ok := err.(*fs.PathError); !ok {
+ t.Errorf("ReadDir(%q): err = %T (%v), want fs.PathError", dir, err, err)
+ }
+ }
+}
+
+func TestGlob(t *testing.T) {
+ initOverlay(t, readDirOverlay)
+
+ testCases := []struct {
+ pattern string
+ match []string
+ }{
+ {
+ "*o*",
+ []string{
+ "other",
+ "overlayfiles",
+ "parentoverwritten",
+ },
+ },
+ {
+ "subdir2/file2.txt",
+ []string{
+ "subdir2/file2.txt",
+ },
+ },
+ {
+ "*/*.txt",
+ []string{
+ "overlayfiles/subdir2_file2.txt",
+ "overlayfiles/subdir3_file3b.txt",
+ "overlayfiles/subdir6_asubsubdir_afile.txt",
+ "overlayfiles/subdir6_asubsubdir_zfile.txt",
+ "overlayfiles/subdir6_zsubsubdir_file.txt",
+ "overlayfiles/subdir7_asubsubdir_file.txt",
+ "overlayfiles/subdir7_zsubsubdir_file.txt",
+ "overlayfiles/subdir9_this_file_is_overlaid.txt",
+ "subdir1/file1.txt",
+ "subdir2/file2.txt",
+ "subdir3/file3a.txt",
+ "subdir3/file3b.txt",
+ "subdir6/file.txt",
+ "subdir9/this_file_is_overlaid.txt",
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ pattern := tc.pattern
+ match, err := Glob(pattern)
+ if err != nil {
+ t.Errorf("Glob(%q): %v", pattern, err)
+ continue
+ }
+ want := tc.match
+ for i, name := range want {
+ if name != tc.pattern {
+ want[i] = filepath.FromSlash(name)
+ }
+ }
+ for len(match) > 0 || len(want) > 0 {
+ switch {
+ case len(match) == 0 || len(want) > 0 && want[0] < match[0]:
+ t.Errorf("Glob(%q): missing match: %s", pattern, want[0])
+ want = want[1:]
+ case len(want) == 0 || len(match) > 0 && match[0] < want[0]:
+ t.Errorf("Glob(%q): extra match: %s", pattern, match[0])
+ match = match[1:]
+ default:
+ want = want[1:]
+ match = match[1:]
+ }
+ }
+ }
+}
+
+func TestOverlayPath(t *testing.T) {
+ initOverlay(t, `
+{
+ "Replace": {
+ "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt",
+ "subdir3/doesntexist": "this_file_doesnt_exist_anywhere",
+ "subdir4/this_file_is_overlaid.txt": "overlayfiles/subdir4_this_file_is_overlaid.txt",
+ "subdir5/deleted.txt": "",
+ "parentoverwritten/subdir1": ""
+ }
+}
+-- subdir1/file1.txt --
+file 1
+-- subdir4/this_file_is_overlaid.txt --
+these contents are replaced by the overlay
+-- parentoverwritten/subdir1/subdir2/subdir3/file.txt --
+-- subdir5/deleted.txt --
+deleted
+-- overlayfiles/subdir2_file2.txt --
+file 2
+-- overlayfiles/subdir4_this_file_is_overlaid.txt --
+99999999
+`)
+
+ testCases := []struct {
+ path string
+ wantPath string
+ wantOK bool
+ }{
+ {"subdir1/file1.txt", "subdir1/file1.txt", false},
+ // OverlayPath returns false for directories
+ {"subdir2", "subdir2", false},
+ {"subdir2/file2.txt", filepath.Join(cwd, "overlayfiles/subdir2_file2.txt"), true},
+ // OverlayPath doesn't stat a file to see if it exists, so it happily returns
+ // the 'to' path and true even if the 'to' path doesn't exist on disk.
+ {"subdir3/doesntexist", filepath.Join(cwd, "this_file_doesnt_exist_anywhere"), true},
+ // Like the subdir2/file2.txt case above, except that subdir4 exists on disk while subdir2 does not.
+ {"subdir4/this_file_is_overlaid.txt", filepath.Join(cwd, "overlayfiles/subdir4_this_file_is_overlaid.txt"), true},
+ {"subdir5", "subdir5", false},
+ {"subdir5/deleted.txt", "", true},
+ }
+
+ for _, tc := range testCases {
+ gotPath, gotOK := OverlayPath(tc.path)
+ if gotPath != tc.wantPath || gotOK != tc.wantOK {
+ t.Errorf("OverlayPath(%q): got %v, %v; want %v, %v",
+ tc.path, gotPath, gotOK, tc.wantPath, tc.wantOK)
+ }
+ }
+}
+
+func TestOpen(t *testing.T) {
+ initOverlay(t, `
+{
+ "Replace": {
+ "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt",
+ "subdir3/doesntexist": "this_file_doesnt_exist_anywhere",
+ "subdir4/this_file_is_overlaid.txt": "overlayfiles/subdir4_this_file_is_overlaid.txt",
+ "subdir5/deleted.txt": "",
+ "parentoverwritten/subdir1": "",
+ "childoverlay/subdir1.txt/child.txt": "overlayfiles/child.txt",
+ "subdir11/deleted.txt": "",
+ "subdir11": "overlayfiles/subdir11",
+ "parentdeleted": "",
+ "parentdeleted/file.txt": "overlayfiles/parentdeleted_file.txt"
+ }
+}
+-- subdir11/deleted.txt --
+-- subdir1/file1.txt --
+file 1
+-- subdir4/this_file_is_overlaid.txt --
+these contents are replaced by the overlay
+-- parentoverwritten/subdir1/subdir2/subdir3/file.txt --
+-- childoverlay/subdir1.txt --
+this file doesn't exist because the path
+childoverlay/subdir1.txt/child.txt is in the overlay
+-- subdir5/deleted.txt --
+deleted
+-- parentdeleted --
+this will be deleted so that parentdeleted/file.txt can exist
+-- overlayfiles/subdir2_file2.txt --
+file 2
+-- overlayfiles/subdir4_this_file_is_overlaid.txt --
+99999999
+-- overlayfiles/child.txt --
+-- overlayfiles/subdir11 --
+11
+-- overlayfiles/parentdeleted_file.txt --
+this can exist because the parent directory is deleted
+`)
+
+ testCases := []struct {
+ path string
+ wantContents string
+ isErr bool
+ }{
+ {"subdir1/file1.txt", "file 1\n", false},
+ {"subdir2/file2.txt", "file 2\n", false},
+ {"subdir3/doesntexist", "", true},
+ {"subdir4/this_file_is_overlaid.txt", "99999999\n", false},
+ {"subdir5/deleted.txt", "", true},
+ {"parentoverwritten/subdir1/subdir2/subdir3/file.txt", "", true},
+ {"childoverlay/subdir1.txt", "", true},
+ {"subdir11", "11\n", false},
+ {"parentdeleted/file.txt", "this can exist because the parent directory is deleted\n", false},
+ }
+
+ for _, tc := range testCases {
+ f, err := Open(tc.path)
+ if tc.isErr {
+ if err == nil {
+ f.Close()
+ t.Errorf("Open(%q): got no error, but want error", tc.path)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("Open(%q): got error %v, want nil", tc.path, err)
+ continue
+ }
+ contents, err := io.ReadAll(f)
+ if err != nil {
+ t.Errorf("unexpected error reading contents of file: %v", err)
+ }
+ if string(contents) != tc.wantContents {
+ t.Errorf("contents of file opened with Open(%q): got %q, want %q",
+ tc.path, contents, tc.wantContents)
+ }
+ f.Close()
+ }
+}
+
+func TestIsDirWithGoFiles(t *testing.T) {
+ initOverlay(t, `
+{
+ "Replace": {
+ "goinoverlay/file.go": "dummy",
+ "directory/removed/by/file": "dummy",
+ "directory_with_go_dir/dir.go/file.txt": "dummy",
+ "otherdirectory/deleted.go": "",
+ "nonexistentdirectory/deleted.go": "",
+ "textfile.txt/file.go": "dummy"
+ }
+}
+-- dummy --
+a destination file for the overlay entries to point to
+contents don't matter for this test
+-- nogo/file.txt --
+-- goondisk/file.go --
+-- goinoverlay/file.txt --
+-- directory/removed/by/file/in/overlay/file.go --
+-- otherdirectory/deleted.go --
+-- textfile.txt --
+`)
+
+ testCases := []struct {
+ dir string
+ want bool
+ wantErr bool
+ }{
+ {"nogo", false, false},
+ {"goondisk", true, false},
+ {"goinoverlay", true, false},
+ {"directory/removed/by/file/in/overlay", false, false},
+ {"directory_with_go_dir", false, false},
+ {"otherdirectory", false, false},
+ {"nonexistentdirectory", false, false},
+ {"textfile.txt", true, false},
+ }
+
+ for _, tc := range testCases {
+ got, gotErr := IsDirWithGoFiles(tc.dir)
+ if tc.wantErr {
+ if gotErr == nil {
+ t.Errorf("IsDirWithGoFiles(%q): got %v, %v; want non-nil error", tc.dir, got, gotErr)
+ }
+ continue
+ }
+ if gotErr != nil {
+ t.Errorf("IsDirWithGoFiles(%q): got %v, %v; want nil error", tc.dir, got, gotErr)
+ }
+ if got != tc.want {
+ t.Errorf("IsDirWithGoFiles(%q) = %v; want %v", tc.dir, got, tc.want)
+ }
+ }
+}
+
+func TestWalk(t *testing.T) {
+ // The root of the walk must be a name with an actual basename, not just ".".
+ // Walk uses Lstat to obtain the name of the root, and Lstat on platforms
+ // other than Plan 9 reports the name "." instead of the actual base name of
+ // the directory. (See https://golang.org/issue/42115.)
+
+ type file struct {
+ path string
+ name string
+ size int64
+ mode fs.FileMode
+ isDir bool
+ }
+ testCases := []struct {
+ name string
+ overlay string
+ root string
+ wantFiles []file
+ }{
+ {"no overlay", `
+{}
+-- dir/file.txt --
+`,
+ "dir",
+ []file{
+ {"dir", "dir", 0, fs.ModeDir | 0700, true},
+ {"dir/file.txt", "file.txt", 0, 0600, false},
+ },
+ },
+ {"overlay with different file", `
+{
+ "Replace": {
+ "dir/file.txt": "dir/other.txt"
+ }
+}
+-- dir/file.txt --
+-- dir/other.txt --
+contents of other file
+`,
+ "dir",
+ []file{
+ {"dir", "dir", 0, fs.ModeDir | 0500, true},
+ {"dir/file.txt", "file.txt", 23, 0600, false},
+ {"dir/other.txt", "other.txt", 23, 0600, false},
+ },
+ },
+ {"overlay with new file", `
+{
+ "Replace": {
+ "dir/file.txt": "dir/other.txt"
+ }
+}
+-- dir/other.txt --
+contents of other file
+`,
+ "dir",
+ []file{
+ {"dir", "dir", 0, fs.ModeDir | 0500, true},
+ {"dir/file.txt", "file.txt", 23, 0600, false},
+ {"dir/other.txt", "other.txt", 23, 0600, false},
+ },
+ },
+ {"overlay with new directory", `
+{
+ "Replace": {
+ "dir/subdir/file.txt": "dir/other.txt"
+ }
+}
+-- dir/other.txt --
+contents of other file
+`,
+ "dir",
+ []file{
+ {"dir", "dir", 0, fs.ModeDir | 0500, true},
+ {"dir/other.txt", "other.txt", 23, 0600, false},
+ {"dir/subdir", "subdir", 0, fs.ModeDir | 0500, true},
+ {"dir/subdir/file.txt", "file.txt", 23, 0600, false},
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ initOverlay(t, tc.overlay)
+
+ var got []file
+ Walk(tc.root, func(path string, info fs.FileInfo, err error) error {
+ got = append(got, file{path, info.Name(), info.Size(), info.Mode(), info.IsDir()})
+ return nil
+ })
+
+ if len(got) != len(tc.wantFiles) {
+ t.Errorf("Walk: saw %#v in walk; want %#v", got, tc.wantFiles)
+ }
+ for i := 0; i < len(got) && i < len(tc.wantFiles); i++ {
+ wantPath := filepath.FromSlash(tc.wantFiles[i].path)
+ if got[i].path != wantPath {
+ t.Errorf("path of file #%v in walk, got %q, want %q", i, got[i].path, wantPath)
+ }
+ if got[i].name != tc.wantFiles[i].name {
+ t.Errorf("name of file #%v in walk, got %q, want %q", i, got[i].name, tc.wantFiles[i].name)
+ }
+ if got[i].mode&(fs.ModeDir|0700) != tc.wantFiles[i].mode {
+ t.Errorf("mode&(fs.ModeDir|0700) for mode of file #%v in walk, got %v, want %v", i, got[i].mode&(fs.ModeDir|0700), tc.wantFiles[i].mode)
+ }
+ if got[i].isDir != tc.wantFiles[i].isDir {
+ t.Errorf("isDir for file #%v in walk, got %v, want %v", i, got[i].isDir, tc.wantFiles[i].isDir)
+ }
+ if tc.wantFiles[i].isDir {
+ continue // don't check size for directories
+ }
+ if got[i].size != tc.wantFiles[i].size {
+ t.Errorf("size of file #%v in walk, got %v, want %v", i, got[i].size, tc.wantFiles[i].size)
+ }
+ }
+ })
+ }
+}
+
+func TestWalkSkipDir(t *testing.T) {
+ initOverlay(t, `
+{
+ "Replace": {
+ "dir/skip/file.go": "dummy.txt",
+ "dir/dontskip/file.go": "dummy.txt",
+ "dir/dontskip/skip/file.go": "dummy.txt"
+ }
+}
+-- dummy.txt --
+`)
+
+ var seen []string
+ Walk("dir", func(path string, info fs.FileInfo, err error) error {
+ seen = append(seen, filepath.ToSlash(path))
+ if info.Name() == "skip" {
+ return filepath.SkipDir
+ }
+ return nil
+ })
+
+ wantSeen := []string{"dir", "dir/dontskip", "dir/dontskip/file.go", "dir/dontskip/skip", "dir/skip"}
+
+ if len(seen) != len(wantSeen) {
+ t.Errorf("paths seen in walk: got %v entries; want %v entries", len(seen), len(wantSeen))
+ }
+
+ for i := 0; i < len(seen) && i < len(wantSeen); i++ {
+ if seen[i] != wantSeen[i] {
+ t.Errorf("path #%v seen walking tree: want %q, got %q", i, seen[i], wantSeen[i])
+ }
+ }
+}
+
+func TestWalkSkipAll(t *testing.T) {
+ initOverlay(t, `
+{
+ "Replace": {
+ "dir/subdir1/foo1": "dummy.txt",
+ "dir/subdir1/foo2": "dummy.txt",
+ "dir/subdir1/foo3": "dummy.txt",
+ "dir/subdir2/foo4": "dummy.txt",
+ "dir/zzlast": "dummy.txt"
+ }
+}
+-- dummy.txt --
+`)
+
+ var seen []string
+ Walk("dir", func(path string, info fs.FileInfo, err error) error {
+ seen = append(seen, filepath.ToSlash(path))
+ if info.Name() == "foo2" {
+ return filepath.SkipAll
+ }
+ return nil
+ })
+
+ wantSeen := []string{"dir", "dir/subdir1", "dir/subdir1/foo1", "dir/subdir1/foo2"}
+
+ if len(seen) != len(wantSeen) {
+ t.Errorf("paths seen in walk: got %v entries; want %v entries", len(seen), len(wantSeen))
+ }
+
+ for i := 0; i < len(seen) && i < len(wantSeen); i++ {
+ if seen[i] != wantSeen[i] {
+ t.Errorf("path %#v seen walking tree: got %q, want %q", i, seen[i], wantSeen[i])
+ }
+ }
+}
+
+func TestWalkError(t *testing.T) {
+ initOverlay(t, "{}")
+
+ alreadyCalled := false
+ err := Walk("foo", func(path string, info fs.FileInfo, err error) error {
+ if alreadyCalled {
+ t.Fatal("expected walk function to be called exactly once, but it was called more than once")
+ }
+ alreadyCalled = true
+ return errors.New("returned from function")
+ })
+ if !alreadyCalled {
+ t.Fatal("expected walk function to be called exactly once, but it was never called")
+
+ }
+ if err == nil {
+ t.Fatalf("Walk: got no error, want error")
+ }
+ if err.Error() != "returned from function" {
+ t.Fatalf("Walk: got error %v, want \"returned from function\" error", err)
+ }
+}
+
+func TestWalkSymlink(t *testing.T) {
+ testenv.MustHaveSymlink(t)
+
+ initOverlay(t, `{
+ "Replace": {"overlay_symlink/file": "symlink/file"}
+}
+-- dir/file --`)
+
+ // Create symlink
+ if err := os.Symlink("dir", "symlink"); err != nil {
+ t.Error(err)
+ }
+
+ testCases := []struct {
+ name string
+ dir string
+ wantFiles []string
+ }{
+ {"control", "dir", []string{"dir", filepath.Join("dir", "file")}},
+ // ensure Walk doesn't walk into the directory pointed to by the symlink
+ // (because it's supposed to use Lstat instead of Stat).
+ {"symlink_to_dir", "symlink", []string{"symlink"}},
+ {"overlay_to_symlink_to_dir", "overlay_symlink", []string{"overlay_symlink", filepath.Join("overlay_symlink", "file")}},
+
+ // However, adding filepath.Separator should cause the link to be resolved.
+ {"symlink_with_slash", "symlink" + string(filepath.Separator), []string{"symlink" + string(filepath.Separator), filepath.Join("symlink", "file")}},
+ {"overlay_to_symlink_to_dir", "overlay_symlink" + string(filepath.Separator), []string{"overlay_symlink" + string(filepath.Separator), filepath.Join("overlay_symlink", "file")}},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ var got []string
+
+ err := Walk(tc.dir, func(path string, info fs.FileInfo, err error) error {
+ t.Logf("walk %q", path)
+ got = append(got, path)
+ if err != nil {
+ t.Errorf("walkfn: got non nil err argument: %v, want nil err argument", err)
+ }
+ return nil
+ })
+ if err != nil {
+ t.Errorf("Walk: got error %q, want nil", err)
+ }
+
+ if !reflect.DeepEqual(got, tc.wantFiles) {
+ t.Errorf("files examined by walk: got %v, want %v", got, tc.wantFiles)
+ }
+ })
+ }
+
+}
+
+func TestLstat(t *testing.T) {
+ type file struct {
+ name string
+ size int64
+ mode fs.FileMode // mode & (fs.ModeDir|0700): only check 'user' permissions
+ isDir bool
+ }
+
+ testCases := []struct {
+ name string
+ overlay string
+ path string
+
+ want file
+ wantErr bool
+ }{
+ {
+ "regular_file",
+ `{}
+-- file.txt --
+contents`,
+ "file.txt",
+ file{"file.txt", 9, 0600, false},
+ false,
+ },
+ {
+ "new_file_in_overlay",
+ `{"Replace": {"file.txt": "dummy.txt"}}
+-- dummy.txt --
+contents`,
+ "file.txt",
+ file{"file.txt", 9, 0600, false},
+ false,
+ },
+ {
+ "file_replaced_in_overlay",
+ `{"Replace": {"file.txt": "dummy.txt"}}
+-- file.txt --
+-- dummy.txt --
+contents`,
+ "file.txt",
+ file{"file.txt", 9, 0600, false},
+ false,
+ },
+ {
+ "file_cant_exist",
+ `{"Replace": {"deleted": "dummy.txt"}}
+-- deleted/file.txt --
+-- dummy.txt --
+`,
+ "deleted/file.txt",
+ file{},
+ true,
+ },
+ {
+ "deleted",
+ `{"Replace": {"deleted": ""}}
+-- deleted --
+`,
+ "deleted",
+ file{},
+ true,
+ },
+ {
+ "dir_on_disk",
+ `{}
+-- dir/foo.txt --
+`,
+ "dir",
+ file{"dir", 0, 0700 | fs.ModeDir, true},
+ false,
+ },
+ {
+ "dir_in_overlay",
+ `{"Replace": {"dir/file.txt": "dummy.txt"}}
+-- dummy.txt --
+`,
+ "dir",
+ file{"dir", 0, 0500 | fs.ModeDir, true},
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ initOverlay(t, tc.overlay)
+ got, err := Lstat(tc.path)
+ if tc.wantErr {
+ if err == nil {
+ t.Errorf("lstat(%q): got no error, want error", tc.path)
+ }
+ return
+ }
+ if err != nil {
+ t.Fatalf("lstat(%q): got error %v, want no error", tc.path, err)
+ }
+ if got.Name() != tc.want.name {
+ t.Errorf("lstat(%q).Name(): got %q, want %q", tc.path, got.Name(), tc.want.name)
+ }
+ if got.Mode()&(fs.ModeDir|0700) != tc.want.mode {
+ t.Errorf("lstat(%q).Mode()&(fs.ModeDir|0700): got %v, want %v", tc.path, got.Mode()&(fs.ModeDir|0700), tc.want.mode)
+ }
+ if got.IsDir() != tc.want.isDir {
+ t.Errorf("lstat(%q).IsDir(): got %v, want %v", tc.path, got.IsDir(), tc.want.isDir)
+ }
+ if tc.want.isDir {
+ return // don't check size for directories
+ }
+ if got.Size() != tc.want.size {
+ t.Errorf("lstat(%q).Size(): got %v, want %v", tc.path, got.Size(), tc.want.size)
+ }
+ })
+ }
+}
+
+func TestStat(t *testing.T) {
+ testenv.MustHaveSymlink(t)
+
+ type file struct {
+ name string
+ size int64
+ mode os.FileMode // mode & (os.ModeDir|0700): only check 'user' permissions
+ isDir bool
+ }
+
+ testCases := []struct {
+ name string
+ overlay string
+ path string
+
+ want file
+ wantErr bool
+ }{
+ {
+ "regular_file",
+ `{}
+-- file.txt --
+contents`,
+ "file.txt",
+ file{"file.txt", 9, 0600, false},
+ false,
+ },
+ {
+ "new_file_in_overlay",
+ `{"Replace": {"file.txt": "dummy.txt"}}
+-- dummy.txt --
+contents`,
+ "file.txt",
+ file{"file.txt", 9, 0600, false},
+ false,
+ },
+ {
+ "file_replaced_in_overlay",
+ `{"Replace": {"file.txt": "dummy.txt"}}
+-- file.txt --
+-- dummy.txt --
+contents`,
+ "file.txt",
+ file{"file.txt", 9, 0600, false},
+ false,
+ },
+ {
+ "file_cant_exist",
+ `{"Replace": {"deleted": "dummy.txt"}}
+-- deleted/file.txt --
+-- dummy.txt --
+`,
+ "deleted/file.txt",
+ file{},
+ true,
+ },
+ {
+ "deleted",
+ `{"Replace": {"deleted": ""}}
+-- deleted --
+`,
+ "deleted",
+ file{},
+ true,
+ },
+ {
+ "dir_on_disk",
+ `{}
+-- dir/foo.txt --
+`,
+ "dir",
+ file{"dir", 0, 0700 | os.ModeDir, true},
+ false,
+ },
+ {
+ "dir_in_overlay",
+ `{"Replace": {"dir/file.txt": "dummy.txt"}}
+-- dummy.txt --
+`,
+ "dir",
+ file{"dir", 0, 0500 | os.ModeDir, true},
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ initOverlay(t, tc.overlay)
+ got, err := Stat(tc.path)
+ if tc.wantErr {
+ if err == nil {
+ t.Errorf("Stat(%q): got no error, want error", tc.path)
+ }
+ return
+ }
+ if err != nil {
+ t.Fatalf("Stat(%q): got error %v, want no error", tc.path, err)
+ }
+ if got.Name() != tc.want.name {
+ t.Errorf("Stat(%q).Name(): got %q, want %q", tc.path, got.Name(), tc.want.name)
+ }
+ if got.Mode()&(os.ModeDir|0700) != tc.want.mode {
+ t.Errorf("Stat(%q).Mode()&(os.ModeDir|0700): got %v, want %v", tc.path, got.Mode()&(os.ModeDir|0700), tc.want.mode)
+ }
+ if got.IsDir() != tc.want.isDir {
+ t.Errorf("Stat(%q).IsDir(): got %v, want %v", tc.path, got.IsDir(), tc.want.isDir)
+ }
+ if tc.want.isDir {
+ return // don't check size for directories
+ }
+ if got.Size() != tc.want.size {
+ t.Errorf("Stat(%q).Size(): got %v, want %v", tc.path, got.Size(), tc.want.size)
+ }
+ })
+ }
+}
+
+func TestStatSymlink(t *testing.T) {
+ testenv.MustHaveSymlink(t)
+
+ initOverlay(t, `{
+ "Replace": {"file.go": "symlink"}
+}
+-- to.go --
+0123456789
+`)
+
+ // Create symlink
+ if err := os.Symlink("to.go", "symlink"); err != nil {
+ t.Error(err)
+ }
+
+ f := "file.go"
+ fi, err := Stat(f)
+ if err != nil {
+ t.Errorf("Stat(%q): got error %q, want nil error", f, err)
+ }
+
+ if !fi.Mode().IsRegular() {
+ t.Errorf("Stat(%q).Mode(): got %v, want regular mode", f, fi.Mode())
+ }
+
+ if fi.Size() != 11 {
+ t.Errorf("Stat(%q).Size(): got %v, want 11", f, fi.Size())
+ }
+}
diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go
new file mode 100644
index 0000000..353506f
--- /dev/null
+++ b/src/cmd/go/internal/generate/generate.go
@@ -0,0 +1,502 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package generate implements the “go generate” command.
+package generate
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "slices"
+ "strconv"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/str"
+ "cmd/go/internal/work"
+)
+
+var CmdGenerate = &base.Command{
+ Run: runGenerate,
+ UsageLine: "go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]",
+ Short: "generate Go files by processing source",
+ Long: `
+Generate runs commands described by directives within existing
+files. Those commands can run any process but the intent is to
+create or update Go source files.
+
+Go generate is never run automatically by go build, go test,
+and so on. It must be run explicitly.
+
+Go generate scans the file for directives, which are lines of
+the form,
+
+ //go:generate command argument...
+
+(note: no leading spaces and no space in "//go") where command
+is the generator to be run, corresponding to an executable file
+that can be run locally. It must either be in the shell path
+(gofmt), a fully qualified path (/usr/you/bin/mytool), or a
+command alias, described below.
+
+Note that go generate does not parse the file, so lines that look
+like directives in comments or multiline strings will be treated
+as directives.
+
+The arguments to the directive are space-separated tokens or
+double-quoted strings passed to the generator as individual
+arguments when it is run.
+
+Quoted strings use Go syntax and are evaluated before execution; a
+quoted string appears as a single argument to the generator.
+
+To convey to humans and machine tools that code is generated,
+generated source should have a line that matches the following
+regular expression (in Go syntax):
+
+ ^// Code generated .* DO NOT EDIT\.$
+
+This line must appear before the first non-comment, non-blank
+text in the file.
+
+Go generate sets several variables when it runs the generator:
+
+ $GOARCH
+ The execution architecture (arm, amd64, etc.)
+ $GOOS
+ The execution operating system (linux, windows, etc.)
+ $GOFILE
+ The base name of the file.
+ $GOLINE
+ The line number of the directive in the source file.
+ $GOPACKAGE
+ The name of the package of the file containing the directive.
+ $GOROOT
+ The GOROOT directory for the 'go' command that invoked the
+ generator, containing the Go toolchain and standard library.
+ $DOLLAR
+ A dollar sign.
+ $PATH
+ The $PATH of the parent process, with $GOROOT/bin
+ placed at the beginning. This causes generators
+ that execute 'go' commands to use the same 'go'
+ as the parent 'go generate' command.
+
+Other than variable substitution and quoted-string evaluation, no
+special processing such as "globbing" is performed on the command
+line.
+
+As a last step before running the command, any invocations of any
+environment variables with alphanumeric names, such as $GOFILE or
+$HOME, are expanded throughout the command line. The syntax for
+variable expansion is $NAME on all operating systems. Due to the
+order of evaluation, variables are expanded even inside quoted
+strings. If the variable NAME is not set, $NAME expands to the
+empty string.
+
+A directive of the form,
+
+ //go:generate -command xxx args...
+
+specifies, for the remainder of this source file only, that the
+string xxx represents the command identified by the arguments. This
+can be used to create aliases or to handle multiword generators.
+For example,
+
+ //go:generate -command foo go tool foo
+
+specifies that the command "foo" represents the generator
+"go tool foo".
+
+Generate processes packages in the order given on the command line,
+one at a time. If the command line lists .go files from a single directory,
+they are treated as a single package. Within a package, generate processes the
+source files in a package in file name order, one at a time. Within
+a source file, generate runs generators in the order they appear
+in the file, one at a time. The go generate tool also sets the build
+tag "generate" so that files may be examined by go generate but ignored
+during build.
+
+For packages with invalid code, generate processes only source files with a
+valid package clause.
+
+If any generator returns an error exit status, "go generate" skips
+all further processing for that package.
+
+The generator is run in the package's source directory.
+
+Go generate accepts two specific flags:
+
+ -run=""
+ if non-empty, specifies a regular expression to select
+ directives whose full original source text (excluding
+ any trailing spaces and final newline) matches the
+ expression.
+
+ -skip=""
+ if non-empty, specifies a regular expression to suppress
+ directives whose full original source text (excluding
+ any trailing spaces and final newline) matches the
+ expression. If a directive matches both the -run and
+ the -skip arguments, it is skipped.
+
+It also accepts the standard build flags including -v, -n, and -x.
+The -v flag prints the names of packages and files as they are
+processed.
+The -n flag prints commands that would be executed.
+The -x flag prints commands as they are executed.
+
+For more about build flags, see 'go help build'.
+
+For more about specifying packages, see 'go help packages'.
+ `,
+}
+
+var (
+ generateRunFlag string // generate -run flag
+ generateRunRE *regexp.Regexp // compiled expression for -run
+
+ generateSkipFlag string // generate -skip flag
+ generateSkipRE *regexp.Regexp // compiled expression for -skip
+)
+
+func init() {
+ work.AddBuildFlags(CmdGenerate, work.DefaultBuildFlags)
+ CmdGenerate.Flag.StringVar(&generateRunFlag, "run", "", "")
+ CmdGenerate.Flag.StringVar(&generateSkipFlag, "skip", "", "")
+}
+
+func runGenerate(ctx context.Context, cmd *base.Command, args []string) {
+ modload.InitWorkfile()
+
+ if generateRunFlag != "" {
+ var err error
+ generateRunRE, err = regexp.Compile(generateRunFlag)
+ if err != nil {
+ log.Fatalf("generate: %s", err)
+ }
+ }
+ if generateSkipFlag != "" {
+ var err error
+ generateSkipRE, err = regexp.Compile(generateSkipFlag)
+ if err != nil {
+ log.Fatalf("generate: %s", err)
+ }
+ }
+
+ cfg.BuildContext.BuildTags = append(cfg.BuildContext.BuildTags, "generate")
+
+ // Even if the arguments are .go files, this loop suffices.
+ printed := false
+ pkgOpts := load.PackageOpts{IgnoreImports: true}
+ for _, pkg := range load.PackagesAndErrors(ctx, pkgOpts, args) {
+ if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main {
+ if !printed {
+ fmt.Fprintf(os.Stderr, "go: not generating in packages in dependency modules\n")
+ printed = true
+ }
+ continue
+ }
+
+ for _, file := range pkg.InternalGoFiles() {
+ if !generate(file) {
+ break
+ }
+ }
+
+ for _, file := range pkg.InternalXGoFiles() {
+ if !generate(file) {
+ break
+ }
+ }
+ }
+}
+
+// generate runs the generation directives for a single file.
+func generate(absFile string) bool {
+ src, err := os.ReadFile(absFile)
+ if err != nil {
+ log.Fatalf("generate: %s", err)
+ }
+
+ // Parse package clause
+ filePkg, err := parser.ParseFile(token.NewFileSet(), "", src, parser.PackageClauseOnly)
+ if err != nil {
+ // Invalid package clause - ignore file.
+ return true
+ }
+
+ g := &Generator{
+ r: bytes.NewReader(src),
+ path: absFile,
+ pkg: filePkg.Name.String(),
+ commands: make(map[string][]string),
+ }
+ return g.run()
+}
+
+// A Generator represents the state of a single Go source file
+// being scanned for generator commands.
+type Generator struct {
+ r io.Reader
+ path string // full rooted path name.
+ dir string // full rooted directory of file.
+ file string // base name of file.
+ pkg string
+ commands map[string][]string
+ lineNum int // current line number.
+ env []string
+}
+
+// run runs the generators in the current file.
+func (g *Generator) run() (ok bool) {
+ // Processing below here calls g.errorf on failure, which does panic(stop).
+ // If we encounter an error, we abort the package.
+ defer func() {
+ e := recover()
+ if e != nil {
+ ok = false
+ if e != stop {
+ panic(e)
+ }
+ base.SetExitStatus(1)
+ }
+ }()
+ g.dir, g.file = filepath.Split(g.path)
+ g.dir = filepath.Clean(g.dir) // No final separator please.
+ if cfg.BuildV {
+ fmt.Fprintf(os.Stderr, "%s\n", base.ShortPath(g.path))
+ }
+
+ // Scan for lines that start "//go:generate".
+ // Can't use bufio.Scanner because it can't handle long lines,
+ // which are likely to appear when using generate.
+ input := bufio.NewReader(g.r)
+ var err error
+ // One line per loop.
+ for {
+ g.lineNum++ // 1-indexed.
+ var buf []byte
+ buf, err = input.ReadSlice('\n')
+ if err == bufio.ErrBufferFull {
+ // Line too long - consume and ignore.
+ if isGoGenerate(buf) {
+ g.errorf("directive too long")
+ }
+ for err == bufio.ErrBufferFull {
+ _, err = input.ReadSlice('\n')
+ }
+ if err != nil {
+ break
+ }
+ continue
+ }
+
+ if err != nil {
+ // Check for marker at EOF without final \n.
+ if err == io.EOF && isGoGenerate(buf) {
+ err = io.ErrUnexpectedEOF
+ }
+ break
+ }
+
+ if !isGoGenerate(buf) {
+ continue
+ }
+ if generateRunFlag != "" && !generateRunRE.Match(bytes.TrimSpace(buf)) {
+ continue
+ }
+ if generateSkipFlag != "" && generateSkipRE.Match(bytes.TrimSpace(buf)) {
+ continue
+ }
+
+ g.setEnv()
+ words := g.split(string(buf))
+ if len(words) == 0 {
+ g.errorf("no arguments to directive")
+ }
+ if words[0] == "-command" {
+ g.setShorthand(words)
+ continue
+ }
+ // Run the command line.
+ if cfg.BuildN || cfg.BuildX {
+ fmt.Fprintf(os.Stderr, "%s\n", strings.Join(words, " "))
+ }
+ if cfg.BuildN {
+ continue
+ }
+ g.exec(words)
+ }
+ if err != nil && err != io.EOF {
+ g.errorf("error reading %s: %s", base.ShortPath(g.path), err)
+ }
+ return true
+}
+
+func isGoGenerate(buf []byte) bool {
+ return bytes.HasPrefix(buf, []byte("//go:generate ")) || bytes.HasPrefix(buf, []byte("//go:generate\t"))
+}
+
+// setEnv sets the extra environment variables used when executing a
+// single go:generate command.
+func (g *Generator) setEnv() {
+ env := []string{
+ "GOROOT=" + cfg.GOROOT,
+ "GOARCH=" + cfg.BuildContext.GOARCH,
+ "GOOS=" + cfg.BuildContext.GOOS,
+ "GOFILE=" + g.file,
+ "GOLINE=" + strconv.Itoa(g.lineNum),
+ "GOPACKAGE=" + g.pkg,
+ "DOLLAR=" + "$",
+ }
+ env = base.AppendPATH(env)
+ env = base.AppendPWD(env, g.dir)
+ g.env = env
+}
+
+// split breaks the line into words, evaluating quoted
+// strings and evaluating environment variables.
+// The initial //go:generate element is present in line.
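+//
+// For example (an illustrative directive; $GOFILE expands to the name of the
+// current source file), the line
+//
+//	//go:generate foo -o "a b.txt" $GOFILE
+//
+// splits into the words "foo", "-o", "a b.txt", and the file name.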
+func (g *Generator) split(line string) []string {
+ // Parse line, obeying quoted strings.
+ var words []string
+ line = line[len("//go:generate ") : len(line)-1] // Drop preamble and final newline.
+ // There may still be a carriage return.
+ if len(line) > 0 && line[len(line)-1] == '\r' {
+ line = line[:len(line)-1]
+ }
+ // One (possibly quoted) word per iteration.
+Words:
+ for {
+ line = strings.TrimLeft(line, " \t")
+ if len(line) == 0 {
+ break
+ }
+ if line[0] == '"' {
+ for i := 1; i < len(line); i++ {
+ c := line[i] // Only looking for ASCII so this is OK.
+ switch c {
+ case '\\':
+ if i+1 == len(line) {
+ g.errorf("bad backslash")
+ }
+ i++ // Absorb next byte (If it's a multibyte we'll get an error in Unquote).
+ case '"':
+ word, err := strconv.Unquote(line[0 : i+1])
+ if err != nil {
+ g.errorf("bad quoted string")
+ }
+ words = append(words, word)
+ line = line[i+1:]
+ // Check the next character is space or end of line.
+ if len(line) > 0 && line[0] != ' ' && line[0] != '\t' {
+ g.errorf("expect space after quoted argument")
+ }
+ continue Words
+ }
+ }
+ g.errorf("mismatched quoted string")
+ }
+ i := strings.IndexAny(line, " \t")
+ if i < 0 {
+ i = len(line)
+ }
+ words = append(words, line[0:i])
+ line = line[i:]
+ }
+ // Substitute command if required.
+ if len(words) > 0 && g.commands[words[0]] != nil {
+ // Replace 0th word by command substitution.
+ //
+ // Force a copy of the command definition to
+ // ensure words doesn't end up as a reference
+ // to the g.commands content.
+ tmpCmdWords := append([]string(nil), (g.commands[words[0]])...)
+ words = append(tmpCmdWords, words[1:]...)
+ }
+ // Substitute environment variables.
+ for i, word := range words {
+ words[i] = os.Expand(word, g.expandVar)
+ }
+ return words
+}
+
+var stop = fmt.Errorf("error in generation")
+
+// errorf logs an error message prefixed with the file and line number.
+// It then exits the program (with exit status 1) because generation stops
+// at the first error.
+func (g *Generator) errorf(format string, args ...any) {
+ fmt.Fprintf(os.Stderr, "%s:%d: %s\n", base.ShortPath(g.path), g.lineNum,
+ fmt.Sprintf(format, args...))
+ panic(stop)
+}
+
+// expandVar expands the $XXX invocation in word. It is called
+// by os.Expand.
+func (g *Generator) expandVar(word string) string {
+ w := word + "="
+ for _, e := range g.env {
+ if strings.HasPrefix(e, w) {
+ return e[len(w):]
+ }
+ }
+ return os.Getenv(word)
+}
+
+// setShorthand installs a new shorthand as defined by a -command directive.
+func (g *Generator) setShorthand(words []string) {
+ // Create command shorthand.
+ if len(words) == 1 {
+ g.errorf("no command specified for -command")
+ }
+ command := words[1]
+ if g.commands[command] != nil {
+ g.errorf("command %q multiply defined", command)
+ }
+ g.commands[command] = slices.Clip(words[2:])
+}
+
+// exec runs the command specified by the argument. The first word is
+// the command name itself.
+func (g *Generator) exec(words []string) {
+ path := words[0]
+ if path != "" && !strings.Contains(path, string(os.PathSeparator)) {
+ // If a generator says '//go:generate go run <blah>' it almost certainly
+ // intends to use the same 'go' as 'go generate' itself.
+ // Prefer to resolve the binary from GOROOT/bin, and for consistency
+ // prefer to resolve any other commands there too.
+ gorootBinPath, err := exec.LookPath(filepath.Join(cfg.GOROOTbin, path))
+ if err == nil {
+ path = gorootBinPath
+ }
+ }
+ cmd := exec.Command(path, words[1:]...)
+ cmd.Args[0] = words[0] // Overwrite with the original in case it was rewritten above.
+
+ // Standard in and out of generator should be the usual.
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ // Run the command in the package directory.
+ cmd.Dir = g.dir
+ cmd.Env = str.StringList(cfg.OrigEnv, g.env)
+ err := cmd.Run()
+ if err != nil {
+ g.errorf("running %q: %s", words[0], err)
+ }
+}
diff --git a/src/cmd/go/internal/generate/generate_test.go b/src/cmd/go/internal/generate/generate_test.go
new file mode 100644
index 0000000..d61ecf1
--- /dev/null
+++ b/src/cmd/go/internal/generate/generate_test.go
@@ -0,0 +1,259 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package generate
+
+import (
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "testing"
+)
+
+type splitTest struct {
+ in string
+ out []string
+}
+
+// Like splitTest, but with a source line number to set before running the test.
+type splitTestWithLine struct {
+ in string
+ out []string
+ lineNumber int
+}
+
+const anyLineNo = 0
+
+var splitTests = []splitTest{
+ {"", nil},
+ {"x", []string{"x"}},
+ {" a b\tc ", []string{"a", "b", "c"}},
+ {` " a " `, []string{" a "}},
+ {"$GOARCH", []string{runtime.GOARCH}},
+ {"$GOOS", []string{runtime.GOOS}},
+ {"$GOFILE", []string{"proc.go"}},
+ {"$GOPACKAGE", []string{"sys"}},
+ {"a $XXNOTDEFINEDXX b", []string{"a", "", "b"}},
+ {"/$XXNOTDEFINED/", []string{"//"}},
+ {"/$DOLLAR/", []string{"/$/"}},
+ {"yacc -o $GOARCH/yacc_$GOFILE", []string{"go", "tool", "yacc", "-o", runtime.GOARCH + "/yacc_proc.go"}},
+}
+
+func TestGenerateCommandParse(t *testing.T) {
+ dir := filepath.Join(testenv.GOROOT(t), "src", "sys")
+ g := &Generator{
+ r: nil, // Unused here.
+ path: filepath.Join(dir, "proc.go"),
+ dir: dir,
+ file: "proc.go",
+ pkg: "sys",
+ commands: make(map[string][]string),
+ }
+ g.setEnv()
+ g.setShorthand([]string{"-command", "yacc", "go", "tool", "yacc"})
+ for _, test := range splitTests {
+ // First with newlines.
+ got := g.split("//go:generate " + test.in + "\n")
+ if !reflect.DeepEqual(got, test.out) {
+ t.Errorf("split(%q): got %q expected %q", test.in, got, test.out)
+ }
+ // Then with CRLFs, thank you Windows.
+ got = g.split("//go:generate " + test.in + "\r\n")
+ if !reflect.DeepEqual(got, test.out) {
+ t.Errorf("split(%q): got %q expected %q", test.in, got, test.out)
+ }
+ }
+}
+
+// These environment variables are unset before the shorthand tests run.
+var undefEnvList = []string{
+ "_XYZZY_",
+}
+
+// These environment variables are set before the shorthand tests run.
+var defEnvMap = map[string]string{
+ "_PLUGH_": "SomeVal",
+ "_X": "Y",
+}
+
+// TestGenerateCommandShorthand is similar to TestGenerateCommandParse,
+// except:
+// 1. If the result starts with -command, it records that shorthand
+// before moving on to the next test.
+// 2. If a source line number is specified, it sets that in the parser
+// before executing the test, i.e., it executes the split as if it
+// were processing that source line.
+func TestGenerateCommandShorthand(t *testing.T) {
+ dir := filepath.Join(testenv.GOROOT(t), "src", "sys")
+ g := &Generator{
+ r: nil, // Unused here.
+ path: filepath.Join(dir, "proc.go"),
+ dir: dir,
+ file: "proc.go",
+ pkg: "sys",
+ commands: make(map[string][]string),
+ }
+
+ var inLine string
+ var expected, got []string
+
+ g.setEnv()
+
+ // Set up the system environment variables
+ for i := range undefEnvList {
+ os.Unsetenv(undefEnvList[i])
+ }
+ for k := range defEnvMap {
+ os.Setenv(k, defEnvMap[k])
+ }
+
+ // simple command from environment variable
+ inLine = "//go:generate -command CMD0 \"ab${_X}cd\""
+ expected = []string{"-command", "CMD0", "abYcd"}
+ got = g.split(inLine + "\n")
+
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("split(%q): got %q expected %q", inLine, got, expected)
+ }
+
+ // try again, with an extra level of indirection (should leave variable in command)
+ inLine = "//go:generate -command CMD0 \"ab${DOLLAR}{_X}cd\""
+ expected = []string{"-command", "CMD0", "ab${_X}cd"}
+ got = g.split(inLine + "\n")
+
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("split(%q): got %q expected %q", inLine, got, expected)
+ }
+
+ // Now the interesting part, record that output as a command
+ g.setShorthand(got)
+
+ // see that the command still substitutes correctly from env. variable
+ inLine = "//go:generate CMD0"
+ expected = []string{"abYcd"}
+ got = g.split(inLine + "\n")
+
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("split(%q): got %q expected %q", inLine, got, expected)
+ }
+
+ // Now change the value of $X and see if the recorded definition is
+ // still intact (vs. having the $_X already substituted out)
+
+ os.Setenv("_X", "Z")
+ inLine = "//go:generate CMD0"
+ expected = []string{"abZcd"}
+ got = g.split(inLine + "\n")
+
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("split(%q): got %q expected %q", inLine, got, expected)
+ }
+
+ // What if the variable is now undefined? Should be empty substitution.
+
+ os.Unsetenv("_X")
+ inLine = "//go:generate CMD0"
+ expected = []string{"abcd"}
+ got = g.split(inLine + "\n")
+
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("split(%q): got %q expected %q", inLine, got, expected)
+ }
+
+ // Try another undefined variable as an extra check
+ os.Unsetenv("_Z")
+ inLine = "//go:generate -command CMD1 \"ab${_Z}cd\""
+ expected = []string{"-command", "CMD1", "abcd"}
+ got = g.split(inLine + "\n")
+
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("split(%q): got %q expected %q", inLine, got, expected)
+ }
+
+ g.setShorthand(got)
+
+ inLine = "//go:generate CMD1"
+ expected = []string{"abcd"}
+ got = g.split(inLine + "\n")
+
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("split(%q): got %q expected %q", inLine, got, expected)
+ }
+
+ const val = "someNewValue"
+ os.Setenv("_Z", val)
+
+ // try again with the properly-escaped variable.
+
+ inLine = "//go:generate -command CMD2 \"ab${DOLLAR}{_Z}cd\""
+ expected = []string{"-command", "CMD2", "ab${_Z}cd"}
+ got = g.split(inLine + "\n")
+
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("split(%q): got %q expected %q", inLine, got, expected)
+ }
+
+ g.setShorthand(got)
+
+ inLine = "//go:generate CMD2"
+ expected = []string{"ab" + val + "cd"}
+ got = g.split(inLine + "\n")
+
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("split(%q): got %q expected %q", inLine, got, expected)
+ }
+}
+
+// Command-related tests for TestGenerateCommandShortHand2.
+// Note: line numbers are included to check substitutions from the built-in variable $GOLINE.
+var splitTestsLines = []splitTestWithLine{
+ {"-command TEST1 $GOLINE", []string{"-command", "TEST1", "22"}, 22},
+ {"-command TEST2 ${DOLLAR}GOLINE", []string{"-command", "TEST2", "$GOLINE"}, 26},
+ {"TEST1", []string{"22"}, 33},
+ {"TEST2", []string{"66"}, 66},
+ {"TEST1 ''", []string{"22", "''"}, 99},
+ {"TEST2 ''", []string{"44", "''"}, 44},
+}
+
+// TestGenerateCommandShortHand2 is similar to TestGenerateCommandParse,
+// except:
+// 1. If the result starts with -command, it records that shorthand
+// before moving on to the next test.
+// 2. If a source line number is specified, it sets that in the parser
+// before executing the test, i.e., it executes the split as if it
+// were processing that source line.
+func TestGenerateCommandShortHand2(t *testing.T) {
+ dir := filepath.Join(testenv.GOROOT(t), "src", "sys")
+ g := &Generator{
+ r: nil, // Unused here.
+ path: filepath.Join(dir, "proc.go"),
+ dir: dir,
+ file: "proc.go",
+ pkg: "sys",
+ commands: make(map[string][]string),
+ }
+ g.setEnv()
+ for _, test := range splitTestsLines {
+ // if the test specified a line number, reflect that
+ if test.lineNumber != anyLineNo {
+ g.lineNum = test.lineNumber
+ g.setEnv()
+ }
+ // First with newlines.
+ got := g.split("//go:generate " + test.in + "\n")
+ if !reflect.DeepEqual(got, test.out) {
+ t.Errorf("split(%q): got %q expected %q", test.in, got, test.out)
+ }
+ // Then with CRLFs, thank you Windows.
+ got = g.split("//go:generate " + test.in + "\r\n")
+ if !reflect.DeepEqual(got, test.out) {
+ t.Errorf("split(%q): got %q expected %q", test.in, got, test.out)
+ }
+ if got[0] == "-command" { // record commands
+ g.setShorthand(got)
+ }
+ }
+}
diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go
new file mode 100644
index 0000000..06b567a
--- /dev/null
+++ b/src/cmd/go/internal/get/get.go
@@ -0,0 +1,640 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package get implements the “go get” command.
+package get
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+ "cmd/go/internal/search"
+ "cmd/go/internal/str"
+ "cmd/go/internal/vcs"
+ "cmd/go/internal/web"
+ "cmd/go/internal/work"
+
+ "golang.org/x/mod/module"
+)
+
+var CmdGet = &base.Command{
+ UsageLine: "go get [-d] [-f] [-t] [-u] [-v] [-fix] [build flags] [packages]",
+ Short: "download and install packages and dependencies",
+ Long: `
+Get downloads the packages named by the import paths, along with their
+dependencies. It then installs the named packages, like 'go install'.
+
+The -d flag instructs get to stop after downloading the packages; that is,
+it instructs get not to install the packages.
+
+The -f flag, valid only when -u is set, forces get -u not to verify that
+each package has been checked out from the source control repository
+implied by its import path. This can be useful if the source is a local fork
+of the original.
+
+The -fix flag instructs get to run the fix tool on the downloaded packages
+before resolving dependencies or building the code.
+
+The -t flag instructs get to also download the packages required to build
+the tests for the specified packages.
+
+The -u flag instructs get to use the network to update the named packages
+and their dependencies. By default, get uses the network to check out
+missing packages but does not use it to look for updates to existing packages.
+
+The -v flag enables verbose progress and debug output.
+
+Get also accepts build flags to control the installation. See 'go help build'.
+
+When checking out a new package, get creates the target directory
+GOPATH/src/<import-path>. If the GOPATH contains multiple entries,
+get uses the first one. For more details see: 'go help gopath'.
+
+When checking out or updating a package, get looks for a branch or tag
+that matches the locally installed version of Go. The most important
+rule is that if the local installation is running version "go1", get
+searches for a branch or tag named "go1". If no such version exists
+it retrieves the default branch of the package.
+
+When go get checks out or updates a Git repository,
+it also updates any git submodules referenced by the repository.
+
+Get never checks out or updates code stored in vendor directories.
+
+For more about build flags, see 'go help build'.
+
+For more about specifying packages, see 'go help packages'.
+
+For more about how 'go get' finds source code to
+download, see 'go help importpath'.
+
+This text describes the behavior of get when using GOPATH
+to manage source code and dependencies.
+If instead the go command is running in module-aware mode,
+the details of get's flags and effects change, as does 'go help get'.
+See 'go help modules' and 'go help module-get'.
+
+See also: go build, go install, go clean.
+ `,
+}
+
+var HelpGopathGet = &base.Command{
+ UsageLine: "gopath-get",
+ Short: "legacy GOPATH go get",
+ Long: `
+The 'go get' command changes behavior depending on whether the
+go command is running in module-aware mode or legacy GOPATH mode.
+This help text, accessible as 'go help gopath-get' even in module-aware mode,
+describes 'go get' as it operates in legacy GOPATH mode.
+
+Usage: ` + CmdGet.UsageLine + `
+` + CmdGet.Long,
+}
+
+var (
+ getD = CmdGet.Flag.Bool("d", false, "")
+ getF = CmdGet.Flag.Bool("f", false, "")
+ getT = CmdGet.Flag.Bool("t", false, "")
+ getU = CmdGet.Flag.Bool("u", false, "")
+ getFix = CmdGet.Flag.Bool("fix", false, "")
+ getInsecure = CmdGet.Flag.Bool("insecure", false, "")
+)
+
+func init() {
+ work.AddBuildFlags(CmdGet, work.OmitModFlag|work.OmitModCommonFlags)
+ CmdGet.Run = runGet // break init loop
+}
+
+func runGet(ctx context.Context, cmd *base.Command, args []string) {
+ if cfg.ModulesEnabled {
+ // Should not happen: main.go should install the separate module-enabled get code.
+ base.Fatalf("go: modules not implemented")
+ }
+
+ work.BuildInit()
+
+ if *getF && !*getU {
+ base.Fatalf("go: cannot use -f flag without -u")
+ }
+ if *getInsecure {
+ base.Fatalf("go: -insecure flag is no longer supported; use GOINSECURE instead")
+ }
+
+ // Disable any prompting for passwords by Git itself.
+ // Only has an effect for 2.3.0 or later, but avoiding
+ // the prompt in earlier versions is just too hard.
+ // If user has explicitly set GIT_TERMINAL_PROMPT=1, keep
+ // prompting.
+ // See golang.org/issue/9341 and golang.org/issue/12706.
+ if os.Getenv("GIT_TERMINAL_PROMPT") == "" {
+ os.Setenv("GIT_TERMINAL_PROMPT", "0")
+ }
+
+ // Also disable prompting for passwords by the 'ssh' subprocess spawned by
+ // Git, because apparently GIT_TERMINAL_PROMPT isn't sufficient to do that.
+ // Adding '-o BatchMode=yes' should do the trick.
+ //
+ // If a Git subprocess forks a child into the background to cache a new connection,
+ // that child keeps stdout/stderr open. After the Git subprocess exits,
+// os/exec expects to be able to read from the stdout/stderr pipe
+ // until EOF to get all the data that the Git subprocess wrote before exiting.
+ // The EOF doesn't come until the child exits too, because the child
+ // is holding the write end of the pipe.
+ // This is unfortunate, but it has come up at least twice
+ // (see golang.org/issue/13453 and golang.org/issue/16104)
+ // and confuses users when it does.
+ // If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND,
+ // assume they know what they are doing and don't step on it.
+ // But default to turning off ControlMaster.
+ if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
+ os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no -o BatchMode=yes")
+ }
+
+ // And one more source of Git prompts: the Git Credential Manager Core for Windows.
+ //
+ // See https://github.com/microsoft/Git-Credential-Manager-Core/blob/master/docs/environment.md#gcm_interactive.
+ if os.Getenv("GCM_INTERACTIVE") == "" {
+ os.Setenv("GCM_INTERACTIVE", "never")
+ }
+
+ // Phase 1. Download/update.
+ var stk load.ImportStack
+ mode := 0
+ if *getT {
+ mode |= load.GetTestDeps
+ }
+ for _, pkg := range downloadPaths(args) {
+ download(ctx, pkg, nil, &stk, mode)
+ }
+ base.ExitIfErrors()
+
+ // Phase 2. Rescan packages and re-evaluate args list.
+
+ // Code we downloaded and all code that depends on it
+ // needs to be evicted from the package cache so that
+ // the information will be recomputed. Instead of keeping
+ // track of the reverse dependency information, evict
+ // everything.
+ load.ClearPackageCache()
+
+ pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args)
+ load.CheckPackageErrors(pkgs)
+
+ // Phase 3. Install.
+ if *getD {
+ // Download only.
+ // Check delayed until now so that downloadPaths
+ // and CheckPackageErrors have a chance to print errors.
+ return
+ }
+
+ work.InstallPackages(ctx, args, pkgs)
+}
+
+// downloadPaths prepares the list of paths to pass to download.
+// It expands ... patterns that can be expanded. If there is no match
+// for a particular pattern, downloadPaths leaves it in the result list,
+// in the hope that we can figure out the repository from the
+// initial ...-free prefix.
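+//
+// For example, in a fresh GOPATH the pattern "rsc.io/pdf/..." matches no
+// packages yet, so it is left in the list and resolved later from the
+// "rsc.io/pdf" prefix (see downloadPackage).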
+func downloadPaths(patterns []string) []string {
+ for _, arg := range patterns {
+ if strings.Contains(arg, "@") {
+ base.Fatalf("go: can only use path@version syntax with 'go get' and 'go install' in module-aware mode")
+ }
+
+ // Guard against 'go get x.go', a common mistake.
+ // Note that package and module paths may end with '.go', so only print an error
+ // if the argument has no slash or refers to an existing file.
+ if strings.HasSuffix(arg, ".go") {
+ if !strings.Contains(arg, "/") {
+ base.Errorf("go: %s: arguments must be package or module paths", arg)
+ continue
+ }
+ if fi, err := os.Stat(arg); err == nil && !fi.IsDir() {
+ base.Errorf("go: %s exists as a file, but 'go get' requires package arguments", arg)
+ }
+ }
+ }
+ base.ExitIfErrors()
+
+ var pkgs []string
+ noModRoots := []string{}
+ for _, m := range search.ImportPathsQuiet(patterns, noModRoots) {
+ if len(m.Pkgs) == 0 && strings.Contains(m.Pattern(), "...") {
+ pkgs = append(pkgs, m.Pattern())
+ } else {
+ pkgs = append(pkgs, m.Pkgs...)
+ }
+ }
+ return pkgs
+}
+
+// downloadCache records the import paths we have already
+// considered during the download, to avoid duplicate work when
+// there is more than one dependency sequence leading to
+// a particular package.
+var downloadCache = map[string]bool{}
+
+// downloadRootCache records the version control repository
+// root directories we have already considered during the download.
+// For example, all the packages in the github.com/google/codesearch repo
+// share the same root (the directory for that path), and we only need
+// to run the hg commands to consider each repository once.
+var downloadRootCache = map[string]bool{}
+
+// download runs the download half of the get command
+// for the package or pattern named by the argument.
+func download(ctx context.Context, arg string, parent *load.Package, stk *load.ImportStack, mode int) {
+ if mode&load.ResolveImport != 0 {
+ // Caller is responsible for expanding vendor paths.
+ panic("internal error: download mode has useVendor set")
+ }
+ load1 := func(path string, mode int) *load.Package {
+ if parent == nil {
+ mode := 0 // don't do module or vendor resolution
+ return load.LoadPackage(ctx, load.PackageOpts{}, path, base.Cwd(), stk, nil, mode)
+ }
+ p, err := load.LoadImport(ctx, load.PackageOpts{}, path, parent.Dir, parent, stk, nil, mode|load.ResolveModule)
+ if err != nil {
+ base.Errorf("%s", err)
+ }
+ return p
+ }
+
+ p := load1(arg, mode)
+ if p.Error != nil && p.Error.Hard {
+ base.Errorf("%s", p.Error)
+ return
+ }
+
+ // loadPackage inferred the canonical ImportPath from arg.
+ // Use that in the following to prevent hysteresis effects
+ // in e.g. downloadCache and packageCache.
+ // This allows invocations such as:
+ // mkdir -p $GOPATH/src/github.com/user
+ // cd $GOPATH/src/github.com/user
+ // go get ./foo
+ // see: golang.org/issue/9767
+ arg = p.ImportPath
+
+ // There's nothing to do if this is a package in the standard library.
+ if p.Standard {
+ return
+ }
+
+ // Only process each package once.
+ // (Unless we're fetching test dependencies for this package,
+ // in which case we want to process it again.)
+ if downloadCache[arg] && mode&load.GetTestDeps == 0 {
+ return
+ }
+ downloadCache[arg] = true
+
+ pkgs := []*load.Package{p}
+ wildcardOkay := len(*stk) == 0
+ isWildcard := false
+
+ // Download if the package is missing, or update if we're using -u.
+ if p.Dir == "" || *getU {
+ // The actual download.
+ stk.Push(arg)
+ err := downloadPackage(p)
+ if err != nil {
+ base.Errorf("%s", &load.PackageError{ImportStack: stk.Copy(), Err: err})
+ stk.Pop()
+ return
+ }
+ stk.Pop()
+
+ args := []string{arg}
+ // If the argument has a wildcard in it, re-evaluate the wildcard.
+ // We delay this until after reloadPackage so that the old entry
+ // for p has been replaced in the package cache.
+ if wildcardOkay && strings.Contains(arg, "...") {
+ match := search.NewMatch(arg)
+ if match.IsLocal() {
+ noModRoots := []string{} // We're in gopath mode, so there are no modroots.
+ match.MatchDirs(noModRoots)
+ args = match.Dirs
+ } else {
+ match.MatchPackages()
+ args = match.Pkgs
+ }
+ for _, err := range match.Errs {
+ base.Errorf("%s", err)
+ }
+ isWildcard = true
+ }
+
+ // Clear all relevant package cache entries before
+ // doing any new loads.
+ load.ClearPackageCachePartial(args)
+
+ pkgs = pkgs[:0]
+ for _, arg := range args {
+ // Note: load calls loadPackage or loadImport,
+ // which push arg onto stk already.
+ // Do not push here too, or else stk will say arg imports arg.
+ p := load1(arg, mode)
+ if p.Error != nil {
+ base.Errorf("%s", p.Error)
+ continue
+ }
+ pkgs = append(pkgs, p)
+ }
+ }
+
+ // Process package, which might now be multiple packages
+ // due to wildcard expansion.
+ for _, p := range pkgs {
+ if *getFix {
+ files := base.RelPaths(p.InternalAllGoFiles())
+ base.Run(cfg.BuildToolexec, str.StringList(base.Tool("fix"), files))
+
+ // The imports might have changed, so reload again.
+ p = load.ReloadPackageNoFlags(arg, stk)
+ if p.Error != nil {
+ base.Errorf("%s", p.Error)
+ return
+ }
+ }
+
+ if isWildcard {
+ // Report both the real package and the
+ // wildcard in any error message.
+ stk.Push(p.ImportPath)
+ }
+
+ // Process dependencies, now that we know what they are.
+ imports := p.Imports
+ if mode&load.GetTestDeps != 0 {
+ // Process test dependencies when -t is specified.
+ // (But don't get test dependencies for test dependencies:
+ // we always pass mode 0 to the recursive calls below.)
+ imports = str.StringList(imports, p.TestImports, p.XTestImports)
+ }
+ for i, path := range imports {
+ if path == "C" {
+ continue
+ }
+ // Fail fast on import naming full vendor path.
+ // Otherwise expand path as needed for test imports.
+ // Note that p.Imports can have additional entries beyond p.Internal.Build.Imports.
+ orig := path
+ if i < len(p.Internal.Build.Imports) {
+ orig = p.Internal.Build.Imports[i]
+ }
+ if j, ok := load.FindVendor(orig); ok {
+ stk.Push(path)
+ err := &load.PackageError{
+ ImportStack: stk.Copy(),
+ Err: load.ImportErrorf(path, "%s must be imported as %s", path, path[j+len("vendor/"):]),
+ }
+ stk.Pop()
+ base.Errorf("%s", err)
+ continue
+ }
+ // If this is a test import, apply module and vendor lookup now.
+ // We cannot pass ResolveImport to download, because
+ // download does caching based on the value of path,
+ // so it must be the fully qualified path already.
+ if i >= len(p.Imports) {
+ path = load.ResolveImportPath(p, path)
+ }
+ download(ctx, path, p, stk, 0)
+ }
+
+ if isWildcard {
+ stk.Pop()
+ }
+ }
+}
+
+// downloadPackage runs the create or download command
+// to make the first copy of or update a copy of the given package.
+func downloadPackage(p *load.Package) error {
+ var (
+ vcsCmd *vcs.Cmd
+ repo, rootPath, repoDir string
+ err error
+ blindRepo bool // set if the repo has unusual configuration
+ )
+
+ // p can be either a real package, or a pseudo-package whose “import path” is
+ // actually a wildcard pattern.
+ // Trim the path at the element containing the first wildcard,
+ // and hope that it applies to the wildcarded parts too.
+ // This makes 'go get rsc.io/pdf/...' work in a fresh GOPATH.
+ importPrefix := p.ImportPath
+ if i := strings.Index(importPrefix, "..."); i >= 0 {
+ slash := strings.LastIndexByte(importPrefix[:i], '/')
+ if slash < 0 {
+ return fmt.Errorf("cannot expand ... in %q", p.ImportPath)
+ }
+ importPrefix = importPrefix[:slash]
+ }
+ if err := checkImportPath(importPrefix); err != nil {
+ return fmt.Errorf("%s: invalid import path: %v", p.ImportPath, err)
+ }
+ security := web.SecureOnly
+ if module.MatchPrefixPatterns(cfg.GOINSECURE, importPrefix) {
+ security = web.Insecure
+ }
+
+ if p.Internal.Build.SrcRoot != "" {
+ // Directory exists. Look for checkout along path to src.
+ const allowNesting = false
+ repoDir, vcsCmd, err = vcs.FromDir(p.Dir, p.Internal.Build.SrcRoot, allowNesting)
+ if err != nil {
+ return err
+ }
+ if !str.HasFilePathPrefix(repoDir, p.Internal.Build.SrcRoot) {
+ panic(fmt.Sprintf("repository %q not in source root %q", repoDir, p.Internal.Build.SrcRoot))
+ }
+ rootPath = str.TrimFilePathPrefix(repoDir, p.Internal.Build.SrcRoot)
+ if err := vcs.CheckGOVCS(vcsCmd, rootPath); err != nil {
+ return err
+ }
+
+ repo = "<local>" // should be unused; make distinctive
+
+ // Double-check where it came from.
+ if *getU && vcsCmd.RemoteRepo != nil {
+ dir := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath))
+ remote, err := vcsCmd.RemoteRepo(vcsCmd, dir)
+ if err != nil {
+ // Proceed anyway. The package is present; we likely just don't understand
+ // the repo configuration (e.g. unusual remote protocol).
+ blindRepo = true
+ }
+ repo = remote
+ if !*getF && err == nil {
+ if rr, err := vcs.RepoRootForImportPath(importPrefix, vcs.IgnoreMod, security); err == nil {
+ repo := rr.Repo
+ if rr.VCS.ResolveRepo != nil {
+ resolved, err := rr.VCS.ResolveRepo(rr.VCS, dir, repo)
+ if err == nil {
+ repo = resolved
+ }
+ }
+ if remote != repo && rr.IsCustom {
+ return fmt.Errorf("%s is a custom import path for %s, but %s is checked out from %s", rr.Root, repo, dir, remote)
+ }
+ }
+ }
+ }
+ } else {
+ // Analyze the import path to determine the version control system,
+ // repository, and the import path for the root of the repository.
+ rr, err := vcs.RepoRootForImportPath(importPrefix, vcs.IgnoreMod, security)
+ if err != nil {
+ return err
+ }
+ vcsCmd, repo, rootPath = rr.VCS, rr.Repo, rr.Root
+ }
+ if !blindRepo && !vcsCmd.IsSecure(repo) && security != web.Insecure {
+ return fmt.Errorf("cannot download: %v uses insecure protocol", repo)
+ }
+
+ if p.Internal.Build.SrcRoot == "" {
+ // Package not found. Put in first directory of $GOPATH.
+ list := filepath.SplitList(cfg.BuildContext.GOPATH)
+ if len(list) == 0 {
+ return fmt.Errorf("cannot download: $GOPATH not set. For more details see: 'go help gopath'")
+ }
+ // Guard against people setting GOPATH=$GOROOT.
+ if filepath.Clean(list[0]) == filepath.Clean(cfg.GOROOT) {
+ return fmt.Errorf("cannot download: $GOPATH must not be set to $GOROOT. For more details see: 'go help gopath'")
+ }
+ if _, err := os.Stat(filepath.Join(list[0], "src/cmd/go/alldocs.go")); err == nil {
+ return fmt.Errorf("cannot download: %s is a GOROOT, not a GOPATH. For more details see: 'go help gopath'", list[0])
+ }
+ p.Internal.Build.Root = list[0]
+ p.Internal.Build.SrcRoot = filepath.Join(list[0], "src")
+ p.Internal.Build.PkgRoot = filepath.Join(list[0], "pkg")
+ }
+ root := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath))
+
+ if err := vcs.CheckNested(vcsCmd, root, p.Internal.Build.SrcRoot); err != nil {
+ return err
+ }
+
+ // If we've considered this repository already, don't do it again.
+ if downloadRootCache[root] {
+ return nil
+ }
+ downloadRootCache[root] = true
+
+ if cfg.BuildV {
+ fmt.Fprintf(os.Stderr, "%s (download)\n", rootPath)
+ }
+
+ // Check that this is an appropriate place for the repo to be checked out.
+ // The target directory must either not exist or have a repo checked out already.
+ meta := filepath.Join(root, "."+vcsCmd.Cmd)
+ if _, err := os.Stat(meta); err != nil {
+ // Metadata file or directory does not exist. Prepare to checkout new copy.
+ // Some version control tools require the target directory not to exist.
+ // We require that too, just to avoid stepping on existing work.
+ if _, err := os.Stat(root); err == nil {
+ return fmt.Errorf("%s exists but %s does not - stale checkout?", root, meta)
+ }
+
+ _, err := os.Stat(p.Internal.Build.Root)
+ gopathExisted := err == nil
+
+ // Some version control tools require the parent of the target to exist.
+ parent, _ := filepath.Split(root)
+ if err = os.MkdirAll(parent, 0777); err != nil {
+ return err
+ }
+ if cfg.BuildV && !gopathExisted && p.Internal.Build.Root == cfg.BuildContext.GOPATH {
+ fmt.Fprintf(os.Stderr, "created GOPATH=%s; see 'go help gopath'\n", p.Internal.Build.Root)
+ }
+
+ if err = vcsCmd.Create(root, repo); err != nil {
+ return err
+ }
+ } else {
+ // Metadata directory does exist; download incremental updates.
+ if err = vcsCmd.Download(root); err != nil {
+ return err
+ }
+ }
+
+ if cfg.BuildN {
+ // Do not show tag sync in -n; it's noise more than anything,
+ // and since we're not running commands, no tag will be found.
+ // But avoid printing nothing.
+ fmt.Fprintf(os.Stderr, "# cd %s; %s sync/update\n", root, vcsCmd.Cmd)
+ return nil
+ }
+
+ // Select and sync to appropriate version of the repository.
+ tags, err := vcsCmd.Tags(root)
+ if err != nil {
+ return err
+ }
+ vers := runtime.Version()
+ if i := strings.Index(vers, " "); i >= 0 {
+ vers = vers[:i]
+ }
+ if err := vcsCmd.TagSync(root, selectTag(vers, tags)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// selectTag returns the closest matching tag for a given version.
+// Closest means the latest one that is not after the current release.
+// Version "goX" (or "goX.Y" or "goX.Y.Z") matches tags of the same form.
+// Version "release.rN" matches tags of the form "go.rN" (N being a floating-point number).
+// Version "weekly.YYYY-MM-DD" matches tags like "go.weekly.YYYY-MM-DD".
+//
+// NOTE(rsc): Eventually we will need to decide on some logic here.
+// For now, there is only "go1". This matches the docs in go help get.
+func selectTag(goVersion string, tags []string) (match string) {
+ for _, t := range tags {
+ if t == "go1" {
+ return "go1"
+ }
+ }
+ return ""
+}
+
+// checkImportPath is like module.CheckImportPath, but it also forbids leading
+// dots in path elements. Allowing leading dots could let 'go get' create .git
+// and other VCS directories in places where we might run VCS tools later.
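+//
+// For example, checkImportPath("example.com/user/.git") reports an error
+// because of the leading dot in the final path element.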
+func checkImportPath(path string) error {
+ if err := module.CheckImportPath(path); err != nil {
+ return err
+ }
+ checkElem := func(elem string) error {
+ if elem[0] == '.' {
+ return fmt.Errorf("malformed import path %q: leading dot in path element", path)
+ }
+ return nil
+ }
+ elemStart := 0
+ for i, r := range path {
+ if r == '/' {
+ if err := checkElem(path[elemStart:]); err != nil {
+ return err
+ }
+ elemStart = i + 1
+ }
+ }
+ if err := checkElem(path[elemStart:]); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/src/cmd/go/internal/get/tag_test.go b/src/cmd/go/internal/get/tag_test.go
new file mode 100644
index 0000000..9a25dfa
--- /dev/null
+++ b/src/cmd/go/internal/get/tag_test.go
@@ -0,0 +1,100 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package get
+
+import "testing"
+
+var selectTagTestTags = []string{
+ "go.r58",
+ "go.r58.1",
+ "go.r59",
+ "go.r59.1",
+ "go.r61",
+ "go.r61.1",
+ "go.weekly.2010-01-02",
+ "go.weekly.2011-10-12",
+ "go.weekly.2011-10-12.1",
+ "go.weekly.2011-10-14",
+ "go.weekly.2011-11-01",
+ "go1",
+ "go1.0.1",
+ "go1.999",
+ "go1.9.2",
+ "go5",
+
+ // these should be ignored:
+ "release.r59",
+ "release.r59.1",
+ "release",
+ "weekly.2011-10-12",
+ "weekly.2011-10-12.1",
+ "weekly",
+ "foo",
+ "bar",
+ "go.f00",
+ "go!r60",
+ "go.1999-01-01",
+ "go.2x",
+ "go.20000000000000",
+ "go.2.",
+ "go.2.0",
+ "go2x",
+ "go20000000000000",
+ "go2.",
+ "go2.0",
+}
+
+var selectTagTests = []struct {
+ version string
+ selected string
+}{
+ /*
+ {"release.r57", ""},
+ {"release.r58.2", "go.r58.1"},
+ {"release.r59", "go.r59"},
+ {"release.r59.1", "go.r59.1"},
+ {"release.r60", "go.r59.1"},
+ {"release.r60.1", "go.r59.1"},
+ {"release.r61", "go.r61"},
+ {"release.r66", "go.r61.1"},
+ {"weekly.2010-01-01", ""},
+ {"weekly.2010-01-02", "go.weekly.2010-01-02"},
+ {"weekly.2010-01-02.1", "go.weekly.2010-01-02"},
+ {"weekly.2010-01-03", "go.weekly.2010-01-02"},
+ {"weekly.2011-10-12", "go.weekly.2011-10-12"},
+ {"weekly.2011-10-12.1", "go.weekly.2011-10-12.1"},
+ {"weekly.2011-10-13", "go.weekly.2011-10-12.1"},
+ {"weekly.2011-10-14", "go.weekly.2011-10-14"},
+ {"weekly.2011-10-14.1", "go.weekly.2011-10-14"},
+ {"weekly.2011-11-01", "go.weekly.2011-11-01"},
+ {"weekly.2014-01-01", "go.weekly.2011-11-01"},
+ {"weekly.3000-01-01", "go.weekly.2011-11-01"},
+ {"go1", "go1"},
+ {"go1.1", "go1.0.1"},
+ {"go1.998", "go1.9.2"},
+ {"go1.1000", "go1.999"},
+ {"go6", "go5"},
+
+ // faulty versions:
+ {"release.f00", ""},
+ {"weekly.1999-01-01", ""},
+ {"junk", ""},
+ {"", ""},
+ {"go2x", ""},
+ {"go200000000000", ""},
+ {"go2.", ""},
+ {"go2.0", ""},
+ */
+ {"anything", "go1"},
+}
+
+func TestSelectTag(t *testing.T) {
+ for _, c := range selectTagTests {
+ selected := selectTag(c.version, selectTagTestTags)
+ if selected != c.selected {
+ t.Errorf("selectTag(%q) = %q, want %q", c.version, selected, c.selected)
+ }
+ }
+}
diff --git a/src/cmd/go/internal/gover/gomod.go b/src/cmd/go/internal/gover/gomod.go
new file mode 100644
index 0000000..4a4ae53
--- /dev/null
+++ b/src/cmd/go/internal/gover/gomod.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gover
+
+import (
+ "bytes"
+ "strings"
+)
+
+var nl = []byte("\n")
+
+// GoModLookup takes go.mod or go.work content,
+// finds the first line in the file starting with the given key,
+// and returns the value associated with that key.
+//
+// GoModLookup should only be used with non-factored verbs
+// such as "go" and "toolchain", usually to find versions
+// or version-like strings.
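+//
+// For example, if gomod contains the line "go 1.21.0",
+// GoModLookup(gomod, "go") returns "1.21.0".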
+func GoModLookup(gomod []byte, key string) string {
+ for len(gomod) > 0 {
+ var line []byte
+ line, gomod, _ = bytes.Cut(gomod, nl)
+ line = bytes.TrimSpace(line)
+ if v, ok := parseKey(line, key); ok {
+ return v
+ }
+ }
+ return ""
+}
+
+func parseKey(line []byte, key string) (string, bool) {
+ if !strings.HasPrefix(string(line), key) {
+ return "", false
+ }
+ s := strings.TrimPrefix(string(line), key)
+ if len(s) == 0 || (s[0] != ' ' && s[0] != '\t') {
+ return "", false
+ }
+ s, _, _ = strings.Cut(s, "//") // strip comments
+ return strings.TrimSpace(s), true
+}
diff --git a/src/cmd/go/internal/gover/gover.go b/src/cmd/go/internal/gover/gover.go
new file mode 100644
index 0000000..b2a8261
--- /dev/null
+++ b/src/cmd/go/internal/gover/gover.go
@@ -0,0 +1,254 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gover implements support for Go toolchain versions like 1.21.0 and 1.21rc1.
+// (For historical reasons, Go does not use semver for its toolchains.)
+// This package provides the same basic analysis that golang.org/x/mod/semver does for semver.
+// It also provides some helpers for extracting versions from go.mod files
+// and for dealing with module.Versions that may use Go versions or semver
+// depending on the module path.
+package gover
+
+import (
+ "cmp"
+)
+
+// A version is a parsed Go version: major[.minor[.patch]][kind[pre]]
+// The numbers are the original decimal strings to avoid integer overflows
+// and since there is very little actual math. (Probably overflow doesn't matter in practice,
+// but at the time this code was written, there was an existing test that used
+// go1.99999999999, which does not fit in an int on 32-bit platforms.
+// The "big decimal" representation avoids the problem entirely.)
+type version struct {
+ major string // decimal
+ minor string // decimal or ""
+ patch string // decimal or ""
+ kind string // "", "alpha", "beta", "rc"
+ pre string // decimal or ""
+}
+
+// Compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as toolchain versions.
+// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
+// Malformed versions compare less than well-formed versions and equal to each other.
+// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
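+//
+// For example, per this ordering:
+//
+//	Compare("1.21", "1.21rc1") < 0
+//	Compare("1.21rc1", "1.21.0") < 0
+//	Compare("1.19", "1.19.0") == 0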
+func Compare(x, y string) int {
+ vx := parse(x)
+ vy := parse(y)
+
+ if c := cmpInt(vx.major, vy.major); c != 0 {
+ return c
+ }
+ if c := cmpInt(vx.minor, vy.minor); c != 0 {
+ return c
+ }
+ if c := cmpInt(vx.patch, vy.patch); c != 0 {
+ return c
+ }
+ if c := cmp.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc
+ return c
+ }
+ if c := cmpInt(vx.pre, vy.pre); c != 0 {
+ return c
+ }
+ return 0
+}
+
+// Max returns the maximum of x and y interpreted as toolchain versions,
+// compared using Compare.
+// If x and y compare equal, Max returns x.
+func Max(x, y string) string {
+ if Compare(x, y) < 0 {
+ return y
+ }
+ return x
+}
+
+// ToolchainMax returns the maximum of x and y interpreted as toolchain names,
+// compared using Compare(FromToolchain(x), FromToolchain(y)).
+// If x and y compare equal, ToolchainMax returns x.
+func ToolchainMax(x, y string) string {
+ if Compare(FromToolchain(x), FromToolchain(y)) < 0 {
+ return y
+ }
+ return x
+}
+
+// IsLang reports whether v denotes the overall Go language version
+// and not a specific release. Starting with the Go 1.21 release, "1.x" denotes
+// the overall language version; the first release is "1.x.0".
+// The distinction is important because the relative ordering is
+//
+// 1.21 < 1.21rc1 < 1.21.0
+//
+// meaning that Go 1.21rc1 and Go 1.21.0 will both handle go.mod files that
+// say "go 1.21", but Go 1.21rc1 will not handle files that say "go 1.21.0".
+func IsLang(x string) bool {
+ v := parse(x)
+ return v != version{} && v.patch == "" && v.kind == "" && v.pre == ""
+}
+
+// Lang returns the Go language version. For example, Lang("1.2.3") == "1.2".
+func Lang(x string) string {
+ v := parse(x)
+ if v.minor == "" {
+ return v.major
+ }
+ return v.major + "." + v.minor
+}
+
+// IsPrerelease reports whether v denotes a Go prerelease version.
+func IsPrerelease(x string) bool {
+ return parse(x).kind != ""
+}
+
+// Prev returns the Go major release immediately preceding v,
+// or v itself if v is the first Go major release (1.0) or not a supported
+// Go version.
+//
+// Examples:
+//
+// Prev("1.2") = "1.1"
+// Prev("1.3rc4") = "1.2"
+func Prev(x string) string {
+ v := parse(x)
+ if cmpInt(v.minor, "1") <= 0 {
+ return v.major
+ }
+ return v.major + "." + decInt(v.minor)
+}
+
+// IsValid reports whether the version x is valid.
+func IsValid(x string) bool {
+ return parse(x) != version{}
+}
+
+// parse parses the Go version string x into a version.
+// It returns the zero version if x is malformed.
+func parse(x string) version {
+ var v version
+
+ // Parse major version.
+ var ok bool
+ v.major, x, ok = cutInt(x)
+ if !ok {
+ return version{}
+ }
+ if x == "" {
+ // Interpret "1" as "1.0.0".
+ v.minor = "0"
+ v.patch = "0"
+ return v
+ }
+
+ // Parse . before minor version.
+ if x[0] != '.' {
+ return version{}
+ }
+
+ // Parse minor version.
+ v.minor, x, ok = cutInt(x[1:])
+ if !ok {
+ return version{}
+ }
+ if x == "" {
+ // A missing patch is the same as "0" for older versions.
+ // Starting in Go 1.21, a missing patch is different from an explicit .0.
+ if cmpInt(v.minor, "21") < 0 {
+ v.patch = "0"
+ }
+ return v
+ }
+
+ // Parse patch if present.
+ if x[0] == '.' {
+ v.patch, x, ok = cutInt(x[1:])
+ if !ok || x != "" {
+ // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != "").
+ // Allowing them would be a bit confusing because we already have:
+ // 1.21 < 1.21rc1
+ // But a prerelease of a patch would have the opposite effect:
+ // 1.21.3rc1 < 1.21.3
+ // We've never needed them before, so let's not start now.
+ return version{}
+ }
+ return v
+ }
+
+ // Parse prerelease.
+ i := 0
+ for i < len(x) && (x[i] < '0' || '9' < x[i]) {
+ if x[i] < 'a' || 'z' < x[i] {
+ return version{}
+ }
+ i++
+ }
+ if i == 0 {
+ return version{}
+ }
+ v.kind, x = x[:i], x[i:]
+ if x == "" {
+ return v
+ }
+ v.pre, x, ok = cutInt(x)
+ if !ok || x != "" {
+ return version{}
+ }
+
+ return v
+}
+
+// cutInt scans the leading decimal number at the start of x
+// and returns it along with the rest of the string.
+func cutInt(x string) (n, rest string, ok bool) {
+ i := 0
+ for i < len(x) && '0' <= x[i] && x[i] <= '9' {
+ i++
+ }
+ if i == 0 || x[0] == '0' && i != 1 {
+ return "", "", false
+ }
+ return x[:i], x[i:], true
+}
+
+// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers.
+// (Copied from golang.org/x/mod/semver's compareInt.)
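+//
+// For example, cmpInt("9", "10") < 0, even though "9" > "10" as strings.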
+func cmpInt(x, y string) int {
+ if x == y {
+ return 0
+ }
+ if len(x) < len(y) {
+ return -1
+ }
+ if len(x) > len(y) {
+ return +1
+ }
+ if x < y {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+// decInt returns the decimal string decremented by 1, or the empty string
+// if the decimal is all zeroes.
+// (Copied from golang.org/x/mod/module's decDecimal.)
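+//
+// For example, decInt("21") == "20" and decInt("100") == "99".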
+func decInt(decimal string) string {
+ // Scan right to left turning 0s to 9s until you find a digit to decrement.
+ digits := []byte(decimal)
+ i := len(digits) - 1
+ for ; i >= 0 && digits[i] == '0'; i-- {
+ digits[i] = '9'
+ }
+ if i < 0 {
+ // decimal is all zeros
+ return ""
+ }
+ if i == 0 && digits[i] == '1' && len(digits) > 1 {
+ digits = digits[1:]
+ } else {
+ digits[i]--
+ }
+ return string(digits)
+}
diff --git a/src/cmd/go/internal/gover/gover_test.go b/src/cmd/go/internal/gover/gover_test.go
new file mode 100644
index 0000000..3a0bf10
--- /dev/null
+++ b/src/cmd/go/internal/gover/gover_test.go
@@ -0,0 +1,160 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gover
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestCompare(t *testing.T) { test2(t, compareTests, "Compare", Compare) }
+
+var compareTests = []testCase2[string, string, int]{
+ {"", "", 0},
+ {"x", "x", 0},
+ {"", "x", 0},
+ {"1", "1.1", -1},
+ {"1.5", "1.6", -1},
+ {"1.5", "1.10", -1},
+ {"1.6", "1.6.1", -1},
+ {"1.19", "1.19.0", 0},
+ {"1.19rc1", "1.19", -1},
+ {"1.20", "1.20.0", 0},
+ {"1.20rc1", "1.20", -1},
+ {"1.21", "1.21.0", -1},
+ {"1.21", "1.21rc1", -1},
+ {"1.21rc1", "1.21.0", -1},
+ {"1.6", "1.19", -1},
+ {"1.19", "1.19.1", -1},
+ {"1.19rc1", "1.19", -1},
+ {"1.19rc1", "1.19.1", -1},
+ {"1.19rc1", "1.19rc2", -1},
+ {"1.19.0", "1.19.1", -1},
+ {"1.19rc1", "1.19.0", -1},
+ {"1.19alpha3", "1.19beta2", -1},
+ {"1.19beta2", "1.19rc1", -1},
+ {"1.1", "1.99999999999999998", -1},
+ {"1.99999999999999998", "1.99999999999999999", -1},
+}
+
+func TestParse(t *testing.T) { test1(t, parseTests, "parse", parse) }
+
+var parseTests = []testCase1[string, version]{
+ {"1", version{"1", "0", "0", "", ""}},
+ {"1.2", version{"1", "2", "0", "", ""}},
+ {"1.2.3", version{"1", "2", "3", "", ""}},
+ {"1.2rc3", version{"1", "2", "", "rc", "3"}},
+ {"1.20", version{"1", "20", "0", "", ""}},
+ {"1.21", version{"1", "21", "", "", ""}},
+ {"1.21rc3", version{"1", "21", "", "rc", "3"}},
+ {"1.21.0", version{"1", "21", "0", "", ""}},
+ {"1.24", version{"1", "24", "", "", ""}},
+ {"1.24rc3", version{"1", "24", "", "rc", "3"}},
+ {"1.24.0", version{"1", "24", "0", "", ""}},
+ {"1.999testmod", version{"1", "999", "", "testmod", ""}},
+ {"1.99999999999999999", version{"1", "99999999999999999", "", "", ""}},
+}
+
+func TestLang(t *testing.T) { test1(t, langTests, "Lang", Lang) }
+
+var langTests = []testCase1[string, string]{
+ {"1.2rc3", "1.2"},
+ {"1.2.3", "1.2"},
+ {"1.2", "1.2"},
+ {"1", "1.0"},
+ {"1.999testmod", "1.999"},
+}
+
+func TestIsLang(t *testing.T) { test1(t, isLangTests, "IsLang", IsLang) }
+
+var isLangTests = []testCase1[string, bool]{
+ {"1.2rc3", false},
+ {"1.2.3", false},
+ {"1.999testmod", false},
+ {"1.22", true},
+ {"1.21", true},
+ {"1.20", false}, // == 1.20.0
+ {"1.19", false}, // == 1.20.0
+ {"1.3", false}, // == 1.3.0
+ {"1.2", false}, // == 1.2.0
+ {"1", false}, // == 1.0.0
+}
+
+func TestPrev(t *testing.T) { test1(t, prevTests, "Prev", Prev) }
+
+var prevTests = []testCase1[string, string]{
+ {"", ""},
+ {"0", "0"},
+ {"1.3rc4", "1.2"},
+ {"1.3.5", "1.2"},
+ {"1.3", "1.2"},
+ {"1", "1"},
+ {"1.99999999999999999", "1.99999999999999998"},
+ {"1.40000000000000000", "1.39999999999999999"},
+}
+
+func TestIsValid(t *testing.T) { test1(t, isValidTests, "IsValid", IsValid) }
+
+var isValidTests = []testCase1[string, bool]{
+ {"1.2rc3", true},
+ {"1.2.3", true},
+ {"1.999testmod", true},
+ {"1.600+auto", false},
+ {"1.22", true},
+ {"1.21.0", true},
+ {"1.21rc2", true},
+ {"1.21", true},
+ {"1.20.0", true},
+ {"1.20", true},
+ {"1.19", true},
+ {"1.3", true},
+ {"1.2", true},
+ {"1", true},
+}
+
+type testCase1[In, Out any] struct {
+ in In
+ out Out
+}
+
+type testCase2[In1, In2, Out any] struct {
+ in1 In1
+ in2 In2
+ out Out
+}
+
+type testCase3[In1, In2, In3, Out any] struct {
+ in1 In1
+ in2 In2
+ in3 In3
+ out Out
+}
+
+func test1[In, Out any](t *testing.T, tests []testCase1[In, Out], name string, f func(In) Out) {
+ t.Helper()
+ for _, tt := range tests {
+ if out := f(tt.in); !reflect.DeepEqual(out, tt.out) {
+ t.Errorf("%s(%v) = %v, want %v", name, tt.in, out, tt.out)
+ }
+ }
+}
+
+func test2[In1, In2, Out any](t *testing.T, tests []testCase2[In1, In2, Out], name string, f func(In1, In2) Out) {
+ t.Helper()
+ for _, tt := range tests {
+ if out := f(tt.in1, tt.in2); !reflect.DeepEqual(out, tt.out) {
+ t.Errorf("%s(%+v, %+v) = %+v, want %+v", name, tt.in1, tt.in2, out, tt.out)
+ }
+ }
+}
+
+func test3[In1, In2, In3, Out any](t *testing.T, tests []testCase3[In1, In2, In3, Out], name string, f func(In1, In2, In3) Out) {
+ t.Helper()
+ for _, tt := range tests {
+ if out := f(tt.in1, tt.in2, tt.in3); !reflect.DeepEqual(out, tt.out) {
+ t.Errorf("%s(%+v, %+v, %+v) = %+v, want %+v", name, tt.in1, tt.in2, tt.in3, out, tt.out)
+ }
+ }
+}
diff --git a/src/cmd/go/internal/gover/local.go b/src/cmd/go/internal/gover/local.go
new file mode 100644
index 0000000..8183a5c
--- /dev/null
+++ b/src/cmd/go/internal/gover/local.go
@@ -0,0 +1,42 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gover
+
+import (
+ "internal/goversion"
+ "runtime"
+ "strconv"
+)
+
+// TestVersion is initialized in the go command test binary
+// to be $TESTGO_VERSION, to allow tests to override the
+// go command's idea of its own version as returned by Local.
+var TestVersion string
+
+// Local returns the local Go version, the one implemented by this go command.
+func Local() string {
+ v, _ := local()
+ return v
+}
+
+// LocalToolchain returns the local toolchain name, the one implemented by this go command.
+func LocalToolchain() string {
+ _, t := local()
+ return t
+}
+
+func local() (goVers, toolVers string) {
+ toolVers = runtime.Version()
+ if TestVersion != "" {
+ toolVers = TestVersion
+ }
+ goVers = FromToolchain(toolVers)
+ if goVers == "" {
+ // Development branch. Use "Dev" version with just 1.N, no rc1 or .0 suffix.
+ goVers = "1." + strconv.Itoa(goversion.Version)
+ toolVers = "go" + goVers
+ }
+ return goVers, toolVers
+}
diff --git a/src/cmd/go/internal/gover/mod.go b/src/cmd/go/internal/gover/mod.go
new file mode 100644
index 0000000..d3cc170
--- /dev/null
+++ b/src/cmd/go/internal/gover/mod.go
@@ -0,0 +1,127 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gover
+
+import (
+ "sort"
+ "strings"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
+)
+
+// IsToolchain reports whether the module path corresponds to the
+// virtual, non-downloadable module tracking go or toolchain directives in the go.mod file.
+//
+// Note that IsToolchain only matches "go" and "toolchain", not the
+// real, downloadable module "golang.org/toolchain" containing toolchain files.
+//
+// IsToolchain("go") = true
+// IsToolchain("toolchain") = true
+// IsToolchain("golang.org/x/tools") = false
+// IsToolchain("golang.org/toolchain") = false
+func IsToolchain(path string) bool {
+ return path == "go" || path == "toolchain"
+}
+
+// ModCompare returns the result of comparing the versions x and y
+// for the module with the given path.
+// The path is necessary because the "go" and "toolchain" modules
+// use a different version syntax and semantics (gover, this package)
+// than most modules (semver).
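+//
+// For example:
+//
+//	ModCompare("go", "1.21rc1", "1.21.0") < 0
+//	ModCompare("rsc.io/quote", "v1.2", "v1.3") < 0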
+func ModCompare(path string, x, y string) int {
+ if path == "go" {
+ return Compare(x, y)
+ }
+ if path == "toolchain" {
+ return Compare(maybeToolchainVersion(x), maybeToolchainVersion(y))
+ }
+ return semver.Compare(x, y)
+}
+
+// ModSort is like module.Sort but understands the "go" and "toolchain"
+// modules and their version ordering.
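+//
+// For example, ModSort orders "go 1.2" before "go 1.3" and "toolchain 1.2"
+// before "toolchain 1.3", in addition to the usual path-then-version ordering
+// for ordinary modules.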
+func ModSort(list []module.Version) {
+ sort.Slice(list, func(i, j int) bool {
+ mi := list[i]
+ mj := list[j]
+ if mi.Path != mj.Path {
+ return mi.Path < mj.Path
+ }
+ // To help go.sum formatting, allow version/file.
+ // Compare semver prefix by semver rules,
+ // file by string order.
+ vi := mi.Version
+ vj := mj.Version
+ var fi, fj string
+ if k := strings.Index(vi, "/"); k >= 0 {
+ vi, fi = vi[:k], vi[k:]
+ }
+ if k := strings.Index(vj, "/"); k >= 0 {
+ vj, fj = vj[:k], vj[k:]
+ }
+ if vi != vj {
+ return ModCompare(mi.Path, vi, vj) < 0
+ }
+ return fi < fj
+ })
+}
+
+// ModIsValid reports whether vers is a valid version syntax for the module with the given path.
+func ModIsValid(path, vers string) bool {
+ if IsToolchain(path) {
+ if path == "toolchain" {
+ return IsValid(FromToolchain(vers))
+ }
+ return IsValid(vers)
+ }
+ return semver.IsValid(vers)
+}
+
+// ModIsPrefix reports whether v is a valid version syntax prefix for the module with the given path.
+// The caller is assumed to have checked that ModIsValid(path, vers) is true.
+func ModIsPrefix(path, vers string) bool {
+ if IsToolchain(path) {
+ if path == "toolchain" {
+ return IsLang(FromToolchain(vers))
+ }
+ return IsLang(vers)
+ }
+ // Semver
+ dots := 0
+ for i := 0; i < len(vers); i++ {
+ switch vers[i] {
+ case '-', '+':
+ return false
+ case '.':
+ dots++
+ if dots >= 2 {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// ModIsPrerelease reports whether vers is a prerelease version for the module with the given path.
+// The caller is assumed to have checked that ModIsValid(path, vers) is true.
+func ModIsPrerelease(path, vers string) bool {
+ if IsToolchain(path) {
+ return IsPrerelease(vers)
+ }
+ return semver.Prerelease(vers) != ""
+}
+
+// ModMajorMinor returns the "major.minor" truncation of the version vers,
+// for use as a prefix in "@patch" queries.
+func ModMajorMinor(path, vers string) string {
+ if IsToolchain(path) {
+ if path == "toolchain" {
+ return "go" + Lang(FromToolchain(vers))
+ }
+ return Lang(vers)
+ }
+ return semver.MajorMinor(vers)
+}
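A sketch contrasting the two syntaxes handled by these helpers, drawing on the ModIsValid test cases below. It assumes it sits inside package gover, that demoModVersionSyntax is a made-up name, and that Lang (defined elsewhere in this package) truncates a version to its major.minor language version:

package gover

import "fmt"

// demoModVersionSyntax contrasts the gover syntax used by the "go" and
// "toolchain" modules with the semver syntax used by everything else.
func demoModVersionSyntax() {
	fmt.Println(ModIsValid("go", "1.2"))            // true
	fmt.Println(ModIsValid("go", "v1.2"))           // false: no "v" prefix in gover syntax
	fmt.Println(ModIsValid("toolchain", "go1.2"))   // true
	fmt.Println(ModIsValid("rsc.io/quote", "v1.2")) // true
	fmt.Println(ModIsValid("rsc.io/quote", "1.2"))  // false: semver requires the "v"

	// ModMajorMinor keeps only the prefix that "@patch" queries match against.
	fmt.Println(ModMajorMinor("go", "1.21.3"))            // "1.21"
	fmt.Println(ModMajorMinor("toolchain", "go1.21.3"))   // "go1.21"
	fmt.Println(ModMajorMinor("rsc.io/quote", "v1.21.3")) // "v1.21"
}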
diff --git a/src/cmd/go/internal/gover/mod_test.go b/src/cmd/go/internal/gover/mod_test.go
new file mode 100644
index 0000000..c92169c
--- /dev/null
+++ b/src/cmd/go/internal/gover/mod_test.go
@@ -0,0 +1,72 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gover
+
+import (
+ "slices"
+ "strings"
+ "testing"
+
+ "golang.org/x/mod/module"
+)
+
+func TestIsToolchain(t *testing.T) { test1(t, isToolchainTests, "IsToolchain", IsToolchain) }
+
+var isToolchainTests = []testCase1[string, bool]{
+ {"go", true},
+ {"toolchain", true},
+ {"anything", false},
+ {"golang.org/toolchain", false},
+}
+
+func TestModCompare(t *testing.T) { test3(t, modCompareTests, "ModCompare", ModCompare) }
+
+var modCompareTests = []testCase3[string, string, string, int]{
+ {"go", "1.2", "1.3", -1},
+ {"go", "v1.2", "v1.3", 0}, // equal because invalid
+ {"go", "1.2", "1.2", 0},
+ {"toolchain", "go1.2", "go1.3", -1},
+ {"toolchain", "go1.2", "go1.2", 0},
+ {"toolchain", "1.2", "1.3", -1}, // accepted but non-standard
+ {"toolchain", "v1.2", "v1.3", 0}, // equal because invalid
+ {"rsc.io/quote", "v1.2", "v1.3", -1},
+ {"rsc.io/quote", "1.2", "1.3", 0}, // equal because invalid
+}
+
+func TestModIsValid(t *testing.T) { test2(t, modIsValidTests, "ModIsValid", ModIsValid) }
+
+var modIsValidTests = []testCase2[string, string, bool]{
+ {"go", "1.2", true},
+ {"go", "v1.2", false},
+ {"toolchain", "go1.2", true},
+ {"toolchain", "v1.2", false},
+ {"rsc.io/quote", "v1.2", true},
+ {"rsc.io/quote", "1.2", false},
+}
+
+func TestModSort(t *testing.T) {
+ test1(t, modSortTests, "ModSort", func(list []module.Version) []module.Version {
+ out := slices.Clone(list)
+ ModSort(out)
+ return out
+ })
+}
+
+var modSortTests = []testCase1[[]module.Version, []module.Version]{
+ {
+ mvl(`z v1.1; a v1.2; a v1.1; go 1.3; toolchain 1.3; toolchain 1.2; go 1.2`),
+ mvl(`a v1.1; a v1.2; go 1.2; go 1.3; toolchain 1.2; toolchain 1.3; z v1.1`),
+ },
+}
+
+func mvl(s string) []module.Version {
+ var list []module.Version
+ for _, f := range strings.Split(s, ";") {
+ f = strings.TrimSpace(f)
+ path, vers, _ := strings.Cut(f, " ")
+ list = append(list, module.Version{Path: path, Version: vers})
+ }
+ return list
+}
diff --git a/src/cmd/go/internal/gover/toolchain.go b/src/cmd/go/internal/gover/toolchain.go
new file mode 100644
index 0000000..a24df98
--- /dev/null
+++ b/src/cmd/go/internal/gover/toolchain.go
@@ -0,0 +1,98 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gover
+
+import (
+ "cmd/go/internal/base"
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// FromToolchain returns the Go version for the named toolchain,
+// derived from the name itself (not by running the toolchain).
+// A toolchain is named "goVERSION".
+// A suffix after the VERSION introduced by a -, space, or tab is removed.
+// Examples:
+//
+// FromToolchain("go1.2.3") == "1.2.3"
+// FromToolchain("go1.2.3-bigcorp") == "1.2.3"
+// FromToolchain("invalid") == ""
+func FromToolchain(name string) string {
+ if strings.ContainsAny(name, "\\/") {
+ // The suffix must not include a path separator, since that would cause
+ // exec.LookPath to resolve it from a relative directory instead of from
+ // $PATH.
+ return ""
+ }
+
+ var v string
+ if strings.HasPrefix(name, "go") {
+ v = name[2:]
+ } else {
+ return ""
+ }
+ // Some builds use custom suffixes; strip them.
+ if i := strings.IndexAny(v, " \t-"); i >= 0 {
+ v = v[:i]
+ }
+ if !IsValid(v) {
+ return ""
+ }
+ return v
+}
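The behavior is easiest to see against the FromToolchain test cases added in this change; demoFromToolchain is a hypothetical helper inside package gover:

package gover

import "fmt"

// demoFromToolchain mirrors the FromToolchain test cases in this change.
func demoFromToolchain() {
	fmt.Println(FromToolchain("go1.2.3"))         // "1.2.3"
	fmt.Println(FromToolchain("go1.2.3-bigcorp")) // "1.2.3": the -suffix is stripped
	fmt.Println(FromToolchain("1.2.3"))           // "": missing the "go" prefix
	fmt.Println(FromToolchain("gccgo-go1.23rc4")) // "": not a "goVERSION" name

	// maybeToolchainVersion accepts either spelling.
	fmt.Println(maybeToolchainVersion("1.2.3"))   // "1.2.3"
	fmt.Println(maybeToolchainVersion("go1.2.3")) // "1.2.3"
}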
+
+// maybeToolchainVersion interprets name as either a Go version ("1.2.3")
+// or a toolchain name ("go1.2.3") and returns the corresponding Go version,
+// or "" if name is neither.
+func maybeToolchainVersion(name string) string {
+ if IsValid(name) {
+ return name
+ }
+ return FromToolchain(name)
+}
+
+// Startup records the information that went into the startup-time version switch.
+// It is initialized by switchGoToolchain.
+var Startup struct {
+ GOTOOLCHAIN string // $GOTOOLCHAIN setting
+ AutoFile string // go.mod or go.work file consulted
+ AutoGoVersion string // go line found in file
+ AutoToolchain string // toolchain line found in file
+}
+
+// A TooNewError explains that a module is too new for this version of Go.
+type TooNewError struct {
+ What string
+ GoVersion string
+ Toolchain string // for callers if they want to use it, but not printed
+}
+
+func (e *TooNewError) Error() string {
+ var explain string
+ if Startup.GOTOOLCHAIN != "" && Startup.GOTOOLCHAIN != "auto" {
+ explain = "; GOTOOLCHAIN=" + Startup.GOTOOLCHAIN
+ }
+ if Startup.AutoFile != "" && (Startup.AutoGoVersion != "" || Startup.AutoToolchain != "") {
+ explain += fmt.Sprintf("; %s sets ", base.ShortPath(Startup.AutoFile))
+ if Startup.AutoToolchain != "" {
+ explain += "toolchain " + Startup.AutoToolchain
+ } else {
+ explain += "go " + Startup.AutoGoVersion
+ }
+ }
+ return fmt.Sprintf("%v requires go >= %v (running go %v%v)", e.What, e.GoVersion, Local(), explain)
+}
+
+var ErrTooNew = errors.New("module too new")
+
+func (e *TooNewError) Is(err error) bool {
+ return err == ErrTooNew
+}
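A sketch of how callers are expected to detect this condition; demoTooNew is a made-up name and the snippet assumes it sits inside package gover:

package gover

import (
	"errors"
	"fmt"
)

// demoTooNew shows how errors.Is and errors.As interact with TooNewError.
func demoTooNew() {
	var err error = &TooNewError{What: "example.com/m", GoVersion: "1.99"}

	// (*TooNewError).Is makes errors.Is(err, ErrTooNew) report true,
	// so callers can detect the condition without a type assertion.
	fmt.Println(errors.Is(err, ErrTooNew)) // true

	// Callers that want to switch toolchains can still recover the details.
	var tooNew *TooNewError
	if errors.As(err, &tooNew) {
		fmt.Println(tooNew.GoVersion) // "1.99"
	}
}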
+
+// A Switcher provides the ability to switch to a new toolchain in response to TooNewErrors.
+// See [cmd/go/internal/toolchain.Switcher] for documentation.
+type Switcher interface {
+ Error(err error)
+ Switch(ctx context.Context)
+}
diff --git a/src/cmd/go/internal/gover/toolchain_test.go b/src/cmd/go/internal/gover/toolchain_test.go
new file mode 100644
index 0000000..d1c22fb
--- /dev/null
+++ b/src/cmd/go/internal/gover/toolchain_test.go
@@ -0,0 +1,19 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gover
+
+import "testing"
+
+func TestFromToolchain(t *testing.T) { test1(t, fromToolchainTests, "FromToolchain", FromToolchain) }
+
+var fromToolchainTests = []testCase1[string, string]{
+ {"go1.2.3", "1.2.3"},
+ {"1.2.3", ""},
+ {"go1.2.3+bigcorp", ""},
+ {"go1.2.3-bigcorp", "1.2.3"},
+ {"go1.2.3-bigcorp more text", "1.2.3"},
+ {"gccgo-go1.23rc4", ""},
+ {"gccgo-go1.23rc4-bigdwarf", ""},
+}
diff --git a/src/cmd/go/internal/gover/version.go b/src/cmd/go/internal/gover/version.go
new file mode 100644
index 0000000..2681013
--- /dev/null
+++ b/src/cmd/go/internal/gover/version.go
@@ -0,0 +1,74 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gover
+
+import "golang.org/x/mod/modfile"
+
+const (
+ // NarrowAllVersion is the Go version at which the
+ // module-mode "all" pattern no longer closes over the dependencies of
+ // tests outside of the main module.
+ NarrowAllVersion = "1.16"
+
+ // DefaultGoModVersion is the Go version to assume for go.mod files
+ // that do not declare a Go version. The go command has been
+ // writing go versions to modules since Go 1.12, so a go.mod
+ // without a version is either very old or recently hand-written.
+ // Since we can't tell which, we have to assume it's very old.
+ // The semantics of the go.mod changed at Go 1.17 to support
+ // graph pruning. If we see a go.mod without a go line, we have to
+ // assume Go 1.16 so that we interpret the requirements correctly.
+ // Note that this default must stay at Go 1.16; it cannot be moved forward.
+ DefaultGoModVersion = "1.16"
+
+ // DefaultGoWorkVersion is the Go version to assume for go.work files
+ // that do not declare a Go version. Workspaces were added in Go 1.18,
+ // so use that.
+ DefaultGoWorkVersion = "1.18"
+
+ // ExplicitIndirectVersion is the Go version at which a
+ // module's go.mod file is expected to list explicit requirements on every
+ // module that provides any package transitively imported by that module.
+ //
+ // Other indirect dependencies of such a module can be safely pruned out of
+ // the module graph; see https://golang.org/ref/mod#graph-pruning.
+ ExplicitIndirectVersion = "1.17"
+
+ // SeparateIndirectVersion is the Go version at which
+ // "// indirect" dependencies are added in a block separate from the direct
+ // ones. See https://golang.org/issue/45965.
+ SeparateIndirectVersion = "1.17"
+
+ // TidyGoModSumVersion is the Go version at which
+ // 'go mod tidy' preserves go.mod checksums needed to build test dependencies
+ // of packages in "all", so that 'go test all' can be run without checksum
+ // errors.
+ // See https://go.dev/issue/56222.
+ TidyGoModSumVersion = "1.21"
+
+ // GoStrictVersion is the Go version at which the Go versions
+ // became "strict" in the sense that, restricted to modules at this version
+ // or later, every module must have a go version line ≥ all its dependencies.
+ // It is also the version after which "too new" a version is considered a fatal error.
+ GoStrictVersion = "1.21"
+)
+
+// FromGoMod returns the go version from the go.mod file.
+// It returns DefaultGoModVersion if the go.mod file does not contain a go line or if mf is nil.
+func FromGoMod(mf *modfile.File) string {
+ if mf == nil || mf.Go == nil {
+ return DefaultGoModVersion
+ }
+ return mf.Go.Version
+}
+
+// FromGoWork returns the go version from the go.work file.
+// It returns DefaultGoWorkVersion if the go.work file does not contain a go line or if wf is nil.
+func FromGoWork(wf *modfile.WorkFile) string {
+ if wf == nil || wf.Go == nil {
+ return DefaultGoWorkVersion
+ }
+ return wf.Go.Version
+}
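A sketch of the DefaultGoModVersion fallback in action, using the golang.org/x/mod/modfile parser that produces the *modfile.File these functions consume; demoFromGoMod is a made-up name and the snippet assumes it sits inside package gover:

package gover

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

// demoFromGoMod shows the DefaultGoModVersion fallback in action.
func demoFromGoMod() {
	withGo, _ := modfile.Parse("go.mod", []byte("module example.com/m\n\ngo 1.21\n"), nil)
	withoutGo, _ := modfile.Parse("go.mod", []byte("module example.com/m\n"), nil)

	fmt.Println(FromGoMod(withGo))    // "1.21"
	fmt.Println(FromGoMod(withoutGo)) // "1.16": no go line, so DefaultGoModVersion
	fmt.Println(FromGoMod(nil))       // "1.16": a missing file gets the same default
}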
diff --git a/src/cmd/go/internal/help/help.go b/src/cmd/go/internal/help/help.go
new file mode 100644
index 0000000..aeaba78
--- /dev/null
+++ b/src/cmd/go/internal/help/help.go
@@ -0,0 +1,195 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package help implements the “go help” command.
+package help
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "text/template"
+ "unicode"
+ "unicode/utf8"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/modload"
+)
+
+// Help implements the 'help' command.
+func Help(w io.Writer, args []string) {
+ // 'go help documentation' generates doc.go.
+ if len(args) == 1 && args[0] == "documentation" {
+ fmt.Fprintln(w, "// Copyright 2011 The Go Authors. All rights reserved.")
+ fmt.Fprintln(w, "// Use of this source code is governed by a BSD-style")
+ fmt.Fprintln(w, "// license that can be found in the LICENSE file.")
+ fmt.Fprintln(w)
+ fmt.Fprintln(w, "// Code generated by 'go test cmd/go -v -run=TestDocsUpToDate -fixdocs'; DO NOT EDIT.")
+ fmt.Fprintln(w, "// Edit the documentation in other files and then execute 'go generate cmd/go' to generate this one.")
+ fmt.Fprintln(w)
+ buf := new(strings.Builder)
+ PrintUsage(buf, base.Go)
+ usage := &base.Command{Long: buf.String()}
+ cmds := []*base.Command{usage}
+ for _, cmd := range base.Go.Commands {
+ // Avoid duplication of the "get" documentation.
+ if cmd.UsageLine == "module-get" && modload.Enabled() {
+ continue
+ } else if cmd.UsageLine == "gopath-get" && !modload.Enabled() {
+ continue
+ }
+ cmds = append(cmds, cmd)
+ cmds = append(cmds, cmd.Commands...)
+ }
+ tmpl(&commentWriter{W: w}, documentationTemplate, cmds)
+ fmt.Fprintln(w, "package main")
+ return
+ }
+
+ cmd := base.Go
+Args:
+ for i, arg := range args {
+ for _, sub := range cmd.Commands {
+ if sub.Name() == arg {
+ cmd = sub
+ continue Args
+ }
+ }
+
+ // helpSuccess is the help command using as many args as possible that would succeed.
+ helpSuccess := "go help"
+ if i > 0 {
+ helpSuccess += " " + strings.Join(args[:i], " ")
+ }
+ fmt.Fprintf(os.Stderr, "go help %s: unknown help topic. Run '%s'.\n", strings.Join(args, " "), helpSuccess)
+ base.SetExitStatus(2) // failed at 'go help cmd'
+ base.Exit()
+ }
+
+ if len(cmd.Commands) > 0 {
+ PrintUsage(os.Stdout, cmd)
+ } else {
+ tmpl(os.Stdout, helpTemplate, cmd)
+ }
+ // not exit 2: succeeded at 'go help cmd'.
+ return
+}
+
+var usageTemplate = `{{.Long | trim}}
+
+Usage:
+
+ {{.UsageLine}} <command> [arguments]
+
+The commands are:
+{{range .Commands}}{{if or (.Runnable) .Commands}}
+ {{.Name | printf "%-11s"}} {{.Short}}{{end}}{{end}}
+
+Use "go help{{with .LongName}} {{.}}{{end}} <command>" for more information about a command.
+{{if eq (.UsageLine) "go"}}
+Additional help topics:
+{{range .Commands}}{{if and (not .Runnable) (not .Commands)}}
+ {{.Name | printf "%-15s"}} {{.Short}}{{end}}{{end}}
+
+Use "go help{{with .LongName}} {{.}}{{end}} <topic>" for more information about that topic.
+{{end}}
+`
+
+var helpTemplate = `{{if .Runnable}}usage: {{.UsageLine}}
+
+{{end}}{{.Long | trim}}
+`
+
+var documentationTemplate = `{{range .}}{{if .Short}}{{.Short | capitalize}}
+
+{{end}}{{if .Commands}}` + usageTemplate + `{{else}}{{if .Runnable}}Usage:
+
+ {{.UsageLine}}
+
+{{end}}{{.Long | trim}}
+
+
+{{end}}{{end}}`
+
+// commentWriter writes a Go comment to the underlying io.Writer,
+// using line comment form (//).
+type commentWriter struct {
+ W io.Writer
+ wroteSlashes bool // Wrote "//" at the beginning of the current line.
+}
+
+func (c *commentWriter) Write(p []byte) (int, error) {
+ var n int
+ for i, b := range p {
+ if !c.wroteSlashes {
+ s := "//"
+ if b != '\n' {
+ s = "// "
+ }
+ if _, err := io.WriteString(c.W, s); err != nil {
+ return n, err
+ }
+ c.wroteSlashes = true
+ }
+ n0, err := c.W.Write(p[i : i+1])
+ n += n0
+ if err != nil {
+ return n, err
+ }
+ if b == '\n' {
+ c.wroteSlashes = false
+ }
+ }
+ return len(p), nil
+}
+
+// An errWriter wraps a writer, recording whether a write error occurred.
+type errWriter struct {
+ w io.Writer
+ err error
+}
+
+func (w *errWriter) Write(b []byte) (int, error) {
+ n, err := w.w.Write(b)
+ if err != nil {
+ w.err = err
+ }
+ return n, err
+}
+
+// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data any) {
+ t := template.New("top")
+ t.Funcs(template.FuncMap{"trim": strings.TrimSpace, "capitalize": capitalize})
+ template.Must(t.Parse(text))
+ ew := &errWriter{w: w}
+ err := t.Execute(ew, data)
+ if ew.err != nil {
+ // I/O error writing. Ignore write on closed pipe.
+ if strings.Contains(ew.err.Error(), "pipe") {
+ base.SetExitStatus(1)
+ base.Exit()
+ }
+ base.Fatalf("writing output: %v", ew.err)
+ }
+ if err != nil {
+ panic(err)
+ }
+}
+
+func capitalize(s string) string {
+ if s == "" {
+ return s
+ }
+ r, n := utf8.DecodeRuneInString(s)
+ return string(unicode.ToTitle(r)) + s[n:]
+}
+
+func PrintUsage(w io.Writer, cmd *base.Command) {
+ bw := bufio.NewWriter(w)
+ tmpl(bw, usageTemplate, cmd)
+ bw.Flush()
+}
diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go
new file mode 100644
index 0000000..68ac4d2
--- /dev/null
+++ b/src/cmd/go/internal/help/helpdoc.go
@@ -0,0 +1,945 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package help
+
+import "cmd/go/internal/base"
+
+var HelpC = &base.Command{
+ UsageLine: "c",
+ Short: "calling between Go and C",
+ Long: `
+There are two different ways to call between Go and C/C++ code.
+
+The first is the cgo tool, which is part of the Go distribution. For
+information on how to use it see the cgo documentation (go doc cmd/cgo).
+
+The second is the SWIG program, which is a general tool for
+interfacing between languages. For information on SWIG see
+http://swig.org/. When running go build, any file with a .swig
+extension will be passed to SWIG. Any file with a .swigcxx extension
+will be passed to SWIG with the -c++ option.
+
+When either cgo or SWIG is used, go build will pass any .c, .m, .s, .S
+or .sx files to the C compiler, and any .cc, .cpp, .cxx files to the C++
+compiler. The CC or CXX environment variables may be set to determine
+the C or C++ compiler, respectively, to use.
+ `,
+}
+
+var HelpPackages = &base.Command{
+ UsageLine: "packages",
+ Short: "package lists and patterns",
+ Long: `
+Many commands apply to a set of packages:
+
+ go <action> [packages]
+
+Usually, [packages] is a list of import paths.
+
+An import path that is a rooted path or that begins with
+a . or .. element is interpreted as a file system path and
+denotes the package in that directory.
+
+Otherwise, the import path P denotes the package found in
+the directory DIR/src/P for some DIR listed in the GOPATH
+environment variable (For more details see: 'go help gopath').
+
+If no import paths are given, the action applies to the
+package in the current directory.
+
+There are four reserved names for paths that should not be used
+for packages to be built with the go tool:
+
+- "main" denotes the top-level package in a stand-alone executable.
+
+- "all" expands to all packages found in all the GOPATH
+trees. For example, 'go list all' lists all the packages on the local
+system. When using modules, "all" expands to all packages in
+the main module and their dependencies, including dependencies
+needed by tests of any of those.
+
+- "std" is like all but expands to just the packages in the standard
+Go library.
+
+- "cmd" expands to the Go repository's commands and their
+internal libraries.
+
+Import paths beginning with "cmd/" only match source code in
+the Go repository.
+
+An import path is a pattern if it includes one or more "..." wildcards,
+each of which can match any string, including the empty string and
+strings containing slashes. Such a pattern expands to all package
+directories found in the GOPATH trees with names matching the
+patterns.
+
+To make common patterns more convenient, there are two special cases.
+First, /... at the end of the pattern can match an empty string,
+so that net/... matches both net and packages in its subdirectories, like net/http.
+Second, any slash-separated pattern element containing a wildcard never
+participates in a match of the "vendor" element in the path of a vendored
+package, so that ./... does not match packages in subdirectories of
+./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do.
+Note, however, that a directory named vendor that itself contains code
+is not a vendored package: cmd/vendor would be a command named vendor,
+and the pattern cmd/... matches it.
+See golang.org/s/go15vendor for more about vendoring.
+
+An import path can also name a package to be downloaded from
+a remote repository. Run 'go help importpath' for details.
+
+Every package in a program must have a unique import path.
+By convention, this is arranged by starting each path with a
+unique prefix that belongs to you. For example, paths used
+internally at Google all begin with 'google', and paths
+denoting remote repositories begin with the path to the code,
+such as 'github.com/user/repo'.
+
+Packages in a program need not have unique package names,
+but there are two reserved package names with special meaning.
+The name main indicates a command, not a library.
+Commands are built into binaries and cannot be imported.
+The name documentation indicates documentation for
+a non-Go program in the directory. Files in package documentation
+are ignored by the go command.
+
+As a special case, if the package list is a list of .go files from a
+single directory, the command is applied to a single synthesized
+package made up of exactly those files, ignoring any build constraints
+in those files and ignoring any other files in the directory.
+
+Directory and file names that begin with "." or "_" are ignored
+by the go tool, as are directories named "testdata".
+ `,
+}
+
+var HelpImportPath = &base.Command{
+ UsageLine: "importpath",
+ Short: "import path syntax",
+ Long: `
+
+An import path (see 'go help packages') denotes a package stored in the local
+file system. In general, an import path denotes either a standard package (such
+as "unicode/utf8") or a package found in one of the work spaces (For more
+details see: 'go help gopath').
+
+Relative import paths
+
+An import path beginning with ./ or ../ is called a relative path.
+The toolchain supports relative import paths as a shortcut in two ways.
+
+First, a relative path can be used as a shorthand on the command line.
+If you are working in the directory containing the code imported as
+"unicode" and want to run the tests for "unicode/utf8", you can type
+"go test ./utf8" instead of needing to specify the full path.
+Similarly, in the reverse situation, "go test .." will test "unicode" from
+the "unicode/utf8" directory. Relative patterns are also allowed, like
+"go test ./..." to test all subdirectories. See 'go help packages' for details
+on the pattern syntax.
+
+Second, if you are compiling a Go program not in a work space,
+you can use a relative path in an import statement in that program
+to refer to nearby code also not in a work space.
+This makes it easy to experiment with small multipackage programs
+outside of the usual work spaces, but such programs cannot be
+installed with "go install" (there is no work space in which to install them),
+so they are rebuilt from scratch each time they are built.
+To avoid ambiguity, Go programs cannot use relative import paths
+within a work space.
+
+Remote import paths
+
+Certain import paths also
+describe how to obtain the source code for the package using
+a revision control system.
+
+A few common code hosting sites have special syntax:
+
+ Bitbucket (Git, Mercurial)
+
+ import "bitbucket.org/user/project"
+ import "bitbucket.org/user/project/sub/directory"
+
+ GitHub (Git)
+
+ import "github.com/user/project"
+ import "github.com/user/project/sub/directory"
+
+ Launchpad (Bazaar)
+
+ import "launchpad.net/project"
+ import "launchpad.net/project/series"
+ import "launchpad.net/project/series/sub/directory"
+
+ import "launchpad.net/~user/project/branch"
+ import "launchpad.net/~user/project/branch/sub/directory"
+
+ IBM DevOps Services (Git)
+
+ import "hub.jazz.net/git/user/project"
+ import "hub.jazz.net/git/user/project/sub/directory"
+
+For code hosted on other servers, import paths may either be qualified
+with the version control type, or the go tool can dynamically fetch
+the import path over https/http and discover where the code resides
+from a <meta> tag in the HTML.
+
+To declare the code location, an import path of the form
+
+ repository.vcs/path
+
+specifies the given repository, with or without the .vcs suffix,
+using the named version control system, and then the path inside
+that repository. The supported version control systems are:
+
+ Bazaar .bzr
+ Fossil .fossil
+ Git .git
+ Mercurial .hg
+ Subversion .svn
+
+For example,
+
+ import "example.org/user/foo.hg"
+
+denotes the root directory of the Mercurial repository at
+example.org/user/foo or foo.hg, and
+
+ import "example.org/repo.git/foo/bar"
+
+denotes the foo/bar directory of the Git repository at
+example.org/repo or repo.git.
+
+When a version control system supports multiple protocols,
+each is tried in turn when downloading. For example, a Git
+download tries https://, then git+ssh://.
+
+By default, downloads are restricted to known secure protocols
+(e.g. https, ssh). To override this setting for Git downloads, the
+GIT_ALLOW_PROTOCOL environment variable can be set (For more details see:
+'go help environment').
+
+If the import path is not a known code hosting site and also lacks a
+version control qualifier, the go tool attempts to fetch the import
+over https/http and looks for a <meta> tag in the document's HTML
+<head>.
+
+The meta tag has the form:
+
+ <meta name="go-import" content="import-prefix vcs repo-root">
+
+The import-prefix is the import path corresponding to the repository
+root. It must be a prefix or an exact match of the package being
+fetched with "go get". If it's not an exact match, another http
+request is made at the prefix to verify the <meta> tags match.
+
+The meta tag should appear as early in the file as possible.
+In particular, it should appear before any raw JavaScript or CSS,
+to avoid confusing the go command's restricted parser.
+
+The vcs is one of "bzr", "fossil", "git", "hg", "svn".
+
+The repo-root is the root of the version control system
+containing a scheme and not containing a .vcs qualifier.
+
+For example,
+
+ import "example.org/pkg/foo"
+
+will result in the following requests:
+
+ https://example.org/pkg/foo?go-get=1 (preferred)
+ http://example.org/pkg/foo?go-get=1 (fallback, only with use of correctly set GOINSECURE)
+
+If that page contains the meta tag
+
+ <meta name="go-import" content="example.org git https://code.org/r/p/exproj">
+
+the go tool will verify that https://example.org/?go-get=1 contains the
+same meta tag and then git clone https://code.org/r/p/exproj into
+GOPATH/src/example.org.
+
+When using GOPATH, downloaded packages are written to the first directory
+listed in the GOPATH environment variable.
+(See 'go help gopath-get' and 'go help gopath'.)
+
+When using modules, downloaded packages are stored in the module cache.
+See https://golang.org/ref/mod#module-cache.
+
+When using modules, an additional variant of the go-import meta tag is
+recognized and is preferred over those listing version control systems.
+That variant uses "mod" as the vcs in the content value, as in:
+
+ <meta name="go-import" content="example.org mod https://code.org/moduleproxy">
+
+This tag means to fetch modules with paths beginning with example.org
+from the module proxy available at the URL https://code.org/moduleproxy.
+See https://golang.org/ref/mod#goproxy-protocol for details about the
+proxy protocol.
+
+Import path checking
+
+When the custom import path feature described above redirects to a
+known code hosting site, each of the resulting packages has two possible
+import paths, using the custom domain or the known hosting site.
+
+A package statement is said to have an "import comment" if it is immediately
+followed (before the next newline) by a comment of one of these two forms:
+
+ package math // import "path"
+ package math /* import "path" */
+
+The go command will refuse to install a package with an import comment
+unless it is being referred to by that import path. In this way, import comments
+let package authors make sure the custom import path is used and not a
+direct path to the underlying code hosting site.
+
+Import path checking is disabled for code found within vendor trees.
+This makes it possible to copy code into alternate locations in vendor trees
+without needing to update import comments.
+
+Import path checking is also disabled when using modules.
+Import path comments are obsoleted by the go.mod file's module statement.
+
+See https://golang.org/s/go14customimport for details.
+ `,
+}
+
+var HelpGopath = &base.Command{
+ UsageLine: "gopath",
+ Short: "GOPATH environment variable",
+ Long: `
+The Go path is used to resolve import statements.
+It is implemented by and documented in the go/build package.
+
+The GOPATH environment variable lists places to look for Go code.
+On Unix, the value is a colon-separated string.
+On Windows, the value is a semicolon-separated string.
+On Plan 9, the value is a list.
+
+If the environment variable is unset, GOPATH defaults
+to a subdirectory named "go" in the user's home directory
+($HOME/go on Unix, %USERPROFILE%\go on Windows),
+unless that directory holds a Go distribution.
+Run "go env GOPATH" to see the current GOPATH.
+
+See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH.
+
+Each directory listed in GOPATH must have a prescribed structure:
+
+The src directory holds source code. The path below src
+determines the import path or executable name.
+
+The pkg directory holds installed package objects.
+As in the Go tree, each target operating system and
+architecture pair has its own subdirectory of pkg
+(pkg/GOOS_GOARCH).
+
+If DIR is a directory listed in the GOPATH, a package with
+source in DIR/src/foo/bar can be imported as "foo/bar" and
+has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a".
+
+The bin directory holds compiled commands.
+Each command is named for its source directory, but only
+the final element, not the entire path. That is, the
+command with source in DIR/src/foo/quux is installed into
+DIR/bin/quux, not DIR/bin/foo/quux. The "foo/" prefix is stripped
+so that you can add DIR/bin to your PATH to get at the
+installed commands. If the GOBIN environment variable is
+set, commands are installed to the directory it names instead
+of DIR/bin. GOBIN must be an absolute path.
+
+Here's an example directory layout:
+
+ GOPATH=/home/user/go
+
+ /home/user/go/
+ src/
+ foo/
+ bar/ (go code in package bar)
+ x.go
+ quux/ (go code in package main)
+ y.go
+ bin/
+ quux (installed command)
+ pkg/
+ linux_amd64/
+ foo/
+ bar.a (installed package object)
+
+Go searches each directory listed in GOPATH to find source code,
+but new packages are always downloaded into the first directory
+in the list.
+
+See https://golang.org/doc/code.html for an example.
+
+GOPATH and Modules
+
+When using modules, GOPATH is no longer used for resolving imports.
+However, it is still used to store downloaded source code (in GOPATH/pkg/mod)
+and compiled commands (in GOPATH/bin).
+
+Internal Directories
+
+Code in or below a directory named "internal" is importable only
+by code in the directory tree rooted at the parent of "internal".
+Here's an extended version of the directory layout above:
+
+ /home/user/go/
+ src/
+ crash/
+ bang/ (go code in package bang)
+ b.go
+ foo/ (go code in package foo)
+ f.go
+ bar/ (go code in package bar)
+ x.go
+ internal/
+ baz/ (go code in package baz)
+ z.go
+ quux/ (go code in package main)
+ y.go
+
+
+The code in z.go is imported as "foo/internal/baz", but that
+import statement can only appear in source files in the subtree
+rooted at foo. The source files foo/f.go, foo/bar/x.go, and
+foo/quux/y.go can all import "foo/internal/baz", but the source file
+crash/bang/b.go cannot.
+
+See https://golang.org/s/go14internal for details.
+
+Vendor Directories
+
+Go 1.6 includes support for using local copies of external dependencies
+to satisfy imports of those dependencies, often referred to as vendoring.
+
+Code below a directory named "vendor" is importable only
+by code in the directory tree rooted at the parent of "vendor",
+and only using an import path that omits the prefix up to and
+including the vendor element.
+
+Here's the example from the previous section,
+but with the "internal" directory renamed to "vendor"
+and a new foo/vendor/crash/bang directory added:
+
+ /home/user/go/
+ src/
+ crash/
+ bang/ (go code in package bang)
+ b.go
+ foo/ (go code in package foo)
+ f.go
+ bar/ (go code in package bar)
+ x.go
+ vendor/
+ crash/
+ bang/ (go code in package bang)
+ b.go
+ baz/ (go code in package baz)
+ z.go
+ quux/ (go code in package main)
+ y.go
+
+The same visibility rules apply as for internal, but the code
+in z.go is imported as "baz", not as "foo/vendor/baz".
+
+Code in vendor directories deeper in the source tree shadows
+code in higher directories. Within the subtree rooted at foo, an import
+of "crash/bang" resolves to "foo/vendor/crash/bang", not the
+top-level "crash/bang".
+
+Code in vendor directories is not subject to import path
+checking (see 'go help importpath').
+
+When 'go get' checks out or updates a git repository, it now also
+updates submodules.
+
+Vendor directories do not affect the placement of new repositories
+being checked out for the first time by 'go get': those are always
+placed in the main GOPATH, never in a vendor subtree.
+
+See https://golang.org/s/go15vendor for details.
+ `,
+}
+
+var HelpEnvironment = &base.Command{
+ UsageLine: "environment",
+ Short: "environment variables",
+ Long: `
+
+The go command and the tools it invokes consult environment variables
+for configuration. If an environment variable is unset or empty, the go
+command uses a sensible default setting. To see the effective setting of
+the variable <NAME>, run 'go env <NAME>'. To change the default setting,
+run 'go env -w <NAME>=<VALUE>'. Defaults changed using 'go env -w'
+are recorded in a Go environment configuration file stored in the
+per-user configuration directory, as reported by os.UserConfigDir.
+The location of the configuration file can be changed by setting
+the environment variable GOENV, and 'go env GOENV' prints the
+effective location, but 'go env -w' cannot change the default location.
+See 'go help env' for details.
+
+General-purpose environment variables:
+
+ GO111MODULE
+ Controls whether the go command runs in module-aware mode or GOPATH mode.
+ May be "off", "on", or "auto".
+ See https://golang.org/ref/mod#mod-commands.
+ GCCGO
+ The gccgo command to run for 'go build -compiler=gccgo'.
+ GOARCH
+ The architecture, or processor, for which to compile code.
+ Examples are amd64, 386, arm, ppc64.
+ GOBIN
+ The directory where 'go install' will install a command.
+ GOCACHE
+ The directory where the go command will store cached
+ information for reuse in future builds.
+ GOMODCACHE
+ The directory where the go command will store downloaded modules.
+ GODEBUG
+ Enable various debugging facilities. See https://go.dev/doc/godebug
+ for details.
+ GOENV
+ The location of the Go environment configuration file.
+ Cannot be set using 'go env -w'.
+ Setting GOENV=off in the environment disables the use of the
+ default configuration file.
+ GOFLAGS
+ A space-separated list of -flag=value settings to apply
+ to go commands by default, when the given flag is known by
+ the current command. Each entry must be a standalone flag.
+ Because the entries are space-separated, flag values must
+ not contain spaces. Flags listed on the command line
+ are applied after this list and therefore override it.
+ GOINSECURE
+ Comma-separated list of glob patterns (in the syntax of Go's path.Match)
+ of module path prefixes that should always be fetched in an insecure
+ manner. Only applies to dependencies that are being fetched directly.
+ GOINSECURE does not disable checksum database validation. GOPRIVATE or
+ GONOSUMDB may be used to achieve that.
+ GOOS
+ The operating system for which to compile code.
+ Examples are linux, darwin, windows, netbsd.
+ GOPATH
+ Controls where various files are stored. See: 'go help gopath'.
+ GOPROXY
+ URL of Go module proxy. See https://golang.org/ref/mod#environment-variables
+ and https://golang.org/ref/mod#module-proxy for details.
+ GOPRIVATE, GONOPROXY, GONOSUMDB
+ Comma-separated list of glob patterns (in the syntax of Go's path.Match)
+ of module path prefixes that should always be fetched directly
+ or that should not be compared against the checksum database.
+ See https://golang.org/ref/mod#private-modules.
+ GOROOT
+ The root of the go tree.
+ GOSUMDB
+ The name of checksum database to use and optionally its public key and
+ URL. See https://golang.org/ref/mod#authenticating.
+ GOTOOLCHAIN
+ Controls which Go toolchain is used. See https://go.dev/doc/toolchain.
+ GOTMPDIR
+ The directory where the go command will write
+ temporary source files, packages, and binaries.
+ GOVCS
+ Lists version control commands that may be used with matching servers.
+ See 'go help vcs'.
+ GOWORK
+ In module aware mode, use the given go.work file as a workspace file.
+ By default or when GOWORK is "auto", the go command searches for a
+ file named go.work in the current directory and then containing directories
+ until one is found. If a valid go.work file is found, the modules
+ specified will collectively be used as the main modules. If GOWORK
+ is "off", or a go.work file is not found in "auto" mode, workspace
+ mode is disabled.
+
+Environment variables for use with cgo:
+
+ AR
+ The command to use to manipulate library archives when
+ building with the gccgo compiler.
+ The default is 'ar'.
+ CC
+ The command to use to compile C code.
+ CGO_ENABLED
+ Whether the cgo command is supported. Either 0 or 1.
+ CGO_CFLAGS
+ Flags that cgo will pass to the compiler when compiling
+ C code.
+ CGO_CFLAGS_ALLOW
+ A regular expression specifying additional flags to allow
+ to appear in #cgo CFLAGS source code directives.
+ Does not apply to the CGO_CFLAGS environment variable.
+ CGO_CFLAGS_DISALLOW
+ A regular expression specifying flags that must be disallowed
+ from appearing in #cgo CFLAGS source code directives.
+ Does not apply to the CGO_CFLAGS environment variable.
+ CGO_CPPFLAGS, CGO_CPPFLAGS_ALLOW, CGO_CPPFLAGS_DISALLOW
+ Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
+ but for the C preprocessor.
+ CGO_CXXFLAGS, CGO_CXXFLAGS_ALLOW, CGO_CXXFLAGS_DISALLOW
+ Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
+ but for the C++ compiler.
+ CGO_FFLAGS, CGO_FFLAGS_ALLOW, CGO_FFLAGS_DISALLOW
+ Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
+ but for the Fortran compiler.
+ CGO_LDFLAGS, CGO_LDFLAGS_ALLOW, CGO_LDFLAGS_DISALLOW
+ Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW,
+ but for the linker.
+ CXX
+ The command to use to compile C++ code.
+ FC
+ The command to use to compile Fortran code.
+ PKG_CONFIG
+ Path to pkg-config tool.
+
+Architecture-specific environment variables:
+
+ GOARM
+ For GOARCH=arm, the ARM architecture for which to compile.
+ Valid values are 5, 6, 7.
+ GO386
+ For GOARCH=386, how to implement floating point instructions.
+ Valid values are sse2 (default), softfloat.
+ GOAMD64
+ For GOARCH=amd64, the microarchitecture level for which to compile.
+ Valid values are v1 (default), v2, v3, v4.
+ See https://golang.org/wiki/MinimumRequirements#amd64
+ GOMIPS
+ For GOARCH=mips{,le}, whether to use floating point instructions.
+ Valid values are hardfloat (default), softfloat.
+ GOMIPS64
+ For GOARCH=mips64{,le}, whether to use floating point instructions.
+ Valid values are hardfloat (default), softfloat.
+ GOPPC64
+ For GOARCH=ppc64{,le}, the target ISA (Instruction Set Architecture).
+ Valid values are power8 (default), power9, power10.
+ GOWASM
+ For GOARCH=wasm, comma-separated list of experimental WebAssembly features to use.
+ Valid values are satconv, signext.
+
+Environment variables for use with code coverage:
+
+ GOCOVERDIR
+ Directory into which to write code coverage data files
+ generated by running a "go build -cover" binary.
+ Requires that GOEXPERIMENT=coverageredesign is enabled.
+
+Special-purpose environment variables:
+
+ GCCGOTOOLDIR
+ If set, where to find gccgo tools, such as cgo.
+ The default is based on how gccgo was configured.
+ GOEXPERIMENT
+ Comma-separated list of toolchain experiments to enable or disable.
+ The list of available experiments may change arbitrarily over time.
+ See src/internal/goexperiment/flags.go for currently valid values.
+ Warning: This variable is provided for the development and testing
+ of the Go toolchain itself. Use beyond that purpose is unsupported.
+ GOROOT_FINAL
+ The root of the installed Go tree, when it is
+ installed in a location other than where it is built.
+ File names in stack traces are rewritten from GOROOT to
+ GOROOT_FINAL.
+ GO_EXTLINK_ENABLED
+ Whether the linker should use external linking mode
+ when using -linkmode=auto with code that uses cgo.
+ Set to 0 to disable external linking mode, 1 to enable it.
+ GIT_ALLOW_PROTOCOL
+ Defined by Git. A colon-separated list of schemes that are allowed
+ to be used with git fetch/clone. If set, any scheme not explicitly
+ mentioned will be considered insecure by 'go get'.
+ Because the variable is defined by Git, the default value cannot
+ be set using 'go env -w'.
+
+Additional information available from 'go env' but not read from the environment:
+
+ GOEXE
+ The executable file name suffix (".exe" on Windows, "" on other systems).
+ GOGCCFLAGS
+ A space-separated list of arguments supplied to the CC command.
+ GOHOSTARCH
+ The architecture (GOARCH) of the Go toolchain binaries.
+ GOHOSTOS
+ The operating system (GOOS) of the Go toolchain binaries.
+ GOMOD
+ The absolute path to the go.mod of the main module.
+ If module-aware mode is enabled, but there is no go.mod, GOMOD will be
+ os.DevNull ("/dev/null" on Unix-like systems, "NUL" on Windows).
+ If module-aware mode is disabled, GOMOD will be the empty string.
+ GOTOOLDIR
+ The directory where the go tools (compile, cover, doc, etc...) are installed.
+ GOVERSION
+ The version of the installed Go tree, as reported by runtime.Version.
+ `,
+}
+
+var HelpFileType = &base.Command{
+ UsageLine: "filetype",
+ Short: "file types",
+ Long: `
+The go command examines the contents of a restricted set of files
+in each directory. It identifies which files to examine based on
+the extension of the file name. These extensions are:
+
+ .go
+ Go source files.
+ .c, .h
+ C source files.
+ If the package uses cgo or SWIG, these will be compiled with the
+ OS-native compiler (typically gcc); otherwise they will
+ trigger an error.
+ .cc, .cpp, .cxx, .hh, .hpp, .hxx
+ C++ source files. Only useful with cgo or SWIG, and always
+ compiled with the OS-native compiler.
+ .m
+ Objective-C source files. Only useful with cgo, and always
+ compiled with the OS-native compiler.
+ .s, .S, .sx
+ Assembler source files.
+ If the package uses cgo or SWIG, these will be assembled with the
+ OS-native assembler (typically gcc (sic)); otherwise they
+ will be assembled with the Go assembler.
+ .swig, .swigcxx
+ SWIG definition files.
+ .syso
+ System object files.
+
+Files of each of these types except .syso may contain build
+constraints, but the go command stops scanning for build constraints
+at the first item in the file that is not a blank line or //-style
+line comment. See the go/build package documentation for
+more details.
+ `,
+}
+
+var HelpBuildmode = &base.Command{
+ UsageLine: "buildmode",
+ Short: "build modes",
+ Long: `
+The 'go build' and 'go install' commands take a -buildmode argument which
+indicates which kind of object file is to be built. Currently supported values
+are:
+
+ -buildmode=archive
+ Build the listed non-main packages into .a files. Packages named
+ main are ignored.
+
+ -buildmode=c-archive
+ Build the listed main package, plus all packages it imports,
+ into a C archive file. The only callable symbols will be those
+ functions exported using a cgo //export comment. Requires
+ exactly one main package to be listed.
+
+ -buildmode=c-shared
+ Build the listed main package, plus all packages it imports,
+ into a C shared library. The only callable symbols will
+ be those functions exported using a cgo //export comment.
+ Requires exactly one main package to be listed.
+
+ -buildmode=default
+ Listed main packages are built into executables and listed
+ non-main packages are built into .a files (the default
+ behavior).
+
+ -buildmode=shared
+ Combine all the listed non-main packages into a single shared
+ library that will be used when building with the -linkshared
+ option. Packages named main are ignored.
+
+ -buildmode=exe
+ Build the listed main packages and everything they import into
+ executables. Packages not named main are ignored.
+
+ -buildmode=pie
+ Build the listed main packages and everything they import into
+ position independent executables (PIE). Packages not named
+ main are ignored.
+
+ -buildmode=plugin
+ Build the listed main packages, plus all packages that they
+ import, into a Go plugin. Packages not named main are ignored.
+
+On AIX, when linking a C program that uses a Go archive built with
+-buildmode=c-archive, you must pass -Wl,-bnoobjreorder to the C compiler.
+`,
+}
+
+var HelpCache = &base.Command{
+ UsageLine: "cache",
+ Short: "build and test caching",
+ Long: `
+The go command caches build outputs for reuse in future builds.
+The default location for cache data is a subdirectory named go-build
+in the standard user cache directory for the current operating system.
+Setting the GOCACHE environment variable overrides this default,
+and running 'go env GOCACHE' prints the current cache directory.
+
+The go command periodically deletes cached data that has not been
+used recently. Running 'go clean -cache' deletes all cached data.
+
+The build cache correctly accounts for changes to Go source files,
+compilers, compiler options, and so on: cleaning the cache explicitly
+should not be necessary in typical use. However, the build cache
+does not detect changes to C libraries imported with cgo.
+If you have made changes to the C libraries on your system, you
+will need to clean the cache explicitly or else use the -a build flag
+(see 'go help build') to force rebuilding of packages that
+depend on the updated C libraries.
+
+The go command also caches successful package test results.
+See 'go help test' for details. Running 'go clean -testcache' removes
+all cached test results (but not cached build results).
+
+The go command also caches values used in fuzzing with 'go test -fuzz',
+specifically, values that expanded code coverage when passed to a
+fuzz function. These values are not used for regular building and
+testing, but they're stored in a subdirectory of the build cache.
+Running 'go clean -fuzzcache' removes all cached fuzzing values.
+This may make fuzzing less effective, temporarily.
+
+The GODEBUG environment variable can enable printing of debugging
+information about the state of the cache:
+
+GODEBUG=gocacheverify=1 causes the go command to bypass the
+use of any cache entries and instead rebuild everything and check
+that the results match existing cache entries.
+
+GODEBUG=gocachehash=1 causes the go command to print the inputs
+for all of the content hashes it uses to construct cache lookup keys.
+The output is voluminous but can be useful for debugging the cache.
+
+GODEBUG=gocachetest=1 causes the go command to print details of its
+decisions about whether to reuse a cached test result.
+`,
+}
+
+var HelpBuildConstraint = &base.Command{
+ UsageLine: "buildconstraint",
+ Short: "build constraints",
+ Long: `
+A build constraint, also known as a build tag, is a condition under which a
+file should be included in the package. Build constraints are given by a
+line comment that begins
+
+ //go:build
+
+Constraints may appear in any kind of source file (not just Go), but
+they must appear near the top of the file, preceded
+only by blank lines and other line comments. These rules mean that in Go
+files a build constraint must appear before the package clause.
+
+To distinguish build constraints from package documentation,
+a build constraint should be followed by a blank line.
+
+A build constraint comment is evaluated as an expression containing
+build tags combined by ||, &&, and ! operators and parentheses.
+Operators have the same meaning as in Go.
+
+For example, the following build constraint constrains a file to
+build when the "linux" and "386" constraints are satisfied, or when
+"darwin" is satisfied and "cgo" is not:
+
+ //go:build (linux && 386) || (darwin && !cgo)
+
+It is an error for a file to have more than one //go:build line.
+
+During a particular build, the following build tags are satisfied:
+
+ - the target operating system, as spelled by runtime.GOOS, set with the
+ GOOS environment variable.
+ - the target architecture, as spelled by runtime.GOARCH, set with the
+ GOARCH environment variable.
+ - any architecture features, in the form GOARCH.feature
+ (for example, "amd64.v2"), as detailed below.
+ - "unix", if GOOS is a Unix or Unix-like system.
+ - the compiler being used, either "gc" or "gccgo"
+ - "cgo", if the cgo command is supported (see CGO_ENABLED in
+ 'go help environment').
+ - a term for each Go major release, through the current version:
+ "go1.1" from Go version 1.1 onward, "go1.12" from Go 1.12, and so on.
+ - any additional tags given by the -tags flag (see 'go help build').
+
+There are no separate build tags for beta or minor releases.
+
+If a file's name, after stripping the extension and a possible _test suffix,
+matches any of the following patterns:
+ *_GOOS
+ *_GOARCH
+ *_GOOS_GOARCH
+(example: source_windows_amd64.go) where GOOS and GOARCH represent
+any known operating system and architecture values respectively, then
+the file is considered to have an implicit build constraint requiring
+those terms (in addition to any explicit constraints in the file).
+
+Using GOOS=android matches build tags and files as for GOOS=linux
+in addition to android tags and files.
+
+Using GOOS=illumos matches build tags and files as for GOOS=solaris
+in addition to illumos tags and files.
+
+Using GOOS=ios matches build tags and files as for GOOS=darwin
+in addition to ios tags and files.
+
+The defined architecture feature build tags are:
+
+ - For GOARCH=386, GO386=387 and GO386=sse2
+ set the 386.387 and 386.sse2 build tags, respectively.
+ - For GOARCH=amd64, GOAMD64=v1, v2, and v3
+ correspond to the amd64.v1, amd64.v2, and amd64.v3 feature build tags.
+ - For GOARCH=arm, GOARM=5, 6, and 7
+ correspond to the arm.5, arm.6, and arm.7 feature build tags.
+ - For GOARCH=mips or mipsle,
+ GOMIPS=hardfloat and softfloat
+ correspond to the mips.hardfloat and mips.softfloat
+ (or mipsle.hardfloat and mipsle.softfloat) feature build tags.
+ - For GOARCH=mips64 or mips64le,
+ GOMIPS64=hardfloat and softfloat
+ correspond to the mips64.hardfloat and mips64.softfloat
+ (or mips64le.hardfloat and mips64le.softfloat) feature build tags.
+ - For GOARCH=ppc64 or ppc64le,
+ GOPPC64=power8, power9, and power10 correspond to the
+ ppc64.power8, ppc64.power9, and ppc64.power10
+ (or ppc64le.power8, ppc64le.power9, and ppc64le.power10)
+ feature build tags.
+ - For GOARCH=wasm, GOWASM=satconv and signext
+ correspond to the wasm.satconv and wasm.signext feature build tags.
+
+For GOARCH=amd64, arm, ppc64, and ppc64le, a particular feature level
+sets the feature build tags for all previous levels as well.
+For example, GOAMD64=v2 sets the amd64.v1 and amd64.v2 feature flags.
+This ensures that code making use of v2 features continues to compile
+when, say, GOAMD64=v4 is introduced.
+Code handling the absence of a particular feature level
+should use a negation:
+
+ //go:build !amd64.v2
+
+To keep a file from being considered for any build:
+
+ //go:build ignore
+
+(Any other unsatisfied word will work as well, but "ignore" is conventional.)
+
+To build a file only when using cgo, and only on Linux and OS X:
+
+ //go:build cgo && (linux || darwin)
+
+Such a file is usually paired with another file implementing the
+default functionality for other systems, which in this case would
+carry the constraint:
+
+ //go:build !(cgo && (linux || darwin))
+
+Naming a file dns_windows.go will cause it to be included only when
+building the package for Windows; similarly, math_386.s will be included
+only when building the package for 32-bit x86.
+
+Go versions 1.16 and earlier used a different syntax for build constraints,
+with a "// +build" prefix. The gofmt command will add an equivalent //go:build
+constraint when encountering the older syntax.
+`,
+}
diff --git a/src/cmd/go/internal/imports/build.go b/src/cmd/go/internal/imports/build.go
new file mode 100644
index 0000000..3a4a66b
--- /dev/null
+++ b/src/cmd/go/internal/imports/build.go
@@ -0,0 +1,374 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied from Go distribution src/go/build/build.go, syslist.go.
+// That package does not export the ability to process raw file data,
+// although we could fake it with an appropriate build.Context
+// and a lot of unwrapping.
+// More importantly, that package does not implement the tags["*"]
+// special case, in which both tag and !tag are considered to be true
+// for essentially all tags (except "ignore").
+//
+// If we added this API to go/build directly, we wouldn't need this
+// file anymore, but this API is not terribly general-purpose and we
+// don't really want to commit to any public form of it, nor do we
+// want to move the core parts of go/build into a top-level internal package.
+// These details change very infrequently, so the copy is fine.
+
+package imports
+
+import (
+ "bytes"
+ "cmd/go/internal/cfg"
+ "errors"
+ "fmt"
+ "go/build/constraint"
+ "strings"
+ "unicode"
+)
+
+var (
+ bSlashSlash = []byte("//")
+ bStarSlash = []byte("*/")
+ bSlashStar = []byte("/*")
+ bPlusBuild = []byte("+build")
+
+ goBuildComment = []byte("//go:build")
+
+ errMultipleGoBuild = errors.New("multiple //go:build comments")
+)
+
+func isGoBuildComment(line []byte) bool {
+ if !bytes.HasPrefix(line, goBuildComment) {
+ return false
+ }
+ line = bytes.TrimSpace(line)
+ rest := line[len(goBuildComment):]
+ return len(rest) == 0 || len(bytes.TrimSpace(rest)) < len(rest)
+}
+
+// ShouldBuild reports whether it is okay to use this file.
+// The rule is that in the file's leading run of // comments
+// and blank lines, which must be followed by a blank line
+// (to avoid including a Go package clause doc comment),
+// lines beginning with '// +build' are taken as build directives.
+//
+// The file is accepted only if each such line lists something
+// matching the file. For example:
+//
+// // +build windows linux
+//
+// marks the file as applicable only on Windows and Linux.
+//
+// If tags["*"] is true, then ShouldBuild will consider every
+// build tag except "ignore" to be both true and false for
+// the purpose of satisfying build tags, in order to estimate
+// (conservatively) whether a file could ever possibly be used
+// in any build.
+func ShouldBuild(content []byte, tags map[string]bool) bool {
+ // Identify leading run of // comments and blank lines,
+ // which must be followed by a blank line.
+ // Also identify any //go:build comments.
+ content, goBuild, _, err := parseFileHeader(content)
+ if err != nil {
+ return false
+ }
+
+ // If //go:build line is present, it controls.
+ // Otherwise fall back to +build processing.
+ var shouldBuild bool
+ switch {
+ case goBuild != nil:
+ x, err := constraint.Parse(string(goBuild))
+ if err != nil {
+ return false
+ }
+ shouldBuild = eval(x, tags, true)
+
+ default:
+ shouldBuild = true
+ p := content
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if !bytes.HasPrefix(line, bSlashSlash) || !bytes.Contains(line, bPlusBuild) {
+ continue
+ }
+ text := string(line)
+ if !constraint.IsPlusBuild(text) {
+ continue
+ }
+ if x, err := constraint.Parse(text); err == nil {
+ if !eval(x, tags, true) {
+ shouldBuild = false
+ }
+ }
+ }
+ }
+
+ return shouldBuild
+}
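To illustrate both modes of evaluation, an ordinary tag set versus the tags["*"] estimate, here is a sketch written as if inside package imports; demoShouldBuild is a made-up name:

package imports

import "fmt"

// demoShouldBuild contrasts concrete tag sets with the tags["*"] estimate.
func demoShouldBuild() {
	src := []byte("// Copyright notice.\n\n//go:build linux && amd64\n\npackage p\n")

	// With concrete tags, ShouldBuild answers: would this file build now?
	fmt.Println(ShouldBuild(src, map[string]bool{"linux": true, "amd64": true}))  // true
	fmt.Println(ShouldBuild(src, map[string]bool{"darwin": true, "arm64": true})) // false

	// With tags["*"], every tag except "ignore" counts as satisfiable,
	// so the answer estimates whether the file could build in any configuration.
	fmt.Println(ShouldBuild(src, map[string]bool{"*": true})) // true
	ignored := []byte("//go:build ignore\n\npackage p\n")
	fmt.Println(ShouldBuild(ignored, map[string]bool{"*": true})) // false
}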
+
+func parseFileHeader(content []byte) (trimmed, goBuild []byte, sawBinaryOnly bool, err error) {
+ end := 0
+ p := content
+ ended := false // found non-blank, non-// line, so stopped accepting // +build lines
+ inSlashStar := false // in /* */ comment
+
+Lines:
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if len(line) == 0 && !ended { // Blank line
+ // Remember position of most recent blank line.
+ // When we find the first non-blank, non-// line,
+ // this "end" position marks the latest file position
+ // where a // +build line can appear.
+ // (It must appear _before_ a blank line before the non-blank, non-// line.
+ // Yes, that's confusing, which is part of why we moved to //go:build lines.)
+ // Note that ended==false here means that inSlashStar==false,
+ // since seeing a /* would have set ended==true.
+ end = len(content) - len(p)
+ continue Lines
+ }
+ if !bytes.HasPrefix(line, bSlashSlash) { // Not comment line
+ ended = true
+ }
+
+ if !inSlashStar && isGoBuildComment(line) {
+ if goBuild != nil {
+ return nil, nil, false, errMultipleGoBuild
+ }
+ goBuild = line
+ }
+
+ Comments:
+ for len(line) > 0 {
+ if inSlashStar {
+ if i := bytes.Index(line, bStarSlash); i >= 0 {
+ inSlashStar = false
+ line = bytes.TrimSpace(line[i+len(bStarSlash):])
+ continue Comments
+ }
+ continue Lines
+ }
+ if bytes.HasPrefix(line, bSlashSlash) {
+ continue Lines
+ }
+ if bytes.HasPrefix(line, bSlashStar) {
+ inSlashStar = true
+ line = bytes.TrimSpace(line[len(bSlashStar):])
+ continue Comments
+ }
+ // Found non-comment text.
+ break Lines
+ }
+ }
+
+ return content[:end], goBuild, sawBinaryOnly, nil
+}
+
+// matchTag reports whether the tag name is valid and tags[name] is true.
+// As a special case, if tags["*"] is true and name is not empty or ignore,
+// then matchTag will return prefer instead of the actual answer,
+// which allows the caller to pretend in that case that most tags are
+// both true and false.
+func matchTag(name string, tags map[string]bool, prefer bool) bool {
+ // Tags must be letters, digits, underscores or dots.
+ // Unlike in Go identifiers, all digits are fine (e.g., "386").
+ for _, c := range name {
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' {
+ return false
+ }
+ }
+
+ if tags["*"] && name != "" && name != "ignore" {
+ // Special case for gathering all possible imports:
+ // if we put * in the tags map then all tags
+ // except "ignore" are considered both present and not
+ // (so we return true no matter how 'want' is set).
+ return prefer
+ }
+
+ if tags[name] {
+ return true
+ }
+
+ switch name {
+ case "linux":
+ return tags["android"]
+ case "solaris":
+ return tags["illumos"]
+ case "darwin":
+ return tags["ios"]
+ case "unix":
+ return unixOS[cfg.BuildContext.GOOS]
+ default:
+ return false
+ }
+}
+
+// eval is like
+//
+// x.Eval(func(tag string) bool { return matchTag(tag, tags, prefer) })
+//
+// except that it implements the special case for tags["*"] meaning
+// all tags are both true and false at the same time.
+func eval(x constraint.Expr, tags map[string]bool, prefer bool) bool {
+ switch x := x.(type) {
+ case *constraint.TagExpr:
+ return matchTag(x.Tag, tags, prefer)
+ case *constraint.NotExpr:
+ return !eval(x.X, tags, !prefer)
+ case *constraint.AndExpr:
+ return eval(x.X, tags, prefer) && eval(x.Y, tags, prefer)
+ case *constraint.OrExpr:
+ return eval(x.X, tags, prefer) || eval(x.Y, tags, prefer)
+ }
+ panic(fmt.Sprintf("unexpected constraint expression %T", x))
+}
+
+// Eval is like
+//
+// x.Eval(func(tag string) bool { return matchTag(tag, tags, prefer) })
+//
+// except that it implements the special case for tags["*"] meaning
+// all tags are both true and false at the same time.
+func Eval(x constraint.Expr, tags map[string]bool, prefer bool) bool {
+ return eval(x, tags, prefer)
+}
+
+// MatchFile returns false if the name contains a $GOOS or $GOARCH
+// suffix which does not match the current system.
+// The recognized name formats are:
+//
+// name_$(GOOS).*
+// name_$(GOARCH).*
+// name_$(GOOS)_$(GOARCH).*
+// name_$(GOOS)_test.*
+// name_$(GOARCH)_test.*
+// name_$(GOOS)_$(GOARCH)_test.*
+//
+// Exceptions:
+//
+// if GOOS=android, then files with GOOS=linux are also matched.
+// if GOOS=illumos, then files with GOOS=solaris are also matched.
+// if GOOS=ios, then files with GOOS=darwin are also matched.
+//
+// If tags["*"] is true, then MatchFile will consider all possible
+// GOOS and GOARCH to be available and will consequently
+// always return true.
+func MatchFile(name string, tags map[string]bool) bool {
+ if tags["*"] {
+ return true
+ }
+ if dot := strings.Index(name, "."); dot != -1 {
+ name = name[:dot]
+ }
+
+ // Before Go 1.4, a file called "linux.go" would be equivalent to having a
+ // build tag "linux" in that file. For Go 1.4 and beyond, we require this
+ // auto-tagging to apply only to files with a non-empty prefix, so
+ // "foo_linux.go" is tagged but "linux.go" is not. This allows new operating
+ // systems, such as android, to arrive without breaking existing code with
+ // innocuous source code in "android.go". The easiest fix: cut everything
+ // in the name before the initial _.
+ i := strings.Index(name, "_")
+ if i < 0 {
+ return true
+ }
+ name = name[i:] // ignore everything before first _
+
+ l := strings.Split(name, "_")
+ if n := len(l); n > 0 && l[n-1] == "test" {
+ l = l[:n-1]
+ }
+ n := len(l)
+ if n >= 2 && KnownOS[l[n-2]] && KnownArch[l[n-1]] {
+ return matchTag(l[n-2], tags, true) && matchTag(l[n-1], tags, true)
+ }
+ if n >= 1 && KnownOS[l[n-1]] {
+ return matchTag(l[n-1], tags, true)
+ }
+ if n >= 1 && KnownArch[l[n-1]] {
+ return matchTag(l[n-1], tags, true)
+ }
+ return true
+}
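A hypothetical sketch (again assuming it lives in this package, with fmt imported) of how MatchFile applies the filename rules and the android exception listed above:

func exampleMatchFile() {
	tags := map[string]bool{"android": true, "arm64": true}
	fmt.Println(MatchFile("f_linux.go", tags))       // true: android also matches GOOS=linux files
	fmt.Println(MatchFile("f_linux_arm64.go", tags)) // true: GOOS and GOARCH suffixes both match
	fmt.Println(MatchFile("f_windows.go", tags))     // false: GOOS suffix does not match
	fmt.Println(MatchFile("linux.go", tags))         // true: no "_" in the name, so no implicit tag
}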
+
+var KnownOS = map[string]bool{
+ "aix": true,
+ "android": true,
+ "darwin": true,
+ "dragonfly": true,
+ "freebsd": true,
+ "hurd": true,
+ "illumos": true,
+ "ios": true,
+ "js": true,
+ "linux": true,
+ "nacl": true, // legacy; don't remove
+ "netbsd": true,
+ "openbsd": true,
+ "plan9": true,
+ "solaris": true,
+ "wasip1": true,
+ "windows": true,
+ "zos": true,
+}
+
+// unixOS is the set of GOOS values matched by the "unix" build tag.
+// This is not used for filename matching.
+// This is the same list as in go/build/syslist.go and cmd/dist/build.go.
+var unixOS = map[string]bool{
+ "aix": true,
+ "android": true,
+ "darwin": true,
+ "dragonfly": true,
+ "freebsd": true,
+ "hurd": true,
+ "illumos": true,
+ "ios": true,
+ "linux": true,
+ "netbsd": true,
+ "openbsd": true,
+ "solaris": true,
+}
+
+var KnownArch = map[string]bool{
+ "386": true,
+ "amd64": true,
+ "amd64p32": true, // legacy; don't remove
+ "arm": true,
+ "armbe": true,
+ "arm64": true,
+ "arm64be": true,
+ "ppc64": true,
+ "ppc64le": true,
+ "mips": true,
+ "mipsle": true,
+ "mips64": true,
+ "mips64le": true,
+ "mips64p32": true,
+ "mips64p32le": true,
+ "loong64": true,
+ "ppc": true,
+ "riscv": true,
+ "riscv64": true,
+ "s390": true,
+ "s390x": true,
+ "sparc": true,
+ "sparc64": true,
+ "wasm": true,
+}
diff --git a/src/cmd/go/internal/imports/read.go b/src/cmd/go/internal/imports/read.go
new file mode 100644
index 0000000..70d5190
--- /dev/null
+++ b/src/cmd/go/internal/imports/read.go
@@ -0,0 +1,263 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied from Go distribution src/go/build/read.go.
+
+package imports
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "unicode/utf8"
+)
+
+type importReader struct {
+ b *bufio.Reader
+ buf []byte
+ peek byte
+ err error
+ eof bool
+ nerr int
+}
+
+var bom = []byte{0xef, 0xbb, 0xbf}
+
+func newImportReader(b *bufio.Reader) *importReader {
+ // Remove leading UTF-8 BOM.
+ // Per https://golang.org/ref/spec#Source_code_representation:
+ // a compiler may ignore a UTF-8-encoded byte order mark (U+FEFF)
+ // if it is the first Unicode code point in the source text.
+ if leadingBytes, err := b.Peek(3); err == nil && bytes.Equal(leadingBytes, bom) {
+ b.Discard(3)
+ }
+ return &importReader{b: b}
+}
+
+func isIdent(c byte) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf
+}
+
+var (
+ errSyntax = errors.New("syntax error")
+ errNUL = errors.New("unexpected NUL in input")
+)
+
+// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
+func (r *importReader) syntaxError() {
+ if r.err == nil {
+ r.err = errSyntax
+ }
+}
+
+// readByte reads the next byte from the input, saves it in buf, and returns it.
+// If an error occurs, readByte records the error in r.err and returns 0.
+func (r *importReader) readByte() byte {
+ c, err := r.b.ReadByte()
+ if err == nil {
+ r.buf = append(r.buf, c)
+ if c == 0 {
+ err = errNUL
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ r.eof = true
+ } else if r.err == nil {
+ r.err = err
+ }
+ c = 0
+ }
+ return c
+}
+
+// peekByte returns the next byte from the input reader but does not advance beyond it.
+// If skipSpace is set, peekByte skips leading spaces and comments.
+func (r *importReader) peekByte(skipSpace bool) byte {
+ if r.err != nil {
+ if r.nerr++; r.nerr > 10000 {
+ panic("go/build: import reader looping")
+ }
+ return 0
+ }
+
+ // Use r.peek as first input byte.
+ // Don't just return r.peek here: it might have been left by peekByte(false)
+ // and this might be peekByte(true).
+ c := r.peek
+ if c == 0 {
+ c = r.readByte()
+ }
+ for r.err == nil && !r.eof {
+ if skipSpace {
+ // For the purposes of this reader, semicolons are never necessary to
+ // understand the input and are treated as spaces.
+ switch c {
+ case ' ', '\f', '\t', '\r', '\n', ';':
+ c = r.readByte()
+ continue
+
+ case '/':
+ c = r.readByte()
+ if c == '/' {
+ for c != '\n' && r.err == nil && !r.eof {
+ c = r.readByte()
+ }
+ } else if c == '*' {
+ var c1 byte
+ for (c != '*' || c1 != '/') && r.err == nil {
+ if r.eof {
+ r.syntaxError()
+ }
+ c, c1 = c1, r.readByte()
+ }
+ } else {
+ r.syntaxError()
+ }
+ c = r.readByte()
+ continue
+ }
+ }
+ break
+ }
+ r.peek = c
+ return r.peek
+}
+
+// nextByte is like peekByte but advances beyond the returned byte.
+func (r *importReader) nextByte(skipSpace bool) byte {
+ c := r.peekByte(skipSpace)
+ r.peek = 0
+ return c
+}
+
+// readKeyword reads the given keyword from the input.
+// If the keyword is not present, readKeyword records a syntax error.
+func (r *importReader) readKeyword(kw string) {
+ r.peekByte(true)
+ for i := 0; i < len(kw); i++ {
+ if r.nextByte(false) != kw[i] {
+ r.syntaxError()
+ return
+ }
+ }
+ if isIdent(r.peekByte(false)) {
+ r.syntaxError()
+ }
+}
+
+// readIdent reads an identifier from the input.
+// If an identifier is not present, readIdent records a syntax error.
+func (r *importReader) readIdent() {
+ c := r.peekByte(true)
+ if !isIdent(c) {
+ r.syntaxError()
+ return
+ }
+ for isIdent(r.peekByte(false)) {
+ r.peek = 0
+ }
+}
+
+// readString reads a quoted string literal from the input.
+// If a quoted string is not present, readString records a syntax error.
+func (r *importReader) readString(save *[]string) {
+ switch r.nextByte(true) {
+ case '`':
+ start := len(r.buf) - 1
+ for r.err == nil {
+ if r.nextByte(false) == '`' {
+ if save != nil {
+ *save = append(*save, string(r.buf[start:]))
+ }
+ break
+ }
+ if r.eof {
+ r.syntaxError()
+ }
+ }
+ case '"':
+ start := len(r.buf) - 1
+ for r.err == nil {
+ c := r.nextByte(false)
+ if c == '"' {
+ if save != nil {
+ *save = append(*save, string(r.buf[start:]))
+ }
+ break
+ }
+ if r.eof || c == '\n' {
+ r.syntaxError()
+ }
+ if c == '\\' {
+ r.nextByte(false)
+ }
+ }
+ default:
+ r.syntaxError()
+ }
+}
+
+// readImport reads an import clause - optional identifier followed by quoted string -
+// from the input.
+func (r *importReader) readImport(imports *[]string) {
+ c := r.peekByte(true)
+ if c == '.' {
+ r.peek = 0
+ } else if isIdent(c) {
+ r.readIdent()
+ }
+ r.readString(imports)
+}
+
+// ReadComments is like io.ReadAll, except that it only reads the leading
+// block of comments in the file.
+func ReadComments(f io.Reader) ([]byte, error) {
+ r := newImportReader(bufio.NewReader(f))
+ r.peekByte(true)
+ if r.err == nil && !r.eof {
+ // Didn't reach EOF, so must have found a non-space byte. Remove it.
+ r.buf = r.buf[:len(r.buf)-1]
+ }
+ return r.buf, r.err
+}
+
+// ReadImports is like io.ReadAll, except that it expects a Go file as input
+// and stops reading the input once the imports have completed.
+func ReadImports(f io.Reader, reportSyntaxError bool, imports *[]string) ([]byte, error) {
+ r := newImportReader(bufio.NewReader(f))
+
+ r.readKeyword("package")
+ r.readIdent()
+ for r.peekByte(true) == 'i' {
+ r.readKeyword("import")
+ if r.peekByte(true) == '(' {
+ r.nextByte(false)
+ for r.peekByte(true) != ')' && r.err == nil {
+ r.readImport(imports)
+ }
+ r.nextByte(false)
+ } else {
+ r.readImport(imports)
+ }
+ }
+
+ // If we stopped successfully before EOF, we read a byte that told us we were done.
+ // Return all but that last byte, which would cause a syntax error if we let it through.
+ if r.err == nil && !r.eof {
+ return r.buf[:len(r.buf)-1], nil
+ }
+
+ // If we stopped for a syntax error, consume the whole file so that
+ // we are sure we don't change the errors that go/parser returns.
+ if r.err == errSyntax && !reportSyntaxError {
+ r.err = nil
+ for r.err == nil && !r.eof {
+ r.readByte()
+ }
+ }
+
+ return r.buf, r.err
+}
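For orientation, a minimal in-package sketch (hypothetical; fmt and strings are assumed to be imported) of driving ReadImports directly. Note that the saved entries keep their quotes, which is why callers such as scanFiles pass them through strconv.Unquote:

func exampleReadImports() {
	src := "package p\n\nimport (\n\t\"fmt\"\n\tm \"math\"\n)\n\nvar x = 1\n"
	var imports []string
	header, err := ReadImports(strings.NewReader(src), true, &imports)
	if err != nil {
		panic(err) // sketch only; real callers return the error
	}
	// header ends just before the first non-import token ("var");
	// imports holds the still-quoted paths "fmt" and "math".
	fmt.Println(len(header), imports)
}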
diff --git a/src/cmd/go/internal/imports/read_test.go b/src/cmd/go/internal/imports/read_test.go
new file mode 100644
index 0000000..6a1a652
--- /dev/null
+++ b/src/cmd/go/internal/imports/read_test.go
@@ -0,0 +1,254 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied from Go distribution src/go/build/read_test.go.
+
+package imports
+
+import (
+ "io"
+ "strings"
+ "testing"
+)
+
+const quote = "`"
+
+type readTest struct {
+ // Test input contains ℙ where readImports should stop.
+ in string
+ err string
+}
+
+var readImportsTests = []readTest{
+ {
+ `package p`,
+ "",
+ },
+ {
+ `package p; import "x"`,
+ "",
+ },
+ {
+ `package p; import . "x"`,
+ "",
+ },
+ {
+ `package p; import "x";ℙvar x = 1`,
+ "",
+ },
+ {
+ `package p
+
+ // comment
+
+ import "x"
+ import _ "x"
+ import a "x"
+
+ /* comment */
+
+ import (
+ "x" /* comment */
+ _ "x"
+ a "x" // comment
+ ` + quote + `x` + quote + `
+ _ /*comment*/ ` + quote + `x` + quote + `
+ a ` + quote + `x` + quote + `
+ )
+ import (
+ )
+ import ()
+ import()import()import()
+ import();import();import()
+
+ ℙvar x = 1
+ `,
+ "",
+ },
+ {
+ "\ufeff𝔻" + `package p; import "x";ℙvar x = 1`,
+ "",
+ },
+}
+
+var readCommentsTests = []readTest{
+ {
+ `ℙpackage p`,
+ "",
+ },
+ {
+ `ℙpackage p; import "x"`,
+ "",
+ },
+ {
+ `ℙpackage p; import . "x"`,
+ "",
+ },
+ {
+ "\ufeff𝔻" + `ℙpackage p; import . "x"`,
+ "",
+ },
+ {
+ `// foo
+
+ /* bar */
+
+ /* quux */ // baz
+
+ /*/ zot */
+
+ // asdf
+ ℙHello, world`,
+ "",
+ },
+ {
+ "\ufeff𝔻" + `// foo
+
+ /* bar */
+
+ /* quux */ // baz
+
+ /*/ zot */
+
+ // asdf
+ ℙHello, world`,
+ "",
+ },
+}
+
+func testRead(t *testing.T, tests []readTest, read func(io.Reader) ([]byte, error)) {
+ for i, tt := range tests {
+ var in, testOut string
+ j := strings.Index(tt.in, "ℙ")
+ if j < 0 {
+ in = tt.in
+ testOut = tt.in
+ } else {
+ in = tt.in[:j] + tt.in[j+len("ℙ"):]
+ testOut = tt.in[:j]
+ }
+ d := strings.Index(tt.in, "𝔻")
+ if d >= 0 {
+ in = in[:d] + in[d+len("𝔻"):]
+ testOut = testOut[d+len("𝔻"):]
+ }
+ r := strings.NewReader(in)
+ buf, err := read(r)
+ if err != nil {
+ if tt.err == "" {
+ t.Errorf("#%d: err=%q, expected success (%q)", i, err, string(buf))
+ continue
+ }
+ if !strings.Contains(err.Error(), tt.err) {
+ t.Errorf("#%d: err=%q, expected %q", i, err, tt.err)
+ continue
+ }
+ continue
+ }
+ if err == nil && tt.err != "" {
+ t.Errorf("#%d: success, expected %q", i, tt.err)
+ continue
+ }
+
+ out := string(buf)
+ if out != testOut {
+ t.Errorf("#%d: wrong output:\nhave %q\nwant %q\n", i, out, testOut)
+ }
+ }
+}
+
+func TestReadImports(t *testing.T) {
+ testRead(t, readImportsTests, func(r io.Reader) ([]byte, error) { return ReadImports(r, true, nil) })
+}
+
+func TestReadComments(t *testing.T) {
+ testRead(t, readCommentsTests, ReadComments)
+}
+
+var readFailuresTests = []readTest{
+ {
+ `package`,
+ "syntax error",
+ },
+ {
+ "package p\n\x00\nimport `math`\n",
+ "unexpected NUL in input",
+ },
+ {
+ `package p; import`,
+ "syntax error",
+ },
+ {
+ `package p; import "`,
+ "syntax error",
+ },
+ {
+ "package p; import ` \n\n",
+ "syntax error",
+ },
+ {
+ `package p; import "x`,
+ "syntax error",
+ },
+ {
+ `package p; import _`,
+ "syntax error",
+ },
+ {
+ `package p; import _ "`,
+ "syntax error",
+ },
+ {
+ `package p; import _ "x`,
+ "syntax error",
+ },
+ {
+ `package p; import .`,
+ "syntax error",
+ },
+ {
+ `package p; import . "`,
+ "syntax error",
+ },
+ {
+ `package p; import . "x`,
+ "syntax error",
+ },
+ {
+ `package p; import (`,
+ "syntax error",
+ },
+ {
+ `package p; import ("`,
+ "syntax error",
+ },
+ {
+ `package p; import ("x`,
+ "syntax error",
+ },
+ {
+ `package p; import ("x"`,
+ "syntax error",
+ },
+}
+
+func TestReadFailures(t *testing.T) {
+ // Errors should be reported (true arg to readImports).
+ testRead(t, readFailuresTests, func(r io.Reader) ([]byte, error) { return ReadImports(r, true, nil) })
+}
+
+func TestReadFailuresIgnored(t *testing.T) {
+ // Syntax errors should not be reported (false arg to readImports).
+ // Instead, entire file should be the output and no error.
+ // Convert tests not to return syntax errors.
+ tests := make([]readTest, len(readFailuresTests))
+ copy(tests, readFailuresTests)
+ for i := range tests {
+ tt := &tests[i]
+ if !strings.Contains(tt.err, "NUL") {
+ tt.err = ""
+ }
+ }
+ testRead(t, tests, func(r io.Reader) ([]byte, error) { return ReadImports(r, false, nil) })
+}
diff --git a/src/cmd/go/internal/imports/scan.go b/src/cmd/go/internal/imports/scan.go
new file mode 100644
index 0000000..ee11a87
--- /dev/null
+++ b/src/cmd/go/internal/imports/scan.go
@@ -0,0 +1,107 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import (
+ "fmt"
+ "io/fs"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+
+ "cmd/go/internal/fsys"
+)
+
+func ScanDir(dir string, tags map[string]bool) ([]string, []string, error) {
+ infos, err := fsys.ReadDir(dir)
+ if err != nil {
+ return nil, nil, err
+ }
+ var files []string
+ for _, info := range infos {
+ name := info.Name()
+
+ // If the directory entry is a symlink, stat it to obtain the info for the
+ // link target instead of the link itself.
+ if info.Mode()&fs.ModeSymlink != 0 {
+ info, err = fsys.Stat(filepath.Join(dir, name))
+ if err != nil {
+ continue // Ignore broken symlinks.
+ }
+ }
+
+ if info.Mode().IsRegular() && !strings.HasPrefix(name, "_") && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") && MatchFile(name, tags) {
+ files = append(files, filepath.Join(dir, name))
+ }
+ }
+ return scanFiles(files, tags, false)
+}
+
+func ScanFiles(files []string, tags map[string]bool) ([]string, []string, error) {
+ return scanFiles(files, tags, true)
+}
+
+func scanFiles(files []string, tags map[string]bool, explicitFiles bool) ([]string, []string, error) {
+ imports := make(map[string]bool)
+ testImports := make(map[string]bool)
+ numFiles := 0
+Files:
+ for _, name := range files {
+ r, err := fsys.Open(name)
+ if err != nil {
+ return nil, nil, err
+ }
+ var list []string
+ data, err := ReadImports(r, false, &list)
+ r.Close()
+ if err != nil {
+ return nil, nil, fmt.Errorf("reading %s: %v", name, err)
+ }
+
+ // import "C" is an implicit requirement of the cgo tag.
+ // When listing files on the command line (explicitFiles=true)
+ // we do not apply build tag filtering but we still do apply
+ // cgo filtering, so no explicitFiles check here.
+ // Why? Because we always have, and it's not worth breaking
+ // that behavior now.
+ for _, path := range list {
+ if path == `"C"` && !tags["cgo"] && !tags["*"] {
+ continue Files
+ }
+ }
+
+ if !explicitFiles && !ShouldBuild(data, tags) {
+ continue
+ }
+ numFiles++
+ m := imports
+ if strings.HasSuffix(name, "_test.go") {
+ m = testImports
+ }
+ for _, p := range list {
+ q, err := strconv.Unquote(p)
+ if err != nil {
+ continue
+ }
+ m[q] = true
+ }
+ }
+ if numFiles == 0 {
+ return nil, nil, ErrNoGo
+ }
+ return keys(imports), keys(testImports), nil
+}
+
+var ErrNoGo = fmt.Errorf("no Go source files")
+
+func keys(m map[string]bool) []string {
+ var list []string
+ for k := range m {
+ list = append(list, k)
+ }
+ sort.Strings(list)
+ return list
+}
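A hedged usage sketch for ScanDir; the directory and tag set are made up, and the function is assumed to be called from inside this package with fmt imported:

func exampleScanDir() {
	tags := map[string]bool{"linux": true, "amd64": true}
	imports, testImports, err := ScanDir("path/to/pkg", tags) // hypothetical directory
	if err != nil {
		panic(err) // sketch only
	}
	fmt.Println(imports)     // sorted imports of buildable non-test .go files
	fmt.Println(testImports) // sorted imports of buildable _test.go files
}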
diff --git a/src/cmd/go/internal/imports/scan_test.go b/src/cmd/go/internal/imports/scan_test.go
new file mode 100644
index 0000000..56efa90
--- /dev/null
+++ b/src/cmd/go/internal/imports/scan_test.go
@@ -0,0 +1,93 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import (
+ "bytes"
+ "internal/testenv"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+func TestScan(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ imports, testImports, err := ScanDir(filepath.Join(testenv.GOROOT(t), "src/encoding/json"), Tags())
+ if err != nil {
+ t.Fatal(err)
+ }
+ foundBase64 := false
+ for _, p := range imports {
+ if p == "encoding/base64" {
+ foundBase64 = true
+ }
+ if p == "encoding/binary" {
+ // A dependency but not an import
+ t.Errorf("json reported as importing encoding/binary but does not")
+ }
+ if p == "net/http" {
+ // A test import but not an import
+ t.Errorf("json reported as importing net/http but does not")
+ }
+ }
+ if !foundBase64 {
+ t.Errorf("json missing import encoding/base64 (%q)", imports)
+ }
+
+ foundHTTP := false
+ for _, p := range testImports {
+ if p == "net/http" {
+ foundHTTP = true
+ }
+ if p == "unicode/utf16" {
+ // A package import but not a test import
+ t.Errorf("json reported as test-importing unicode/utf16 but does not")
+ }
+ }
+ if !foundHTTP {
+ t.Errorf("json missing test import net/http (%q)", testImports)
+ }
+}
+
+func TestScanDir(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ dirs, err := os.ReadDir("testdata")
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, dir := range dirs {
+ if !dir.IsDir() || strings.HasPrefix(dir.Name(), ".") {
+ continue
+ }
+ t.Run(dir.Name(), func(t *testing.T) {
+ tagsData, err := os.ReadFile(filepath.Join("testdata", dir.Name(), "tags.txt"))
+ if err != nil {
+ t.Fatalf("error reading tags: %v", err)
+ }
+ tags := make(map[string]bool)
+ for _, t := range strings.Fields(string(tagsData)) {
+ tags[t] = true
+ }
+
+ wantData, err := os.ReadFile(filepath.Join("testdata", dir.Name(), "want.txt"))
+ if err != nil {
+ t.Fatalf("error reading want: %v", err)
+ }
+ want := string(bytes.TrimSpace(wantData))
+
+ imports, _, err := ScanDir(path.Join("testdata", dir.Name()), tags)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := strings.Join(imports, "\n")
+ if got != want {
+ t.Errorf("ScanDir: got imports:\n%s\n\nwant:\n%s", got, want)
+ }
+ })
+ }
+}
diff --git a/src/cmd/go/internal/imports/tags.go b/src/cmd/go/internal/imports/tags.go
new file mode 100644
index 0000000..d1467b8
--- /dev/null
+++ b/src/cmd/go/internal/imports/tags.go
@@ -0,0 +1,61 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import (
+ "cmd/go/internal/cfg"
+ "sync"
+)
+
+var (
+ tags map[string]bool
+ tagsOnce sync.Once
+)
+
+// Tags returns a set of build tags that are true for the target platform.
+// It includes GOOS, GOARCH, the compiler, possibly "cgo",
+// release tags like "go1.13", and user-specified build tags.
+func Tags() map[string]bool {
+ tagsOnce.Do(func() {
+ tags = loadTags()
+ })
+ return tags
+}
+
+func loadTags() map[string]bool {
+ tags := map[string]bool{
+ cfg.BuildContext.GOOS: true,
+ cfg.BuildContext.GOARCH: true,
+ cfg.BuildContext.Compiler: true,
+ }
+ if cfg.BuildContext.CgoEnabled {
+ tags["cgo"] = true
+ }
+ for _, tag := range cfg.BuildContext.BuildTags {
+ tags[tag] = true
+ }
+ for _, tag := range cfg.BuildContext.ToolTags {
+ tags[tag] = true
+ }
+ for _, tag := range cfg.BuildContext.ReleaseTags {
+ tags[tag] = true
+ }
+ return tags
+}
+
+var (
+ anyTags map[string]bool
+ anyTagsOnce sync.Once
+)
+
+// AnyTags returns a special set of build tags that satisfy nearly all
+// build tag expressions. Only "ignore" and malformed build tag requirements
+// are considered false.
+func AnyTags() map[string]bool {
+ anyTagsOnce.Do(func() {
+ anyTags = map[string]bool{"*": true}
+ })
+ return anyTags
+}
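A small in-package sketch (hypothetical, fmt assumed imported) contrasting Tags and AnyTags when fed to ShouldBuild from build.go in this package:

func exampleAnyTags() {
	src := []byte("//go:build !linux\n\npackage p\n")
	fmt.Println(ShouldBuild(src, Tags()))    // depends on the real GOOS/GOARCH/build tags
	fmt.Println(ShouldBuild(src, AnyTags())) // true: "*" satisfies both linux and !linux
	ign := []byte("//go:build ignore\n\npackage p\n")
	fmt.Println(ShouldBuild(ign, AnyTags())) // false: "ignore" is never satisfied
}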
diff --git a/src/cmd/go/internal/imports/testdata/android/.h.go b/src/cmd/go/internal/imports/testdata/android/.h.go
new file mode 100644
index 0000000..53c529e
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/android/.h.go
@@ -0,0 +1,3 @@
+package android
+
+import _ "h"
diff --git a/src/cmd/go/internal/imports/testdata/android/a_android.go b/src/cmd/go/internal/imports/testdata/android/a_android.go
new file mode 100644
index 0000000..2ed972e
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/android/a_android.go
@@ -0,0 +1,3 @@
+package android
+
+import _ "a"
diff --git a/src/cmd/go/internal/imports/testdata/android/b_android_arm64.go b/src/cmd/go/internal/imports/testdata/android/b_android_arm64.go
new file mode 100644
index 0000000..ee9c312
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/android/b_android_arm64.go
@@ -0,0 +1,3 @@
+package android
+
+import _ "b"
diff --git a/src/cmd/go/internal/imports/testdata/android/c_linux.go b/src/cmd/go/internal/imports/testdata/android/c_linux.go
new file mode 100644
index 0000000..91624ce
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/android/c_linux.go
@@ -0,0 +1,3 @@
+package android
+
+import _ "c"
diff --git a/src/cmd/go/internal/imports/testdata/android/d_linux_arm64.go b/src/cmd/go/internal/imports/testdata/android/d_linux_arm64.go
new file mode 100644
index 0000000..34e07df
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/android/d_linux_arm64.go
@@ -0,0 +1,3 @@
+package android
+
+import _ "d"
diff --git a/src/cmd/go/internal/imports/testdata/android/e.go b/src/cmd/go/internal/imports/testdata/android/e.go
new file mode 100644
index 0000000..f1b9c88
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/android/e.go
@@ -0,0 +1,6 @@
+//go:build android
+// +build android
+
+package android
+
+import _ "e"
diff --git a/src/cmd/go/internal/imports/testdata/android/f.go b/src/cmd/go/internal/imports/testdata/android/f.go
new file mode 100644
index 0000000..bb0ff7b
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/android/f.go
@@ -0,0 +1,6 @@
+//go:build linux
+// +build linux
+
+package android
+
+import _ "f"
diff --git a/src/cmd/go/internal/imports/testdata/android/g.go b/src/cmd/go/internal/imports/testdata/android/g.go
new file mode 100644
index 0000000..ee19424
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/android/g.go
@@ -0,0 +1,6 @@
+//go:build !android
+// +build !android
+
+package android
+
+import _ "g"
diff --git a/src/cmd/go/internal/imports/testdata/android/tags.txt b/src/cmd/go/internal/imports/testdata/android/tags.txt
new file mode 100644
index 0000000..aaf5a6b
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/android/tags.txt
@@ -0,0 +1 @@
+android arm64
\ No newline at end of file
diff --git a/src/cmd/go/internal/imports/testdata/android/want.txt b/src/cmd/go/internal/imports/testdata/android/want.txt
new file mode 100644
index 0000000..0fdf397
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/android/want.txt
@@ -0,0 +1,6 @@
+a
+b
+c
+d
+e
+f
diff --git a/src/cmd/go/internal/imports/testdata/illumos/.h.go b/src/cmd/go/internal/imports/testdata/illumos/.h.go
new file mode 100644
index 0000000..53c529e
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/illumos/.h.go
@@ -0,0 +1,3 @@
+package android
+
+import _ "h"
diff --git a/src/cmd/go/internal/imports/testdata/illumos/a_illumos.go b/src/cmd/go/internal/imports/testdata/illumos/a_illumos.go
new file mode 100644
index 0000000..2e6cb50
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/illumos/a_illumos.go
@@ -0,0 +1,3 @@
+package illumos
+
+import _ "a"
diff --git a/src/cmd/go/internal/imports/testdata/illumos/b_illumos_amd64.go b/src/cmd/go/internal/imports/testdata/illumos/b_illumos_amd64.go
new file mode 100644
index 0000000..2834d80
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/illumos/b_illumos_amd64.go
@@ -0,0 +1,3 @@
+package illumos
+
+import _ "b"
diff --git a/src/cmd/go/internal/imports/testdata/illumos/c_solaris.go b/src/cmd/go/internal/imports/testdata/illumos/c_solaris.go
new file mode 100644
index 0000000..d7f9462
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/illumos/c_solaris.go
@@ -0,0 +1,3 @@
+package illumos
+
+import _ "c"
diff --git a/src/cmd/go/internal/imports/testdata/illumos/d_solaris_amd64.go b/src/cmd/go/internal/imports/testdata/illumos/d_solaris_amd64.go
new file mode 100644
index 0000000..0f52c2b
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/illumos/d_solaris_amd64.go
@@ -0,0 +1,3 @@
+package illumos
+
+import _ "d"
diff --git a/src/cmd/go/internal/imports/testdata/illumos/e.go b/src/cmd/go/internal/imports/testdata/illumos/e.go
new file mode 100644
index 0000000..fddf2c4
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/illumos/e.go
@@ -0,0 +1,6 @@
+//go:build illumos
+// +build illumos
+
+package illumos
+
+import _ "e"
diff --git a/src/cmd/go/internal/imports/testdata/illumos/f.go b/src/cmd/go/internal/imports/testdata/illumos/f.go
new file mode 100644
index 0000000..4b6d528
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/illumos/f.go
@@ -0,0 +1,6 @@
+//go:build solaris
+// +build solaris
+
+package illumos
+
+import _ "f"
diff --git a/src/cmd/go/internal/imports/testdata/illumos/g.go b/src/cmd/go/internal/imports/testdata/illumos/g.go
new file mode 100644
index 0000000..1bf826b
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/illumos/g.go
@@ -0,0 +1,6 @@
+//go:build !illumos
+// +build !illumos
+
+package illumos
+
+import _ "g"
diff --git a/src/cmd/go/internal/imports/testdata/illumos/tags.txt b/src/cmd/go/internal/imports/testdata/illumos/tags.txt
new file mode 100644
index 0000000..b6386a3
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/illumos/tags.txt
@@ -0,0 +1 @@
+illumos amd64
diff --git a/src/cmd/go/internal/imports/testdata/illumos/want.txt b/src/cmd/go/internal/imports/testdata/illumos/want.txt
new file mode 100644
index 0000000..0fdf397
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/illumos/want.txt
@@ -0,0 +1,6 @@
+a
+b
+c
+d
+e
+f
diff --git a/src/cmd/go/internal/imports/testdata/star/tags.txt b/src/cmd/go/internal/imports/testdata/star/tags.txt
new file mode 100644
index 0000000..f59ec20
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/star/tags.txt
@@ -0,0 +1 @@
+*
\ No newline at end of file
diff --git a/src/cmd/go/internal/imports/testdata/star/want.txt b/src/cmd/go/internal/imports/testdata/star/want.txt
new file mode 100644
index 0000000..139f5f4
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/star/want.txt
@@ -0,0 +1,4 @@
+import1
+import2
+import3
+import4
diff --git a/src/cmd/go/internal/imports/testdata/star/x.go b/src/cmd/go/internal/imports/testdata/star/x.go
new file mode 100644
index 0000000..98f9191
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/star/x.go
@@ -0,0 +1,3 @@
+package x
+
+import "import1"
diff --git a/src/cmd/go/internal/imports/testdata/star/x1.go b/src/cmd/go/internal/imports/testdata/star/x1.go
new file mode 100644
index 0000000..eaaea97
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/star/x1.go
@@ -0,0 +1,6 @@
+//go:build blahblh && linux && !linux && windows && darwin
+// +build blahblh,linux,!linux,windows,darwin
+
+package x
+
+import "import4"
diff --git a/src/cmd/go/internal/imports/testdata/star/x_darwin.go b/src/cmd/go/internal/imports/testdata/star/x_darwin.go
new file mode 100644
index 0000000..a0c3fdd
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/star/x_darwin.go
@@ -0,0 +1,3 @@
+package xxxx
+
+import "import3"
diff --git a/src/cmd/go/internal/imports/testdata/star/x_windows.go b/src/cmd/go/internal/imports/testdata/star/x_windows.go
new file mode 100644
index 0000000..63c5082
--- /dev/null
+++ b/src/cmd/go/internal/imports/testdata/star/x_windows.go
@@ -0,0 +1,3 @@
+package x
+
+import "import2"
diff --git a/src/cmd/go/internal/list/context.go b/src/cmd/go/internal/list/context.go
new file mode 100644
index 0000000..9d6494c
--- /dev/null
+++ b/src/cmd/go/internal/list/context.go
@@ -0,0 +1,39 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package list
+
+import (
+ "go/build"
+)
+
+type Context struct {
+ GOARCH string `json:",omitempty"` // target architecture
+ GOOS string `json:",omitempty"` // target operating system
+ GOROOT string `json:",omitempty"` // Go root
+ GOPATH string `json:",omitempty"` // Go path
+ CgoEnabled bool `json:",omitempty"` // whether cgo can be used
+ UseAllFiles bool `json:",omitempty"` // use files regardless of //go:build lines, file names
+ Compiler string `json:",omitempty"` // compiler to assume when computing target paths
+ BuildTags []string `json:",omitempty"` // build constraints to match in +build lines
+ ToolTags []string `json:",omitempty"` // toolchain-specific build constraints
+ ReleaseTags []string `json:",omitempty"` // releases the current release is compatible with
+ InstallSuffix string `json:",omitempty"` // suffix to use in the name of the install dir
+}
+
+func newContext(c *build.Context) *Context {
+ return &Context{
+ GOARCH: c.GOARCH,
+ GOOS: c.GOOS,
+ GOROOT: c.GOROOT,
+ GOPATH: c.GOPATH,
+ CgoEnabled: c.CgoEnabled,
+ UseAllFiles: c.UseAllFiles,
+ Compiler: c.Compiler,
+ BuildTags: c.BuildTags,
+ ToolTags: c.ToolTags,
+ ReleaseTags: c.ReleaseTags,
+ InstallSuffix: c.InstallSuffix,
+ }
+}
diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go
new file mode 100644
index 0000000..92020da
--- /dev/null
+++ b/src/cmd/go/internal/list/list.go
@@ -0,0 +1,1001 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package list implements the “go list” command.
+package list
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "text/template"
+
+ "golang.org/x/sync/semaphore"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cache"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+ "cmd/go/internal/modinfo"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/str"
+ "cmd/go/internal/work"
+)
+
+var CmdList = &base.Command{
+ // Note: -f -json -m are listed explicitly because they are the most common list flags.
+ // Do not send CLs removing them because they're covered by [list flags].
+ UsageLine: "go list [-f format] [-json] [-m] [list flags] [build flags] [packages]",
+ Short: "list packages or modules",
+ Long: `
+List lists the named packages, one per line.
+The most commonly-used flags are -f and -json, which control the form
+of the output printed for each package. Other list flags, documented below,
+control more specific details.
+
+The default output shows the package import path:
+
+ bytes
+ encoding/json
+ github.com/gorilla/mux
+ golang.org/x/net/html
+
+The -f flag specifies an alternate format for the list, using the
+syntax of package template. The default output is equivalent
+to -f '{{.ImportPath}}'. The struct being passed to the template is:
+
+ type Package struct {
+ Dir string // directory containing package sources
+ ImportPath string // import path of package in dir
+ ImportComment string // path in import comment on package statement
+ Name string // package name
+ Doc string // package documentation string
+ Target string // install path
+ Shlib string // the shared library that contains this package (only set when -linkshared)
+ Goroot bool // is this package in the Go root?
+ Standard bool // is this package part of the standard Go library?
+ Stale bool // would 'go install' do anything for this package?
+ StaleReason string // explanation for Stale==true
+ Root string // Go root or Go path dir containing this package
+ ConflictDir string // this directory shadows Dir in $GOPATH
+ BinaryOnly bool // binary-only package (no longer supported)
+ ForTest string // package is only for use in named test
+ Export string // file containing export data (when using -export)
+ BuildID string // build ID of the compiled package (when using -export)
+ Module *Module // info about package's containing module, if any (can be nil)
+ Match []string // command-line patterns matching this package
+ DepOnly bool // package is only a dependency, not explicitly listed
+ DefaultGODEBUG string // default GODEBUG setting, for main packages
+
+ // Source files
+ GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
+ CgoFiles []string // .go source files that import "C"
+ CompiledGoFiles []string // .go files presented to compiler (when using -compiled)
+ IgnoredGoFiles []string // .go source files ignored due to build constraints
+ IgnoredOtherFiles []string // non-.go source files ignored due to build constraints
+ CFiles []string // .c source files
+ CXXFiles []string // .cc, .cxx and .cpp source files
+ MFiles []string // .m source files
+ HFiles []string // .h, .hh, .hpp and .hxx source files
+ FFiles []string // .f, .F, .for and .f90 Fortran source files
+ SFiles []string // .s source files
+ SwigFiles []string // .swig files
+ SwigCXXFiles []string // .swigcxx files
+ SysoFiles []string // .syso object files to add to archive
+ TestGoFiles []string // _test.go files in package
+ XTestGoFiles []string // _test.go files outside package
+
+ // Embedded files
+ EmbedPatterns []string // //go:embed patterns
+ EmbedFiles []string // files matched by EmbedPatterns
+ TestEmbedPatterns []string // //go:embed patterns in TestGoFiles
+ TestEmbedFiles []string // files matched by TestEmbedPatterns
+ XTestEmbedPatterns []string // //go:embed patterns in XTestGoFiles
+ XTestEmbedFiles []string // files matched by XTestEmbedPatterns
+
+ // Cgo directives
+ CgoCFLAGS []string // cgo: flags for C compiler
+ CgoCPPFLAGS []string // cgo: flags for C preprocessor
+ CgoCXXFLAGS []string // cgo: flags for C++ compiler
+ CgoFFLAGS []string // cgo: flags for Fortran compiler
+ CgoLDFLAGS []string // cgo: flags for linker
+ CgoPkgConfig []string // cgo: pkg-config names
+
+ // Dependency information
+ Imports []string // import paths used by this package
+ ImportMap map[string]string // map from source import to ImportPath (identity entries omitted)
+ Deps []string // all (recursively) imported dependencies
+ TestImports []string // imports from TestGoFiles
+ XTestImports []string // imports from XTestGoFiles
+
+ // Error information
+ Incomplete bool // this package or a dependency has an error
+ Error *PackageError // error loading package
+ DepsErrors []*PackageError // errors loading dependencies
+ }
+
+Packages stored in vendor directories report an ImportPath that includes the
+path to the vendor directory (for example, "d/vendor/p" instead of "p"),
+so that the ImportPath uniquely identifies a given copy of a package.
+The Imports, Deps, TestImports, and XTestImports lists also contain these
+expanded import paths. See golang.org/s/go15vendor for more about vendoring.
+
+The error information, if any, is
+
+ type PackageError struct {
+ ImportStack []string // shortest path from package named on command line to this one
+ Pos string // position of error (if present, file:line:col)
+ Err string // the error itself
+ }
+
+The module information is a Module struct, defined in the discussion
+of list -m below.
+
+The template function "join" calls strings.Join.
+
+The template function "context" returns the build context, defined as:
+
+ type Context struct {
+ GOARCH string // target architecture
+ GOOS string // target operating system
+ GOROOT string // Go root
+ GOPATH string // Go path
+ CgoEnabled bool // whether cgo can be used
+ UseAllFiles bool // use files regardless of //go:build lines, file names
+ Compiler string // compiler to assume when computing target paths
+ BuildTags []string // build constraints to match in //go:build lines
+ ToolTags []string // toolchain-specific build constraints
+ ReleaseTags []string // releases the current release is compatible with
+ InstallSuffix string // suffix to use in the name of the install dir
+ }
+
+For more information about the meaning of these fields see the documentation
+for the go/build package's Context type.
+
+The -json flag causes the package data to be printed in JSON format
+instead of using the template format. The JSON flag can optionally be
+provided with a set of comma-separated required field names to be output.
+If so, those required fields will always appear in JSON output, but
+others may be omitted to save work in computing the JSON struct.
+
+The -compiled flag causes list to set CompiledGoFiles to the Go source
+files presented to the compiler. Typically this means that it repeats
+the files listed in GoFiles and then also adds the Go code generated
+by processing CgoFiles and SwigFiles. The Imports list contains the
+union of all imports from both GoFiles and CompiledGoFiles.
+
+The -deps flag causes list to iterate over not just the named packages
+but also all their dependencies. It visits them in a depth-first post-order
+traversal, so that a package is listed only after all its dependencies.
+Packages not explicitly listed on the command line will have the DepOnly
+field set to true.
+
+The -e flag changes the handling of erroneous packages, those that
+cannot be found or are malformed. By default, the list command
+prints an error to standard error for each erroneous package and
+omits the packages from consideration during the usual printing.
+With the -e flag, the list command never prints errors to standard
+error and instead processes the erroneous packages with the usual
+printing. Erroneous packages will have a non-empty ImportPath and
+a non-nil Error field; other information may or may not be missing
+(zeroed).
+
+The -export flag causes list to set the Export field to the name of a
+file containing up-to-date export information for the given package,
+and the BuildID field to the build ID of the compiled package.
+
+The -find flag causes list to identify the named packages but not
+resolve their dependencies: the Imports and Deps lists will be empty.
+With the -find flag, the -deps, -test and -export flags cannot be
+used.
+
+The -test flag causes list to report not only the named packages
+but also their test binaries (for packages with tests), to convey to
+source code analysis tools exactly how test binaries are constructed.
+The reported import path for a test binary is the import path of
+the package followed by a ".test" suffix, as in "math/rand.test".
+When building a test, it is sometimes necessary to rebuild certain
+dependencies specially for that test (most commonly the tested
+package itself). The reported import path of a package recompiled
+for a particular test binary is followed by a space and the name of
+the test binary in brackets, as in "math/rand [math/rand.test]"
+or "regexp [sort.test]". The ForTest field is also set to the name
+of the package being tested ("math/rand" or "sort" in the previous
+examples).
+
+The Dir, Target, Shlib, Root, ConflictDir, and Export file paths
+are all absolute paths.
+
+By default, the lists GoFiles, CgoFiles, and so on hold names of files in Dir
+(that is, paths relative to Dir, not absolute paths).
+The generated files added when using the -compiled and -test flags
+are absolute paths referring to cached copies of generated Go source files.
+Although they are Go source files, the paths may not end in ".go".
+
+The -m flag causes list to list modules instead of packages.
+
+When listing modules, the -f flag still specifies a format template
+applied to a Go struct, but now a Module struct:
+
+ type Module struct {
+ Path string // module path
+ Query string // version query corresponding to this version
+ Version string // module version
+ Versions []string // available module versions
+ Replace *Module // replaced by this module
+ Time *time.Time // time version was created
+ Update *Module // available update (with -u)
+ Main bool // is this the main module?
+ Indirect bool // module is only indirectly needed by main module
+ Dir string // directory holding local copy of files, if any
+ GoMod string // path to go.mod file describing module, if any
+ GoVersion string // go version used in module
+ Retracted []string // retraction information, if any (with -retracted or -u)
+ Deprecated string // deprecation message, if any (with -u)
+ Error *ModuleError // error loading module
+ Origin any // provenance of module
+ Reuse bool // reuse of old module info is safe
+ }
+
+ type ModuleError struct {
+ Err string // the error itself
+ }
+
+The file GoMod refers to may be outside the module directory if the
+module is in the module cache or if the -modfile flag is used.
+
+The default output is to print the module path and then
+information about the version and replacement if any.
+For example, 'go list -m all' might print:
+
+ my/main/module
+ golang.org/x/text v0.3.0 => /tmp/text
+ rsc.io/pdf v0.1.1
+
+The Module struct has a String method that formats this
+line of output, so that the default format is equivalent
+to -f '{{.String}}'.
+
+Note that when a module has been replaced, its Replace field
+describes the replacement module, and its Dir field is set to
+the replacement's source code, if present. (That is, if Replace
+is non-nil, then Dir is set to Replace.Dir, with no access to
+the replaced source code.)
+
+The -u flag adds information about available upgrades.
+When the latest version of a given module is newer than
+the current one, list -u sets the Module's Update field
+to information about the newer module. list -u will also set
+the module's Retracted field if the current version is retracted.
+The Module's String method indicates an available upgrade by
+formatting the newer version in brackets after the current version.
+If a version is retracted, the string "(retracted)" will follow it.
+For example, 'go list -m -u all' might print:
+
+ my/main/module
+ golang.org/x/text v0.3.0 [v0.4.0] => /tmp/text
+ rsc.io/pdf v0.1.1 (retracted) [v0.1.2]
+
+(For tools, 'go list -m -u -json all' may be more convenient to parse.)
+
+The -versions flag causes list to set the Module's Versions field
+to a list of all known versions of that module, ordered according
+to semantic versioning, earliest to latest. The flag also changes
+the default output format to display the module path followed by the
+space-separated version list.
+
+The -retracted flag causes list to report information about retracted
+module versions. When -retracted is used with -f or -json, the Retracted
+field will be set to a string explaining why the version was retracted.
+The string is taken from comments on the retract directive in the
+module's go.mod file. When -retracted is used with -versions, retracted
+versions are listed together with unretracted versions. The -retracted
+flag may be used with or without -m.
+
+The arguments to list -m are interpreted as a list of modules, not packages.
+The main module is the module containing the current directory.
+The active modules are the main module and its dependencies.
+With no arguments, list -m shows the main module.
+With arguments, list -m shows the modules specified by the arguments.
+Any of the active modules can be specified by its module path.
+The special pattern "all" specifies all the active modules, first the main
+module and then dependencies sorted by module path.
+A pattern containing "..." specifies the active modules whose
+module paths match the pattern.
+A query of the form path@version specifies the result of that query,
+which is not limited to active modules.
+See 'go help modules' for more about module queries.
+
+The template function "module" takes a single string argument
+that must be a module path or query and returns the specified
+module as a Module struct. If an error occurs, the result will
+be a Module struct with a non-nil Error field.
+
+When using -m, the -reuse=old.json flag accepts the name of a file containing
+the JSON output of a previous 'go list -m -json' invocation with the
+same set of modifier flags (such as -u, -retracted, and -versions).
+The go command may use this file to determine that a module is unchanged
+since the previous invocation and avoid redownloading information about it.
+Modules that are not redownloaded will be marked in the new output by
+setting the Reuse field to true. Normally the module cache provides this
+kind of reuse automatically; the -reuse flag can be useful on systems that
+do not preserve the module cache.
+
+For more about build flags, see 'go help build'.
+
+For more about specifying packages, see 'go help packages'.
+
+For more about modules, see https://golang.org/ref/mod.
+ `,
+}
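As an aside, one way a tool might consume the JSON form documented above; this is a standalone hypothetical sketch, not part of the patch, and the pkgInfo struct mirrors only a few of the documented Package fields:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

// pkgInfo picks out a handful of the fields described in the help text.
type pkgInfo struct {
	ImportPath string
	Dir        string
	Deps       []string
}

func main() {
	// With a single package argument, 'go list -json' prints one JSON object.
	out, err := exec.Command("go", "list", "-json", "fmt").Output()
	if err != nil {
		log.Fatal(err)
	}
	var p pkgInfo
	if err := json.Unmarshal(out, &p); err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.ImportPath, p.Dir, len(p.Deps))
}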
+
+func init() {
+ CmdList.Run = runList // break init cycle
+ work.AddBuildFlags(CmdList, work.DefaultBuildFlags)
+ if cfg.Experiment != nil && cfg.Experiment.CoverageRedesign {
+ work.AddCoverFlags(CmdList, nil)
+ }
+ CmdList.Flag.Var(&listJsonFields, "json", "")
+}
+
+var (
+ listCompiled = CmdList.Flag.Bool("compiled", false, "")
+ listDeps = CmdList.Flag.Bool("deps", false, "")
+ listE = CmdList.Flag.Bool("e", false, "")
+ listExport = CmdList.Flag.Bool("export", false, "")
+ listFmt = CmdList.Flag.String("f", "", "")
+ listFind = CmdList.Flag.Bool("find", false, "")
+ listJson bool
+ listJsonFields jsonFlag // If not empty, only output these fields.
+ listM = CmdList.Flag.Bool("m", false, "")
+ listRetracted = CmdList.Flag.Bool("retracted", false, "")
+ listReuse = CmdList.Flag.String("reuse", "", "")
+ listTest = CmdList.Flag.Bool("test", false, "")
+ listU = CmdList.Flag.Bool("u", false, "")
+ listVersions = CmdList.Flag.Bool("versions", false, "")
+)
+
+// A jsonFlag is a command-line flag that either acts as a boolean,
+// enabling or disabling JSON output entirely, or accepts a
+// comma-separated list of JSON field names to include in the output.
+type jsonFlag map[string]bool
+
+func (v *jsonFlag) Set(s string) error {
+ if v, err := strconv.ParseBool(s); err == nil {
+ listJson = v
+ return nil
+ }
+ listJson = true
+ if *v == nil {
+ *v = make(map[string]bool)
+ }
+ for _, f := range strings.Split(s, ",") {
+ (*v)[f] = true
+ }
+ return nil
+}
+
+func (v *jsonFlag) String() string {
+ var fields []string
+ for f := range *v {
+ fields = append(fields, f)
+ }
+ sort.Strings(fields)
+ return strings.Join(fields, ",")
+}
+
+func (v *jsonFlag) IsBoolFlag() bool {
+ return true
+}
+
+func (v *jsonFlag) needAll() bool {
+ return len(*v) == 0
+}
+
+func (v *jsonFlag) needAny(fields ...string) bool {
+ if v.needAll() {
+ return true
+ }
+ for _, f := range fields {
+ if (*v)[f] {
+ return true
+ }
+ }
+ return false
+}
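A brief hypothetical sketch (in-package, fmt assumed imported) of how the two accepted argument forms of the -json flag are parsed by jsonFlag:

func exampleJSONFlag() {
	var f jsonFlag
	_ = f.Set("true")            // bare boolean: full JSON output, no field filter
	_ = f.Set("ImportPath,Deps") // field list: JSON limited to these fields
	fmt.Println(listJson, f.String(), f.needAny("Deps")) // true Deps,ImportPath true
}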
+
+var nl = []byte{'\n'}
+
+func runList(ctx context.Context, cmd *base.Command, args []string) {
+ modload.InitWorkfile()
+
+ if *listFmt != "" && listJson {
+ base.Fatalf("go list -f cannot be used with -json")
+ }
+ if *listReuse != "" && !*listM {
+ base.Fatalf("go list -reuse cannot be used without -m")
+ }
+ if *listReuse != "" && modload.HasModRoot() {
+ base.Fatalf("go list -reuse cannot be used inside a module")
+ }
+
+ work.BuildInit()
+ out := newTrackingWriter(os.Stdout)
+ defer out.w.Flush()
+
+ if *listFmt == "" {
+ if *listM {
+ *listFmt = "{{.String}}"
+ if *listVersions {
+ *listFmt = `{{.Path}}{{range .Versions}} {{.}}{{end}}{{if .Deprecated}} (deprecated){{end}}`
+ }
+ } else {
+ *listFmt = "{{.ImportPath}}"
+ }
+ }
+
+ var do func(x any)
+ if listJson {
+ do = func(x any) {
+ if !listJsonFields.needAll() {
+ v := reflect.ValueOf(x).Elem() // do is always called with a non-nil pointer.
+ // Clear all non-requested fields.
+ for i := 0; i < v.NumField(); i++ {
+ if !listJsonFields.needAny(v.Type().Field(i).Name) {
+ v.Field(i).SetZero()
+ }
+ }
+ }
+ b, err := json.MarshalIndent(x, "", "\t")
+ if err != nil {
+ out.Flush()
+ base.Fatalf("%s", err)
+ }
+ out.Write(b)
+ out.Write(nl)
+ }
+ } else {
+ var cachedCtxt *Context
+ context := func() *Context {
+ if cachedCtxt == nil {
+ cachedCtxt = newContext(&cfg.BuildContext)
+ }
+ return cachedCtxt
+ }
+ fm := template.FuncMap{
+ "join": strings.Join,
+ "context": context,
+ "module": func(path string) *modinfo.ModulePublic { return modload.ModuleInfo(ctx, path) },
+ }
+ tmpl, err := template.New("main").Funcs(fm).Parse(*listFmt)
+ if err != nil {
+ base.Fatalf("%s", err)
+ }
+ do = func(x any) {
+ if err := tmpl.Execute(out, x); err != nil {
+ out.Flush()
+ base.Fatalf("%s", err)
+ }
+ if out.NeedNL() {
+ out.Write(nl)
+ }
+ }
+ }
+
+ modload.Init()
+ if *listRetracted {
+ if cfg.BuildMod == "vendor" {
+ base.Fatalf("go list -retracted cannot be used when vendoring is enabled")
+ }
+ if !modload.Enabled() {
+ base.Fatalf("go list -retracted can only be used in module-aware mode")
+ }
+ }
+
+ if *listM {
+ // Module mode.
+ if *listCompiled {
+ base.Fatalf("go list -compiled cannot be used with -m")
+ }
+ if *listDeps {
+ // TODO(rsc): Could make this mean something with -m.
+ base.Fatalf("go list -deps cannot be used with -m")
+ }
+ if *listExport {
+ base.Fatalf("go list -export cannot be used with -m")
+ }
+ if *listFind {
+ base.Fatalf("go list -find cannot be used with -m")
+ }
+ if *listTest {
+ base.Fatalf("go list -test cannot be used with -m")
+ }
+
+ if modload.Init(); !modload.Enabled() {
+ base.Fatalf("go: list -m cannot be used with GO111MODULE=off")
+ }
+
+ modload.LoadModFile(ctx) // Sets cfg.BuildMod as a side-effect.
+ if cfg.BuildMod == "vendor" {
+ const actionDisabledFormat = "go: can't %s using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)"
+
+ if *listVersions {
+ base.Fatalf(actionDisabledFormat, "determine available versions")
+ }
+ if *listU {
+ base.Fatalf(actionDisabledFormat, "determine available upgrades")
+ }
+
+ for _, arg := range args {
+ // In vendor mode, the module graph is incomplete: it contains only the
+ // explicit module dependencies and the modules that supply packages in
+ // the import graph. Reject queries that imply more information than that.
+ if arg == "all" {
+ base.Fatalf(actionDisabledFormat, "compute 'all'")
+ }
+ if strings.Contains(arg, "...") {
+ base.Fatalf(actionDisabledFormat, "match module patterns")
+ }
+ }
+ }
+
+ var mode modload.ListMode
+ if *listU {
+ mode |= modload.ListU | modload.ListRetracted | modload.ListDeprecated
+ }
+ if *listRetracted {
+ mode |= modload.ListRetracted
+ }
+ if *listVersions {
+ mode |= modload.ListVersions
+ if *listRetracted {
+ mode |= modload.ListRetractedVersions
+ }
+ }
+ if *listReuse != "" && len(args) == 0 {
+ base.Fatalf("go: list -m -reuse only has an effect with module@version arguments")
+ }
+ mods, err := modload.ListModules(ctx, args, mode, *listReuse)
+ if !*listE {
+ for _, m := range mods {
+ if m.Error != nil {
+ base.Error(errors.New(m.Error.Err))
+ }
+ }
+ if err != nil {
+ base.Error(err)
+ }
+ base.ExitIfErrors()
+ }
+ for _, m := range mods {
+ do(m)
+ }
+ return
+ }
+
+ // Package mode (not -m).
+ if *listU {
+ base.Fatalf("go list -u can only be used with -m")
+ }
+ if *listVersions {
+ base.Fatalf("go list -versions can only be used with -m")
+ }
+
+ // These pairings make no sense.
+ if *listFind && *listDeps {
+ base.Fatalf("go list -deps cannot be used with -find")
+ }
+ if *listFind && *listTest {
+ base.Fatalf("go list -test cannot be used with -find")
+ }
+ if *listFind && *listExport {
+ base.Fatalf("go list -export cannot be used with -find")
+ }
+
+ pkgOpts := load.PackageOpts{
+ IgnoreImports: *listFind,
+ ModResolveTests: *listTest,
+ AutoVCS: true,
+ SuppressBuildInfo: !*listExport && !listJsonFields.needAny("Stale", "StaleReason"),
+ SuppressEmbedFiles: !*listExport && !listJsonFields.needAny("EmbedFiles", "TestEmbedFiles", "XTestEmbedFiles"),
+ }
+ pkgs := load.PackagesAndErrors(ctx, pkgOpts, args)
+ if !*listE {
+ w := 0
+ for _, pkg := range pkgs {
+ if pkg.Error != nil {
+ base.Errorf("%v", pkg.Error)
+ continue
+ }
+ pkgs[w] = pkg
+ w++
+ }
+ pkgs = pkgs[:w]
+ base.ExitIfErrors()
+ }
+
+ if *listTest {
+ c := cache.Default()
+ // Add test binaries to packages to be listed.
+
+ var wg sync.WaitGroup
+ sema := semaphore.NewWeighted(int64(runtime.GOMAXPROCS(0)))
+ type testPackageSet struct {
+ p, pmain, ptest, pxtest *load.Package
+ }
+ var testPackages []testPackageSet
+ for _, p := range pkgs {
+ if len(p.TestGoFiles)+len(p.XTestGoFiles) > 0 {
+ var pmain, ptest, pxtest *load.Package
+ var err error
+ if *listE {
+ sema.Acquire(ctx, 1)
+ wg.Add(1)
+ done := func() {
+ sema.Release(1)
+ wg.Done()
+ }
+ pmain, ptest, pxtest = load.TestPackagesAndErrors(ctx, done, pkgOpts, p, nil)
+ } else {
+ pmain, ptest, pxtest, err = load.TestPackagesFor(ctx, pkgOpts, p, nil)
+ if err != nil {
+ base.Fatalf("go: can't load test package: %s", err)
+ }
+ }
+ testPackages = append(testPackages, testPackageSet{p, pmain, ptest, pxtest})
+ }
+ }
+ wg.Wait()
+ for _, pkgset := range testPackages {
+ p, pmain, ptest, pxtest := pkgset.p, pkgset.pmain, pkgset.ptest, pkgset.pxtest
+ if pmain != nil {
+ pkgs = append(pkgs, pmain)
+ data := *pmain.Internal.TestmainGo
+ sema.Acquire(ctx, 1)
+ wg.Add(1)
+ go func() {
+ h := cache.NewHash("testmain")
+ h.Write([]byte("testmain\n"))
+ h.Write(data)
+ out, _, err := c.Put(h.Sum(), bytes.NewReader(data))
+ if err != nil {
+ base.Fatalf("%s", err)
+ }
+ pmain.GoFiles[0] = c.OutputFile(out)
+ sema.Release(1)
+ wg.Done()
+ }()
+
+ }
+ if ptest != nil && ptest != p {
+ pkgs = append(pkgs, ptest)
+ }
+ if pxtest != nil {
+ pkgs = append(pkgs, pxtest)
+ }
+ }
+
+ wg.Wait()
+ }
+
+ // Remember which packages are named on the command line.
+ cmdline := make(map[*load.Package]bool)
+ for _, p := range pkgs {
+ cmdline[p] = true
+ }
+
+ if *listDeps {
+ // Note: This changes the order of the listed packages
+ // from "as written on the command line" to
+ // "a depth-first post-order traversal".
+ // (The dependency exploration order for a given node
+ // is alphabetical, same as listed in .Deps.)
+ // Note that -deps is applied after -test,
+ // so that you only get descriptions of tests for the things named
+ // explicitly on the command line, not for all dependencies.
+ pkgs = loadPackageList(pkgs)
+ }
+
+ // Do we need to run a build to gather information?
+ needStale := (listJson && listJsonFields.needAny("Stale", "StaleReason")) || strings.Contains(*listFmt, ".Stale")
+ if needStale || *listExport || *listCompiled {
+ b := work.NewBuilder("")
+ if *listE {
+ b.AllowErrors = true
+ }
+ defer func() {
+ if err := b.Close(); err != nil {
+ base.Fatal(err)
+ }
+ }()
+
+ b.IsCmdList = true
+ b.NeedExport = *listExport
+ b.NeedCompiledGoFiles = *listCompiled
+ a := &work.Action{}
+ // TODO: Use pkgsFilter?
+ for _, p := range pkgs {
+ if len(p.GoFiles)+len(p.CgoFiles) > 0 {
+ a.Deps = append(a.Deps, b.AutoAction(work.ModeInstall, work.ModeInstall, p))
+ }
+ }
+ if cfg.Experiment.CoverageRedesign && cfg.BuildCover {
+ load.PrepareForCoverageBuild(pkgs)
+ }
+ b.Do(ctx, a)
+ }
+
+ for _, p := range pkgs {
+ // Show vendor-expanded paths in listing
+ p.TestImports = p.Resolve(p.TestImports)
+ p.XTestImports = p.Resolve(p.XTestImports)
+ p.DepOnly = !cmdline[p]
+
+ if *listCompiled {
+ p.Imports = str.StringList(p.Imports, p.Internal.CompiledImports)
+ }
+ }
+
+ if *listTest || (cfg.BuildPGO == "auto" && len(cmdline) > 1) {
+ all := pkgs
+ if !*listDeps {
+ all = loadPackageList(pkgs)
+ }
+ // Update import paths to distinguish the real package p
+ // from p recompiled for q.test, or to distinguish between
+ // p compiled with different PGO profiles.
+ // This must happen only once the build code is done
+ // looking at import paths, because it will get very confused
+ // if it sees these.
+ old := make(map[string]string)
+ for _, p := range all {
+ if p.ForTest != "" || p.Internal.ForMain != "" {
+ new := p.Desc()
+ old[new] = p.ImportPath
+ p.ImportPath = new
+ }
+ p.DepOnly = !cmdline[p]
+ }
+ // Update import path lists to use new strings.
+ m := make(map[string]string)
+ for _, p := range all {
+ for _, p1 := range p.Internal.Imports {
+ if p1.ForTest != "" || p1.Internal.ForMain != "" {
+ m[old[p1.ImportPath]] = p1.ImportPath
+ }
+ }
+ for i, old := range p.Imports {
+ if new := m[old]; new != "" {
+ p.Imports[i] = new
+ }
+ }
+ for old := range m {
+ delete(m, old)
+ }
+ }
+ }
+
+ if listJsonFields.needAny("Deps", "DepsErrors") {
+ all := pkgs
+ // Make sure we iterate through packages in a postorder traversal,
+ // which load.PackageList guarantees. If *listDeps, then all is
+ // already in PackageList order. Otherwise, calling load.PackageList
+ // provides the guarantee. In the case of an import cycle, the last package
+ // visited in the cycle, importing the first encountered package in the cycle,
+ // is visited first. The cycle import error will be bubbled up in the traversal
+ // order up to the first package in the cycle, covering all the packages
+ // in the cycle.
+ if !*listDeps {
+ all = load.PackageList(pkgs)
+ }
+ if listJsonFields.needAny("Deps") {
+ for _, p := range all {
+ collectDeps(p)
+ }
+ }
+ if listJsonFields.needAny("DepsErrors") {
+ for _, p := range all {
+ collectDepsErrors(p)
+ }
+ }
+ }
+
+ // TODO(golang.org/issue/40676): This mechanism could be extended to support
+ // -u without -m.
+ if *listRetracted {
+ // Load retractions for modules that provide packages that will be printed.
+ // TODO(golang.org/issue/40775): Packages from the same module refer to
+ // distinct ModulePublic instances. It would be nice if they could all point
+ // to the same instance. This would require additional global state in
+ // modload.loaded, so that should be refactored first. For now, we update
+ // all instances.
+ modToArg := make(map[*modinfo.ModulePublic]string)
+ argToMods := make(map[string][]*modinfo.ModulePublic)
+ var args []string
+ addModule := func(mod *modinfo.ModulePublic) {
+ if mod.Version == "" {
+ return
+ }
+ arg := fmt.Sprintf("%s@%s", mod.Path, mod.Version)
+ if argToMods[arg] == nil {
+ args = append(args, arg)
+ }
+ argToMods[arg] = append(argToMods[arg], mod)
+ modToArg[mod] = arg
+ }
+ for _, p := range pkgs {
+ if p.Module == nil {
+ continue
+ }
+ addModule(p.Module)
+ if p.Module.Replace != nil {
+ addModule(p.Module.Replace)
+ }
+ }
+
+ if len(args) > 0 {
+ var mode modload.ListMode
+ if *listRetracted {
+ mode |= modload.ListRetracted
+ }
+ rmods, err := modload.ListModules(ctx, args, mode, *listReuse)
+ if err != nil && !*listE {
+ base.Error(err)
+ }
+ for i, arg := range args {
+ rmod := rmods[i]
+ for _, mod := range argToMods[arg] {
+ mod.Retracted = rmod.Retracted
+ if rmod.Error != nil && mod.Error == nil {
+ mod.Error = rmod.Error
+ }
+ }
+ }
+ }
+ }
+
+ // Record non-identity import mappings in p.ImportMap.
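+ // For example (hypothetical paths), under GOPATH-mode vendoring a source
+ // import of "example.com/x" satisfied by a vendor directory is recorded as
+ // ImportMap["example.com/x"] = "<prefix>/vendor/example.com/x".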
+ for _, p := range pkgs {
+ nRaw := len(p.Internal.RawImports)
+ for i, path := range p.Imports {
+ var srcPath string
+ if i < nRaw {
+ srcPath = p.Internal.RawImports[i]
+ } else {
+ // This path is not within the raw imports, so it must be an import
+ // found only within CompiledGoFiles. Those paths are found in
+ // CompiledImports.
+ srcPath = p.Internal.CompiledImports[i-nRaw]
+ }
+
+ if path != srcPath {
+ if p.ImportMap == nil {
+ p.ImportMap = make(map[string]string)
+ }
+ p.ImportMap[srcPath] = path
+ }
+ }
+ }
+
+ for _, p := range pkgs {
+ do(&p.PackagePublic)
+ }
+}
+
+// loadPackageList is like load.PackageList, but prints error messages and exits
+// with nonzero status if listE is not set and any package in the expanded list
+// has errors.
+func loadPackageList(roots []*load.Package) []*load.Package {
+ pkgs := load.PackageList(roots)
+
+ if !*listE {
+ for _, pkg := range pkgs {
+ if pkg.Error != nil {
+ base.Errorf("%v", pkg.Error)
+ }
+ }
+ }
+
+ return pkgs
+}
+
+// collectDeps populates p.Deps by iterating over p.Internal.Imports.
+// collectDeps must be called on all of p's Imports before being called on p.
+func collectDeps(p *load.Package) {
+ deps := make(map[string]bool)
+
+ for _, p := range p.Internal.Imports {
+ deps[p.ImportPath] = true
+ for _, q := range p.Deps {
+ deps[q] = true
+ }
+ }
+
+ p.Deps = make([]string, 0, len(deps))
+ for dep := range deps {
+ p.Deps = append(p.Deps, dep)
+ }
+ sort.Strings(p.Deps)
+}
+
+// collectDepsErrors populates p.DepsErrors by iterating over p.Internal.Imports.
+// collectDepsErrors must be called on all of p's Imports before being called on p.
+func collectDepsErrors(p *load.Package) {
+ depsErrors := make(map[*load.PackageError]bool)
+
+ for _, p := range p.Internal.Imports {
+ if p.Error != nil {
+ depsErrors[p.Error] = true
+ }
+ for _, q := range p.DepsErrors {
+ depsErrors[q] = true
+ }
+ }
+
+ p.DepsErrors = make([]*load.PackageError, 0, len(depsErrors))
+ for deperr := range depsErrors {
+ p.DepsErrors = append(p.DepsErrors, deperr)
+ }
+ // Sort packages by the package on the top of the stack, which should be
+ // the package the error was produced for. Each package can have at most
+ // one error set on it.
+ sort.Slice(p.DepsErrors, func(i, j int) bool {
+ stki, stkj := p.DepsErrors[i].ImportStack, p.DepsErrors[j].ImportStack
+ // Some packages are missing import stacks. To ensure deterministic
+ // sort order compare two errors that are missing import stacks by
+ // their errors' error texts.
+ if len(stki) == 0 {
+ if len(stkj) != 0 {
+ return true
+ }
+
+ return p.DepsErrors[i].Err.Error() < p.DepsErrors[j].Err.Error()
+ } else if len(stkj) == 0 {
+ return false
+ }
+ pathi, pathj := stki[len(stki)-1], stkj[len(stkj)-1]
+ return pathi < pathj
+ })
+}
+
+// TrackingWriter tracks the last byte written on every write so
+// we can avoid printing a newline if one was already written or
+// if there is no output at all.
+type TrackingWriter struct {
+ w *bufio.Writer
+ last byte
+}
+
+func newTrackingWriter(w io.Writer) *TrackingWriter {
+ return &TrackingWriter{
+ w: bufio.NewWriter(w),
+ last: '\n',
+ }
+}
+
+func (t *TrackingWriter) Write(p []byte) (n int, err error) {
+ n, err = t.w.Write(p)
+ if n > 0 {
+ t.last = p[n-1]
+ }
+ return
+}
+
+func (t *TrackingWriter) Flush() {
+ t.w.Flush()
+}
+
+func (t *TrackingWriter) NeedNL() bool {
+ return t.last != '\n'
+}
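+
+// An illustrative usage sketch (not part of the original change), showing how
+// a caller can skip a redundant trailing newline; os.Stdout and text are
+// stand-ins for this example:
+//
+//	tw := newTrackingWriter(os.Stdout)
+//	fmt.Fprint(tw, text)
+//	if tw.NeedNL() {
+//		tw.Write([]byte("\n"))
+//	}
+//	tw.Flush()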
diff --git a/src/cmd/go/internal/load/flag.go b/src/cmd/go/internal/load/flag.go
new file mode 100644
index 0000000..55bdab0
--- /dev/null
+++ b/src/cmd/go/internal/load/flag.go
@@ -0,0 +1,96 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package load
+
+import (
+ "cmd/go/internal/base"
+ "cmd/internal/quoted"
+ "fmt"
+ "strings"
+)
+
+var (
+ BuildAsmflags PerPackageFlag // -asmflags
+ BuildGcflags PerPackageFlag // -gcflags
+ BuildLdflags PerPackageFlag // -ldflags
+ BuildGccgoflags PerPackageFlag // -gccgoflags
+)
+
+// A PerPackageFlag is a command-line flag implementation (a flag.Value)
+// that allows specifying different effective flags for different packages.
+// See 'go help build' for more details about per-package flags.
+type PerPackageFlag struct {
+ raw string
+ present bool
+ values []ppfValue
+}
+
+// A ppfValue is a single <pattern>=<flags> per-package flag value.
+type ppfValue struct {
+ match func(*Package) bool // compiled pattern
+ flags []string
+}
+
+// Set is called each time the flag is encountered on the command line.
+func (f *PerPackageFlag) Set(v string) error {
+ return f.set(v, base.Cwd())
+}
+
+// set is the implementation of Set, taking a cwd (current working directory) for easier testing.
+func (f *PerPackageFlag) set(v, cwd string) error {
+ f.raw = v
+ f.present = true
+ match := func(p *Package) bool { return p.Internal.CmdlinePkg || p.Internal.CmdlineFiles } // default predicate with no pattern
+ // For backwards compatibility with earlier flag splitting, ignore spaces around flags.
+ v = strings.TrimSpace(v)
+ if v == "" {
+ // Special case: -gcflags="" means no flags for command-line arguments
+ // (overrides previous -gcflags="-whatever").
+ f.values = append(f.values, ppfValue{match, []string{}})
+ return nil
+ }
+ if !strings.HasPrefix(v, "-") {
+ i := strings.Index(v, "=")
+ if i < 0 {
+ return fmt.Errorf("missing =<value> in <pattern>=<value>")
+ }
+ if i == 0 {
+ return fmt.Errorf("missing <pattern> in <pattern>=<value>")
+ }
+ if v[0] == '\'' || v[0] == '"' {
+ return fmt.Errorf("parameter may not start with quote character %c", v[0])
+ }
+ pattern := strings.TrimSpace(v[:i])
+ match = MatchPackage(pattern, cwd)
+ v = v[i+1:]
+ }
+ flags, err := quoted.Split(v)
+ if err != nil {
+ return err
+ }
+ if flags == nil {
+ flags = []string{}
+ }
+ f.values = append(f.values, ppfValue{match, flags})
+ return nil
+}
+
+func (f *PerPackageFlag) String() string { return f.raw }
+
+// Present reports whether the flag appeared on the command line.
+func (f *PerPackageFlag) Present() bool {
+ return f.present
+}
+
+// For returns the flags to use for the given package.
+func (f *PerPackageFlag) For(p *Package) []string {
+ flags := []string{}
+ for _, v := range f.values {
+ if v.match(p) {
+ flags = v.flags
+ }
+ }
+ return flags
+}
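+
+// An illustrative sketch (not part of the original change) of the
+// last-match-wins behavior of For, mirroring the cases in flag_test.go below:
+//
+//	var f PerPackageFlag
+//	_ = f.Set("net/...=-S") // -S for packages matching net/...
+//	_ = f.Set("-m")         // -m for packages named on the command line
+//
+// A command-line package under net/... matches both values, so the later one
+// wins and For returns ["-m"]; a dependency matching net/... gets ["-S"], and
+// unrelated dependencies get an empty list.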
diff --git a/src/cmd/go/internal/load/flag_test.go b/src/cmd/go/internal/load/flag_test.go
new file mode 100644
index 0000000..d3223e1
--- /dev/null
+++ b/src/cmd/go/internal/load/flag_test.go
@@ -0,0 +1,135 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package load
+
+import (
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "testing"
+)
+
+type ppfTestPackage struct {
+ path string
+ dir string
+ cmdline bool
+ flags []string
+}
+
+type ppfTest struct {
+ args []string
+ pkgs []ppfTestPackage
+}
+
+var ppfTests = []ppfTest{
+ // -gcflags=-S applies only to packages on command line.
+ {
+ args: []string{"-S"},
+ pkgs: []ppfTestPackage{
+ {cmdline: true, flags: []string{"-S"}},
+ {cmdline: false, flags: []string{}},
+ },
+ },
+
+ // -gcflags=-S -gcflags= overrides the earlier -S.
+ {
+ args: []string{"-S", ""},
+ pkgs: []ppfTestPackage{
+ {cmdline: true, flags: []string{}},
+ },
+ },
+
+ // -gcflags=net=-S applies only to package net
+ {
+ args: []string{"net=-S"},
+ pkgs: []ppfTestPackage{
+ {path: "math", cmdline: true, flags: []string{}},
+ {path: "net", flags: []string{"-S"}},
+ },
+ },
+
+ // -gcflags=net=-S -gcflags=net= also overrides the earlier -S
+ {
+ args: []string{"net=-S", "net="},
+ pkgs: []ppfTestPackage{
+ {path: "net", flags: []string{}},
+ },
+ },
+
+ // -gcflags=net/...=-S net math
+ // applies -S to net and net/http but not math
+ {
+ args: []string{"net/...=-S"},
+ pkgs: []ppfTestPackage{
+ {path: "net", flags: []string{"-S"}},
+ {path: "net/http", flags: []string{"-S"}},
+ {path: "math", flags: []string{}},
+ },
+ },
+
+ // -gcflags=net/...=-S -gcflags=-m net math
+ // applies -m to net and math and -S to other packages matching net/...
+ // (net matches too, but it was grabbed by the later -gcflags).
+ {
+ args: []string{"net/...=-S", "-m"},
+ pkgs: []ppfTestPackage{
+ {path: "net", cmdline: true, flags: []string{"-m"}},
+ {path: "math", cmdline: true, flags: []string{"-m"}},
+ {path: "net", cmdline: false, flags: []string{"-S"}},
+ {path: "net/http", flags: []string{"-S"}},
+ {path: "math", flags: []string{}},
+ },
+ },
+
+ // relative path patterns
+ // ppfDirTest(pattern, n, dirs...) says the first n dirs should match and the others should not.
+ ppfDirTest(".", 1, "/my/test/dir", "/my/test", "/my/test/other", "/my/test/dir/sub"),
+ ppfDirTest("..", 1, "/my/test", "/my/test/dir", "/my/test/other", "/my/test/dir/sub"),
+ ppfDirTest("./sub", 1, "/my/test/dir/sub", "/my/test", "/my/test/dir", "/my/test/other", "/my/test/dir/sub/sub"),
+ ppfDirTest("../other", 1, "/my/test/other", "/my/test", "/my/test/dir", "/my/test/other/sub", "/my/test/dir/other", "/my/test/dir/sub"),
+ ppfDirTest("./...", 3, "/my/test/dir", "/my/test/dir/sub", "/my/test/dir/sub/sub", "/my/test/other", "/my/test/other/sub"),
+ ppfDirTest("../...", 4, "/my/test/dir", "/my/test/other", "/my/test/dir/sub", "/my/test/other/sub", "/my/other/test"),
+ ppfDirTest("../...sub...", 3, "/my/test/dir/sub", "/my/test/othersub", "/my/test/yellowsubmarine", "/my/other/test"),
+}
+
+func ppfDirTest(pattern string, nmatch int, dirs ...string) ppfTest {
+ var pkgs []ppfTestPackage
+ for i, d := range dirs {
+ flags := []string{}
+ if i < nmatch {
+ flags = []string{"-S"}
+ }
+ pkgs = append(pkgs, ppfTestPackage{path: "p", dir: d, flags: flags})
+ }
+ return ppfTest{args: []string{pattern + "=-S"}, pkgs: pkgs}
+}
+
+func TestPerPackageFlag(t *testing.T) {
+ nativeDir := func(d string) string {
+ if filepath.Separator == '\\' {
+ return `C:` + filepath.FromSlash(d)
+ }
+ return d
+ }
+
+ for i, tt := range ppfTests {
+ t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
+ ppFlags := new(PerPackageFlag)
+ for _, arg := range tt.args {
+ t.Logf("set(%s)", arg)
+ if err := ppFlags.set(arg, nativeDir("/my/test/dir")); err != nil {
+ t.Fatal(err)
+ }
+ }
+ for _, p := range tt.pkgs {
+ dir := nativeDir(p.dir)
+ flags := ppFlags.For(&Package{PackagePublic: PackagePublic{ImportPath: p.path, Dir: dir}, Internal: PackageInternal{CmdlinePkg: p.cmdline}})
+ if !reflect.DeepEqual(flags, p.flags) {
+ t.Errorf("For(%v, %v, %v) = %v, want %v", p.path, dir, p.cmdline, flags, p.flags)
+ }
+ }
+ })
+ }
+}
diff --git a/src/cmd/go/internal/load/godebug.go b/src/cmd/go/internal/load/godebug.go
new file mode 100644
index 0000000..c79245e
--- /dev/null
+++ b/src/cmd/go/internal/load/godebug.go
@@ -0,0 +1,126 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package load
+
+import (
+ "cmd/go/internal/modload"
+ "errors"
+ "fmt"
+ "go/build"
+ "internal/godebugs"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var ErrNotGoDebug = errors.New("not //go:debug line")
+
+func ParseGoDebug(text string) (key, value string, err error) {
+ if !strings.HasPrefix(text, "//go:debug") {
+ return "", "", ErrNotGoDebug
+ }
+ i := strings.IndexAny(text, " \t")
+ if i < 0 {
+ if strings.TrimSpace(text) == "//go:debug" {
+ return "", "", fmt.Errorf("missing key=value")
+ }
+ return "", "", ErrNotGoDebug
+ }
+ k, v, ok := strings.Cut(strings.TrimSpace(text[i:]), "=")
+ if !ok {
+ return "", "", fmt.Errorf("missing key=value")
+ }
+ if strings.ContainsAny(k, " \t") {
+ return "", "", fmt.Errorf("key contains space")
+ }
+ if strings.ContainsAny(v, " \t") {
+ return "", "", fmt.Errorf("value contains space")
+ }
+ if strings.ContainsAny(k, ",") {
+ return "", "", fmt.Errorf("key contains comma")
+ }
+ if strings.ContainsAny(v, ",") {
+ return "", "", fmt.Errorf("value contains comma")
+ }
+
+ for _, info := range godebugs.All {
+ if k == info.Name {
+ return k, v, nil
+ }
+ }
+ return "", "", fmt.Errorf("unknown //go:debug setting %q", k)
+}
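+
+// For example (an illustrative sketch; it assumes the key is listed in
+// internal/godebugs.All):
+//
+//	key, value, err := ParseGoDebug("//go:debug http2client=0")
+//	// key == "http2client", value == "0", err == nil
+//
+// A line that does not begin with "//go:debug" returns ErrNotGoDebug, and a
+// well-formed directive with an unrecognized key returns an error.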
+
+// defaultGODEBUG returns the default GODEBUG setting for the main package p.
+// When building a test binary, directives, testDirectives, and xtestDirectives
+// list additional directives from the package under test.
+func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []build.Directive) string {
+ if p.Name != "main" {
+ return ""
+ }
+ goVersion := modload.MainModules.GoVersion()
+ if modload.RootMode == modload.NoRoot && p.Module != nil {
+ // This is go install pkg@version or go run pkg@version.
+ // Use the Go version from the package.
+ // If there isn't one, fall back to Go 1.20.
+ goVersion = p.Module.GoVersion
+ if goVersion == "" {
+ goVersion = "1.20"
+ }
+ }
+
+ m := godebugForGoVersion(goVersion)
+ for _, list := range [][]build.Directive{p.Internal.Build.Directives, directives, testDirectives, xtestDirectives} {
+ for _, d := range list {
+ k, v, err := ParseGoDebug(d.Text)
+ if err != nil {
+ continue
+ }
+ if m == nil {
+ m = make(map[string]string)
+ }
+ m[k] = v
+ }
+ }
+ var keys []string
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ var b strings.Builder
+ for _, k := range keys {
+ if b.Len() > 0 {
+ b.WriteString(",")
+ }
+ b.WriteString(k)
+ b.WriteString("=")
+ b.WriteString(m[k])
+ }
+ return b.String()
+}
+
+func godebugForGoVersion(v string) map[string]string {
+ if strings.Count(v, ".") >= 2 {
+ i := strings.Index(v, ".")
+ j := i + 1 + strings.Index(v[i+1:], ".")
+ v = v[:j]
+ }
+
+ if !strings.HasPrefix(v, "1.") {
+ return nil
+ }
+ n, err := strconv.Atoi(v[len("1."):])
+ if err != nil {
+ return nil
+ }
+
+ def := make(map[string]string)
+ for _, info := range godebugs.All {
+ if n < info.Changed {
+ def[info.Name] = info.Old
+ }
+ }
+ return def
+}
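+
+// An illustrative sketch (not part of the original change): for a main package
+// in a module declaring an older Go release, godebugForGoVersion seeds the map
+// with the defaults that changed after that release, //go:debug directives
+// override individual keys, and defaultGODEBUG renders the result as a sorted,
+// comma-separated list of key=value pairs, e.g. (hypothetical keys)
+//
+//	"keya=0,keyb=1"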
diff --git a/src/cmd/go/internal/load/path.go b/src/cmd/go/internal/load/path.go
new file mode 100644
index 0000000..584cdff
--- /dev/null
+++ b/src/cmd/go/internal/load/path.go
@@ -0,0 +1,18 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package load
+
+import (
+ "path/filepath"
+)
+
+// expandPath returns the symlink-expanded form of path.
+func expandPath(p string) string {
+ x, err := filepath.EvalSymlinks(p)
+ if err == nil {
+ return x
+ }
+ return p
+}
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
new file mode 100644
index 0000000..c0e6265
--- /dev/null
+++ b/src/cmd/go/internal/load/pkg.go
@@ -0,0 +1,3554 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package load loads packages.
+package load
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "go/build"
+ "go/scanner"
+ "go/token"
+ "internal/platform"
+ "io/fs"
+ "os"
+ "os/exec"
+ pathpkg "path"
+ "path/filepath"
+ "runtime"
+ "runtime/debug"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modindex"
+ "cmd/go/internal/modinfo"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/par"
+ "cmd/go/internal/search"
+ "cmd/go/internal/str"
+ "cmd/go/internal/trace"
+ "cmd/go/internal/vcs"
+ "cmd/internal/pkgpattern"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+// A Package describes a single package found in a directory.
+type Package struct {
+ PackagePublic // visible in 'go list'
+ Internal PackageInternal // for use inside go command only
+}
+
+type PackagePublic struct {
+ // Note: These fields are part of the go command's public API.
+ // See list.go. It is okay to add fields, but not to change or
+ // remove existing ones. Keep in sync with ../list/list.go
+ Dir string `json:",omitempty"` // directory containing package sources
+ ImportPath string `json:",omitempty"` // import path of package in dir
+ ImportComment string `json:",omitempty"` // path in import comment on package statement
+ Name string `json:",omitempty"` // package name
+ Doc string `json:",omitempty"` // package documentation string
+ Target string `json:",omitempty"` // installed target for this package (may be executable)
+ Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared)
+ Root string `json:",omitempty"` // Go root, Go path dir, or module root dir containing this package
+ ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory
+ ForTest string `json:",omitempty"` // package is only for use in named test
+ Export string `json:",omitempty"` // file containing export data (set by go list -export)
+ BuildID string `json:",omitempty"` // build ID of the compiled package (set by go list -export)
+ Module *modinfo.ModulePublic `json:",omitempty"` // info about package's module, if any
+ Match []string `json:",omitempty"` // command-line patterns matching this package
+ Goroot bool `json:",omitempty"` // is this package found in the Go root?
+ Standard bool `json:",omitempty"` // is this package part of the standard Go library?
+ DepOnly bool `json:",omitempty"` // package is only a dependency, not explicitly listed
+ BinaryOnly bool `json:",omitempty"` // package cannot be recompiled
+ Incomplete bool `json:",omitempty"` // was there an error loading this package or dependencies?
+
+ DefaultGODEBUG string `json:",omitempty"` // default GODEBUG setting (only for Name=="main")
+
+ // Stale and StaleReason remain here *only* for the list command.
+ // They are only initialized in preparation for list execution.
+ // The regular build determines staleness on the fly during action execution.
+ Stale bool `json:",omitempty"` // would 'go install' do anything for this package?
+ StaleReason string `json:",omitempty"` // why is Stale true?
+
+ // Source files
+ // If you add to this list you MUST add to p.AllFiles (below) too.
+ // Otherwise file name security lists will not apply to any new additions.
+ GoFiles []string `json:",omitempty"` // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
+ CgoFiles []string `json:",omitempty"` // .go source files that import "C"
+ CompiledGoFiles []string `json:",omitempty"` // .go output from running cgo on CgoFiles
+ IgnoredGoFiles []string `json:",omitempty"` // .go source files ignored due to build constraints
+ InvalidGoFiles []string `json:",omitempty"` // .go source files with detected problems (parse error, wrong package name, and so on)
+ IgnoredOtherFiles []string `json:",omitempty"` // non-.go source files ignored due to build constraints
+ CFiles []string `json:",omitempty"` // .c source files
+ CXXFiles []string `json:",omitempty"` // .cc, .cpp and .cxx source files
+ MFiles []string `json:",omitempty"` // .m source files
+ HFiles []string `json:",omitempty"` // .h, .hh, .hpp and .hxx source files
+ FFiles []string `json:",omitempty"` // .f, .F, .for and .f90 Fortran source files
+ SFiles []string `json:",omitempty"` // .s source files
+ SwigFiles []string `json:",omitempty"` // .swig files
+ SwigCXXFiles []string `json:",omitempty"` // .swigcxx files
+ SysoFiles []string `json:",omitempty"` // .syso system object files added to package
+
+ // Embedded files
+ EmbedPatterns []string `json:",omitempty"` // //go:embed patterns
+ EmbedFiles []string `json:",omitempty"` // files matched by EmbedPatterns
+
+ // Cgo directives
+ CgoCFLAGS []string `json:",omitempty"` // cgo: flags for C compiler
+ CgoCPPFLAGS []string `json:",omitempty"` // cgo: flags for C preprocessor
+ CgoCXXFLAGS []string `json:",omitempty"` // cgo: flags for C++ compiler
+ CgoFFLAGS []string `json:",omitempty"` // cgo: flags for Fortran compiler
+ CgoLDFLAGS []string `json:",omitempty"` // cgo: flags for linker
+ CgoPkgConfig []string `json:",omitempty"` // cgo: pkg-config names
+
+ // Dependency information
+ Imports []string `json:",omitempty"` // import paths used by this package
+ ImportMap map[string]string `json:",omitempty"` // map from source import to ImportPath (identity entries omitted)
+ Deps []string `json:",omitempty"` // all (recursively) imported dependencies
+
+ // Error information
+ // Incomplete is above, packed into the other bools
+ Error *PackageError `json:",omitempty"` // error loading this package (not dependencies)
+ DepsErrors []*PackageError `json:",omitempty"` // errors loading dependencies, collected by go list before output
+
+ // Test information
+ // If you add to this list you MUST add to p.AllFiles (below) too.
+ // Otherwise file name security lists will not apply to any new additions.
+ TestGoFiles []string `json:",omitempty"` // _test.go files in package
+ TestImports []string `json:",omitempty"` // imports from TestGoFiles
+ TestEmbedPatterns []string `json:",omitempty"` // //go:embed patterns
+ TestEmbedFiles []string `json:",omitempty"` // files matched by TestEmbedPatterns
+ XTestGoFiles []string `json:",omitempty"` // _test.go files outside package
+ XTestImports []string `json:",omitempty"` // imports from XTestGoFiles
+ XTestEmbedPatterns []string `json:",omitempty"` // //go:embed patterns
+ XTestEmbedFiles []string `json:",omitempty"` // files matched by XTestEmbedPatterns
+}
+
+// AllFiles returns the names of all the files considered for the package.
+// This is used for sanity and security checks, so we include all files,
+// even IgnoredGoFiles, because some subcommands consider them.
+// The go/build package filtered others out (like foo_wrongGOARCH.s)
+// and that's OK.
+func (p *Package) AllFiles() []string {
+ files := str.StringList(
+ p.GoFiles,
+ p.CgoFiles,
+ // no p.CompiledGoFiles, because they are from GoFiles or generated by us
+ p.IgnoredGoFiles,
+ // no p.InvalidGoFiles, because they are from GoFiles
+ p.IgnoredOtherFiles,
+ p.CFiles,
+ p.CXXFiles,
+ p.MFiles,
+ p.HFiles,
+ p.FFiles,
+ p.SFiles,
+ p.SwigFiles,
+ p.SwigCXXFiles,
+ p.SysoFiles,
+ p.TestGoFiles,
+ p.XTestGoFiles,
+ )
+
+ // EmbedFiles may overlap with the other files.
+ // Dedup, but delay building the map as long as possible.
+ // Only files in the current directory (no slash in name)
+ // need to be checked against the files variable above.
+ var have map[string]bool
+ for _, file := range p.EmbedFiles {
+ if !strings.Contains(file, "/") {
+ if have == nil {
+ have = make(map[string]bool)
+ for _, file := range files {
+ have[file] = true
+ }
+ }
+ if have[file] {
+ continue
+ }
+ }
+ files = append(files, file)
+ }
+ return files
+}
+
+// Desc returns the package "description", for use in b.showOutput.
+func (p *Package) Desc() string {
+ if p.ForTest != "" {
+ return p.ImportPath + " [" + p.ForTest + ".test]"
+ }
+ if p.Internal.ForMain != "" {
+ return p.ImportPath + " [" + p.Internal.ForMain + "]"
+ }
+ return p.ImportPath
+}
+
+// IsTestOnly reports whether p is a test-only package.
+//
+// A “test-only” package is one that:
+// - is a test-only variant of an ordinary package, or
+// - is a synthesized "main" package for a test binary, or
+// - contains only _test.go files.
+func (p *Package) IsTestOnly() bool {
+ return p.ForTest != "" ||
+ p.Internal.TestmainGo != nil ||
+ len(p.TestGoFiles)+len(p.XTestGoFiles) > 0 && len(p.GoFiles)+len(p.CgoFiles) == 0
+}
+
+type PackageInternal struct {
+ // Unexported fields are not part of the public API.
+ Build *build.Package
+ Imports []*Package // this package's direct imports
+ CompiledImports []string // additional Imports necessary when using CompiledGoFiles (all from standard library); 1:1 with the end of PackagePublic.Imports
+ RawImports []string // this package's original imports as they appear in the text of the program; 1:1 with the end of PackagePublic.Imports
+ ForceLibrary bool // this package is a library (even if named "main")
+ CmdlineFiles bool // package built from files listed on command line
+ CmdlinePkg bool // package listed on command line
+ CmdlinePkgLiteral bool // package listed as literal on command line (not via wildcard)
+ Local bool // imported via local path (./ or ../)
+ LocalPrefix string // interpret ./ and ../ imports relative to this prefix
+ ExeName string // desired name for temporary executable
+ FuzzInstrument bool // package should be instrumented for fuzzing
+ CoverMode string // preprocess Go source files with the coverage tool in this mode
+ CoverVars map[string]*CoverVar // variables created by coverage analysis
+ CoverageCfg string // coverage info config file path (passed to compiler)
+ OmitDebug bool // tell linker not to write debug information
+ GobinSubdir bool // install target would be subdir of GOBIN
+ BuildInfo *debug.BuildInfo // add this info to package main
+ TestmainGo *[]byte // content for _testmain.go
+ Embed map[string][]string // //go:embed comment mapping
+ OrigImportPath string // original import path before adding '_test' suffix
+ PGOProfile string // path to PGO profile
+ ForMain string // the main package if this package is built specifically for it
+
+ Asmflags []string // -asmflags for this package
+ Gcflags []string // -gcflags for this package
+ Ldflags []string // -ldflags for this package
+ Gccgoflags []string // -gccgoflags for this package
+}
+
+// A NoGoError indicates that no Go files for the package were applicable to the
+// build for that package.
+//
+// That may be because there were no files whatsoever, or because all files were
+// excluded, or because all non-excluded files were test sources.
+type NoGoError struct {
+ Package *Package
+}
+
+func (e *NoGoError) Error() string {
+ if len(e.Package.IgnoredGoFiles) > 0 {
+ // Go files exist, but they were ignored due to build constraints.
+ return "build constraints exclude all Go files in " + e.Package.Dir
+ }
+ if len(e.Package.TestGoFiles)+len(e.Package.XTestGoFiles) > 0 {
+ // Test Go files exist, but we're not interested in them.
+ // The double-negative is unfortunate, but we want e.Package.Dir
+ // to appear at the end of the error message.
+ return "no non-test Go files in " + e.Package.Dir
+ }
+ return "no Go files in " + e.Package.Dir
+}
+
+// setLoadPackageDataError presents an error found when loading package data
+// as a *PackageError. It has special cases for some common errors to improve
+// messages shown to users and reduce redundancy.
+//
+// setLoadPackageDataError returns true if it's safe to load information about
+// imported packages, for example, if there was a parse error loading imports
+// in one file, but other files are okay.
+func (p *Package) setLoadPackageDataError(err error, path string, stk *ImportStack, importPos []token.Position) {
+ matchErr, isMatchErr := err.(*search.MatchError)
+ if isMatchErr && matchErr.Match.Pattern() == path {
+ if matchErr.Match.IsLiteral() {
+ // The error has a pattern similar to the import path.
+ // It may be slightly different (./foo matching example.com/foo),
+ // but close enough to seem redundant.
+ // Unwrap the error so we don't show the pattern.
+ err = matchErr.Err
+ }
+ }
+
+ // Replace (possibly wrapped) *build.NoGoError with *load.NoGoError.
+ // The latter is more specific about the cause.
+ var nogoErr *build.NoGoError
+ if errors.As(err, &nogoErr) {
+ if p.Dir == "" && nogoErr.Dir != "" {
+ p.Dir = nogoErr.Dir
+ }
+ err = &NoGoError{Package: p}
+ }
+
+ // Take only the first error from a scanner.ErrorList. PackageError only
+ // has room for one position, so we report the first error with a position
+ // instead of all of the errors without a position.
+ var pos string
+ var isScanErr bool
+ if scanErr, ok := err.(scanner.ErrorList); ok && len(scanErr) > 0 {
+ isScanErr = true // For stack push/pop below.
+
+ scanPos := scanErr[0].Pos
+ scanPos.Filename = base.ShortPath(scanPos.Filename)
+ pos = scanPos.String()
+ err = errors.New(scanErr[0].Msg)
+ }
+
+ // Report the error on the importing package if the problem is with the import declaration,
+ // for example, if the package doesn't exist or if the import path is malformed.
+ // On the other hand, don't include a position if the problem is with the imported package,
+ // for example there are no Go files (NoGoError), or there's a problem in the imported
+ // package's source files themselves (scanner errors).
+ //
+ // TODO(matloob): Perhaps make each of those the errors in the first group
+ // (including modload.ImportMissingError, ImportMissingSumError, and the
+ // corresponding "cannot find package %q in any of" GOPATH-mode error
+ // produced in build.(*Context).Import; modload.AmbiguousImportError,
+ // and modload.PackageNotInModuleError; and the malformed module path errors
+ // produced in golang.org/x/mod/module.CheckMod) implement an interface
+ // to make it easier to check for them? That would save us from having to
+ // move the modload errors into this package to avoid a package import cycle,
+ // and from having to export an error type for the errors produced in build.
+ if !isMatchErr && (nogoErr != nil || isScanErr) {
+ stk.Push(path)
+ defer stk.Pop()
+ }
+
+ p.Error = &PackageError{
+ ImportStack: stk.Copy(),
+ Pos: pos,
+ Err: err,
+ }
+ p.Incomplete = true
+
+ if path != stk.Top() {
+ p.Error.setPos(importPos)
+ }
+}
+
+// Resolve returns the resolved version of imports,
+// which should be p.TestImports or p.XTestImports, NOT p.Imports.
+// The imports in p.TestImports and p.XTestImports are not recursively
+// loaded during the initial load of p, so they list the imports found in
+// the source file, but most processing should be over the vendor-resolved
+// import paths. We do this resolution lazily both to avoid file system work
+// and because the eventual real load of the test imports (during 'go test')
+// can produce better error messages if it starts with the original paths.
+// The initial load of p loads all the non-test imports and rewrites
+// the vendored paths, so nothing should ever call p.Resolve(p.Imports).
+func (p *Package) Resolve(imports []string) []string {
+ if len(imports) > 0 && len(p.Imports) > 0 && &imports[0] == &p.Imports[0] {
+ panic("internal error: p.Resolve(p.Imports) called")
+ }
+ seen := make(map[string]bool)
+ var all []string
+ for _, path := range imports {
+ path = ResolveImportPath(p, path)
+ if !seen[path] {
+ seen[path] = true
+ all = append(all, path)
+ }
+ }
+ sort.Strings(all)
+ return all
+}
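+
+// Callers resolve test imports explicitly, as the list command does
+// (an illustrative reminder, not part of the original change):
+//
+//	p.TestImports = p.Resolve(p.TestImports)
+//	p.XTestImports = p.Resolve(p.XTestImports)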
+
+// CoverVar holds the name of the generated coverage variables targeting the named file.
+type CoverVar struct {
+ File string // local file name
+ Var string // name of count struct
+}
+
+func (p *Package) copyBuild(opts PackageOpts, pp *build.Package) {
+ p.Internal.Build = pp
+
+ if pp.PkgTargetRoot != "" && cfg.BuildPkgdir != "" {
+ old := pp.PkgTargetRoot
+ pp.PkgRoot = cfg.BuildPkgdir
+ pp.PkgTargetRoot = cfg.BuildPkgdir
+ if pp.PkgObj != "" {
+ pp.PkgObj = filepath.Join(cfg.BuildPkgdir, strings.TrimPrefix(pp.PkgObj, old))
+ }
+ }
+
+ p.Dir = pp.Dir
+ p.ImportPath = pp.ImportPath
+ p.ImportComment = pp.ImportComment
+ p.Name = pp.Name
+ p.Doc = pp.Doc
+ p.Root = pp.Root
+ p.ConflictDir = pp.ConflictDir
+ p.BinaryOnly = pp.BinaryOnly
+
+ // TODO? Target
+ p.Goroot = pp.Goroot
+ p.Standard = p.Goroot && p.ImportPath != "" && search.IsStandardImportPath(p.ImportPath)
+ p.GoFiles = pp.GoFiles
+ p.CgoFiles = pp.CgoFiles
+ p.IgnoredGoFiles = pp.IgnoredGoFiles
+ p.InvalidGoFiles = pp.InvalidGoFiles
+ p.IgnoredOtherFiles = pp.IgnoredOtherFiles
+ p.CFiles = pp.CFiles
+ p.CXXFiles = pp.CXXFiles
+ p.MFiles = pp.MFiles
+ p.HFiles = pp.HFiles
+ p.FFiles = pp.FFiles
+ p.SFiles = pp.SFiles
+ p.SwigFiles = pp.SwigFiles
+ p.SwigCXXFiles = pp.SwigCXXFiles
+ p.SysoFiles = pp.SysoFiles
+ if cfg.BuildMSan {
+ // There's no way for .syso files to be built both with and without
+ // support for memory sanitizer. Assume they are built without,
+ // and drop them.
+ p.SysoFiles = nil
+ }
+ p.CgoCFLAGS = pp.CgoCFLAGS
+ p.CgoCPPFLAGS = pp.CgoCPPFLAGS
+ p.CgoCXXFLAGS = pp.CgoCXXFLAGS
+ p.CgoFFLAGS = pp.CgoFFLAGS
+ p.CgoLDFLAGS = pp.CgoLDFLAGS
+ p.CgoPkgConfig = pp.CgoPkgConfig
+ // We modify p.Imports in place, so make a copy now.
+ p.Imports = make([]string, len(pp.Imports))
+ copy(p.Imports, pp.Imports)
+ p.Internal.RawImports = pp.Imports
+ p.TestGoFiles = pp.TestGoFiles
+ p.TestImports = pp.TestImports
+ p.XTestGoFiles = pp.XTestGoFiles
+ p.XTestImports = pp.XTestImports
+ if opts.IgnoreImports {
+ p.Imports = nil
+ p.Internal.RawImports = nil
+ p.TestImports = nil
+ p.XTestImports = nil
+ }
+ p.EmbedPatterns = pp.EmbedPatterns
+ p.TestEmbedPatterns = pp.TestEmbedPatterns
+ p.XTestEmbedPatterns = pp.XTestEmbedPatterns
+ p.Internal.OrigImportPath = pp.ImportPath
+}
+
+// A PackageError describes an error loading information about a package.
+type PackageError struct {
+ ImportStack []string // shortest path from package named on command line to this one
+ Pos string // position of error
+ Err error // the error itself
+ IsImportCycle bool // the error is an import cycle
+ Hard bool // whether the error is soft or hard; soft errors are ignored in some places
+ alwaysPrintStack bool // whether to always print the ImportStack
+}
+
+func (p *PackageError) Error() string {
+ // TODO(#43696): decide when to print the stack or the position based on
+ // the error type and whether the package is in the main module.
+ // Document the rationale.
+ if p.Pos != "" && (len(p.ImportStack) == 0 || !p.alwaysPrintStack) {
+ // Omit the import stack. The full path to the file where the error
+ // occurred is the most important thing.
+ return p.Pos + ": " + p.Err.Error()
+ }
+
+ // If the error is an ImportPathError, and the last path on the stack appears
+ // in the error message, omit that path from the stack to avoid repetition.
+ // If an ImportPathError wraps another ImportPathError that matches the
+ // last path on the stack, we don't omit the path. An error like
+ // "package A imports B: error loading C caused by B" would not be clearer
+ // if "imports B" were omitted.
+ if len(p.ImportStack) == 0 {
+ return p.Err.Error()
+ }
+ var optpos string
+ if p.Pos != "" {
+ optpos = "\n\t" + p.Pos
+ }
+ return "package " + strings.Join(p.ImportStack, "\n\timports ") + optpos + ": " + p.Err.Error()
+}
+
+func (p *PackageError) Unwrap() error { return p.Err }
+
+// PackageError implements MarshalJSON so that Err is marshaled as a string
+// and non-essential fields are omitted.
+func (p *PackageError) MarshalJSON() ([]byte, error) {
+ perr := struct {
+ ImportStack []string
+ Pos string
+ Err string
+ }{p.ImportStack, p.Pos, p.Err.Error()}
+ return json.Marshal(perr)
+}
+
+func (p *PackageError) setPos(posList []token.Position) {
+ if len(posList) == 0 {
+ return
+ }
+ pos := posList[0]
+ pos.Filename = base.ShortPath(pos.Filename)
+ p.Pos = pos.String()
+}
+
+// ImportPathError is a type of error that prevents a package from being loaded
+// for a given import path. When such a package is loaded, a *Package is
+// returned with Err wrapping an ImportPathError: the error is attached to
+// the imported package, not the importing package.
+//
+// The string returned by ImportPath must appear in the string returned by
+// Error. Errors that wrap ImportPathError (such as PackageError) may omit
+// the import path.
+type ImportPathError interface {
+ error
+ ImportPath() string
+}
+
+var (
+ _ ImportPathError = (*importError)(nil)
+ _ ImportPathError = (*mainPackageError)(nil)
+ _ ImportPathError = (*modload.ImportMissingError)(nil)
+ _ ImportPathError = (*modload.ImportMissingSumError)(nil)
+ _ ImportPathError = (*modload.DirectImportFromImplicitDependencyError)(nil)
+)
+
+type importError struct {
+ importPath string
+ err error // created with fmt.Errorf
+}
+
+func ImportErrorf(path, format string, args ...any) ImportPathError {
+ err := &importError{importPath: path, err: fmt.Errorf(format, args...)}
+ if errStr := err.Error(); !strings.Contains(errStr, path) {
+ panic(fmt.Sprintf("path %q not in error %q", path, errStr))
+ }
+ return err
+}
+
+func (e *importError) Error() string {
+ return e.err.Error()
+}
+
+func (e *importError) Unwrap() error {
+ // Don't return e.err directly, since we're only wrapping an error if %w
+ // was passed to ImportErrorf.
+ return errors.Unwrap(e.err)
+}
+
+func (e *importError) ImportPath() string {
+ return e.importPath
+}
+
+// An ImportStack is a stack of import paths, possibly with the suffix " (test)" appended.
+// The import path of a test package is the import path of the corresponding
+// non-test package with the suffix "_test" added.
+type ImportStack []string
+
+func (s *ImportStack) Push(p string) {
+ *s = append(*s, p)
+}
+
+func (s *ImportStack) Pop() {
+ *s = (*s)[0 : len(*s)-1]
+}
+
+func (s *ImportStack) Copy() []string {
+ return append([]string{}, *s...)
+}
+
+func (s *ImportStack) Top() string {
+ if len(*s) == 0 {
+ return ""
+ }
+ return (*s)[len(*s)-1]
+}
+
+// shorterThan reports whether sp is shorter than t.
+// We use this to record the shortest import sequence
+// that leads to a particular package.
+func (sp *ImportStack) shorterThan(t []string) bool {
+ s := *sp
+ if len(s) != len(t) {
+ return len(s) < len(t)
+ }
+ // If they are the same length, settle ties using string ordering.
+ for i := range s {
+ if s[i] != t[i] {
+ return s[i] < t[i]
+ }
+ }
+ return false // they are equal
+}
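+
+// An illustrative example (not part of the original change): ["a", "b"] is
+// shorter than ["a", "b", "c"], and for equal lengths ["a", "b"] is treated as
+// shorter than ["a", "c"] because "b" sorts before "c".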
+
+// packageCache is a lookup cache for LoadImport,
+// so that if we look up a package multiple times
+// we return the same pointer each time.
+var packageCache = map[string]*Package{}
+
+// ClearPackageCache clears the in-memory package cache and the preload caches.
+// It is only for use by GOPATH-based "go get".
+// TODO(jayconrod): When GOPATH-based "go get" is removed, delete this function.
+func ClearPackageCache() {
+ for name := range packageCache {
+ delete(packageCache, name)
+ }
+ resolvedImportCache.Clear()
+ packageDataCache.Clear()
+}
+
+// ClearPackageCachePartial clears packages with the given import paths from the
+// in-memory package cache and the preload caches. It is only for use by
+// GOPATH-based "go get".
+// TODO(jayconrod): When GOPATH-based "go get" is removed, delete this function.
+func ClearPackageCachePartial(args []string) {
+ shouldDelete := make(map[string]bool)
+ for _, arg := range args {
+ shouldDelete[arg] = true
+ if p := packageCache[arg]; p != nil {
+ delete(packageCache, arg)
+ }
+ }
+ resolvedImportCache.DeleteIf(func(key importSpec) bool {
+ return shouldDelete[key.path]
+ })
+ packageDataCache.DeleteIf(func(key string) bool {
+ return shouldDelete[key]
+ })
+}
+
+// ReloadPackageNoFlags is like LoadImport but makes sure
+// not to use the package cache.
+// It is only for use by GOPATH-based "go get".
+// TODO(rsc): When GOPATH-based "go get" is removed, delete this function.
+func ReloadPackageNoFlags(arg string, stk *ImportStack) *Package {
+ p := packageCache[arg]
+ if p != nil {
+ delete(packageCache, arg)
+ resolvedImportCache.DeleteIf(func(key importSpec) bool {
+ return key.path == p.ImportPath
+ })
+ packageDataCache.Delete(p.ImportPath)
+ }
+ return LoadPackage(context.TODO(), PackageOpts{}, arg, base.Cwd(), stk, nil, 0)
+}
+
+// dirToImportPath returns the pseudo-import path we use for a package
+// outside the Go path. It begins with _/ and then contains the full path
+// to the directory. If the package lives in c:\home\gopher\my\pkg then
+// the pseudo-import path is _/c_/home/gopher/my/pkg.
+// Using a pseudo-import path like this makes the ./ imports no longer
+// a special case, so that all the code to deal with ordinary imports works
+// automatically.
+func dirToImportPath(dir string) string {
+ return pathpkg.Join("_", strings.Map(makeImportValid, filepath.ToSlash(dir)))
+}
+
+func makeImportValid(r rune) rune {
+ // Should match Go spec, compilers, and ../../go/parser/parser.go:/isValidImport.
+ const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+ if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+ return '_'
+ }
+ return r
+}
+
+// Mode flags for loadImport and download (in get.go).
+const (
+ // ResolveImport means that loadImport should do import path expansion.
+ // That is, ResolveImport means that the import path came from
+ // a source file and has not been expanded yet to account for
+ // vendoring or possible module adjustment.
+ // Every import path should be loaded initially with ResolveImport,
+ // and then the expanded version (for example with the /vendor/ in it)
+ // gets recorded as the canonical import path. At that point, future loads
+ // of that package must not pass ResolveImport, because
+ // disallowVendor will reject direct use of paths containing /vendor/.
+ ResolveImport = 1 << iota
+
+ // ResolveModule is for download (part of "go get") and indicates
+ // that the module adjustment should be done, but not vendor adjustment.
+ ResolveModule
+
+ // GetTestDeps is for download (part of "go get") and indicates
+ // that test dependencies should be fetched too.
+ GetTestDeps
+
+ // The remainder are internal modes for calls to loadImport.
+
+ // cmdlinePkg is for a package mentioned on the command line.
+ cmdlinePkg
+
+ // cmdlinePkgLiteral is for a package mentioned on the command line
+ // without using any wildcards or meta-patterns.
+ cmdlinePkgLiteral
+)
+
+// LoadImport scans the directory named by path, which must be an import path,
+// but possibly a local import path (an absolute file system path or one beginning
+// with ./ or ../). A local relative path is interpreted relative to srcDir.
+// It returns a *Package describing the package found in that directory.
+// LoadImport does not set tool flags and should only be used by
+// this package, as part of a bigger load operation, and by GOPATH-based "go get".
+// TODO(rsc): When GOPATH-based "go get" is removed, unexport this function.
+// The returned PackageError, if any, describes why parent is not allowed
+// to import the named package, with the error referring to importPos.
+// The PackageError can only be non-nil when parent is not nil.
+func LoadImport(ctx context.Context, opts PackageOpts, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) {
+ return loadImport(ctx, opts, nil, path, srcDir, parent, stk, importPos, mode)
+}
+
+// LoadPackage is like LoadImport, but without a parent package load context.
+func LoadPackage(ctx context.Context, opts PackageOpts, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package {
+ p, err := loadImport(ctx, opts, nil, path, srcDir, nil, stk, importPos, mode)
+ if err != nil {
+ base.Fatalf("internal error: loadImport of %q with nil parent returned an error", path)
+ }
+ return p
+}
+
+func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) {
+ ctx, span := trace.StartSpan(ctx, "modload.loadImport "+path)
+ defer span.Done()
+
+ if path == "" {
+ panic("LoadImport called with empty package path")
+ }
+
+ var parentPath, parentRoot string
+ parentIsStd := false
+ if parent != nil {
+ parentPath = parent.ImportPath
+ parentRoot = parent.Root
+ parentIsStd = parent.Standard
+ }
+ bp, loaded, err := loadPackageData(ctx, path, parentPath, srcDir, parentRoot, parentIsStd, mode)
+ if loaded && pre != nil && !opts.IgnoreImports {
+ pre.preloadImports(ctx, opts, bp.Imports, bp)
+ }
+ if bp == nil {
+ p := &Package{
+ PackagePublic: PackagePublic{
+ ImportPath: path,
+ Incomplete: true,
+ },
+ }
+ if importErr, ok := err.(ImportPathError); !ok || importErr.ImportPath() != path {
+ // Only add path to the error's import stack if it's not already present
+ // in the error.
+ //
+ // TODO(bcmills): setLoadPackageDataError itself has a similar Push / Pop
+ // sequence that empirically doesn't trigger for these errors, guarded by
+ // a somewhat complex condition. Figure out how to generalize that
+ // condition and eliminate the explicit calls here.
+ stk.Push(path)
+ defer stk.Pop()
+ }
+ p.setLoadPackageDataError(err, path, stk, nil)
+ return p, nil
+ }
+
+ setCmdline := func(p *Package) {
+ if mode&cmdlinePkg != 0 {
+ p.Internal.CmdlinePkg = true
+ }
+ if mode&cmdlinePkgLiteral != 0 {
+ p.Internal.CmdlinePkgLiteral = true
+ }
+ }
+
+ importPath := bp.ImportPath
+ p := packageCache[importPath]
+ if p != nil {
+ stk.Push(path)
+ p = reusePackage(p, stk)
+ stk.Pop()
+ setCmdline(p)
+ } else {
+ p = new(Package)
+ p.Internal.Local = build.IsLocalImport(path)
+ p.ImportPath = importPath
+ packageCache[importPath] = p
+
+ setCmdline(p)
+
+ // Load package.
+ // loadPackageData may return bp != nil even if an error occurs,
+ // in order to return partial information.
+ p.load(ctx, opts, path, stk, importPos, bp, err)
+
+ if !cfg.ModulesEnabled && path != cleanImport(path) {
+ p.Error = &PackageError{
+ ImportStack: stk.Copy(),
+ Err: ImportErrorf(path, "non-canonical import path %q: should be %q", path, pathpkg.Clean(path)),
+ }
+ p.Incomplete = true
+ p.Error.setPos(importPos)
+ }
+ }
+
+ // Checked on every import because the rules depend on the code doing the importing.
+ if perr := disallowInternal(ctx, srcDir, parent, parentPath, p, stk); perr != nil {
+ perr.setPos(importPos)
+ return p, perr
+ }
+ if mode&ResolveImport != 0 {
+ if perr := disallowVendor(srcDir, path, parentPath, p, stk); perr != nil {
+ perr.setPos(importPos)
+ return p, perr
+ }
+ }
+
+ if p.Name == "main" && parent != nil && parent.Dir != p.Dir {
+ perr := &PackageError{
+ ImportStack: stk.Copy(),
+ Err: ImportErrorf(path, "import %q is a program, not an importable package", path),
+ }
+ perr.setPos(importPos)
+ return p, perr
+ }
+
+ if p.Internal.Local && parent != nil && !parent.Internal.Local {
+ var err error
+ if path == "." {
+ err = ImportErrorf(path, "%s: cannot import current directory", path)
+ } else {
+ err = ImportErrorf(path, "local import %q in non-local package", path)
+ }
+ perr := &PackageError{
+ ImportStack: stk.Copy(),
+ Err: err,
+ }
+ perr.setPos(importPos)
+ return p, perr
+ }
+
+ return p, nil
+}
+
+// loadPackageData loads information needed to construct a *Package. The result
+// is cached, and later calls to loadPackageData for the same package will return
+// the same data.
+//
+// loadPackageData returns a non-nil package even if err is non-nil unless
+// the package path is malformed (for example, the path contains "mod/" or "@").
+//
+// loadPackageData returns a boolean, loaded, which is true if this is the
+// first time the package was loaded. Callers may preload imports in this case.
+func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) {
+ ctx, span := trace.StartSpan(ctx, "load.loadPackageData "+path)
+ defer span.Done()
+
+ if path == "" {
+ panic("loadPackageData called with empty package path")
+ }
+
+ if strings.HasPrefix(path, "mod/") {
+ // Paths beginning with "mod/" might accidentally
+ // look in the module cache directory tree in $GOPATH/pkg/mod/.
+ // This prefix is owned by the Go core for possible use in the
+ // standard library (since it does not begin with a domain name),
+ // so it's OK to disallow entirely.
+ return nil, false, fmt.Errorf("disallowed import path %q", path)
+ }
+
+ if strings.Contains(path, "@") {
+ return nil, false, errors.New("can only use path@version syntax with 'go get' and 'go install' in module-aware mode")
+ }
+
+ // Determine canonical package path and directory.
+ // For a local import the identifier is the pseudo-import path
+ // we create from the full directory to the package.
+ // Otherwise it is the usual import path.
+ // For vendored imports, it is the expanded form.
+ //
+ // Note that when modules are enabled, local import paths are normally
+ // canonicalized by modload.LoadPackages before now. However, if there's an
+ // error resolving a local path, it will be returned untransformed
+ // so that 'go list -e' reports something useful.
+ importKey := importSpec{
+ path: path,
+ parentPath: parentPath,
+ parentDir: parentDir,
+ parentRoot: parentRoot,
+ parentIsStd: parentIsStd,
+ mode: mode,
+ }
+ r := resolvedImportCache.Do(importKey, func() resolvedImport {
+ var r resolvedImport
+ if cfg.ModulesEnabled {
+ r.dir, r.path, r.err = modload.Lookup(parentPath, parentIsStd, path)
+ } else if build.IsLocalImport(path) {
+ r.dir = filepath.Join(parentDir, path)
+ r.path = dirToImportPath(r.dir)
+ } else if mode&ResolveImport != 0 {
+ // We do our own path resolution, because we want to
+ // find out the key to use in packageCache without the
+ // overhead of repeated calls to buildContext.Import.
+ // The code is also needed in a few other places anyway.
+ r.path = resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd)
+ } else if mode&ResolveModule != 0 {
+ r.path = moduleImportPath(path, parentPath, parentDir, parentRoot)
+ }
+ if r.path == "" {
+ r.path = path
+ }
+ return r
+ })
+ // Invariant: r.path is set to the resolved import path. If the path cannot
+ // be resolved, r.path is set to path, the source import path.
+ // r.path is never empty.
+
+ // Load the package from its directory. If we already found the package's
+ // directory when resolving its import path, use that.
+ p, err := packageDataCache.Do(r.path, func() (*build.Package, error) {
+ loaded = true
+ var data struct {
+ p *build.Package
+ err error
+ }
+ if r.dir != "" {
+ var buildMode build.ImportMode
+ buildContext := cfg.BuildContext
+ if !cfg.ModulesEnabled {
+ buildMode = build.ImportComment
+ } else {
+ buildContext.GOPATH = "" // Clear GOPATH so packages are imported as pure module packages
+ }
+ modroot := modload.PackageModRoot(ctx, r.path)
+ if modroot == "" && str.HasPathPrefix(r.dir, cfg.GOROOTsrc) {
+ modroot = cfg.GOROOTsrc
+ gorootSrcCmd := filepath.Join(cfg.GOROOTsrc, "cmd")
+ if str.HasPathPrefix(r.dir, gorootSrcCmd) {
+ modroot = gorootSrcCmd
+ }
+ }
+ if modroot != "" {
+ if rp, err := modindex.GetPackage(modroot, r.dir); err == nil {
+ data.p, data.err = rp.Import(cfg.BuildContext, buildMode)
+ goto Happy
+ } else if !errors.Is(err, modindex.ErrNotIndexed) {
+ base.Fatal(err)
+ }
+ }
+ data.p, data.err = buildContext.ImportDir(r.dir, buildMode)
+ Happy:
+ if cfg.ModulesEnabled {
+ // Override data.p.Root, since ImportDir sets it to $GOPATH, if
+ // the module is inside $GOPATH/src.
+ if info := modload.PackageModuleInfo(ctx, path); info != nil {
+ data.p.Root = info.Dir
+ }
+ }
+ if r.err != nil {
+ if data.err != nil {
+ // ImportDir gave us one error, and the module loader gave us another.
+ // We arbitrarily choose to keep the error from ImportDir because
+ // that's what our tests already expect, and it seems to provide a bit
+ // more detail in most cases.
+ } else if errors.Is(r.err, imports.ErrNoGo) {
+ // ImportDir said there were files in the package, but the module
+ // loader said there weren't. Which one is right?
+ // Without this special-case hack, the TestScript/test_vet case fails
+ // on the vetfail/p1 package (added in CL 83955).
+ // Apparently, imports.ShouldBuild biases toward rejecting files
+ // with invalid build constraints, whereas ImportDir biases toward
+ // accepting them.
+ //
+ // TODO(#41410): Figure out how this actually ought to work and fix
+ // this mess.
+ } else {
+ data.err = r.err
+ }
+ }
+ } else if r.err != nil {
+ data.p = new(build.Package)
+ data.err = r.err
+ } else if cfg.ModulesEnabled && path != "unsafe" {
+ data.p = new(build.Package)
+ data.err = fmt.Errorf("unknown import path %q: internal error: module loader did not resolve import", r.path)
+ } else {
+ buildMode := build.ImportComment
+ if mode&ResolveImport == 0 || r.path != path {
+ // Not vendoring, or we already found the vendored path.
+ buildMode |= build.IgnoreVendor
+ }
+ data.p, data.err = cfg.BuildContext.Import(r.path, parentDir, buildMode)
+ }
+ data.p.ImportPath = r.path
+
+ // Set data.p.BinDir in cases where go/build.Context.Import
+ // may give us a path we don't want.
+ if !data.p.Goroot {
+ if cfg.GOBIN != "" {
+ data.p.BinDir = cfg.GOBIN
+ } else if cfg.ModulesEnabled {
+ data.p.BinDir = modload.BinDir()
+ }
+ }
+
+ if !cfg.ModulesEnabled && data.err == nil &&
+ data.p.ImportComment != "" && data.p.ImportComment != path &&
+ !strings.Contains(path, "/vendor/") && !strings.HasPrefix(path, "vendor/") {
+ data.err = fmt.Errorf("code in directory %s expects import %q", data.p.Dir, data.p.ImportComment)
+ }
+ return data.p, data.err
+ })
+
+ return p, loaded, err
+}
+
+// importSpec describes an import declaration in source code. It is used as a
+// cache key for resolvedImportCache.
+type importSpec struct {
+ path string
+ parentPath, parentDir, parentRoot string
+ parentIsStd bool
+ mode int
+}
+
+// resolvedImport holds a canonical identifier for a package. It may also contain
+// a path to the package's directory and an error if one occurred. resolvedImport
+// is the value type in resolvedImportCache.
+type resolvedImport struct {
+ path, dir string
+ err error
+}
+
+// resolvedImportCache maps an import as written in a particular package
+// (an importSpec) to its resolved import path and directory.
+var resolvedImportCache par.Cache[importSpec, resolvedImport]
+
+// packageDataCache maps canonical package names (string) to package metadata.
+var packageDataCache par.ErrCache[string, *build.Package]
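+
+// Together, resolvedImportCache and packageDataCache form a two-level lookup:
+// resolvedImportCache maps an import as written in a particular package to a
+// canonical import path, and packageDataCache maps that canonical path to the
+// loaded package data.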
+
+// preloadWorkerCount is the number of concurrent goroutines that can load
+// packages. Experimentally, there are diminishing returns with more than
+// 4 workers. This was measured on the following machines.
+//
+// * a MacBook Pro with a 4-core Intel Core i7 CPU
+// * a Linux workstation with a 6-core Intel Xeon CPU
+// * a Linux workstation with a 24-core Intel Xeon CPU
+//
+// It is very likely (though not confirmed) that this workload is limited
+// by memory bandwidth. We don't have a good way to determine the number of
+// workers that would saturate the bus though, so runtime.GOMAXPROCS
+// seems like a reasonable default.
+var preloadWorkerCount = runtime.GOMAXPROCS(0)
+
+// preload holds state for managing concurrent preloading of package data.
+//
+// A preload should be created with newPreload before loading a large
+// package graph. flush must be called when package loading is complete
+// to ensure preload goroutines are no longer active. This is necessary
+// because of global mutable state that cannot safely be read and written
+// concurrently. In particular, packageDataCache may be cleared by "go get"
+// in GOPATH mode, and modload.loaded (accessed via modload.Lookup) may be
+// modified by modload.LoadPackages.
+type preload struct {
+ cancel chan struct{}
+ sema chan struct{}
+}
+
+// newPreload creates a new preloader. flush must be called later to avoid
+// accessing global state while it is being modified.
+func newPreload() *preload {
+ pre := &preload{
+ cancel: make(chan struct{}),
+ sema: make(chan struct{}, preloadWorkerCount),
+ }
+ return pre
+}
+
+// preloadMatches loads data for package paths matched by patterns.
+// When preloadMatches returns, some packages may not be loaded yet, but
+// loadPackageData and loadImport are always safe to call.
+func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matches []*search.Match) {
+ for _, m := range matches {
+ for _, pkg := range m.Pkgs {
+ select {
+ case <-pre.cancel:
+ return
+ case pre.sema <- struct{}{}:
+ go func(pkg string) {
+ mode := 0 // don't use vendoring or module import resolution
+ bp, loaded, err := loadPackageData(ctx, pkg, "", base.Cwd(), "", false, mode)
+ <-pre.sema
+ if bp != nil && loaded && err == nil && !opts.IgnoreImports {
+ pre.preloadImports(ctx, opts, bp.Imports, bp)
+ }
+ }(pkg)
+ }
+ }
+ }
+}
+
+// preloadImports queues a list of imports for preloading.
+// When preloadImports returns, some packages may not be loaded yet,
+// but loadPackageData and loadImport are always safe to call.
+func (pre *preload) preloadImports(ctx context.Context, opts PackageOpts, imports []string, parent *build.Package) {
+ parentIsStd := parent.Goroot && parent.ImportPath != "" && search.IsStandardImportPath(parent.ImportPath)
+ for _, path := range imports {
+ if path == "C" || path == "unsafe" {
+ continue
+ }
+ select {
+ case <-pre.cancel:
+ return
+ case pre.sema <- struct{}{}:
+ go func(path string) {
+ bp, loaded, err := loadPackageData(ctx, path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport)
+ <-pre.sema
+ if bp != nil && loaded && err == nil && !opts.IgnoreImports {
+ pre.preloadImports(ctx, opts, bp.Imports, bp)
+ }
+ }(path)
+ }
+ }
+}
+
+// flush stops pending preload operations. flush blocks until preload calls to
+// loadPackageData have completed. The preloader will not make any new calls
+// to loadPackageData.
+func (pre *preload) flush() {
+ // flush is usually deferred.
+ // Don't hang program waiting for workers on panic.
+ if v := recover(); v != nil {
+ panic(v)
+ }
+
+ close(pre.cancel)
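+ // Acquire every worker slot: once all slots are held, no preload
+ // goroutine can still be inside loadPackageData.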
+ for i := 0; i < preloadWorkerCount; i++ {
+ pre.sema <- struct{}{}
+ }
+}
+
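+// cleanImport applies path.Clean to path, preserving a leading "./" so that a
+// relative import stays recognizably relative (unless the cleaned path climbs
+// out of the current directory).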
+func cleanImport(path string) string {
+ orig := path
+ path = pathpkg.Clean(path)
+ if strings.HasPrefix(orig, "./") && path != ".." && !strings.HasPrefix(path, "../") {
+ path = "./" + path
+ }
+ return path
+}
+
+var isDirCache par.Cache[string, bool]
+
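+// isDir reports whether path exists and is a directory, caching the result
+// in isDirCache.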
+func isDir(path string) bool {
+ return isDirCache.Do(path, func() bool {
+ fi, err := fsys.Stat(path)
+ return err == nil && fi.IsDir()
+ })
+}
+
+// ResolveImportPath returns the true meaning of path when it appears in parent.
+// There are two different resolutions applied.
+// First, there is Go 1.5 vendoring (golang.org/s/go15vendor).
+// If vendor expansion doesn't trigger, then the path is also subject to
+// Go 1.11 module legacy conversion (golang.org/issue/25069).
+func ResolveImportPath(parent *Package, path string) (found string) {
+ var parentPath, parentDir, parentRoot string
+ parentIsStd := false
+ if parent != nil {
+ parentPath = parent.ImportPath
+ parentDir = parent.Dir
+ parentRoot = parent.Root
+ parentIsStd = parent.Standard
+ }
+ return resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd)
+}
+
+func resolveImportPath(path, parentPath, parentDir, parentRoot string, parentIsStd bool) (found string) {
+ if cfg.ModulesEnabled {
+ if _, p, e := modload.Lookup(parentPath, parentIsStd, path); e == nil {
+ return p
+ }
+ return path
+ }
+ found = vendoredImportPath(path, parentPath, parentDir, parentRoot)
+ if found != path {
+ return found
+ }
+ return moduleImportPath(path, parentPath, parentDir, parentRoot)
+}
+
+// dirAndRoot returns the source directory and workspace root
+// for the package with the given import path, guaranteeing that root is a
+// path prefix of dir.
+func dirAndRoot(path string, dir, root string) (string, string) {
+ origDir, origRoot := dir, root
+ dir = filepath.Clean(dir)
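+ // The workspace root passed in is a GOPATH entry (or GOROOT); source files
+ // live under its src subdirectory.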
+ root = filepath.Join(root, "src")
+ if !str.HasFilePathPrefix(dir, root) || path != "command-line-arguments" && filepath.Join(root, path) != dir {
+ // Look for symlinks before reporting error.
+ dir = expandPath(dir)
+ root = expandPath(root)
+ }
+
+ if !str.HasFilePathPrefix(dir, root) || len(dir) <= len(root) || dir[len(root)] != filepath.Separator || path != "command-line-arguments" && !build.IsLocalImport(path) && filepath.Join(root, path) != dir {
+ debug.PrintStack()
+ base.Fatalf("unexpected directory layout:\n"+
+ " import path: %s\n"+
+ " root: %s\n"+
+ " dir: %s\n"+
+ " expand root: %s\n"+
+ " expand dir: %s\n"+
+ " separator: %s",
+ path,
+ filepath.Join(origRoot, "src"),
+ filepath.Clean(origDir),
+ origRoot,
+ origDir,
+ string(filepath.Separator))
+ }
+
+ return dir, root
+}
+
+// vendoredImportPath returns the vendor-expansion of path when it appears in parent.
+// If parent is x/y/z, then path might expand to x/y/z/vendor/path, x/y/vendor/path,
+// x/vendor/path, vendor/path, or else stay path if none of those exist.
+// vendoredImportPath returns the expanded path or, if no expansion is found, the original.
+func vendoredImportPath(path, parentPath, parentDir, parentRoot string) (found string) {
+ if parentRoot == "" {
+ return path
+ }
+
+ dir, root := dirAndRoot(parentPath, parentDir, parentRoot)
+
+ vpath := "vendor/" + path
+ for i := len(dir); i >= len(root); i-- {
+ if i < len(dir) && dir[i] != filepath.Separator {
+ continue
+ }
+ // Note: checking for the vendor directory before checking
+ // for the vendor/path directory helps us hit the
+ // isDir cache more often. It also helps us prepare a more useful
+ // list of places we looked, to report when an import is not found.
+ if !isDir(filepath.Join(dir[:i], "vendor")) {
+ continue
+ }
+ targ := filepath.Join(dir[:i], vpath)
+ if isDir(targ) && hasGoFiles(targ) {
+ importPath := parentPath
+ if importPath == "command-line-arguments" {
+ // If parent.ImportPath is 'command-line-arguments', use parent's
+ // directory relative to root instead (with the root prefix chopped off).
+ importPath = dir[len(root)+1:]
+ }
+ // We started with parent's dir c:\gopath\src\foo\bar\baz\quux\xyzzy.
+ // We know the import path for parent's dir.
+ // We chopped off some number of path elements and
+ // added vendor\path to produce c:\gopath\src\foo\bar\baz\vendor\path.
+ // Now we want to know the import path for that directory.
+ // Construct it by chopping the same number of path elements
+ // (actually the same number of bytes) from parent's import path
+ // and then append /vendor/path.
+ chopped := len(dir) - i
+ if chopped == len(importPath)+1 {
+ // We walked up from c:\gopath\src\foo\bar
+ // and found c:\gopath\src\vendor\path.
+ // We chopped \foo\bar (length 8) but the import path is "foo/bar" (length 7).
+ // Use "vendor/path" without any prefix.
+ return vpath
+ }
+ return importPath[:len(importPath)-chopped] + "/" + vpath
+ }
+ }
+ return path
+}
+
+var (
+ modulePrefix = []byte("\nmodule ")
+ goModPathCache par.Cache[string, string]
+)
+
+// goModPath returns the module path in the go.mod in dir, if any.
+func goModPath(dir string) (path string) {
+ return goModPathCache.Do(dir, func() string {
+ data, err := os.ReadFile(filepath.Join(dir, "go.mod"))
+ if err != nil {
+ return ""
+ }
+ var i int
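+ // Find the "module" directive: either at the very start of the file or
+ // preceded by a newline. Either way, leave i pointing at "module ".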
+ if bytes.HasPrefix(data, modulePrefix[1:]) {
+ i = 0
+ } else {
+ i = bytes.Index(data, modulePrefix)
+ if i < 0 {
+ return ""
+ }
+ i++
+ }
+ line := data[i:]
+
+ // Cut line at \n, drop trailing \r if present.
+ if j := bytes.IndexByte(line, '\n'); j >= 0 {
+ line = line[:j]
+ }
+ if line[len(line)-1] == '\r' {
+ line = line[:len(line)-1]
+ }
+ line = line[len("module "):]
+
+ // If quoted, unquote.
+ path = strings.TrimSpace(string(line))
+ if path != "" && path[0] == '"' {
+ s, err := strconv.Unquote(path)
+ if err != nil {
+ return ""
+ }
+ path = s
+ }
+ return path
+ })
+}
+
+// findVersionElement returns the slice indices of the final version element /vN in path.
+// If there is no such element, it returns -1, -1.
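+// For example, findVersionElement("x/y/v2/z") returns indices i, j such that
+// path[i:j] == "/v2".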
+func findVersionElement(path string) (i, j int) {
+ j = len(path)
+ for i = len(path) - 1; i >= 0; i-- {
+ if path[i] == '/' {
+ if isVersionElement(path[i+1 : j]) {
+ return i, j
+ }
+ j = i
+ }
+ }
+ return -1, -1
+}
+
+// isVersionElement reports whether s is a well-formed path version element:
+// v2, v3, v10, etc, but not v0, v05, v1.
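+// For example, isVersionElement("v2") and isVersionElement("v10") are true,
+// while isVersionElement("v1"), isVersionElement("v05"), and isVersionElement("x2") are false.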
+func isVersionElement(s string) bool {
+ if len(s) < 2 || s[0] != 'v' || s[1] == '0' || s[1] == '1' && len(s) == 2 {
+ return false
+ }
+ for i := 1; i < len(s); i++ {
+ if s[i] < '0' || '9' < s[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// moduleImportPath translates import paths found in go modules
+// back down to paths that can be resolved in ordinary builds.
+//
+// Define “new” code as code with a go.mod file in the same directory
+// or a parent directory. If an import in new code says x/y/v2/z but
+// x/y/v2/z does not exist and x/y/go.mod says “module x/y/v2”,
+// then go build will read the import as x/y/z instead.
+// See golang.org/issue/25069.
+func moduleImportPath(path, parentPath, parentDir, parentRoot string) (found string) {
+ if parentRoot == "" {
+ return path
+ }
+
+ // If there are no vN elements in path, leave it alone.
+ // (The code below would do the same, but only after
+ // some other file system accesses that we can avoid
+ // here by returning early.)
+ if i, _ := findVersionElement(path); i < 0 {
+ return path
+ }
+
+ dir, root := dirAndRoot(parentPath, parentDir, parentRoot)
+
+ // Consider dir and parents, up to and including root.
+ for i := len(dir); i >= len(root); i-- {
+ if i < len(dir) && dir[i] != filepath.Separator {
+ continue
+ }
+ if goModPath(dir[:i]) != "" {
+ goto HaveGoMod
+ }
+ }
+ // This code is not in a tree with a go.mod,
+ // so apply no changes to the path.
+ return path
+
+HaveGoMod:
+ // This import is in a tree with a go.mod.
+ // Allow it to refer to code in GOPATH/src/x/y/z as x/y/v2/z
+ // if GOPATH/src/x/y/go.mod says module "x/y/v2".
+
+ // If x/y/v2/z exists, use it unmodified.
+ if bp, _ := cfg.BuildContext.Import(path, "", build.IgnoreVendor); bp.Dir != "" {
+ return path
+ }
+
+ // Otherwise look for a go.mod supplying a version element.
+ // Some version-like elements may appear in paths but not
+ // be module versions; we skip over those to look for module
+ // versions. For example the module m/v2 might have a
+ // package m/v2/api/v1/foo.
+ limit := len(path)
+ for limit > 0 {
+ i, j := findVersionElement(path[:limit])
+ if i < 0 {
+ return path
+ }
+ if bp, _ := cfg.BuildContext.Import(path[:i], "", build.IgnoreVendor); bp.Dir != "" {
+ if mpath := goModPath(bp.Dir); mpath != "" {
+ // Found a valid go.mod file, so we're stopping the search.
+ // If the path is m/v2/p and we found m/go.mod that says
+ // "module m/v2", then we return "m/p".
+ if mpath == path[:j] {
+ return path[:i] + path[j:]
+ }
+ // Otherwise just return the original path.
+ // We didn't find anything worth rewriting,
+ // and the go.mod indicates that we should
+ // not consider parent directories.
+ return path
+ }
+ }
+ limit = i
+ }
+ return path
+}
+
+// hasGoFiles reports whether dir contains any files with names ending in .go.
+// For a vendor check we must exclude directories that contain no .go files.
+// Otherwise it is not possible to vendor just a/b/c and still import the
+// non-vendored a/b. See golang.org/issue/13832.
+func hasGoFiles(dir string) bool {
+ files, _ := os.ReadDir(dir)
+ for _, f := range files {
+ if !f.IsDir() && strings.HasSuffix(f.Name(), ".go") {
+ return true
+ }
+ }
+ return false
+}
+
+// reusePackage reuses package p to satisfy the import at the top
+// of the import stack stk. If this use causes an import loop,
+// reusePackage updates p's error information to record the loop.
+func reusePackage(p *Package, stk *ImportStack) *Package {
+ // We use p.Internal.Imports==nil to detect a package that
+ // is in the midst of its own loadPackage call
+ // (all the recursion below happens before p.Internal.Imports gets set).
+ if p.Internal.Imports == nil {
+ if p.Error == nil {
+ p.Error = &PackageError{
+ ImportStack: stk.Copy(),
+ Err: errors.New("import cycle not allowed"),
+ IsImportCycle: true,
+ }
+ } else if !p.Error.IsImportCycle {
+ // If the error is already set, but it does not indicate that
+ // we are in an import cycle, set IsImportCycle so that we don't
+ // end up stuck in a loop down the road.
+ p.Error.IsImportCycle = true
+ }
+ p.Incomplete = true
+ }
+ // Don't rewrite the import stack in the error if we have an import cycle.
+ // If we do, we'll lose the path that describes the cycle.
+ if p.Error != nil && !p.Error.IsImportCycle && stk.shorterThan(p.Error.ImportStack) {
+ p.Error.ImportStack = stk.Copy()
+ }
+ return p
+}
+
+// disallowInternal checks that srcDir (containing package importerPath, if non-empty)
+// is allowed to import p.
+// If the import is allowed, disallowInternal returns the original package p.
+// If not, it returns a new package containing just an appropriate error.
+func disallowInternal(ctx context.Context, srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *PackageError {
+ // golang.org/s/go14internal:
+ // An import of a path containing the element “internal”
+ // is disallowed if the importing code is outside the tree
+ // rooted at the parent of the “internal” directory.
+
+ // There was an error loading the package; stop here.
+ if p.Error != nil {
+ return nil
+ }
+
+ // The generated 'testmain' package is allowed to access testing/internal/...,
+ // as if it were generated into the testing directory tree
+ // (it's actually in a temporary directory outside any Go tree).
+ // This cleans up a former kludge in passing functionality to the testing package.
+ if str.HasPathPrefix(p.ImportPath, "testing/internal") && importerPath == "testmain" {
+ return nil
+ }
+
+ // We can't check standard packages with gccgo.
+ if cfg.BuildContext.Compiler == "gccgo" && p.Standard {
+ return nil
+ }
+
+ // The sort package depends on internal/reflectlite, but during bootstrap
+ // the path rewriting causes the normal internal checks to fail.
+ // Instead, just ignore the internal rules during bootstrap.
+ if p.Standard && strings.HasPrefix(importerPath, "bootstrap/") {
+ return nil
+ }
+
+ // importerPath is empty: we started
+ // with a name given on the command line, not an
+ // import. Anything listed on the command line is fine.
+ if importerPath == "" {
+ return nil
+ }
+
+ // Check for "internal" element: three cases depending on begin of string and/or end of string.
+ i, ok := findInternal(p.ImportPath)
+ if !ok {
+ return nil
+ }
+
+ // Internal is present.
+ // Map import path back to directory corresponding to parent of internal.
+ if i > 0 {
+ i-- // rewind over slash in ".../internal"
+ }
+
+ if p.Module == nil {
+ parent := p.Dir[:i+len(p.Dir)-len(p.ImportPath)]
+
+ if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
+ return nil
+ }
+
+ // Look for symlinks before reporting error.
+ srcDir = expandPath(srcDir)
+ parent = expandPath(parent)
+ if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
+ return nil
+ }
+ } else {
+ // p is in a module, so make it available based on the importer's import path instead
+ // of the file path (https://golang.org/issue/23970).
+ if importer.Internal.CmdlineFiles {
+ // The importer is a list of command-line files.
+ // Pretend that the import path is the import path of the
+ // directory containing them.
+ // If the directory is outside the main modules, this will resolve to ".",
+ // which is not a prefix of any valid module.
+ importerPath, _ = modload.MainModules.DirImportPath(ctx, importer.Dir)
+ }
+ parentOfInternal := p.ImportPath[:i]
+ if str.HasPathPrefix(importerPath, parentOfInternal) {
+ return nil
+ }
+ }
+
+ // Internal is present, and srcDir is outside parent's tree. Not allowed.
+ perr := &PackageError{
+ alwaysPrintStack: true,
+ ImportStack: stk.Copy(),
+ Err: ImportErrorf(p.ImportPath, "use of internal package "+p.ImportPath+" not allowed"),
+ }
+ return perr
+}
+
+// findInternal looks for the final "internal" path element in the given import path.
+// If there isn't one, findInternal returns ok=false.
+// Otherwise, findInternal returns ok=true and the index of the "internal".
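+// For example, findInternal("a/b/internal/c") and findInternal("a/b/internal")
+// both return index 4 (the position of "internal") with ok=true.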
+func findInternal(path string) (index int, ok bool) {
+ // Three cases, depending on internal at start/end of string or not.
+ // The order matters: we must return the index of the final element,
+ // because the final one produces the most restrictive requirement
+ // on the importer.
+ switch {
+ case strings.HasSuffix(path, "/internal"):
+ return len(path) - len("internal"), true
+ case strings.Contains(path, "/internal/"):
+ return strings.LastIndex(path, "/internal/") + 1, true
+ case path == "internal", strings.HasPrefix(path, "internal/"):
+ return 0, true
+ }
+ return 0, false
+}
+
+// disallowVendor checks that srcDir is allowed to import p as path.
+// If the import is allowed, disallowVendor returns the original package p.
+// If not, it returns a PackageError.
+func disallowVendor(srcDir string, path string, importerPath string, p *Package, stk *ImportStack) *PackageError {
+ // If the importerPath is empty, we started
+ // with a name given on the command line, not an
+ // import. Anything listed on the command line is fine.
+ if importerPath == "" {
+ return nil
+ }
+
+ if perr := disallowVendorVisibility(srcDir, p, importerPath, stk); perr != nil {
+ return perr
+ }
+
+ // Paths like x/vendor/y must be imported as y, never as x/vendor/y.
+ if i, ok := FindVendor(path); ok {
+ perr := &PackageError{
+ ImportStack: stk.Copy(),
+ Err: ImportErrorf(path, "%s must be imported as %s", path, path[i+len("vendor/"):]),
+ }
+ return perr
+ }
+
+ return nil
+}
+
+// disallowVendorVisibility checks that srcDir is allowed to import p.
+// The rules are the same as for /internal/ except that a path ending in /vendor
+// is not subject to the rules, only subdirectories of vendor.
+// This allows people to have packages and commands named vendor,
+// for maximal compatibility with existing source trees.
+func disallowVendorVisibility(srcDir string, p *Package, importerPath string, stk *ImportStack) *PackageError {
+ // The stack does not include p.ImportPath.
+ // If there's nothing on the stack, we started
+ // with a name given on the command line, not an
+ // import. Anything listed on the command line is fine.
+ if importerPath == "" {
+ return nil
+ }
+
+ // Check for "vendor" element.
+ i, ok := FindVendor(p.ImportPath)
+ if !ok {
+ return nil
+ }
+
+ // Vendor is present.
+ // Map import path back to directory corresponding to parent of vendor.
+ if i > 0 {
+ i-- // rewind over slash in ".../vendor"
+ }
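+ // Translate the index in the import path to the corresponding index in
+ // p.Dir, which is expected to end with the import path; if it does not,
+ // skip the check.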
+ truncateTo := i + len(p.Dir) - len(p.ImportPath)
+ if truncateTo < 0 || len(p.Dir) < truncateTo {
+ return nil
+ }
+ parent := p.Dir[:truncateTo]
+ if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
+ return nil
+ }
+
+ // Look for symlinks before reporting error.
+ srcDir = expandPath(srcDir)
+ parent = expandPath(parent)
+ if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
+ return nil
+ }
+
+ // Vendor is present, and srcDir is outside parent's tree. Not allowed.
+
+ perr := &PackageError{
+ ImportStack: stk.Copy(),
+ Err: errors.New("use of vendored package not allowed"),
+ }
+ return perr
+}
+
+// FindVendor looks for the last non-terminating "vendor" path element in the given import path.
+// If there isn't one, FindVendor returns ok=false.
+// Otherwise, FindVendor returns ok=true and the index of the "vendor".
+//
+// Note that terminating "vendor" elements don't count: "x/vendor" is its own package,
+// not the vendored copy of an import "" (the empty import path).
+// This will allow people to have packages or commands named vendor.
+// This may help reduce breakage, or it may just be confusing. We'll see.
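+//
+// For example, FindVendor("a/b/vendor/x") returns index 4 (the position of
+// "vendor") with ok=true, while FindVendor("a/b/vendor") returns ok=false.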
+func FindVendor(path string) (index int, ok bool) {
+ // Two cases, depending on whether "vendor" is at the start of the string or not.
+ // The order matters: we must return the index of the final element,
+ // because the final one is where the effective import path starts.
+ switch {
+ case strings.Contains(path, "/vendor/"):
+ return strings.LastIndex(path, "/vendor/") + 1, true
+ case strings.HasPrefix(path, "vendor/"):
+ return 0, true
+ }
+ return 0, false
+}
+
+type TargetDir int
+
+const (
+ ToTool TargetDir = iota // to GOROOT/pkg/tool (default for cmd/*)
+ ToBin // to bin dir inside package root (default for non-cmd/*)
+ StalePath // an old import path; fail to build
+)
+
+// InstallTargetDir reports the target directory for installing the command p.
+func InstallTargetDir(p *Package) TargetDir {
+ if strings.HasPrefix(p.ImportPath, "code.google.com/p/go.tools/cmd/") {
+ return StalePath
+ }
+ if p.Goroot && strings.HasPrefix(p.ImportPath, "cmd/") && p.Name == "main" {
+ switch p.ImportPath {
+ case "cmd/go", "cmd/gofmt":
+ return ToBin
+ }
+ return ToTool
+ }
+ return ToBin
+}
+
+var cgoExclude = map[string]bool{
+ "runtime/cgo": true,
+}
+
+var cgoSyscallExclude = map[string]bool{
+ "runtime/cgo": true,
+ "runtime/race": true,
+ "runtime/msan": true,
+ "runtime/asan": true,
+}
+
+var foldPath = make(map[string]string)
+
+// exeFromImportPath returns an executable name
+// for a package using the import path.
+//
+// The executable name is the last element of the import path.
+// In module-aware mode, an additional rule is used on import paths
+// consisting of two or more path elements. If the last element is
+// a vN path element specifying the major version, then the
+// second-to-last element of the import path is used instead.
+func (p *Package) exeFromImportPath() string {
+ _, elem := pathpkg.Split(p.ImportPath)
+ if cfg.ModulesEnabled {
+ // If this is example.com/mycmd/v2, it's more useful to
+ // install it as mycmd than as v2. See golang.org/issue/24667.
+ if elem != p.ImportPath && isVersionElement(elem) {
+ _, elem = pathpkg.Split(pathpkg.Dir(p.ImportPath))
+ }
+ }
+ return elem
+}
+
+// exeFromFiles returns an executable name for a package
+// using the first element in GoFiles or CgoFiles collections without the prefix.
+//
+// Returns empty string in case of empty collection.
+func (p *Package) exeFromFiles() string {
+ var src string
+ if len(p.GoFiles) > 0 {
+ src = p.GoFiles[0]
+ } else if len(p.CgoFiles) > 0 {
+ src = p.CgoFiles[0]
+ } else {
+ return ""
+ }
+ _, elem := filepath.Split(src)
+ return elem[:len(elem)-len(".go")]
+}
+
+// DefaultExecName returns the default executable name for a package.
+func (p *Package) DefaultExecName() string {
+ if p.Internal.CmdlineFiles {
+ return p.exeFromFiles()
+ }
+ return p.exeFromImportPath()
+}
+
+// load populates p using information from bp, err, which should
+// be the result of calling build.Context.Import.
+// stk contains the import stack, not including path itself.
+func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) {
+ p.copyBuild(opts, bp)
+
+ // The localPrefix is the path we interpret ./ imports relative to,
+ // if we support them at all (not in module mode!).
+ // Synthesized main packages sometimes override this.
+ if p.Internal.Local && !cfg.ModulesEnabled {
+ p.Internal.LocalPrefix = dirToImportPath(p.Dir)
+ }
+
+ // setError sets p.Error if it hasn't already been set. We may proceed
+ // after encountering some errors so that 'go list -e' has more complete
+ // output. If there's more than one error, we should report the first.
+ setError := func(err error) {
+ if p.Error == nil {
+ p.Error = &PackageError{
+ ImportStack: stk.Copy(),
+ Err: err,
+ }
+ p.Incomplete = true
+
+ // Add the importer's position information if the import position exists, and
+ // the current package being examined is the importer.
+ // If we have not yet accepted package p onto the import stack,
+ // then the cause of the error is not within p itself: the error
+ // must be either in an explicit command-line argument,
+ // or on the importer side (indicated by a non-empty importPos).
+ if path != stk.Top() && len(importPos) > 0 {
+ p.Error.setPos(importPos)
+ }
+ }
+ }
+
+ if err != nil {
+ p.Incomplete = true
+ p.setLoadPackageDataError(err, path, stk, importPos)
+ }
+
+ useBindir := p.Name == "main"
+ if !p.Standard {
+ switch cfg.BuildBuildmode {
+ case "c-archive", "c-shared", "plugin":
+ useBindir = false
+ }
+ }
+
+ if useBindir {
+ // Report an error when the old code.google.com/p/go.tools paths are used.
+ if InstallTargetDir(p) == StalePath {
+ // TODO(matloob): remove this branch, and StalePath itself. code.google.com/p/go is so
+ // old, even this code checking for it is stale now!
+ newPath := strings.Replace(p.ImportPath, "code.google.com/p/go.", "golang.org/x/", 1)
+ e := ImportErrorf(p.ImportPath, "the %v command has moved; use %v instead.", p.ImportPath, newPath)
+ setError(e)
+ return
+ }
+ elem := p.DefaultExecName() + cfg.ExeSuffix
+ full := filepath.Join(cfg.BuildContext.GOOS+"_"+cfg.BuildContext.GOARCH, elem)
+ if cfg.BuildContext.GOOS != runtime.GOOS || cfg.BuildContext.GOARCH != runtime.GOARCH {
+ // Install cross-compiled binaries to subdirectories of bin.
+ elem = full
+ }
+ if p.Internal.Build.BinDir == "" && cfg.ModulesEnabled {
+ p.Internal.Build.BinDir = modload.BinDir()
+ }
+ if p.Internal.Build.BinDir != "" {
+ // Install to GOBIN or bin of GOPATH entry.
+ p.Target = filepath.Join(p.Internal.Build.BinDir, elem)
+ if !p.Goroot && strings.Contains(elem, string(filepath.Separator)) && cfg.GOBIN != "" {
+ // Do not create $GOBIN/goos_goarch/elem.
+ p.Target = ""
+ p.Internal.GobinSubdir = true
+ }
+ }
+ if InstallTargetDir(p) == ToTool {
+ // This is for 'go tool'.
+ // Override all the usual logic and force it into the tool directory.
+ if cfg.BuildToolchainName == "gccgo" {
+ p.Target = filepath.Join(build.ToolDir, elem)
+ } else {
+ p.Target = filepath.Join(cfg.GOROOTpkg, "tool", full)
+ }
+ }
+ } else if p.Internal.Local {
+ // Local import turned into absolute path.
+ // No permanent install target.
+ p.Target = ""
+ } else if p.Standard && cfg.BuildContext.Compiler == "gccgo" {
+ // gccgo has a preinstalled standard library that cmd/go cannot rebuild.
+ p.Target = ""
+ } else {
+ p.Target = p.Internal.Build.PkgObj
+ if cfg.BuildBuildmode == "shared" && p.Internal.Build.PkgTargetRoot != "" {
+ // TODO(matloob): This shouldn't be necessary, but the cmd/cgo/internal/testshared
+ // test fails without Target set for this condition. Figure out why and
+ // fix it.
+ p.Target = filepath.Join(p.Internal.Build.PkgTargetRoot, p.ImportPath+".a")
+ }
+ if cfg.BuildLinkshared && p.Internal.Build.PkgTargetRoot != "" {
+ // TODO(bcmills): The reliance on PkgTargetRoot implies that -linkshared does
+ // not work for any package that lacks a PkgTargetRoot — such as a non-main
+ // package in module mode. We should probably fix that.
+ targetPrefix := filepath.Join(p.Internal.Build.PkgTargetRoot, p.ImportPath)
+ p.Target = targetPrefix + ".a"
+ shlibnamefile := targetPrefix + ".shlibname"
+ shlib, err := os.ReadFile(shlibnamefile)
+ if err != nil && !os.IsNotExist(err) {
+ base.Fatalf("reading shlibname: %v", err)
+ }
+ if err == nil {
+ libname := strings.TrimSpace(string(shlib))
+ if cfg.BuildContext.Compiler == "gccgo" {
+ p.Shlib = filepath.Join(p.Internal.Build.PkgTargetRoot, "shlibs", libname)
+ } else {
+ p.Shlib = filepath.Join(p.Internal.Build.PkgTargetRoot, libname)
+ }
+ }
+ }
+ }
+
+ // Build augmented import list to add implicit dependencies.
+ // Be careful not to add imports twice, just to avoid confusion.
+ importPaths := p.Imports
+ addImport := func(path string, forCompiler bool) {
+ for _, p := range importPaths {
+ if path == p {
+ return
+ }
+ }
+ importPaths = append(importPaths, path)
+ if forCompiler {
+ p.Internal.CompiledImports = append(p.Internal.CompiledImports, path)
+ }
+ }
+
+ if !opts.IgnoreImports {
+ // Cgo translation adds imports of "unsafe", "runtime/cgo" and "syscall",
+ // except for certain packages, to avoid circular dependencies.
+ if p.UsesCgo() {
+ addImport("unsafe", true)
+ }
+ if p.UsesCgo() && (!p.Standard || !cgoExclude[p.ImportPath]) && cfg.BuildContext.Compiler != "gccgo" {
+ addImport("runtime/cgo", true)
+ }
+ if p.UsesCgo() && (!p.Standard || !cgoSyscallExclude[p.ImportPath]) {
+ addImport("syscall", true)
+ }
+
+ // SWIG adds imports of some standard packages.
+ if p.UsesSwig() {
+ addImport("unsafe", true)
+ if cfg.BuildContext.Compiler != "gccgo" {
+ addImport("runtime/cgo", true)
+ }
+ addImport("syscall", true)
+ addImport("sync", true)
+
+ // TODO: The .swig and .swigcxx files can use
+ // %go_import directives to import other packages.
+ }
+
+ // The linker loads implicit dependencies.
+ if p.Name == "main" && !p.Internal.ForceLibrary {
+ for _, dep := range LinkerDeps(p) {
+ addImport(dep, false)
+ }
+ }
+ }
+
+ // Check for case-insensitive collisions of import paths.
+ fold := str.ToFold(p.ImportPath)
+ if other := foldPath[fold]; other == "" {
+ foldPath[fold] = p.ImportPath
+ } else if other != p.ImportPath {
+ setError(ImportErrorf(p.ImportPath, "case-insensitive import collision: %q and %q", p.ImportPath, other))
+ return
+ }
+
+ if !SafeArg(p.ImportPath) {
+ setError(ImportErrorf(p.ImportPath, "invalid import path %q", p.ImportPath))
+ return
+ }
+
+ // Errors after this point are caused by this package, not the importing
+ // package. Pushing the path here prevents us from reporting the error
+ // with the position of the import declaration.
+ stk.Push(path)
+ defer stk.Pop()
+
+ pkgPath := p.ImportPath
+ if p.Internal.CmdlineFiles {
+ pkgPath = "command-line-arguments"
+ }
+ if cfg.ModulesEnabled {
+ p.Module = modload.PackageModuleInfo(ctx, pkgPath)
+ }
+ p.DefaultGODEBUG = defaultGODEBUG(p, nil, nil, nil)
+
+ if !opts.SuppressEmbedFiles {
+ p.EmbedFiles, p.Internal.Embed, err = resolveEmbed(p.Dir, p.EmbedPatterns)
+ if err != nil {
+ p.Incomplete = true
+ setError(err)
+ embedErr := err.(*EmbedError)
+ p.Error.setPos(p.Internal.Build.EmbedPatternPos[embedErr.Pattern])
+ }
+ }
+
+ // Check for case-insensitive collision of input files.
+ // To avoid problems on case-insensitive files, we reject any package
+ // where two different input files have equal names under a case-insensitive
+ // comparison.
+ inputs := p.AllFiles()
+ f1, f2 := str.FoldDup(inputs)
+ if f1 != "" {
+ setError(fmt.Errorf("case-insensitive file name collision: %q and %q", f1, f2))
+ return
+ }
+
+ // If first letter of input file is ASCII, it must be alphanumeric.
+ // This avoids files turning into flags when invoking commands,
+ // and other problems we haven't thought of yet.
+ // Also, _cgo_ files must be generated by us, not supplied.
+ // They are allowed to have //go:cgo_ldflag directives.
+ // The directory scan ignores files beginning with _,
+ // so we shouldn't see any _cgo_ files anyway, but just be safe.
+ for _, file := range inputs {
+ if !SafeArg(file) || strings.HasPrefix(file, "_cgo_") {
+ setError(fmt.Errorf("invalid input file name %q", file))
+ return
+ }
+ }
+ if name := pathpkg.Base(p.ImportPath); !SafeArg(name) {
+ setError(fmt.Errorf("invalid input directory name %q", name))
+ return
+ }
+ if strings.ContainsAny(p.Dir, "\r\n") {
+ setError(fmt.Errorf("invalid package directory %q", p.Dir))
+ return
+ }
+
+ // Build list of imported packages and full dependency list.
+ imports := make([]*Package, 0, len(p.Imports))
+ for i, path := range importPaths {
+ if path == "C" {
+ continue
+ }
+ p1, err := LoadImport(ctx, opts, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport)
+ if err != nil && p.Error == nil {
+ p.Error = err
+ p.Incomplete = true
+ }
+
+ path = p1.ImportPath
+ importPaths[i] = path
+ if i < len(p.Imports) {
+ p.Imports[i] = path
+ }
+
+ imports = append(imports, p1)
+ if p1.Incomplete {
+ p.Incomplete = true
+ }
+ }
+ p.Internal.Imports = imports
+ if p.Error == nil && p.Name == "main" && !p.Internal.ForceLibrary && !p.Incomplete && !opts.SuppressBuildInfo {
+ // TODO(bcmills): loading VCS metadata can be fairly slow.
+ // Consider starting this as a background goroutine and retrieving the result
+ // asynchronously when we're actually ready to build the package, or when we
+ // actually need to evaluate whether the package's metadata is stale.
+ p.setBuildInfo(ctx, opts.AutoVCS)
+ }
+
+ // If cgo is not enabled, ignore cgo supporting sources
+ // just as we ignore go files containing import "C".
+ if !cfg.BuildContext.CgoEnabled {
+ p.CFiles = nil
+ p.CXXFiles = nil
+ p.MFiles = nil
+ p.SwigFiles = nil
+ p.SwigCXXFiles = nil
+ // Note that SFiles are okay (they go to the Go assembler)
+ // and HFiles are okay (they might be used by the SFiles).
+ // Also Sysofiles are okay (they might not contain object
+ // code; see issue #16050).
+ }
+
+ // The gc toolchain only permits C source files with cgo or SWIG.
+ if len(p.CFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() && cfg.BuildContext.Compiler == "gc" {
+ setError(fmt.Errorf("C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CFiles, " ")))
+ return
+ }
+
+ // C++, Objective-C, and Fortran source files are permitted only with cgo or SWIG,
+ // regardless of toolchain.
+ if len(p.CXXFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() {
+ setError(fmt.Errorf("C++ source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CXXFiles, " ")))
+ return
+ }
+ if len(p.MFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() {
+ setError(fmt.Errorf("Objective-C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.MFiles, " ")))
+ return
+ }
+ if len(p.FFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() {
+ setError(fmt.Errorf("Fortran source files not allowed when not using cgo or SWIG: %s", strings.Join(p.FFiles, " ")))
+ return
+ }
+}
+
+// An EmbedError indicates a problem with a go:embed directive.
+type EmbedError struct {
+ Pattern string
+ Err error
+}
+
+func (e *EmbedError) Error() string {
+ return fmt.Sprintf("pattern %s: %v", e.Pattern, e.Err)
+}
+
+func (e *EmbedError) Unwrap() error {
+ return e.Err
+}
+
+// ResolveEmbed resolves //go:embed patterns and returns only the file list.
+// For use by go mod vendor to find embedded files it should copy into the
+// vendor directory.
+// TODO(#42504): Once go mod vendor uses load.PackagesAndErrors, just
+// call (*Package).ResolveEmbed
+func ResolveEmbed(dir string, patterns []string) ([]string, error) {
+ files, _, err := resolveEmbed(dir, patterns)
+ return files, err
+}
+
+// resolveEmbed resolves //go:embed patterns to precise file lists.
+// It sets files to the list of unique files matched (for go list),
+// and it sets pmap to the more precise mapping from
+// patterns to files.
+func resolveEmbed(pkgdir string, patterns []string) (files []string, pmap map[string][]string, err error) {
+ var pattern string
+ defer func() {
+ if err != nil {
+ err = &EmbedError{
+ Pattern: pattern,
+ Err: err,
+ }
+ }
+ }()
+
+ // TODO(rsc): All these messages need position information for better error reports.
+ pmap = make(map[string][]string)
+ have := make(map[string]int)
+ dirOK := make(map[string]bool)
+ pid := 0 // pattern ID, to allow reuse of have map
+ for _, pattern = range patterns {
+ pid++
+
+ glob := pattern
+ all := strings.HasPrefix(pattern, "all:")
+ if all {
+ glob = pattern[len("all:"):]
+ }
+ // Check pattern is valid for //go:embed.
+ if _, err := pathpkg.Match(glob, ""); err != nil || !validEmbedPattern(glob) {
+ return nil, nil, fmt.Errorf("invalid pattern syntax")
+ }
+
+ // Glob to find matches.
+ match, err := fsys.Glob(str.QuoteGlob(str.WithFilePathSeparator(pkgdir)) + filepath.FromSlash(glob))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Filter list of matches down to the ones that will still exist when
+ // the directory is packaged up as a module. (If p.Dir is in the module cache,
+ // only those files exist already, but if p.Dir is in the current module,
+ // then there may be other things lying around, like symbolic links or .git directories.)
+ var list []string
+ for _, file := range match {
+ // relative path to p.Dir which begins without prefix slash
+ rel := filepath.ToSlash(str.TrimFilePathPrefix(file, pkgdir))
+
+ what := "file"
+ info, err := fsys.Lstat(file)
+ if err != nil {
+ return nil, nil, err
+ }
+ if info.IsDir() {
+ what = "directory"
+ }
+
+ // Check that directories along path do not begin a new module
+ // (do not contain a go.mod).
+ for dir := file; len(dir) > len(pkgdir)+1 && !dirOK[dir]; dir = filepath.Dir(dir) {
+ if _, err := fsys.Stat(filepath.Join(dir, "go.mod")); err == nil {
+ return nil, nil, fmt.Errorf("cannot embed %s %s: in different module", what, rel)
+ }
+ if dir != file {
+ if info, err := fsys.Lstat(dir); err == nil && !info.IsDir() {
+ return nil, nil, fmt.Errorf("cannot embed %s %s: in non-directory %s", what, rel, dir[len(pkgdir)+1:])
+ }
+ }
+ dirOK[dir] = true
+ if elem := filepath.Base(dir); isBadEmbedName(elem) {
+ if dir == file {
+ return nil, nil, fmt.Errorf("cannot embed %s %s: invalid name %s", what, rel, elem)
+ } else {
+ return nil, nil, fmt.Errorf("cannot embed %s %s: in invalid directory %s", what, rel, elem)
+ }
+ }
+ }
+
+ switch {
+ default:
+ return nil, nil, fmt.Errorf("cannot embed irregular file %s", rel)
+
+ case info.Mode().IsRegular():
+ if have[rel] != pid {
+ have[rel] = pid
+ list = append(list, rel)
+ }
+
+ case info.IsDir():
+ // Gather all files in the named directory, stopping at module boundaries
+ // and ignoring files that wouldn't be packaged into a module.
+ count := 0
+ err := fsys.Walk(file, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ rel := filepath.ToSlash(str.TrimFilePathPrefix(path, pkgdir))
+ name := info.Name()
+ if path != file && (isBadEmbedName(name) || ((name[0] == '.' || name[0] == '_') && !all)) {
+ // Ignore bad names, assuming they won't go into modules.
+ // Also avoid hidden files that user may not know about.
+ // See golang.org/issue/42328.
+ if info.IsDir() {
+ return fs.SkipDir
+ }
+ return nil
+ }
+ if info.IsDir() {
+ if _, err := fsys.Stat(filepath.Join(path, "go.mod")); err == nil {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+ if !info.Mode().IsRegular() {
+ return nil
+ }
+ count++
+ if have[rel] != pid {
+ have[rel] = pid
+ list = append(list, rel)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+ if count == 0 {
+ return nil, nil, fmt.Errorf("cannot embed directory %s: contains no embeddable files", rel)
+ }
+ }
+ }
+
+ if len(list) == 0 {
+ return nil, nil, fmt.Errorf("no matching files found")
+ }
+ sort.Strings(list)
+ pmap[pattern] = list
+ }
+
+ for file := range have {
+ files = append(files, file)
+ }
+ sort.Strings(files)
+ return files, pmap, nil
+}
+
+func validEmbedPattern(pattern string) bool {
+ return pattern != "." && fs.ValidPath(pattern)
+}
+
+// isBadEmbedName reports whether name is the base name of a file that
+// can't or won't be included in modules and therefore shouldn't be treated
+// as existing for embedding.
+func isBadEmbedName(name string) bool {
+ if err := module.CheckFilePath(name); err != nil {
+ return true
+ }
+ switch name {
+ // Empty string should be impossible but make it bad.
+ case "":
+ return true
+ // Version control directories won't be present in module.
+ case ".bzr", ".hg", ".git", ".svn":
+ return true
+ }
+ return false
+}
+
+// vcsStatusCache maps repository directories (string)
+// to their VCS information.
+var vcsStatusCache par.ErrCache[string, vcs.Status]
+
+func appendBuildSetting(info *debug.BuildInfo, key, value string) {
+ value = strings.ReplaceAll(value, "\n", " ") // make value safe
+ info.Settings = append(info.Settings, debug.BuildSetting{Key: key, Value: value})
+}
+
+// setBuildInfo gathers build information and sets it into
+// p.Internal.BuildInfo, which will later be formatted as a string and embedded
+// in the binary. setBuildInfo should only be called on a main package with no
+// errors.
+//
+// This information can be retrieved using debug.ReadBuildInfo.
+//
+// Note that the GoVersion field is not set here to avoid encoding it twice.
+// It is stored separately in the binary, mostly for historical reasons.
+func (p *Package) setBuildInfo(ctx context.Context, autoVCS bool) {
+ setPkgErrorf := func(format string, args ...any) {
+ if p.Error == nil {
+ p.Error = &PackageError{Err: fmt.Errorf(format, args...)}
+ p.Incomplete = true
+ }
+ }
+
+ var debugModFromModinfo func(*modinfo.ModulePublic) *debug.Module
+ debugModFromModinfo = func(mi *modinfo.ModulePublic) *debug.Module {
+ version := mi.Version
+ if version == "" {
+ version = "(devel)"
+ }
+ dm := &debug.Module{
+ Path: mi.Path,
+ Version: version,
+ }
+ if mi.Replace != nil {
+ dm.Replace = debugModFromModinfo(mi.Replace)
+ } else if mi.Version != "" {
+ dm.Sum = modfetch.Sum(ctx, module.Version{Path: mi.Path, Version: mi.Version})
+ }
+ return dm
+ }
+
+ var main debug.Module
+ if p.Module != nil {
+ main = *debugModFromModinfo(p.Module)
+ }
+
+ visited := make(map[*Package]bool)
+ mdeps := make(map[module.Version]*debug.Module)
+ var q []*Package
+ q = append(q, p.Internal.Imports...)
+ for len(q) > 0 {
+ p1 := q[0]
+ q = q[1:]
+ if visited[p1] {
+ continue
+ }
+ visited[p1] = true
+ if p1.Module != nil {
+ m := module.Version{Path: p1.Module.Path, Version: p1.Module.Version}
+ if p1.Module.Path != main.Path && mdeps[m] == nil {
+ mdeps[m] = debugModFromModinfo(p1.Module)
+ }
+ }
+ q = append(q, p1.Internal.Imports...)
+ }
+ sortedMods := make([]module.Version, 0, len(mdeps))
+ for mod := range mdeps {
+ sortedMods = append(sortedMods, mod)
+ }
+ gover.ModSort(sortedMods)
+ deps := make([]*debug.Module, len(sortedMods))
+ for i, mod := range sortedMods {
+ deps[i] = mdeps[mod]
+ }
+
+ pkgPath := p.ImportPath
+ if p.Internal.CmdlineFiles {
+ pkgPath = "command-line-arguments"
+ }
+ info := &debug.BuildInfo{
+ Path: pkgPath,
+ Main: main,
+ Deps: deps,
+ }
+ appendSetting := func(key, value string) {
+ appendBuildSetting(info, key, value)
+ }
+
+ // Add command-line flags relevant to the build.
+ // This is informational, not an exhaustive list.
+ // Please keep the list sorted.
+ if cfg.BuildASan {
+ appendSetting("-asan", "true")
+ }
+ if BuildAsmflags.present {
+ appendSetting("-asmflags", BuildAsmflags.String())
+ }
+ buildmode := cfg.BuildBuildmode
+ if buildmode == "default" {
+ if p.Name == "main" {
+ buildmode = "exe"
+ } else {
+ buildmode = "archive"
+ }
+ }
+ appendSetting("-buildmode", buildmode)
+ appendSetting("-compiler", cfg.BuildContext.Compiler)
+ if gccgoflags := BuildGccgoflags.String(); gccgoflags != "" && cfg.BuildContext.Compiler == "gccgo" {
+ appendSetting("-gccgoflags", gccgoflags)
+ }
+ if gcflags := BuildGcflags.String(); gcflags != "" && cfg.BuildContext.Compiler == "gc" {
+ appendSetting("-gcflags", gcflags)
+ }
+ if ldflags := BuildLdflags.String(); ldflags != "" {
+ // https://go.dev/issue/52372: only include ldflags if -trimpath is not set,
+ // since it can include system paths through various linker flags (notably
+ // -extar, -extld, and -extldflags).
+ //
+ // TODO: since we control cmd/link, in theory we can parse ldflags to
+ // determine whether they may refer to system paths. If we do that, we can
+ // redact only those paths from the recorded -ldflags setting and still
+ // record the system-independent parts of the flags.
+ if !cfg.BuildTrimpath {
+ appendSetting("-ldflags", ldflags)
+ }
+ }
+ // N.B. -pgo added later by setPGOProfilePath.
+ if cfg.BuildMSan {
+ appendSetting("-msan", "true")
+ }
+ if cfg.BuildRace {
+ appendSetting("-race", "true")
+ }
+ if tags := cfg.BuildContext.BuildTags; len(tags) > 0 {
+ appendSetting("-tags", strings.Join(tags, ","))
+ }
+ if cfg.BuildTrimpath {
+ appendSetting("-trimpath", "true")
+ }
+ if p.DefaultGODEBUG != "" {
+ appendSetting("DefaultGODEBUG", p.DefaultGODEBUG)
+ }
+ cgo := "0"
+ if cfg.BuildContext.CgoEnabled {
+ cgo = "1"
+ }
+ appendSetting("CGO_ENABLED", cgo)
+ // https://go.dev/issue/52372: only include CGO flags if -trimpath is not set.
+ // (If -trimpath is set, it is possible that these flags include system paths.)
+ // If cgo is involved, reproducibility is already pretty well ruined anyway,
+ // given that we aren't stamping header or library versions.
+ //
+ // TODO(bcmills): perhaps we could at least parse the flags and stamp the
+ // subset of flags that are known not to be paths?
+ if cfg.BuildContext.CgoEnabled && !cfg.BuildTrimpath {
+ for _, name := range []string{"CGO_CFLAGS", "CGO_CPPFLAGS", "CGO_CXXFLAGS", "CGO_LDFLAGS"} {
+ appendSetting(name, cfg.Getenv(name))
+ }
+ }
+ appendSetting("GOARCH", cfg.BuildContext.GOARCH)
+ if cfg.RawGOEXPERIMENT != "" {
+ appendSetting("GOEXPERIMENT", cfg.RawGOEXPERIMENT)
+ }
+ appendSetting("GOOS", cfg.BuildContext.GOOS)
+ if key, val := cfg.GetArchEnv(); key != "" && val != "" {
+ appendSetting(key, val)
+ }
+
+ // Add VCS status if all conditions are true:
+ //
+ // - -buildvcs is enabled.
+ // - p is a non-test package contained within a main module (there may be multiple
+ // main modules in a workspace, but local replacements don't count).
+ // - Both the current directory and p's module's root directory are contained
+ // in the same local repository.
+ // - We know the VCS commands needed to get the status.
+ setVCSError := func(err error) {
+ setPkgErrorf("error obtaining VCS status: %v\n\tUse -buildvcs=false to disable VCS stamping.", err)
+ }
+
+ var repoDir string
+ var vcsCmd *vcs.Cmd
+ var err error
+ const allowNesting = true
+
+ wantVCS := false
+ switch cfg.BuildBuildvcs {
+ case "true":
+ wantVCS = true // Include VCS metadata even for tests if requested explicitly; see https://go.dev/issue/52648.
+ case "auto":
+ wantVCS = autoVCS && !p.IsTestOnly()
+ case "false":
+ default:
+ panic(fmt.Sprintf("unexpected value for cfg.BuildBuildvcs: %q", cfg.BuildBuildvcs))
+ }
+
+ if wantVCS && p.Module != nil && p.Module.Version == "" && !p.Standard {
+ if p.Module.Path == "bootstrap" && cfg.GOROOT == os.Getenv("GOROOT_BOOTSTRAP") {
+ // During bootstrapping, the bootstrap toolchain is built in module
+ // "bootstrap" (instead of "std"), with GOROOT set to GOROOT_BOOTSTRAP
+ // (so the bootstrap toolchain packages don't even appear to be in GOROOT).
+ goto omitVCS
+ }
+ repoDir, vcsCmd, err = vcs.FromDir(base.Cwd(), "", allowNesting)
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ setVCSError(err)
+ return
+ }
+ if !str.HasFilePathPrefix(p.Module.Dir, repoDir) &&
+ !str.HasFilePathPrefix(repoDir, p.Module.Dir) {
+ // The module containing the main package does not overlap with the
+ // repository containing the working directory. Don't include VCS info.
+ // If the repo contains the module or vice versa, but they are not
+ // the same directory, it's likely an error (see below).
+ goto omitVCS
+ }
+ if cfg.BuildBuildvcs == "auto" && vcsCmd != nil && vcsCmd.Cmd != "" {
+ if _, err := exec.LookPath(vcsCmd.Cmd); err != nil {
+ // We found a repository, but the required VCS tool is not present.
+ // "-buildvcs=auto" means that we should silently drop the VCS metadata.
+ goto omitVCS
+ }
+ }
+ }
+ if repoDir != "" && vcsCmd.Status != nil {
+ // Check that the current directory, package, and module are in the same
+ // repository. vcs.FromDir allows nested Git repositories, but nesting
+ // is not allowed for other VCS tools. The current directory may be outside
+ // p.Module.Dir when a workspace is used.
+ pkgRepoDir, _, err := vcs.FromDir(p.Dir, "", allowNesting)
+ if err != nil {
+ setVCSError(err)
+ return
+ }
+ if pkgRepoDir != repoDir {
+ if cfg.BuildBuildvcs != "auto" {
+ setVCSError(fmt.Errorf("main package is in repository %q but current directory is in repository %q", pkgRepoDir, repoDir))
+ return
+ }
+ goto omitVCS
+ }
+ modRepoDir, _, err := vcs.FromDir(p.Module.Dir, "", allowNesting)
+ if err != nil {
+ setVCSError(err)
+ return
+ }
+ if modRepoDir != repoDir {
+ if cfg.BuildBuildvcs != "auto" {
+ setVCSError(fmt.Errorf("main module is in repository %q but current directory is in repository %q", modRepoDir, repoDir))
+ return
+ }
+ goto omitVCS
+ }
+
+ st, err := vcsStatusCache.Do(repoDir, func() (vcs.Status, error) {
+ return vcsCmd.Status(vcsCmd, repoDir)
+ })
+ if err != nil {
+ setVCSError(err)
+ return
+ }
+
+ appendSetting("vcs", vcsCmd.Cmd)
+ if st.Revision != "" {
+ appendSetting("vcs.revision", st.Revision)
+ }
+ if !st.CommitTime.IsZero() {
+ stamp := st.CommitTime.UTC().Format(time.RFC3339Nano)
+ appendSetting("vcs.time", stamp)
+ }
+ appendSetting("vcs.modified", strconv.FormatBool(st.Uncommitted))
+ }
+omitVCS:
+
+ p.Internal.BuildInfo = info
+}
+
+// SafeArg reports whether arg is a "safe" command-line argument,
+// meaning that when it appears in a command-line, it probably
+// doesn't have some special meaning other than its own name.
+// Obviously args beginning with - are not safe (they look like flags).
+// Less obviously, args beginning with @ are not safe (they look like
+// GNU binutils flagfile specifiers, sometimes called "response files").
+// To be conservative, we reject almost any arg beginning with non-alphanumeric ASCII.
+// We accept leading . _ and / as likely in file system paths.
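+// For example, SafeArg("main.go") and SafeArg("./x") are true, while
+// SafeArg("-flag") and SafeArg("@respfile") are false.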
+// There is a copy of this function in cmd/compile/internal/gc/noder.go.
+func SafeArg(name string) bool {
+ if name == "" {
+ return false
+ }
+ c := name[0]
+ return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
+}
+
+// LinkerDeps returns the list of linker-induced dependencies for main package p.
+func LinkerDeps(p *Package) []string {
+ // Everything links runtime.
+ deps := []string{"runtime"}
+
+ // External linking mode forces an import of runtime/cgo.
+ if externalLinkingForced(p) && cfg.BuildContext.Compiler != "gccgo" {
+ deps = append(deps, "runtime/cgo")
+ }
+ // On ARM with GOARM=5, it forces an import of math, for soft floating point.
+ if cfg.Goarch == "arm" {
+ deps = append(deps, "math")
+ }
+ // Using the race detector forces an import of runtime/race.
+ if cfg.BuildRace {
+ deps = append(deps, "runtime/race")
+ }
+ // Using memory sanitizer forces an import of runtime/msan.
+ if cfg.BuildMSan {
+ deps = append(deps, "runtime/msan")
+ }
+ // Using address sanitizer forces an import of runtime/asan.
+ if cfg.BuildASan {
+ deps = append(deps, "runtime/asan")
+ }
+ // Building for coverage forces an import of runtime/coverage.
+ if cfg.BuildCover && cfg.Experiment.CoverageRedesign {
+ deps = append(deps, "runtime/coverage")
+ }
+
+ return deps
+}
+
+// externalLinkingForced reports whether external linking is being
+// forced even for programs that do not use cgo.
+func externalLinkingForced(p *Package) bool {
+ if !cfg.BuildContext.CgoEnabled {
+ return false
+ }
+
+ // Some targets must use external linking even inside GOROOT.
+ if platform.MustLinkExternal(cfg.BuildContext.GOOS, cfg.BuildContext.GOARCH, false) {
+ return true
+ }
+
+ // Some build modes always require external linking.
+ switch cfg.BuildBuildmode {
+ case "c-shared", "plugin":
+ return true
+ }
+
+ // Using -linkshared always requires external linking.
+ if cfg.BuildLinkshared {
+ return true
+ }
+
+ // Decide whether we are building a PIE,
+ // bearing in mind that some systems default to PIE.
+ isPIE := false
+ if cfg.BuildBuildmode == "pie" {
+ isPIE = true
+ } else if cfg.BuildBuildmode == "default" && platform.DefaultPIE(cfg.BuildContext.GOOS, cfg.BuildContext.GOARCH, cfg.BuildRace) {
+ isPIE = true
+ }
+ // If we are building a PIE, and we are on a system
+ // that does not support PIE with internal linking mode,
+ // then we must use external linking.
+ if isPIE && !platform.InternalLinkPIESupported(cfg.BuildContext.GOOS, cfg.BuildContext.GOARCH) {
+ return true
+ }
+
+ // Using -ldflags=-linkmode=external forces external linking.
+ // If there are multiple -linkmode options, the last one wins.
+ linkmodeExternal := false
+ if p != nil {
+ ldflags := BuildLdflags.For(p)
+ for i := len(ldflags) - 1; i >= 0; i-- {
+ a := ldflags[i]
+ if a == "-linkmode=external" ||
+ a == "-linkmode" && i+1 < len(ldflags) && ldflags[i+1] == "external" {
+ linkmodeExternal = true
+ break
+ } else if a == "-linkmode=internal" ||
+ a == "-linkmode" && i+1 < len(ldflags) && ldflags[i+1] == "internal" {
+ break
+ }
+ }
+ }
+ return linkmodeExternal
+}
+
+// mkAbs rewrites list, which must be paths relative to p.Dir,
+// into a sorted list of absolute paths. It edits list in place but for
+// convenience also returns list back to its caller.
+func (p *Package) mkAbs(list []string) []string {
+ for i, f := range list {
+ list[i] = filepath.Join(p.Dir, f)
+ }
+ sort.Strings(list)
+ return list
+}
+
+// InternalGoFiles returns the list of Go files being built for the package,
+// using absolute paths.
+func (p *Package) InternalGoFiles() []string {
+ return p.mkAbs(str.StringList(p.GoFiles, p.CgoFiles, p.TestGoFiles))
+}
+
+// InternalXGoFiles returns the list of Go files being built for the XTest package,
+// using absolute paths.
+func (p *Package) InternalXGoFiles() []string {
+ return p.mkAbs(p.XTestGoFiles)
+}
+
+// InternalAllGoFiles returns the list of all Go files possibly relevant for the package,
+// using absolute paths. "Possibly relevant" means that files are not excluded
+// due to build tags, but files with names beginning with . or _ are still excluded.
+func (p *Package) InternalAllGoFiles() []string {
+ return p.mkAbs(str.StringList(p.IgnoredGoFiles, p.GoFiles, p.CgoFiles, p.TestGoFiles, p.XTestGoFiles))
+}
+
+// UsesSwig reports whether the package needs to run SWIG.
+func (p *Package) UsesSwig() bool {
+ return len(p.SwigFiles) > 0 || len(p.SwigCXXFiles) > 0
+}
+
+// UsesCgo reports whether the package needs to run cgo.
+func (p *Package) UsesCgo() bool {
+ return len(p.CgoFiles) > 0
+}
+
+// PackageList returns the list of packages in the dag rooted at roots
+// as visited in a depth-first post-order traversal.
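+// For example, if a root package a imports only b, and b imports only c,
+// the returned list is c, b, a.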
+func PackageList(roots []*Package) []*Package {
+ seen := map[*Package]bool{}
+ all := []*Package{}
+ var walk func(*Package)
+ walk = func(p *Package) {
+ if seen[p] {
+ return
+ }
+ seen[p] = true
+ for _, p1 := range p.Internal.Imports {
+ walk(p1)
+ }
+ all = append(all, p)
+ }
+ for _, root := range roots {
+ walk(root)
+ }
+ return all
+}
+
+// TestPackageList returns the list of packages in the dag rooted at roots
+// as visited in a depth-first post-order traversal, including the test
+// imports of the roots. This ignores errors in test packages.
+func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) []*Package {
+ seen := map[*Package]bool{}
+ all := []*Package{}
+ var walk func(*Package)
+ walk = func(p *Package) {
+ if seen[p] {
+ return
+ }
+ seen[p] = true
+ for _, p1 := range p.Internal.Imports {
+ walk(p1)
+ }
+ all = append(all, p)
+ }
+ walkTest := func(root *Package, path string) {
+ var stk ImportStack
+ p1, err := LoadImport(ctx, opts, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport)
+ if err != nil && root.Error == nil {
+ // Assign error importing the package to the importer.
+ root.Error = err
+ root.Incomplete = true
+ }
+ if p1.Error == nil {
+ walk(p1)
+ }
+ }
+ for _, root := range roots {
+ walk(root)
+ for _, path := range root.TestImports {
+ walkTest(root, path)
+ }
+ for _, path := range root.XTestImports {
+ walkTest(root, path)
+ }
+ }
+ return all
+}
+
+// LoadImportWithFlags loads the package with the given import path and
+// sets tool flags on that package. This function is useful for loading implicit
+// dependencies (like sync/atomic for coverage).
+// TODO(jayconrod): delete this function and set flags automatically
+// in LoadImport instead.
+func LoadImportWithFlags(path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) {
+ p, err := LoadImport(context.TODO(), PackageOpts{}, path, srcDir, parent, stk, importPos, mode)
+ setToolFlags(p)
+ return p, err
+}
+
+// LoadPackageWithFlags is the same as LoadImportWithFlags but without a parent.
+// It is therefore guaranteed not to return an error.
+func LoadPackageWithFlags(path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package {
+ p := LoadPackage(context.TODO(), PackageOpts{}, path, srcDir, stk, importPos, mode)
+ setToolFlags(p)
+ return p
+}
+
+// PackageOpts control the behavior of PackagesAndErrors and other package
+// loading functions.
+type PackageOpts struct {
+ // IgnoreImports controls whether we ignore explicit and implicit imports
+ // when loading packages. Implicit imports are added when supporting Cgo
+ // or SWIG and when linking main packages.
+ IgnoreImports bool
+
+ // ModResolveTests indicates whether calls to the module loader should also
+ // resolve test dependencies of the requested packages.
+ //
+ // If ModResolveTests is true, then the module loader needs to resolve test
+ // dependencies at the same time as packages; otherwise, the test dependencies
+ // of those packages could be missing, and resolving those missing dependencies
+ // could change the selected versions of modules that provide other packages.
+ ModResolveTests bool
+
+ // MainOnly is true if the caller only wants to load main packages.
+ // For a literal argument matching a non-main package, a stub may be returned
+ // with an error. For a non-literal argument (with "..."), non-main packages
+	// are not matched, and their dependencies may not be loaded. A warning
+ // may be printed for non-literal arguments that match no main packages.
+ MainOnly bool
+
+ // AutoVCS controls whether we also load version-control metadata for main packages
+ // when -buildvcs=auto (the default).
+ AutoVCS bool
+
+ // SuppressBuildInfo is true if the caller does not need p.Stale, p.StaleReason, or p.Internal.BuildInfo
+ // to be populated on the package.
+ SuppressBuildInfo bool
+
+ // SuppressEmbedFiles is true if the caller does not need any embed files to be populated on the
+ // package.
+ SuppressEmbedFiles bool
+}
+
+// PackagesAndErrors returns the packages named by the command line arguments
+// 'patterns'. If a named package cannot be loaded, PackagesAndErrors returns
+// a *Package with the Error field describing the failure. If errors are found
+// loading imported packages, the DepsErrors field is set. The Incomplete field
+// may be set as well.
+//
+// To obtain a flat list of packages, use PackageList.
+// To report errors loading packages, use CheckPackageErrors.
+func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) []*Package {
+ ctx, span := trace.StartSpan(ctx, "load.PackagesAndErrors")
+ defer span.Done()
+
+ for _, p := range patterns {
+ // Listing is only supported with all patterns referring to either:
+ // - Files that are part of the same directory.
+ // - Explicit package paths or patterns.
+ if strings.HasSuffix(p, ".go") {
+ // We need to test whether the path is an actual Go file and not a
+ // package path or pattern ending in '.go' (see golang.org/issue/34653).
+ if fi, err := fsys.Stat(p); err == nil && !fi.IsDir() {
+ pkgs := []*Package{GoFilesPackage(ctx, opts, patterns)}
+ setPGOProfilePath(pkgs)
+ return pkgs
+ }
+ }
+ }
+
+ var matches []*search.Match
+ if modload.Init(); cfg.ModulesEnabled {
+ modOpts := modload.PackageOpts{
+ ResolveMissingImports: true,
+ LoadTests: opts.ModResolveTests,
+ SilencePackageErrors: true,
+ }
+ matches, _ = modload.LoadPackages(ctx, modOpts, patterns...)
+ } else {
+ noModRoots := []string{}
+ matches = search.ImportPaths(patterns, noModRoots)
+ }
+
+ var (
+ pkgs []*Package
+ stk ImportStack
+ seenPkg = make(map[*Package]bool)
+ )
+
+ pre := newPreload()
+ defer pre.flush()
+ pre.preloadMatches(ctx, opts, matches)
+
+ for _, m := range matches {
+ for _, pkg := range m.Pkgs {
+ if pkg == "" {
+ panic(fmt.Sprintf("ImportPaths returned empty package for pattern %s", m.Pattern()))
+ }
+ mode := cmdlinePkg
+ if m.IsLiteral() {
+				// Note: do not set the literal flag from m.IsLiteral
+				// unconditionally, because p may match both a literal
+				// and a non-literal pattern.
+ mode |= cmdlinePkgLiteral
+ }
+ p, perr := loadImport(ctx, opts, pre, pkg, base.Cwd(), nil, &stk, nil, mode)
+ if perr != nil {
+ base.Fatalf("internal error: loadImport of %q with nil parent returned an error", pkg)
+ }
+ p.Match = append(p.Match, m.Pattern())
+ if seenPkg[p] {
+ continue
+ }
+ seenPkg[p] = true
+ pkgs = append(pkgs, p)
+ }
+
+ if len(m.Errs) > 0 {
+ // In addition to any packages that were actually resolved from the
+ // pattern, there was some error in resolving the pattern itself.
+ // Report it as a synthetic package.
+ p := new(Package)
+ p.ImportPath = m.Pattern()
+ // Pass an empty ImportStack and nil importPos: the error arose from a pattern, not an import.
+ var stk ImportStack
+ var importPos []token.Position
+ p.setLoadPackageDataError(m.Errs[0], m.Pattern(), &stk, importPos)
+ p.Incomplete = true
+ p.Match = append(p.Match, m.Pattern())
+ p.Internal.CmdlinePkg = true
+ if m.IsLiteral() {
+ p.Internal.CmdlinePkgLiteral = true
+ }
+ pkgs = append(pkgs, p)
+ }
+ }
+
+ if opts.MainOnly {
+ pkgs = mainPackagesOnly(pkgs, matches)
+ }
+
+ // Now that CmdlinePkg is set correctly,
+ // compute the effective flags for all loaded packages
+ // (not just the ones matching the patterns but also
+ // their dependencies).
+ setToolFlags(pkgs...)
+
+ setPGOProfilePath(pkgs)
+
+ return pkgs
+}
+
+// setPGOProfilePath sets the PGO profile path for pkgs.
+// In -pgo=auto mode, it finds the default PGO profile.
+func setPGOProfilePath(pkgs []*Package) {
+ updateBuildInfo := func(p *Package, file string) {
+ // Don't create BuildInfo for packages that didn't already have it.
+ if p.Internal.BuildInfo == nil {
+ return
+ }
+
+ if cfg.BuildTrimpath {
+ appendBuildSetting(p.Internal.BuildInfo, "-pgo", filepath.Base(file))
+ } else {
+ appendBuildSetting(p.Internal.BuildInfo, "-pgo", file)
+ }
+ }
+
+ switch cfg.BuildPGO {
+ case "off":
+ return
+
+ case "auto":
+ // Locate PGO profiles from the main packages, and
+ // attach the profile to the main package and its
+ // dependencies.
+ // If we're building multiple main packages, they may
+ // have different profiles. We may need to split (unshare)
+ // the dependency graph so they can attach different
+ // profiles.
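+		// For example (hypothetical layout), when building ./cmd/a and
+		// ./cmd/b where only cmd/a has a default.pgo file, the profile is
+		// attached to cmd/a and to (copies of) its dependencies, while
+		// cmd/b and its dependencies are left untouched.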
+ for _, p := range pkgs {
+ if p.Name != "main" {
+ continue
+ }
+ pmain := p
+ file := filepath.Join(pmain.Dir, "default.pgo")
+ if _, err := os.Stat(file); err != nil {
+ continue // no profile
+ }
+
+ // Packages already visited. The value should replace
+ // the key, as it may be a forked copy of the original
+ // Package.
+ visited := make(map[*Package]*Package)
+ var split func(p *Package) *Package
+ split = func(p *Package) *Package {
+ if p1 := visited[p]; p1 != nil {
+ return p1
+ }
+
+ if len(pkgs) > 1 && p != pmain {
+ // Make a copy, then attach profile.
+ // No need to copy if there is only one root package (we can
+ // attach profile directly in-place).
+ // Also no need to copy the main package.
+ if p.Internal.PGOProfile != "" {
+ panic("setPGOProfilePath: already have profile")
+ }
+ p1 := new(Package)
+ *p1 = *p
+					// Unalias the Internal.Imports slice, which we're going to
+					// modify. We don't copy the other slices, as we don't change them.
+ p1.Internal.Imports = slices.Clone(p.Internal.Imports)
+ p1.Internal.ForMain = pmain.ImportPath
+ visited[p] = p1
+ p = p1
+ } else {
+ visited[p] = p
+ }
+ p.Internal.PGOProfile = file
+ updateBuildInfo(p, file)
+ // Recurse to dependencies.
+ for i, pp := range p.Internal.Imports {
+ p.Internal.Imports[i] = split(pp)
+ }
+ return p
+ }
+
+ // Replace the package and imports with the PGO version.
+ split(pmain)
+ }
+
+ default:
+ // Profile specified from the command line.
+		// Make it an absolute path, as the compiler runs in various directories.
+ file, err := filepath.Abs(cfg.BuildPGO)
+ if err != nil {
+ base.Fatalf("fail to get absolute path of PGO file %s: %v", cfg.BuildPGO, err)
+ }
+
+ for _, p := range PackageList(pkgs) {
+ p.Internal.PGOProfile = file
+ updateBuildInfo(p, file)
+ }
+ }
+}
+
+// CheckPackageErrors prints errors encountered loading pkgs and their
+// dependencies, then exits with a non-zero status if any errors were found.
+func CheckPackageErrors(pkgs []*Package) {
+ var anyIncomplete bool
+ for _, pkg := range pkgs {
+ if pkg.Incomplete {
+ anyIncomplete = true
+ }
+ }
+ if anyIncomplete {
+ all := PackageList(pkgs)
+ for _, p := range all {
+ if p.Error != nil {
+ base.Errorf("%v", p.Error)
+ }
+ }
+ }
+ base.ExitIfErrors()
+
+ // Check for duplicate loads of the same package.
+ // That should be impossible, but if it does happen then
+ // we end up trying to build the same package twice,
+ // usually in parallel overwriting the same files,
+ // which doesn't work very well.
+ seen := map[string]bool{}
+ reported := map[string]bool{}
+ for _, pkg := range PackageList(pkgs) {
+ // -pgo=auto with multiple main packages can cause a package being
+ // built multiple times (with different profiles).
+ // We check that package import path + profile path is unique.
+ key := pkg.ImportPath
+ if pkg.Internal.PGOProfile != "" {
+ key += " pgo:" + pkg.Internal.PGOProfile
+ }
+ if seen[key] && !reported[key] {
+ reported[key] = true
+ base.Errorf("internal error: duplicate loads of %s", pkg.ImportPath)
+ }
+ seen[key] = true
+ }
+ base.ExitIfErrors()
+}
+
+// mainPackagesOnly filters out non-main packages matched only by arguments
+// containing "..." and returns the remaining main packages.
+//
+// Packages with missing, invalid, or ambiguous names may be treated as
+// possibly-main packages.
+//
+// mainPackagesOnly sets a non-main package's Error field and returns it if it
+// is named by a literal argument.
+//
+// mainPackagesOnly prints warnings for non-literal arguments that only match
+// non-main packages.
+func mainPackagesOnly(pkgs []*Package, matches []*search.Match) []*Package {
+ treatAsMain := map[string]bool{}
+ for _, m := range matches {
+ if m.IsLiteral() {
+ for _, path := range m.Pkgs {
+ treatAsMain[path] = true
+ }
+ }
+ }
+
+ var mains []*Package
+ for _, pkg := range pkgs {
+ if pkg.Name == "main" || (pkg.Name == "" && pkg.Error != nil) {
+ treatAsMain[pkg.ImportPath] = true
+ mains = append(mains, pkg)
+ continue
+ }
+
+ if len(pkg.InvalidGoFiles) > 0 { // TODO(#45999): && pkg.Name == "", but currently go/build sets pkg.Name arbitrarily if it is ambiguous.
+ // The package has (or may have) conflicting names, and we can't easily
+ // tell whether one of them is "main". So assume that it could be, and
+ // report an error for the package.
+ treatAsMain[pkg.ImportPath] = true
+ }
+ if treatAsMain[pkg.ImportPath] {
+ if pkg.Error == nil {
+ pkg.Error = &PackageError{Err: &mainPackageError{importPath: pkg.ImportPath}}
+ pkg.Incomplete = true
+ }
+ mains = append(mains, pkg)
+ }
+ }
+
+ for _, m := range matches {
+ if m.IsLiteral() || len(m.Pkgs) == 0 {
+ continue
+ }
+ foundMain := false
+ for _, path := range m.Pkgs {
+ if treatAsMain[path] {
+ foundMain = true
+ break
+ }
+ }
+ if !foundMain {
+ fmt.Fprintf(os.Stderr, "go: warning: %q matched only non-main packages\n", m.Pattern())
+ }
+ }
+
+ return mains
+}
+
+type mainPackageError struct {
+ importPath string
+}
+
+func (e *mainPackageError) Error() string {
+ return fmt.Sprintf("package %s is not a main package", e.importPath)
+}
+
+func (e *mainPackageError) ImportPath() string {
+ return e.importPath
+}
+
+func setToolFlags(pkgs ...*Package) {
+ for _, p := range PackageList(pkgs) {
+ p.Internal.Asmflags = BuildAsmflags.For(p)
+ p.Internal.Gcflags = BuildGcflags.For(p)
+ p.Internal.Ldflags = BuildLdflags.For(p)
+ p.Internal.Gccgoflags = BuildGccgoflags.For(p)
+ }
+}
+
+// GoFilesPackage creates a package for building a collection of Go files
+// (typically named on the command line). The target is named p.a for
+// package p or named after the first Go file for package main.
+func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Package {
+ modload.Init()
+
+ for _, f := range gofiles {
+ if !strings.HasSuffix(f, ".go") {
+ pkg := new(Package)
+ pkg.Internal.Local = true
+ pkg.Internal.CmdlineFiles = true
+ pkg.Name = f
+ pkg.Error = &PackageError{
+ Err: fmt.Errorf("named files must be .go files: %s", pkg.Name),
+ }
+ pkg.Incomplete = true
+ return pkg
+ }
+ }
+
+ var stk ImportStack
+ ctxt := cfg.BuildContext
+ ctxt.UseAllFiles = true
+
+ // Synthesize fake "directory" that only shows the named files,
+ // to make it look like this is a standard package or
+ // command directory. So that local imports resolve
+ // consistently, the files must all be in the same directory.
+ var dirent []fs.FileInfo
+ var dir string
+ for _, file := range gofiles {
+ fi, err := fsys.Stat(file)
+ if err != nil {
+ base.Fatalf("%s", err)
+ }
+ if fi.IsDir() {
+ base.Fatalf("%s is a directory, should be a Go file", file)
+ }
+ dir1 := filepath.Dir(file)
+ if dir == "" {
+ dir = dir1
+ } else if dir != dir1 {
+ base.Fatalf("named files must all be in one directory; have %s and %s", dir, dir1)
+ }
+ dirent = append(dirent, fi)
+ }
+ ctxt.ReadDir = func(string) ([]fs.FileInfo, error) { return dirent, nil }
+
+ if cfg.ModulesEnabled {
+ modload.ImportFromFiles(ctx, gofiles)
+ }
+
+ var err error
+ if dir == "" {
+ dir = base.Cwd()
+ }
+ dir, err = filepath.Abs(dir)
+ if err != nil {
+ base.Fatalf("%s", err)
+ }
+
+ bp, err := ctxt.ImportDir(dir, 0)
+ pkg := new(Package)
+ pkg.Internal.Local = true
+ pkg.Internal.CmdlineFiles = true
+ pkg.load(ctx, opts, "command-line-arguments", &stk, nil, bp, err)
+ if !cfg.ModulesEnabled {
+ pkg.Internal.LocalPrefix = dirToImportPath(dir)
+ }
+ pkg.ImportPath = "command-line-arguments"
+ pkg.Target = ""
+ pkg.Match = gofiles
+
+ if pkg.Name == "main" {
+ exe := pkg.DefaultExecName() + cfg.ExeSuffix
+
+ if cfg.GOBIN != "" {
+ pkg.Target = filepath.Join(cfg.GOBIN, exe)
+ } else if cfg.ModulesEnabled {
+ pkg.Target = filepath.Join(modload.BinDir(), exe)
+ }
+ }
+
+ if opts.MainOnly && pkg.Name != "main" && pkg.Error == nil {
+ pkg.Error = &PackageError{Err: &mainPackageError{importPath: pkg.ImportPath}}
+ pkg.Incomplete = true
+ }
+ setToolFlags(pkg)
+
+ return pkg
+}
+
+// PackagesAndErrorsOutsideModule is like PackagesAndErrors but runs in
+// module-aware mode and ignores the go.mod file in the current directory or any
+// parent directory, if there is one. This is used in the implementation of 'go
+// install pkg@version' and other commands that support similar forms.
+//
+// modload.ForceUseModules must be true, and modload.RootMode must be NoRoot
+// before calling this function.
+//
+// PackagesAndErrorsOutsideModule imposes several constraints to avoid
+// ambiguity. All arguments must have the same version suffix (not just a suffix
+// that resolves to the same version). They must refer to packages in the same
+// module, which must not be std or cmd. That module is not considered the main
+// module, but its go.mod file (if it has one) must not contain directives that
+// would cause it to be interpreted differently if it were the main module
+// (replace, exclude).
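+//
+// For example (hypothetical module paths), the arguments
+// example.com/cmd/a@v1.2.3 and example.com/cmd/b@v1.2.3 may be passed together,
+// while mixing example.com/cmd/a@v1.2.3 with example.com/cmd/b@latest is rejected,
+// because the version suffixes differ even if they resolve to the same version.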
+func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) {
+ if !modload.ForceUseModules {
+ panic("modload.ForceUseModules must be true")
+ }
+ if modload.RootMode != modload.NoRoot {
+ panic("modload.RootMode must be NoRoot")
+ }
+
+ // Check that the arguments satisfy syntactic constraints.
+ var version string
+ var firstPath string
+ for _, arg := range args {
+ if i := strings.Index(arg, "@"); i >= 0 {
+ firstPath, version = arg[:i], arg[i+1:]
+ if version == "" {
+ return nil, fmt.Errorf("%s: version must not be empty", arg)
+ }
+ break
+ }
+ }
+ patterns := make([]string, len(args))
+ for i, arg := range args {
+ p, found := strings.CutSuffix(arg, "@"+version)
+ if !found {
+ return nil, fmt.Errorf("%s: all arguments must refer to packages in the same module at the same version (@%s)", arg, version)
+ }
+ switch {
+ case build.IsLocalImport(p):
+ return nil, fmt.Errorf("%s: argument must be a package path, not a relative path", arg)
+ case filepath.IsAbs(p):
+ return nil, fmt.Errorf("%s: argument must be a package path, not an absolute path", arg)
+ case search.IsMetaPackage(p):
+ return nil, fmt.Errorf("%s: argument must be a package path, not a meta-package", arg)
+ case pathpkg.Clean(p) != p:
+ return nil, fmt.Errorf("%s: argument must be a clean package path", arg)
+ case !strings.Contains(p, "...") && search.IsStandardImportPath(p) && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, p):
+ return nil, fmt.Errorf("%s: argument must not be a package in the standard library", arg)
+ default:
+ patterns[i] = p
+ }
+ }
+
+ // Query the module providing the first argument, load its go.mod file, and
+ // check that it doesn't contain directives that would cause it to be
+ // interpreted differently if it were the main module.
+ //
+ // If multiple modules match the first argument, accept the longest match
+ // (first result). It's possible this module won't provide packages named by
+ // later arguments, and other modules would. Let's not try to be too
+ // magical though.
+ allowed := modload.CheckAllowed
+ if modload.IsRevisionQuery(firstPath, version) {
+ // Don't check for retractions if a specific revision is requested.
+ allowed = nil
+ }
+ noneSelected := func(path string) (version string) { return "none" }
+ qrs, err := modload.QueryPackages(ctx, patterns[0], version, noneSelected, allowed)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", args[0], err)
+ }
+ rootMod := qrs[0].Mod
+ data, err := modfetch.GoMod(ctx, rootMod.Path, rootMod.Version)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", args[0], err)
+ }
+ f, err := modfile.Parse("go.mod", data, nil)
+ if err != nil {
+ return nil, fmt.Errorf("%s (in %s): %w", args[0], rootMod, err)
+ }
+ directiveFmt := "%s (in %s):\n" +
+ "\tThe go.mod file for the module providing named packages contains one or\n" +
+ "\tmore %s directives. It must not contain directives that would cause\n" +
+ "\tit to be interpreted differently than if it were the main module."
+ if len(f.Replace) > 0 {
+ return nil, fmt.Errorf(directiveFmt, args[0], rootMod, "replace")
+ }
+ if len(f.Exclude) > 0 {
+ return nil, fmt.Errorf(directiveFmt, args[0], rootMod, "exclude")
+ }
+
+ // Since we are in NoRoot mode, the build list initially contains only
+ // the dummy command-line-arguments module. Add a requirement on the
+ // module that provides the packages named on the command line.
+ if _, err := modload.EditBuildList(ctx, nil, []module.Version{rootMod}); err != nil {
+ return nil, fmt.Errorf("%s: %w", args[0], err)
+ }
+
+ // Load packages for all arguments.
+ pkgs := PackagesAndErrors(ctx, opts, patterns)
+
+ // Check that named packages are all provided by the same module.
+ for _, pkg := range pkgs {
+ var pkgErr error
+ if pkg.Module == nil {
+ // Packages in std, cmd, and their vendored dependencies
+ // don't have this field set.
+ pkgErr = fmt.Errorf("package %s not provided by module %s", pkg.ImportPath, rootMod)
+ } else if pkg.Module.Path != rootMod.Path || pkg.Module.Version != rootMod.Version {
+ pkgErr = fmt.Errorf("package %s provided by module %s@%s\n\tAll packages must be provided by the same module (%s).", pkg.ImportPath, pkg.Module.Path, pkg.Module.Version, rootMod)
+ }
+ if pkgErr != nil && pkg.Error == nil {
+ pkg.Error = &PackageError{Err: pkgErr}
+ pkg.Incomplete = true
+ }
+ }
+
+ matchers := make([]func(string) bool, len(patterns))
+ for i, p := range patterns {
+ if strings.Contains(p, "...") {
+ matchers[i] = pkgpattern.MatchPattern(p)
+ }
+ }
+ return pkgs, nil
+}
+
+// EnsureImport ensures that package p imports the named package.
+func EnsureImport(p *Package, pkg string) {
+ for _, d := range p.Internal.Imports {
+ if d.Name == pkg {
+ return
+ }
+ }
+
+ p1, err := LoadImportWithFlags(pkg, p.Dir, p, &ImportStack{}, nil, 0)
+ if err != nil {
+ base.Fatalf("load %s: %v", pkg, err)
+ }
+ if p1.Error != nil {
+ base.Fatalf("load %s: %v", pkg, p1.Error)
+ }
+
+ p.Internal.Imports = append(p.Internal.Imports, p1)
+}
+
+// PrepareForCoverageBuild is a helper invoked for "go install
+// -cover", "go run -cover", and "go build -cover" (but not used by
+// "go test -cover"). It walks through the packages being built (and
+// dependencies) and marks them for coverage instrumentation when
+// appropriate, possibly adding additional dependencies where needed.
+func PrepareForCoverageBuild(pkgs []*Package) {
+ var match []func(*Package) bool
+
+ matchMainModAndCommandLine := func(p *Package) bool {
+ // note that p.Standard implies p.Module == nil below.
+ return p.Internal.CmdlineFiles || p.Internal.CmdlinePkg || (p.Module != nil && p.Module.Main)
+ }
+
+ if len(cfg.BuildCoverPkg) != 0 {
+ // If -coverpkg has been specified, then we instrument only
+ // the specific packages selected by the user-specified pattern(s).
+ match = make([]func(*Package) bool, len(cfg.BuildCoverPkg))
+ for i := range cfg.BuildCoverPkg {
+ match[i] = MatchPackage(cfg.BuildCoverPkg[i], base.Cwd())
+ }
+ } else {
+ // Without -coverpkg, instrument only packages in the main module
+ // (if any), as well as packages/files specifically named on the
+ // command line.
+ match = []func(*Package) bool{matchMainModAndCommandLine}
+ }
+
+ // Visit the packages being built or installed, along with all of
+ // their dependencies, and mark them to be instrumented, taking
+ // into account the matchers we've set up in the sequence above.
+ SelectCoverPackages(PackageList(pkgs), match, "build")
+}
+
+func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op string) []*Package {
+ var warntag string
+ var includeMain bool
+ switch op {
+ case "build":
+ warntag = "built"
+ includeMain = true
+ case "test":
+ warntag = "tested"
+ default:
+ panic("internal error, bad mode passed to SelectCoverPackages")
+ }
+
+ covered := []*Package{}
+ matched := make([]bool, len(match))
+ for _, p := range roots {
+ haveMatch := false
+ for i := range match {
+ if match[i](p) {
+ matched[i] = true
+ haveMatch = true
+ }
+ }
+ if !haveMatch {
+ continue
+ }
+
+ // There is nothing to cover in package unsafe; it comes from
+ // the compiler.
+ if p.ImportPath == "unsafe" {
+ continue
+ }
+
+ // A package which only has test files can't be imported as a
+ // dependency, and at the moment we don't try to instrument it
+ // for coverage. There isn't any technical reason why
+ // *_test.go files couldn't be instrumented, but it probably
+ // doesn't make much sense to lump together coverage metrics
+ // (ex: percent stmts covered) of *_test.go files with
+ // non-test Go code.
+ if len(p.GoFiles)+len(p.CgoFiles) == 0 {
+ continue
+ }
+
+ // Silently ignore attempts to run coverage on sync/atomic
+ // and/or runtime/internal/atomic when using atomic coverage
+ // mode. Atomic coverage mode uses sync/atomic, so we can't
+ // also do coverage on it.
+ if cfg.BuildCoverMode == "atomic" && p.Standard &&
+ (p.ImportPath == "sync/atomic" || p.ImportPath == "runtime/internal/atomic") {
+ continue
+ }
+
+ // If using the race detector, silently ignore attempts to run
+ // coverage on the runtime packages. It will cause the race
+ // detector to be invoked before it has been initialized. Note
+ // the use of "regonly" instead of just ignoring the package
+		// completely; we do this due to the requirements of the
+ // package ID numbering scheme. See the comment in
+ // $GOROOT/src/internal/coverage/pkid.go dealing with
+ // hard-coding of runtime package IDs.
+ cmode := cfg.BuildCoverMode
+ if cfg.BuildRace && p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal")) {
+ cmode = "regonly"
+ }
+
+ // If -coverpkg is in effect and for some reason we don't want
+ // coverage data for the main package, make sure that we at
+ // least process it for registration hooks.
+ if includeMain && p.Name == "main" && !haveMatch {
+ haveMatch = true
+ cmode = "regonly"
+ }
+
+ // Mark package for instrumentation.
+ p.Internal.CoverMode = cmode
+ covered = append(covered, p)
+
+ // Force import of sync/atomic into package if atomic mode.
+ if cfg.BuildCoverMode == "atomic" {
+ EnsureImport(p, "sync/atomic")
+ }
+
+ // Generate covervars if using legacy coverage design.
+ if !cfg.Experiment.CoverageRedesign {
+ var coverFiles []string
+ coverFiles = append(coverFiles, p.GoFiles...)
+ coverFiles = append(coverFiles, p.CgoFiles...)
+ p.Internal.CoverVars = DeclareCoverVars(p, coverFiles...)
+ }
+ }
+
+ // Warn about -coverpkg arguments that are not actually used.
+ for i := range cfg.BuildCoverPkg {
+ if !matched[i] {
+ fmt.Fprintf(os.Stderr, "warning: no packages being %s depend on matches for pattern %s\n", warntag, cfg.BuildCoverPkg[i])
+ }
+ }
+
+ return covered
+}
+
+// DeclareCoverVars attaches the required cover variable names
+// to the files, to be used when annotating the files. This
+// function is only called when using the legacy coverage test/build
+// path (that is, when GOEXPERIMENT=coverageredesign is off).
+func DeclareCoverVars(p *Package, files ...string) map[string]*CoverVar {
+ coverVars := make(map[string]*CoverVar)
+ coverIndex := 0
+ // We create the cover counters as new top-level variables in the package.
+ // We need to avoid collisions with user variables (GoCover_0 is unlikely but still)
+ // and more importantly with dot imports of other covered packages,
+ // so we append 12 hex digits from the SHA-256 of the import path.
+ // The point is only to avoid accidents, not to defeat users determined to
+ // break things.
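+	// For example (hypothetical import path), the files of a package
+	// example.com/foo get variables GoCover_0_<h>, GoCover_1_<h>, ...,
+	// where <h> is the same short hash derived from "example.com/foo".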
+ sum := sha256.Sum256([]byte(p.ImportPath))
+ h := fmt.Sprintf("%x", sum[:6])
+ for _, file := range files {
+ if base.IsTestFile(file) {
+ continue
+ }
+ // For a package that is "local" (imported via ./ import or command line, outside GOPATH),
+ // we record the full path to the file name.
+ // Otherwise we record the import path, then a forward slash, then the file name.
+ // This makes profiles within GOPATH file system-independent.
+ // These names appear in the cmd/cover HTML interface.
+ var longFile string
+ if p.Internal.Local {
+ longFile = filepath.Join(p.Dir, file)
+ } else {
+ longFile = pathpkg.Join(p.ImportPath, file)
+ }
+ coverVars[file] = &CoverVar{
+ File: longFile,
+ Var: fmt.Sprintf("GoCover_%d_%x", coverIndex, h),
+ }
+ coverIndex++
+ }
+ return coverVars
+}
diff --git a/src/cmd/go/internal/load/pkg_test.go b/src/cmd/go/internal/load/pkg_test.go
new file mode 100644
index 0000000..3bcddee
--- /dev/null
+++ b/src/cmd/go/internal/load/pkg_test.go
@@ -0,0 +1,82 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package load
+
+import (
+ "cmd/go/internal/cfg"
+ "testing"
+)
+
+func TestPkgDefaultExecName(t *testing.T) {
+ oldModulesEnabled := cfg.ModulesEnabled
+ defer func() { cfg.ModulesEnabled = oldModulesEnabled }()
+ for _, tt := range []struct {
+ in string
+ files []string
+ wantMod string
+ wantGopath string
+ }{
+ {"example.com/mycmd", []string{}, "mycmd", "mycmd"},
+ {"example.com/mycmd/v0", []string{}, "v0", "v0"},
+ {"example.com/mycmd/v1", []string{}, "v1", "v1"},
+ {"example.com/mycmd/v2", []string{}, "mycmd", "v2"}, // Semantic import versioning, use second last element in module mode.
+ {"example.com/mycmd/v3", []string{}, "mycmd", "v3"}, // Semantic import versioning, use second last element in module mode.
+ {"mycmd", []string{}, "mycmd", "mycmd"},
+ {"mycmd/v0", []string{}, "v0", "v0"},
+ {"mycmd/v1", []string{}, "v1", "v1"},
+ {"mycmd/v2", []string{}, "mycmd", "v2"}, // Semantic import versioning, use second last element in module mode.
+ {"v0", []string{}, "v0", "v0"},
+ {"v1", []string{}, "v1", "v1"},
+ {"v2", []string{}, "v2", "v2"},
+ {"command-line-arguments", []string{"output.go", "foo.go"}, "output", "output"},
+ } {
+ {
+ cfg.ModulesEnabled = true
+ pkg := new(Package)
+ pkg.ImportPath = tt.in
+ pkg.GoFiles = tt.files
+ pkg.Internal.CmdlineFiles = len(tt.files) > 0
+ gotMod := pkg.DefaultExecName()
+ if gotMod != tt.wantMod {
+ t.Errorf("pkg.DefaultExecName with ImportPath = %q in module mode = %v; want %v", tt.in, gotMod, tt.wantMod)
+ }
+ }
+ {
+ cfg.ModulesEnabled = false
+ pkg := new(Package)
+ pkg.ImportPath = tt.in
+ pkg.GoFiles = tt.files
+ pkg.Internal.CmdlineFiles = len(tt.files) > 0
+ gotGopath := pkg.DefaultExecName()
+ if gotGopath != tt.wantGopath {
+ t.Errorf("pkg.DefaultExecName with ImportPath = %q in gopath mode = %v; want %v", tt.in, gotGopath, tt.wantGopath)
+ }
+ }
+ }
+}
+
+func TestIsVersionElement(t *testing.T) {
+ t.Parallel()
+ for _, tt := range []struct {
+ in string
+ want bool
+ }{
+ {"v0", false},
+ {"v05", false},
+ {"v1", false},
+ {"v2", true},
+ {"v3", true},
+ {"v9", true},
+ {"v10", true},
+ {"v11", true},
+ {"v", false},
+ {"vx", false},
+ } {
+ got := isVersionElement(tt.in)
+ if got != tt.want {
+ t.Errorf("isVersionElement(%q) = %v; want %v", tt.in, got, tt.want)
+ }
+ }
+}
diff --git a/src/cmd/go/internal/load/search.go b/src/cmd/go/internal/load/search.go
new file mode 100644
index 0000000..565996a
--- /dev/null
+++ b/src/cmd/go/internal/load/search.go
@@ -0,0 +1,57 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package load
+
+import (
+ "path/filepath"
+ "strings"
+
+ "cmd/go/internal/search"
+ "cmd/internal/pkgpattern"
+)
+
+// MatchPackage(pattern, cwd)(p) reports whether package p matches pattern in the working directory cwd.
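+// For example, "std" matches standard-library packages, "cmd" matches
+// standard packages under cmd/, and a relative pattern such as "./..."
+// is matched against package directories relative to cwd.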
+func MatchPackage(pattern, cwd string) func(*Package) bool {
+ switch {
+ case search.IsRelativePath(pattern):
+ // Split pattern into leading pattern-free directory path
+ // (including all . and .. elements) and the final pattern.
+ var dir string
+ i := strings.Index(pattern, "...")
+ if i < 0 {
+ dir, pattern = pattern, ""
+ } else {
+ j := strings.LastIndex(pattern[:i], "/")
+ dir, pattern = pattern[:j], pattern[j+1:]
+ }
+ dir = filepath.Join(cwd, dir)
+ if pattern == "" {
+ return func(p *Package) bool { return p.Dir == dir }
+ }
+ matchPath := pkgpattern.MatchPattern(pattern)
+ return func(p *Package) bool {
+ // Compute relative path to dir and see if it matches the pattern.
+ rel, err := filepath.Rel(dir, p.Dir)
+ if err != nil {
+ // Cannot make relative - e.g. different drive letters on Windows.
+ return false
+ }
+ rel = filepath.ToSlash(rel)
+ if rel == ".." || strings.HasPrefix(rel, "../") {
+ return false
+ }
+ return matchPath(rel)
+ }
+ case pattern == "all":
+ return func(p *Package) bool { return true }
+ case pattern == "std":
+ return func(p *Package) bool { return p.Standard }
+ case pattern == "cmd":
+ return func(p *Package) bool { return p.Standard && strings.HasPrefix(p.ImportPath, "cmd/") }
+ default:
+ matchPath := pkgpattern.MatchPattern(pattern)
+ return func(p *Package) bool { return matchPath(p.ImportPath) }
+ }
+}
diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go
new file mode 100644
index 0000000..e9ed0d3
--- /dev/null
+++ b/src/cmd/go/internal/load/test.go
@@ -0,0 +1,991 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package load
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/doc"
+ "go/parser"
+ "go/token"
+ "internal/lazytemplate"
+ "path/filepath"
+ "slices"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/str"
+ "cmd/go/internal/trace"
+)
+
+var TestMainDeps = []string{
+ // Dependencies for testmain.
+ "os",
+ "reflect",
+ "testing",
+ "testing/internal/testdeps",
+}
+
+type TestCover struct {
+ Mode string
+ Local bool
+ Pkgs []*Package
+ Paths []string
+ Vars []coverInfo
+}
+
+// TestPackagesFor is like TestPackagesAndErrors but it returns
+// an error if the test packages or their dependencies have errors.
+// Only test packages without errors are returned.
+func TestPackagesFor(ctx context.Context, opts PackageOpts, p *Package, cover *TestCover) (pmain, ptest, pxtest *Package, err error) {
+ pmain, ptest, pxtest = TestPackagesAndErrors(ctx, nil, opts, p, cover)
+ for _, p1 := range []*Package{ptest, pxtest, pmain} {
+ if p1 == nil {
+ // pxtest may be nil
+ continue
+ }
+ if p1.Error != nil {
+ err = p1.Error
+ break
+ }
+ if p1.Incomplete {
+ ps := PackageList([]*Package{p1})
+ for _, p := range ps {
+ if p.Error != nil {
+ err = p.Error
+ break
+ }
+ }
+ break
+ }
+ }
+ if pmain.Error != nil || pmain.Incomplete {
+ pmain = nil
+ }
+ if ptest.Error != nil || ptest.Incomplete {
+ ptest = nil
+ }
+ if pxtest != nil && (pxtest.Error != nil || pxtest.Incomplete) {
+ pxtest = nil
+ }
+ return pmain, ptest, pxtest, err
+}
+
+// TestPackagesAndErrors returns three packages:
+// - pmain, the package main corresponding to the test binary (running tests in ptest and pxtest).
+// - ptest, the package p compiled with added "package p" test files.
+// - pxtest, the result of compiling any "package p_test" (external) test files.
+//
+// If the package has no "package p_test" test files, pxtest will be nil.
+// If the non-test compilation of package p can be reused
+// (for example, if there are no "package p" test files and
+// package p need not be instrumented for coverage or any other reason),
+// then the returned ptest == p.
+//
+// If done is non-nil, TestPackagesAndErrors will finish filling out the returned
+// package structs in a goroutine and call done once finished. The members of the
+// returned packages should not be accessed until done is called.
+//
+// The caller is expected to have checked that len(p.TestGoFiles)+len(p.XTestGoFiles) > 0,
+// or else there's no point in any of this.
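+//
+// For example (hypothetical package), testing example.com/p with both
+// "package p" test files and "package p_test" test files yields:
+// ptest compiling p's sources plus its "package p" test files,
+// pxtest compiling the "package p_test" files, and
+// pmain, the generated package main that imports them and runs the tests.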
+func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p *Package, cover *TestCover) (pmain, ptest, pxtest *Package) {
+ ctx, span := trace.StartSpan(ctx, "load.TestPackagesAndErrors")
+ defer span.Done()
+
+ pre := newPreload()
+ defer pre.flush()
+ allImports := append([]string{}, p.TestImports...)
+ allImports = append(allImports, p.XTestImports...)
+ pre.preloadImports(ctx, opts, allImports, p.Internal.Build)
+
+ var ptestErr, pxtestErr *PackageError
+ var imports, ximports []*Package
+ var stk ImportStack
+ var testEmbed, xtestEmbed map[string][]string
+ var incomplete bool
+ stk.Push(p.ImportPath + " (test)")
+ rawTestImports := str.StringList(p.TestImports)
+ for i, path := range p.TestImports {
+ p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport)
+ if err != nil && ptestErr == nil {
+ ptestErr = err
+ incomplete = true
+ }
+ if p1.Incomplete {
+ incomplete = true
+ }
+ p.TestImports[i] = p1.ImportPath
+ imports = append(imports, p1)
+ }
+ var err error
+ p.TestEmbedFiles, testEmbed, err = resolveEmbed(p.Dir, p.TestEmbedPatterns)
+ if err != nil {
+ ptestErr = &PackageError{
+ ImportStack: stk.Copy(),
+ Err: err,
+ }
+ incomplete = true
+ embedErr := err.(*EmbedError)
+ ptestErr.setPos(p.Internal.Build.TestEmbedPatternPos[embedErr.Pattern])
+ }
+ stk.Pop()
+
+ stk.Push(p.ImportPath + "_test")
+ pxtestNeedsPtest := false
+ var pxtestIncomplete bool
+ rawXTestImports := str.StringList(p.XTestImports)
+ for i, path := range p.XTestImports {
+ p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport)
+ if err != nil && pxtestErr == nil {
+ pxtestErr = err
+ }
+ if p1.Incomplete {
+ pxtestIncomplete = true
+ }
+ if p1.ImportPath == p.ImportPath {
+ pxtestNeedsPtest = true
+ } else {
+ ximports = append(ximports, p1)
+ }
+ p.XTestImports[i] = p1.ImportPath
+ }
+ p.XTestEmbedFiles, xtestEmbed, err = resolveEmbed(p.Dir, p.XTestEmbedPatterns)
+ if err != nil && pxtestErr == nil {
+ pxtestErr = &PackageError{
+ ImportStack: stk.Copy(),
+ Err: err,
+ }
+ embedErr := err.(*EmbedError)
+ pxtestErr.setPos(p.Internal.Build.XTestEmbedPatternPos[embedErr.Pattern])
+ }
+ pxtestIncomplete = pxtestIncomplete || pxtestErr != nil
+ stk.Pop()
+
+ // Test package.
+ if len(p.TestGoFiles) > 0 || p.Name == "main" || cover != nil && cover.Local {
+ ptest = new(Package)
+ *ptest = *p
+ ptest.Error = ptestErr
+ ptest.Incomplete = incomplete
+ ptest.ForTest = p.ImportPath
+ ptest.GoFiles = nil
+ ptest.GoFiles = append(ptest.GoFiles, p.GoFiles...)
+ ptest.GoFiles = append(ptest.GoFiles, p.TestGoFiles...)
+ ptest.Target = ""
+ // Note: The preparation of the vet config requires that common
+ // indexes in ptest.Imports and ptest.Internal.RawImports
+ // all line up (but RawImports can be shorter than the others).
+ // That is, for 0 ≤ i < len(RawImports),
+ // RawImports[i] is the import string in the program text, and
+ // Imports[i] is the expanded import string (vendoring applied or relative path expanded away).
+ // Any implicitly added imports appear in Imports and Internal.Imports
+ // but not RawImports (because they were not in the source code).
+ // We insert TestImports, imports, and rawTestImports at the start of
+ // these lists to preserve the alignment.
+ // Note that p.Internal.Imports may not be aligned with p.Imports/p.Internal.RawImports,
+ // but we insert at the beginning there too just for consistency.
+ ptest.Imports = str.StringList(p.TestImports, p.Imports)
+ ptest.Internal.Imports = append(imports, p.Internal.Imports...)
+ ptest.Internal.RawImports = str.StringList(rawTestImports, p.Internal.RawImports)
+ ptest.Internal.ForceLibrary = true
+ ptest.Internal.BuildInfo = nil
+ ptest.Internal.Build = new(build.Package)
+ *ptest.Internal.Build = *p.Internal.Build
+ m := map[string][]token.Position{}
+ for k, v := range p.Internal.Build.ImportPos {
+ m[k] = append(m[k], v...)
+ }
+ for k, v := range p.Internal.Build.TestImportPos {
+ m[k] = append(m[k], v...)
+ }
+ ptest.Internal.Build.ImportPos = m
+ if testEmbed == nil && len(p.Internal.Embed) > 0 {
+ testEmbed = map[string][]string{}
+ }
+ for k, v := range p.Internal.Embed {
+ testEmbed[k] = v
+ }
+ ptest.Internal.Embed = testEmbed
+ ptest.EmbedFiles = str.StringList(p.EmbedFiles, p.TestEmbedFiles)
+ ptest.Internal.OrigImportPath = p.Internal.OrigImportPath
+ ptest.Internal.PGOProfile = p.Internal.PGOProfile
+ ptest.Internal.Build.Directives = append(slices.Clip(p.Internal.Build.Directives), p.Internal.Build.TestDirectives...)
+ } else {
+ ptest = p
+ }
+
+ // External test package.
+ if len(p.XTestGoFiles) > 0 {
+ pxtest = &Package{
+ PackagePublic: PackagePublic{
+ Name: p.Name + "_test",
+ ImportPath: p.ImportPath + "_test",
+ Root: p.Root,
+ Dir: p.Dir,
+ Goroot: p.Goroot,
+ GoFiles: p.XTestGoFiles,
+ Imports: p.XTestImports,
+ ForTest: p.ImportPath,
+ Module: p.Module,
+ Error: pxtestErr,
+ Incomplete: pxtestIncomplete,
+ EmbedFiles: p.XTestEmbedFiles,
+ },
+ Internal: PackageInternal{
+ LocalPrefix: p.Internal.LocalPrefix,
+ Build: &build.Package{
+ ImportPos: p.Internal.Build.XTestImportPos,
+ Directives: p.Internal.Build.XTestDirectives,
+ },
+ Imports: ximports,
+ RawImports: rawXTestImports,
+
+ Asmflags: p.Internal.Asmflags,
+ Gcflags: p.Internal.Gcflags,
+ Ldflags: p.Internal.Ldflags,
+ Gccgoflags: p.Internal.Gccgoflags,
+ Embed: xtestEmbed,
+ OrigImportPath: p.Internal.OrigImportPath,
+ PGOProfile: p.Internal.PGOProfile,
+ },
+ }
+ if pxtestNeedsPtest {
+ pxtest.Internal.Imports = append(pxtest.Internal.Imports, ptest)
+ }
+ }
+
+ // Arrange for testing.Testing to report true.
+ ldflags := append(p.Internal.Ldflags, "-X", "testing.testBinary=1")
+ gccgoflags := append(p.Internal.Gccgoflags, "-Wl,--defsym,testing.gccgoTestBinary=1")
+
+ // Build main package.
+ pmain = &Package{
+ PackagePublic: PackagePublic{
+ Name: "main",
+ Dir: p.Dir,
+ GoFiles: []string{"_testmain.go"},
+ ImportPath: p.ImportPath + ".test",
+ Root: p.Root,
+ Imports: str.StringList(TestMainDeps),
+ Module: p.Module,
+ },
+ Internal: PackageInternal{
+ Build: &build.Package{Name: "main"},
+ BuildInfo: p.Internal.BuildInfo,
+ Asmflags: p.Internal.Asmflags,
+ Gcflags: p.Internal.Gcflags,
+ Ldflags: ldflags,
+ Gccgoflags: gccgoflags,
+ OrigImportPath: p.Internal.OrigImportPath,
+ PGOProfile: p.Internal.PGOProfile,
+ },
+ }
+
+ pb := p.Internal.Build
+ pmain.DefaultGODEBUG = defaultGODEBUG(pmain, pb.Directives, pb.TestDirectives, pb.XTestDirectives)
+
+ // The generated main also imports testing, regexp, and os.
+ // Also the linker introduces implicit dependencies reported by LinkerDeps.
+ stk.Push("testmain")
+ deps := TestMainDeps // cap==len, so safe for append
+ for _, d := range LinkerDeps(p) {
+ deps = append(deps, d)
+ }
+ for _, dep := range deps {
+ if dep == ptest.ImportPath {
+ pmain.Internal.Imports = append(pmain.Internal.Imports, ptest)
+ } else {
+ p1, err := loadImport(ctx, opts, pre, dep, "", nil, &stk, nil, 0)
+ if err != nil && pmain.Error == nil {
+ pmain.Error = err
+ pmain.Incomplete = true
+ }
+ pmain.Internal.Imports = append(pmain.Internal.Imports, p1)
+ }
+ }
+ stk.Pop()
+
+ parallelizablePart := func() {
+ if cover != nil && cover.Pkgs != nil && !cfg.Experiment.CoverageRedesign {
+ // Add imports, but avoid duplicates.
+ seen := map[*Package]bool{p: true, ptest: true}
+ for _, p1 := range pmain.Internal.Imports {
+ seen[p1] = true
+ }
+ for _, p1 := range cover.Pkgs {
+ if seen[p1] {
+ // Don't add duplicate imports.
+ continue
+ }
+ seen[p1] = true
+ pmain.Internal.Imports = append(pmain.Internal.Imports, p1)
+ }
+ }
+
+ allTestImports := make([]*Package, 0, len(pmain.Internal.Imports)+len(imports)+len(ximports))
+ allTestImports = append(allTestImports, pmain.Internal.Imports...)
+ allTestImports = append(allTestImports, imports...)
+ allTestImports = append(allTestImports, ximports...)
+ setToolFlags(allTestImports...)
+
+ // Do initial scan for metadata needed for writing _testmain.go
+ // Use that metadata to update the list of imports for package main.
+ // The list of imports is used by recompileForTest and by the loop
+ // afterward that gathers t.Cover information.
+ t, err := loadTestFuncs(p)
+ if err != nil && pmain.Error == nil {
+ pmain.setLoadPackageDataError(err, p.ImportPath, &stk, nil)
+ }
+ t.Cover = cover
+ if len(ptest.GoFiles)+len(ptest.CgoFiles) > 0 {
+ pmain.Internal.Imports = append(pmain.Internal.Imports, ptest)
+ pmain.Imports = append(pmain.Imports, ptest.ImportPath)
+ t.ImportTest = true
+ }
+ if pxtest != nil {
+ pmain.Internal.Imports = append(pmain.Internal.Imports, pxtest)
+ pmain.Imports = append(pmain.Imports, pxtest.ImportPath)
+ t.ImportXtest = true
+ }
+
+ // Sort and dedup pmain.Imports.
+ // Only matters for go list -test output.
+ sort.Strings(pmain.Imports)
+ w := 0
+ for _, path := range pmain.Imports {
+ if w == 0 || path != pmain.Imports[w-1] {
+ pmain.Imports[w] = path
+ w++
+ }
+ }
+ pmain.Imports = pmain.Imports[:w]
+ pmain.Internal.RawImports = str.StringList(pmain.Imports)
+
+ // Replace pmain's transitive dependencies with test copies, as necessary.
+ cycleErr := recompileForTest(pmain, p, ptest, pxtest)
+ if cycleErr != nil {
+ ptest.Error = cycleErr
+ ptest.Incomplete = true
+ }
+
+ if cover != nil {
+ if cfg.Experiment.CoverageRedesign {
+ // Here ptest needs to inherit the proper coverage mode (since
+ // it contains p's Go files), whereas pmain contains only
+ // test harness code (don't want to instrument it, and
+ // we don't want coverage hooks in the pkg init).
+ ptest.Internal.CoverMode = p.Internal.CoverMode
+ pmain.Internal.CoverMode = "testmain"
+ }
+ // Should we apply coverage analysis locally, only for this
+ // package and only for this test? Yes, if -cover is on but
+ // -coverpkg has not specified a list of packages for global
+ // coverage.
+ if cover.Local {
+ ptest.Internal.CoverMode = cover.Mode
+
+ if !cfg.Experiment.CoverageRedesign {
+ var coverFiles []string
+ coverFiles = append(coverFiles, ptest.GoFiles...)
+ coverFiles = append(coverFiles, ptest.CgoFiles...)
+ ptest.Internal.CoverVars = DeclareCoverVars(ptest, coverFiles...)
+ }
+ }
+
+ if !cfg.Experiment.CoverageRedesign {
+ for _, cp := range pmain.Internal.Imports {
+ if len(cp.Internal.CoverVars) > 0 {
+ t.Cover.Vars = append(t.Cover.Vars, coverInfo{cp, cp.Internal.CoverVars})
+ }
+ }
+ }
+ }
+
+ data, err := formatTestmain(t)
+ if err != nil && pmain.Error == nil {
+ pmain.Error = &PackageError{Err: err}
+ pmain.Incomplete = true
+ }
+ // Set TestmainGo even if it is empty: the presence of a TestmainGo
+ // indicates that this package is, in fact, a test main.
+ pmain.Internal.TestmainGo = &data
+ }
+
+ if done != nil {
+ go func() {
+ parallelizablePart()
+ done()
+ }()
+ } else {
+ parallelizablePart()
+ }
+
+ return pmain, ptest, pxtest
+}
+
+// recompileForTest copies and replaces certain packages in pmain's dependency
+// graph. This is necessary for two reasons. First, if ptest is different than
+// preal, packages that import the package under test should get ptest instead
+// of preal. This is particularly important if pxtest depends on functionality
+// exposed in test sources in ptest. Second, if there is a main package
+// (other than pmain) anywhere, we need to set p.Internal.ForceLibrary and
+// clear p.Internal.BuildInfo in the test copy to prevent link conflicts.
+// This may happen if both -coverpkg and the command line patterns include
+// multiple main packages.
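+//
+// For example (hypothetical package q), if pxtest imports q and q imports
+// preal, q is replaced by a copy whose import of preal points at ptest,
+// and pxtest's import list is updated to refer to that copy.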
+func recompileForTest(pmain, preal, ptest, pxtest *Package) *PackageError {
+ // The "test copy" of preal is ptest.
+ // For each package that depends on preal, make a "test copy"
+ // that depends on ptest. And so on, up the dependency tree.
+ testCopy := map[*Package]*Package{preal: ptest}
+ for _, p := range PackageList([]*Package{pmain}) {
+ if p == preal {
+ continue
+ }
+ // Copy on write.
+ didSplit := p == pmain || p == pxtest || p == ptest
+ split := func() {
+ if didSplit {
+ return
+ }
+ didSplit = true
+ if testCopy[p] != nil {
+ panic("recompileForTest loop")
+ }
+ p1 := new(Package)
+ testCopy[p] = p1
+ *p1 = *p
+ p1.ForTest = preal.ImportPath
+ p1.Internal.Imports = make([]*Package, len(p.Internal.Imports))
+ copy(p1.Internal.Imports, p.Internal.Imports)
+ p1.Imports = make([]string, len(p.Imports))
+ copy(p1.Imports, p.Imports)
+ p = p1
+ p.Target = ""
+ p.Internal.BuildInfo = nil
+ p.Internal.ForceLibrary = true
+ p.Internal.PGOProfile = preal.Internal.PGOProfile
+ }
+
+ // Update p.Internal.Imports to use test copies.
+ for i, imp := range p.Internal.Imports {
+ if p1 := testCopy[imp]; p1 != nil && p1 != imp {
+ split()
+
+ // If the test dependencies cause a cycle with pmain, this is
+ // where it is introduced.
+ // (There are no cycles in the graph until this assignment occurs.)
+ p.Internal.Imports[i] = p1
+ }
+ }
+
+ // Force main packages the test imports to be built as libraries.
+ // Normal imports of main packages are forbidden by the package loader,
+ // but this can still happen if -coverpkg patterns include main packages:
+ // covered packages are imported by pmain. Linking multiple packages
+ // compiled with '-p main' causes duplicate symbol errors.
+ // See golang.org/issue/30907, golang.org/issue/34114.
+ if p.Name == "main" && p != pmain && p != ptest {
+ split()
+ }
+ // Split and attach PGO information to test dependencies if preal
+ // is built with PGO.
+ if preal.Internal.PGOProfile != "" && p.Internal.PGOProfile == "" {
+ split()
+ }
+ }
+
+ // Do search to find cycle.
+ // importerOf maps each import path to its importer nearest to p.
+ importerOf := map[*Package]*Package{}
+ for _, p := range ptest.Internal.Imports {
+ importerOf[p] = nil
+ }
+
+ // q is a breadth-first queue of packages to search for target.
+ // Every package added to q has a corresponding entry in pathTo.
+ //
+ // We search breadth-first for two reasons:
+ //
+ // 1. We want to report the shortest cycle.
+ //
+ // 2. If p contains multiple cycles, the first cycle we encounter might not
+ // contain target. To ensure termination, we have to break all cycles
+ // other than the first.
+ q := slices.Clip(ptest.Internal.Imports)
+ for len(q) > 0 {
+ p := q[0]
+ q = q[1:]
+ if p == ptest {
+ // The stack is supposed to be in the order x imports y imports z.
+ // We collect in the reverse order: z is imported by y is imported
+ // by x, and then we reverse it.
+ var stk []string
+ for p != nil {
+ stk = append(stk, p.ImportPath)
+ p = importerOf[p]
+ }
+			// Complete the cycle: we set importerOf[p] = nil above to break
+			// the cycle in importerOf; that nil stands for an implicit
+			// importerOf[p] == ptest. Add ptest back here, since the walk
+			// above stopped at nil, so that the reported cycle reads (for
+			// example) package p imports package q imports package r
+			// imports package p.
+ stk = append(stk, ptest.ImportPath)
+ slices.Reverse(stk)
+
+ return &PackageError{
+ ImportStack: stk,
+ Err: errors.New("import cycle not allowed in test"),
+ IsImportCycle: true,
+ }
+ }
+ for _, dep := range p.Internal.Imports {
+ if _, ok := importerOf[dep]; !ok {
+ importerOf[dep] = p
+ q = append(q, dep)
+ }
+ }
+ }
+
+ return nil
+}
+
+// isTestFunc tells whether fn has the type of a testing function. arg
+// specifies the parameter type we look for: B, F, M or T.
+func isTestFunc(fn *ast.FuncDecl, arg string) bool {
+ if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 ||
+ fn.Type.Params.List == nil ||
+ len(fn.Type.Params.List) != 1 ||
+ len(fn.Type.Params.List[0].Names) > 1 {
+ return false
+ }
+ ptr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr)
+ if !ok {
+ return false
+ }
+ // We can't easily check that the type is *testing.M
+ // because we don't know how testing has been imported,
+ // but at least check that it's *M or *something.M.
+ // Same applies for B and T.
+ if name, ok := ptr.X.(*ast.Ident); ok && name.Name == arg {
+ return true
+ }
+ if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == arg {
+ return true
+ }
+ return false
+}
+
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
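+// For example, "Test", "TestFoo", and "Test_foo" are treated as tests,
+// while "Testify" is not, since the character after "Test" is lower case.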
+func isTest(name, prefix string) bool {
+ if !strings.HasPrefix(name, prefix) {
+ return false
+ }
+ if len(name) == len(prefix) { // "Test" is ok
+ return true
+ }
+ rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
+ return !unicode.IsLower(rune)
+}
+
+type coverInfo struct {
+ Package *Package
+ Vars map[string]*CoverVar
+}
+
+// loadTestFuncs returns the testFuncs describing the tests that will be run.
+// The returned testFuncs is always non-nil, even if an error occurred while
+// processing test files.
+func loadTestFuncs(ptest *Package) (*testFuncs, error) {
+ t := &testFuncs{
+ Package: ptest,
+ }
+ var err error
+ for _, file := range ptest.TestGoFiles {
+ if lerr := t.load(filepath.Join(ptest.Dir, file), "_test", &t.ImportTest, &t.NeedTest); lerr != nil && err == nil {
+ err = lerr
+ }
+ }
+ for _, file := range ptest.XTestGoFiles {
+ if lerr := t.load(filepath.Join(ptest.Dir, file), "_xtest", &t.ImportXtest, &t.NeedXtest); lerr != nil && err == nil {
+ err = lerr
+ }
+ }
+ return t, err
+}
+
+// formatTestmain returns the content of the _testmain.go file for t.
+func formatTestmain(t *testFuncs) ([]byte, error) {
+ var buf bytes.Buffer
+ tmpl := testmainTmpl
+ if cfg.Experiment.CoverageRedesign {
+ tmpl = testmainTmplNewCoverage
+ }
+ if err := tmpl.Execute(&buf, t); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+type testFuncs struct {
+ Tests []testFunc
+ Benchmarks []testFunc
+ FuzzTargets []testFunc
+ Examples []testFunc
+ TestMain *testFunc
+ Package *Package
+ ImportTest bool
+ NeedTest bool
+ ImportXtest bool
+ NeedXtest bool
+ Cover *TestCover
+}
+
+// ImportPath returns the import path of the package being tested, if it is within GOPATH.
+// This is printed by the testing package when running benchmarks.
+func (t *testFuncs) ImportPath() string {
+ pkg := t.Package.ImportPath
+ if strings.HasPrefix(pkg, "_/") {
+ return ""
+ }
+ if pkg == "command-line-arguments" {
+ return ""
+ }
+ return pkg
+}
+
+// Covered returns a string describing which packages are being tested for coverage.
+// If the covered package is the same as the tested package, it returns the empty string.
+// Otherwise it is a comma-separated human-readable list of packages beginning with
+// " in", ready for use in the coverage message.
+func (t *testFuncs) Covered() string {
+ if t.Cover == nil || t.Cover.Paths == nil {
+ return ""
+ }
+ return " in " + strings.Join(t.Cover.Paths, ", ")
+}
+
+// Tested returns the name of the package being tested.
+func (t *testFuncs) Tested() string {
+ return t.Package.Name
+}
+
+type testFunc struct {
+ Package string // imported package name (_test or _xtest)
+ Name string // function name
+ Output string // output, for examples
+ Unordered bool // output is allowed to be unordered.
+}
+
+var testFileSet = token.NewFileSet()
+
+func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error {
+ // Pass in the overlaid source if we have an overlay for this file.
+ src, err := fsys.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer src.Close()
+ f, err := parser.ParseFile(testFileSet, filename, src, parser.ParseComments|parser.SkipObjectResolution)
+ if err != nil {
+ return err
+ }
+ for _, d := range f.Decls {
+ n, ok := d.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ if n.Recv != nil {
+ continue
+ }
+ name := n.Name.String()
+ switch {
+ case name == "TestMain":
+ if isTestFunc(n, "T") {
+ t.Tests = append(t.Tests, testFunc{pkg, name, "", false})
+ *doImport, *seen = true, true
+ continue
+ }
+ err := checkTestFunc(n, "M")
+ if err != nil {
+ return err
+ }
+ if t.TestMain != nil {
+ return errors.New("multiple definitions of TestMain")
+ }
+ t.TestMain = &testFunc{pkg, name, "", false}
+ *doImport, *seen = true, true
+ case isTest(name, "Test"):
+ err := checkTestFunc(n, "T")
+ if err != nil {
+ return err
+ }
+ t.Tests = append(t.Tests, testFunc{pkg, name, "", false})
+ *doImport, *seen = true, true
+ case isTest(name, "Benchmark"):
+ err := checkTestFunc(n, "B")
+ if err != nil {
+ return err
+ }
+ t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false})
+ *doImport, *seen = true, true
+ case isTest(name, "Fuzz"):
+ err := checkTestFunc(n, "F")
+ if err != nil {
+ return err
+ }
+ t.FuzzTargets = append(t.FuzzTargets, testFunc{pkg, name, "", false})
+ *doImport, *seen = true, true
+ }
+ }
+ ex := doc.Examples(f)
+ sort.Slice(ex, func(i, j int) bool { return ex[i].Order < ex[j].Order })
+ for _, e := range ex {
+ *doImport = true // import test file whether executed or not
+ if e.Output == "" && !e.EmptyOutput {
+ // Don't run examples with no output.
+ continue
+ }
+ t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered})
+ *seen = true
+ }
+ return nil
+}
+
+func checkTestFunc(fn *ast.FuncDecl, arg string) error {
+ var why string
+ if !isTestFunc(fn, arg) {
+ why = fmt.Sprintf("must be: func %s(%s *testing.%s)", fn.Name.String(), strings.ToLower(arg), arg)
+ }
+ if fn.Type.TypeParams.NumFields() > 0 {
+ why = "test functions cannot have type parameters"
+ }
+ if why != "" {
+ pos := testFileSet.Position(fn.Pos())
+ return fmt.Errorf("%s: wrong signature for %s, %s", pos, fn.Name.String(), why)
+ }
+ return nil
+}
+
+var testmainTmpl = lazytemplate.New("main", `
+// Code generated by 'go test'. DO NOT EDIT.
+
+package main
+
+import (
+ "os"
+{{if .TestMain}}
+ "reflect"
+{{end}}
+ "testing"
+ "testing/internal/testdeps"
+
+{{if .ImportTest}}
+ {{if .NeedTest}}_test{{else}}_{{end}} {{.Package.ImportPath | printf "%q"}}
+{{end}}
+{{if .ImportXtest}}
+ {{if .NeedXtest}}_xtest{{else}}_{{end}} {{.Package.ImportPath | printf "%s_test" | printf "%q"}}
+{{end}}
+{{if .Cover}}
+{{range $i, $p := .Cover.Vars}}
+ _cover{{$i}} {{$p.Package.ImportPath | printf "%q"}}
+{{end}}
+{{end}}
+)
+
+var tests = []testing.InternalTest{
+{{range .Tests}}
+ {"{{.Name}}", {{.Package}}.{{.Name}}},
+{{end}}
+}
+
+var benchmarks = []testing.InternalBenchmark{
+{{range .Benchmarks}}
+ {"{{.Name}}", {{.Package}}.{{.Name}}},
+{{end}}
+}
+
+var fuzzTargets = []testing.InternalFuzzTarget{
+{{range .FuzzTargets}}
+ {"{{.Name}}", {{.Package}}.{{.Name}}},
+{{end}}
+}
+
+var examples = []testing.InternalExample{
+{{range .Examples}}
+ {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}},
+{{end}}
+}
+
+func init() {
+ testdeps.ImportPath = {{.ImportPath | printf "%q"}}
+}
+
+{{if .Cover}}
+
+// Only updated by init functions, so no need for atomicity.
+var (
+ coverCounters = make(map[string][]uint32)
+ coverBlocks = make(map[string][]testing.CoverBlock)
+)
+
+func init() {
+ {{range $i, $p := .Cover.Vars}}
+ {{range $file, $cover := $p.Vars}}
+ coverRegisterFile({{printf "%q" $cover.File}}, _cover{{$i}}.{{$cover.Var}}.Count[:], _cover{{$i}}.{{$cover.Var}}.Pos[:], _cover{{$i}}.{{$cover.Var}}.NumStmt[:])
+ {{end}}
+ {{end}}
+}
+
+func coverRegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) {
+ if 3*len(counter) != len(pos) || len(counter) != len(numStmts) {
+ panic("coverage: mismatched sizes")
+ }
+ if coverCounters[fileName] != nil {
+ // Already registered.
+ return
+ }
+ coverCounters[fileName] = counter
+ block := make([]testing.CoverBlock, len(counter))
+ for i := range counter {
+ block[i] = testing.CoverBlock{
+ Line0: pos[3*i+0],
+ Col0: uint16(pos[3*i+2]),
+ Line1: pos[3*i+1],
+ Col1: uint16(pos[3*i+2]>>16),
+ Stmts: numStmts[i],
+ }
+ }
+ coverBlocks[fileName] = block
+}
+{{end}}
+
+func main() {
+{{if .Cover}}
+ testing.RegisterCover(testing.Cover{
+ Mode: {{printf "%q" .Cover.Mode}},
+ Counters: coverCounters,
+ Blocks: coverBlocks,
+ CoveredPackages: {{printf "%q" .Covered}},
+ })
+{{end}}
+ m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, fuzzTargets, examples)
+{{with .TestMain}}
+ {{.Package}}.{{.Name}}(m)
+ os.Exit(int(reflect.ValueOf(m).Elem().FieldByName("exitCode").Int()))
+{{else}}
+ os.Exit(m.Run())
+{{end}}
+}
+
+`)
+
+var testmainTmplNewCoverage = lazytemplate.New("main", `
+// Code generated by 'go test'. DO NOT EDIT.
+
+package main
+
+import (
+ "os"
+{{if .Cover}}
+ _ "unsafe"
+{{end}}
+{{if .TestMain}}
+ "reflect"
+{{end}}
+ "testing"
+ "testing/internal/testdeps"
+
+{{if .ImportTest}}
+ {{if .NeedTest}}_test{{else}}_{{end}} {{.Package.ImportPath | printf "%q"}}
+{{end}}
+{{if .ImportXtest}}
+ {{if .NeedXtest}}_xtest{{else}}_{{end}} {{.Package.ImportPath | printf "%s_test" | printf "%q"}}
+{{end}}
+)
+
+var tests = []testing.InternalTest{
+{{range .Tests}}
+ {"{{.Name}}", {{.Package}}.{{.Name}}},
+{{end}}
+}
+
+var benchmarks = []testing.InternalBenchmark{
+{{range .Benchmarks}}
+ {"{{.Name}}", {{.Package}}.{{.Name}}},
+{{end}}
+}
+
+var fuzzTargets = []testing.InternalFuzzTarget{
+{{range .FuzzTargets}}
+ {"{{.Name}}", {{.Package}}.{{.Name}}},
+{{end}}
+}
+
+var examples = []testing.InternalExample{
+{{range .Examples}}
+ {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}},
+{{end}}
+}
+
+func init() {
+ testdeps.ImportPath = {{.ImportPath | printf "%q"}}
+}
+
+{{if .Cover}}
+
+//go:linkname runtime_coverage_processCoverTestDir runtime/coverage.processCoverTestDir
+func runtime_coverage_processCoverTestDir(dir string, cfile string, cmode string, cpkgs string) error
+
+//go:linkname testing_registerCover2 testing.registerCover2
+func testing_registerCover2(mode string, tearDown func(coverprofile string, gocoverdir string) (string, error), snapcov func() float64)
+
+//go:linkname runtime_coverage_markProfileEmitted runtime/coverage.markProfileEmitted
+func runtime_coverage_markProfileEmitted(val bool)
+
+//go:linkname runtime_coverage_snapshot runtime/coverage.snapshot
+func runtime_coverage_snapshot() float64
+
+func coverTearDown(coverprofile string, gocoverdir string) (string, error) {
+ var err error
+ if gocoverdir == "" {
+ gocoverdir, err = os.MkdirTemp("", "gocoverdir")
+ if err != nil {
+ return "error setting GOCOVERDIR: bad os.MkdirTemp return", err
+ }
+ defer os.RemoveAll(gocoverdir)
+ }
+ runtime_coverage_markProfileEmitted(true)
+ cmode := {{printf "%q" .Cover.Mode}}
+ if err := runtime_coverage_processCoverTestDir(gocoverdir, coverprofile, cmode, {{printf "%q" .Covered}}); err != nil {
+ return "error generating coverage report", err
+ }
+ return "", nil
+}
+{{end}}
+
+func main() {
+{{if .Cover}}
+ testing_registerCover2({{printf "%q" .Cover.Mode}}, coverTearDown, runtime_coverage_snapshot)
+{{end}}
+ m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, fuzzTargets, examples)
+{{with .TestMain}}
+ {{.Package}}.{{.Name}}(m)
+ os.Exit(int(reflect.ValueOf(m).Elem().FieldByName("exitCode").Int()))
+{{else}}
+ os.Exit(m.Run())
+{{end}}
+}
+
+`)
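For orientation, a hand-written sketch of roughly what the non-coverage template above expands to for a hypothetical package example.com/p with a single in-package test TestAdd and no TestMain, examples, benchmarks, or fuzz targets. The import path and test name are illustrative, and testing/internal/testdeps is only importable from a generated test main, so this is a sketch of the template output rather than a file produced by formatTestmain:

// Sketch (not generated output) of the template above for "example.com/p"
// with one test function TestAdd and no TestMain.
package main

import (
	"os"

	"testing"
	"testing/internal/testdeps"

	_test "example.com/p"
)

var tests = []testing.InternalTest{
	{"TestAdd", _test.TestAdd},
}

var benchmarks = []testing.InternalBenchmark{}
var fuzzTargets = []testing.InternalFuzzTarget{}
var examples = []testing.InternalExample{}

func init() {
	testdeps.ImportPath = "example.com/p"
}

func main() {
	m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, fuzzTargets, examples)
	os.Exit(m.Run())
}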
diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go
new file mode 100644
index 0000000..d373318
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go
@@ -0,0 +1,83 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package filelock provides a platform-independent API for advisory file
+// locking. Calls to functions in this package on platforms that do not support
+// advisory locks will return errors for which IsNotSupported returns true.
+package filelock
+
+import (
+ "errors"
+ "io/fs"
+)
+
+// A File provides the minimal set of methods required to lock an open file.
+// File implementations must be usable as map keys.
+// The usual implementation is *os.File.
+type File interface {
+ // Name returns the name of the file.
+ Name() string
+
+ // Fd returns a valid file descriptor.
+ // (If the File is an *os.File, it must not be closed.)
+ Fd() uintptr
+
+ // Stat returns the FileInfo structure describing file.
+ Stat() (fs.FileInfo, error)
+}
+
+// Lock places an advisory write lock on the file, blocking until it can be
+// locked.
+//
+// If Lock returns nil, no other process will be able to place a read or write
+// lock on the file until this process exits, closes f, or calls Unlock on it.
+//
+// If f's descriptor is already read- or write-locked, the behavior of Lock is
+// unspecified.
+//
+// Closing the file may or may not release the lock promptly. Callers should
+// ensure that Unlock is always called when Lock succeeds.
+func Lock(f File) error {
+ return lock(f, writeLock)
+}
+
+// RLock places an advisory read lock on the file, blocking until it can be locked.
+//
+// If RLock returns nil, no other process will be able to place a write lock on
+// the file until this process exits, closes f, or calls Unlock on it.
+//
+// If f is already read- or write-locked, the behavior of RLock is unspecified.
+//
+// Closing the file may or may not release the lock promptly. Callers should
+// ensure that Unlock is always called if RLock succeeds.
+func RLock(f File) error {
+ return lock(f, readLock)
+}
+
+// Unlock removes an advisory lock placed on f by this process.
+//
+// The caller must not attempt to unlock a file that is not locked.
+func Unlock(f File) error {
+ return unlock(f)
+}
+
+// String returns the name of the function corresponding to lt
+// (Lock, RLock, or Unlock).
+func (lt lockType) String() string {
+ switch lt {
+ case readLock:
+ return "RLock"
+ case writeLock:
+ return "Lock"
+ default:
+ return "Unlock"
+ }
+}
+
+// IsNotSupported returns a boolean indicating whether the error is known to
+// report that a function is not supported (possibly for a specific input).
+// It is satisfied by errors.ErrUnsupported as well as some syscall errors.
+func IsNotSupported(err error) bool {
+ return errors.Is(err, errors.ErrUnsupported)
+}
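A minimal usage sketch of the filelock API above, assuming a caller inside cmd/go (the package is internal to that module); the file name is illustrative:

package main

import (
	"log"
	"os"

	"cmd/go/internal/lockedfile/internal/filelock"
)

func main() {
	// *os.File satisfies the File interface (Name, Fd, Stat).
	f, err := os.OpenFile("example.lock", os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := filelock.Lock(f); err != nil {
		if filelock.IsNotSupported(err) {
			log.Printf("advisory locking unsupported on this platform: %v", err)
			return
		}
		log.Fatal(err)
	}
	// ... critical section protected by the advisory write lock ...
	if err := filelock.Unlock(f); err != nil {
		log.Fatal(err)
	}
}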
diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go
new file mode 100644
index 0000000..8a62839
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go
@@ -0,0 +1,210 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || (solaris && !illumos)
+
+// This code implements the filelock API using POSIX 'fcntl' locks, which attach
+// to an (inode, process) pair rather than a file descriptor. To avoid unlocking
+// files prematurely when the same file is opened through different descriptors,
+// we allow only one read-lock at a time.
+//
+// Most platforms provide some alternative API, such as an 'flock' system call
+// or an F_OFD_SETLK command for 'fcntl', that allows for better concurrency and
+// does not require per-inode bookkeeping in the application.
+
+package filelock
+
+import (
+ "errors"
+ "io"
+ "io/fs"
+ "math/rand"
+ "sync"
+ "syscall"
+ "time"
+)
+
+type lockType int16
+
+const (
+ readLock lockType = syscall.F_RDLCK
+ writeLock lockType = syscall.F_WRLCK
+)
+
+type inode = uint64 // type of syscall.Stat_t.Ino
+
+type inodeLock struct {
+ owner File
+ queue []<-chan File
+}
+
+var (
+ mu sync.Mutex
+ inodes = map[File]inode{}
+ locks = map[inode]inodeLock{}
+)
+
+func lock(f File, lt lockType) (err error) {
+ // POSIX locks apply per inode and process, and the lock for an inode is
+ // released when *any* descriptor for that inode is closed. So we need to
+ // synchronize access to each inode internally, and must serialize lock and
+ // unlock calls that refer to the same inode through different descriptors.
+ fi, err := f.Stat()
+ if err != nil {
+ return err
+ }
+ ino := fi.Sys().(*syscall.Stat_t).Ino
+
+ mu.Lock()
+ if i, dup := inodes[f]; dup && i != ino {
+ mu.Unlock()
+ return &fs.PathError{
+ Op: lt.String(),
+ Path: f.Name(),
+ Err: errors.New("inode for file changed since last Lock or RLock"),
+ }
+ }
+ inodes[f] = ino
+
+ var wait chan File
+ l := locks[ino]
+ if l.owner == f {
+ // This file already owns the lock, but the call may change its lock type.
+ } else if l.owner == nil {
+ // No owner: it's ours now.
+ l.owner = f
+ } else {
+ // Already owned: add a channel to wait on.
+ wait = make(chan File)
+ l.queue = append(l.queue, wait)
+ }
+ locks[ino] = l
+ mu.Unlock()
+
+ if wait != nil {
+ wait <- f
+ }
+
+ // Spurious EDEADLK errors arise on platforms that compute deadlock graphs at
+ // the process, rather than thread, level. Consider processes P and Q, with
+ // threads P.1, P.2, and Q.3. The following trace is NOT a deadlock, but will be
+ // reported as a deadlock on systems that consider only process granularity:
+ //
+ // P.1 locks file A.
+ // Q.3 locks file B.
+ // Q.3 blocks on file A.
+ // P.2 blocks on file B. (This is erroneously reported as a deadlock.)
+ // P.1 unlocks file A.
+ // Q.3 unblocks and locks file A.
+ // Q.3 unlocks files A and B.
+ // P.2 unblocks and locks file B.
+ // P.2 unlocks file B.
+ //
+ // These spurious errors were observed in practice on AIX and Solaris in
+ // cmd/go: see https://golang.org/issue/32817.
+ //
+ // We work around this bug by treating EDEADLK as always spurious. If there
+ // really is a lock-ordering bug between the interacting processes, it will
+ // become a livelock instead, but that's not appreciably worse than if we had
+ // a proper flock implementation (which generally does not even attempt to
+ // diagnose deadlocks).
+ //
+ // In the above example, that changes the trace to:
+ //
+ // P.1 locks file A.
+ // Q.3 locks file B.
+ // Q.3 blocks on file A.
+ // P.2 spuriously fails to lock file B and goes to sleep.
+ // P.1 unlocks file A.
+ // Q.3 unblocks and locks file A.
+ // Q.3 unlocks files A and B.
+ // P.2 wakes up and locks file B.
+ // P.2 unlocks file B.
+ //
+ // We know that the retry loop will not introduce a *spurious* livelock
+ // because, according to the POSIX specification, EDEADLK is only to be
+ // returned when “the lock is blocked by a lock from another process”.
+ // If that process is blocked on some lock that we are holding, then the
+ // resulting livelock is due to a real deadlock (and would manifest as such
+ // when using, for example, the flock implementation of this package).
+ // If the other process is *not* blocked on some other lock that we are
+ // holding, then it will eventually release the requested lock.
+
+ nextSleep := 1 * time.Millisecond
+ const maxSleep = 500 * time.Millisecond
+ for {
+ err = setlkw(f.Fd(), lt)
+ if err != syscall.EDEADLK {
+ break
+ }
+ time.Sleep(nextSleep)
+
+ nextSleep += nextSleep
+ if nextSleep > maxSleep {
+ nextSleep = maxSleep
+ }
+ // Apply 10% jitter to avoid synchronizing collisions when we finally unblock.
+ nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep))
+ }
+
+ if err != nil {
+ unlock(f)
+ return &fs.PathError{
+ Op: lt.String(),
+ Path: f.Name(),
+ Err: err,
+ }
+ }
+
+ return nil
+}
+
+func unlock(f File) error {
+ var owner File
+
+ mu.Lock()
+ ino, ok := inodes[f]
+ if ok {
+ owner = locks[ino].owner
+ }
+ mu.Unlock()
+
+ if owner != f {
+ panic("unlock called on a file that is not locked")
+ }
+
+ err := setlkw(f.Fd(), syscall.F_UNLCK)
+
+ mu.Lock()
+ l := locks[ino]
+ if len(l.queue) == 0 {
+ // No waiters: remove the map entry.
+ delete(locks, ino)
+ } else {
+ // The first waiter is sending us their file now.
+ // Receive it and update the queue.
+ l.owner = <-l.queue[0]
+ l.queue = l.queue[1:]
+ locks[ino] = l
+ }
+ delete(inodes, f)
+ mu.Unlock()
+
+ return err
+}
+
+// setlkw calls FcntlFlock with F_SETLKW for the entire file indicated by fd.
+func setlkw(fd uintptr, lt lockType) error {
+ for {
+ err := syscall.FcntlFlock(fd, syscall.F_SETLKW, &syscall.Flock_t{
+ Type: int16(lt),
+ Whence: io.SeekStart,
+ Start: 0,
+ Len: 0, // All bytes.
+ })
+ if err != syscall.EINTR {
+ return err
+ }
+ }
+}
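The retry loop above amounts to capped exponential backoff with roughly ±5% jitter. The same pattern in isolation, with a placeholder error standing in for the spurious EDEADLK:

package main

import (
	"errors"
	"math/rand"
	"time"
)

// errSpurious stands in for a transient failure such as the spurious EDEADLK
// described above.
var errSpurious = errors.New("spurious failure")

// retry keeps invoking op until it returns something other than errSpurious,
// sleeping between attempts with doubling, capped, jittered delays.
func retry(op func() error) error {
	nextSleep := 1 * time.Millisecond
	const maxSleep = 500 * time.Millisecond
	for {
		err := op()
		if !errors.Is(err, errSpurious) {
			return err // success, or a genuine error
		}
		time.Sleep(nextSleep)

		nextSleep += nextSleep
		if nextSleep > maxSleep {
			nextSleep = maxSleep
		}
		// Apply ~10% jitter so concurrent waiters do not retry in lockstep.
		nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep))
	}
}

func main() {
	attempts := 0
	_ = retry(func() error {
		if attempts++; attempts < 3 {
			return errSpurious
		}
		return nil
	})
}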
diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go
new file mode 100644
index 0000000..b16709e
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go
@@ -0,0 +1,35 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix && !windows
+
+package filelock
+
+import (
+ "errors"
+ "io/fs"
+)
+
+type lockType int8
+
+const (
+ readLock = iota + 1
+ writeLock
+)
+
+func lock(f File, lt lockType) error {
+ return &fs.PathError{
+ Op: lt.String(),
+ Path: f.Name(),
+ Err: errors.ErrUnsupported,
+ }
+}
+
+func unlock(f File) error {
+ return &fs.PathError{
+ Op: "Unlock",
+ Path: f.Name(),
+ Err: errors.ErrUnsupported,
+ }
+}
diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go
new file mode 100644
index 0000000..d32bf06
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go
@@ -0,0 +1,210 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !js && !plan9 && !wasip1
+
+package filelock_test
+
+import (
+ "fmt"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+
+ "cmd/go/internal/lockedfile/internal/filelock"
+)
+
+func lock(t *testing.T, f *os.File) {
+ t.Helper()
+ err := filelock.Lock(f)
+ t.Logf("Lock(fd %d) = %v", f.Fd(), err)
+ if err != nil {
+ t.Fail()
+ }
+}
+
+func rLock(t *testing.T, f *os.File) {
+ t.Helper()
+ err := filelock.RLock(f)
+ t.Logf("RLock(fd %d) = %v", f.Fd(), err)
+ if err != nil {
+ t.Fail()
+ }
+}
+
+func unlock(t *testing.T, f *os.File) {
+ t.Helper()
+ err := filelock.Unlock(f)
+ t.Logf("Unlock(fd %d) = %v", f.Fd(), err)
+ if err != nil {
+ t.Fail()
+ }
+}
+
+func mustTempFile(t *testing.T) (f *os.File, remove func()) {
+ t.Helper()
+
+ base := filepath.Base(t.Name())
+ f, err := os.CreateTemp("", base)
+ if err != nil {
+ t.Fatalf(`os.CreateTemp("", %q) = %v`, base, err)
+ }
+ t.Logf("fd %d = %s", f.Fd(), f.Name())
+
+ return f, func() {
+ f.Close()
+ os.Remove(f.Name())
+ }
+}
+
+func mustOpen(t *testing.T, name string) *os.File {
+ t.Helper()
+
+ f, err := os.OpenFile(name, os.O_RDWR, 0)
+ if err != nil {
+ t.Fatalf("os.Open(%q) = %v", name, err)
+ }
+
+ t.Logf("fd %d = os.Open(%q)", f.Fd(), name)
+ return f
+}
+
+const (
+ quiescent = 10 * time.Millisecond
+ probablyStillBlocked = 10 * time.Second
+)
+
+func mustBlock(t *testing.T, op string, f *os.File) (wait func(*testing.T)) {
+ t.Helper()
+
+ desc := fmt.Sprintf("%s(fd %d)", op, f.Fd())
+
+ done := make(chan struct{})
+ go func() {
+ t.Helper()
+ switch op {
+ case "Lock":
+ lock(t, f)
+ case "RLock":
+ rLock(t, f)
+ default:
+ panic("invalid op: " + op)
+ }
+ close(done)
+ }()
+
+ select {
+ case <-done:
+ t.Fatalf("%s unexpectedly did not block", desc)
+ return nil
+
+ case <-time.After(quiescent):
+ t.Logf("%s is blocked (as expected)", desc)
+ return func(t *testing.T) {
+ t.Helper()
+ select {
+ case <-time.After(probablyStillBlocked):
+ t.Fatalf("%s is unexpectedly still blocked", desc)
+ case <-done:
+ }
+ }
+ }
+}
+
+func TestLockExcludesLock(t *testing.T) {
+ t.Parallel()
+
+ f, remove := mustTempFile(t)
+ defer remove()
+
+ other := mustOpen(t, f.Name())
+ defer other.Close()
+
+ lock(t, f)
+ lockOther := mustBlock(t, "Lock", other)
+ unlock(t, f)
+ lockOther(t)
+ unlock(t, other)
+}
+
+func TestLockExcludesRLock(t *testing.T) {
+ t.Parallel()
+
+ f, remove := mustTempFile(t)
+ defer remove()
+
+ other := mustOpen(t, f.Name())
+ defer other.Close()
+
+ lock(t, f)
+ rLockOther := mustBlock(t, "RLock", other)
+ unlock(t, f)
+ rLockOther(t)
+ unlock(t, other)
+}
+
+func TestRLockExcludesOnlyLock(t *testing.T) {
+ t.Parallel()
+
+ f, remove := mustTempFile(t)
+ defer remove()
+ rLock(t, f)
+
+ f2 := mustOpen(t, f.Name())
+ defer f2.Close()
+
+ doUnlockTF := false
+ switch runtime.GOOS {
+ case "aix", "solaris":
+ // When using POSIX locks (as on Solaris), we can't safely read-lock the
+ // same inode through two different descriptors at the same time: when the
+ // first descriptor is closed, the second descriptor would still be open but
+ // silently unlocked. So a second RLock must block instead of proceeding.
+ lockF2 := mustBlock(t, "RLock", f2)
+ unlock(t, f)
+ lockF2(t)
+ default:
+ rLock(t, f2)
+ doUnlockTF = true
+ }
+
+ other := mustOpen(t, f.Name())
+ defer other.Close()
+ lockOther := mustBlock(t, "Lock", other)
+
+ unlock(t, f2)
+ if doUnlockTF {
+ unlock(t, f)
+ }
+ lockOther(t)
+ unlock(t, other)
+}
+
+func TestLockNotDroppedByExecCommand(t *testing.T) {
+ testenv.MustHaveExec(t)
+
+ f, remove := mustTempFile(t)
+ defer remove()
+
+ lock(t, f)
+
+ other := mustOpen(t, f.Name())
+ defer other.Close()
+
+ // Some kinds of file locks are dropped when a duplicated or forked file
+ // descriptor is unlocked. Double-check that the approach used by os/exec does
+ // not accidentally drop locks.
+ cmd := testenv.Command(t, os.Args[0], "-test.run=^$")
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("exec failed: %v", err)
+ }
+
+ lockOther := mustBlock(t, "Lock", other)
+ unlock(t, f)
+ lockOther(t)
+ unlock(t, other)
+}
diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go
new file mode 100644
index 0000000..6f73b1b
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go
@@ -0,0 +1,40 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd
+
+package filelock
+
+import (
+ "io/fs"
+ "syscall"
+)
+
+type lockType int16
+
+const (
+ readLock lockType = syscall.LOCK_SH
+ writeLock lockType = syscall.LOCK_EX
+)
+
+func lock(f File, lt lockType) (err error) {
+ for {
+ err = syscall.Flock(int(f.Fd()), int(lt))
+ if err != syscall.EINTR {
+ break
+ }
+ }
+ if err != nil {
+ return &fs.PathError{
+ Op: lt.String(),
+ Path: f.Name(),
+ Err: err,
+ }
+ }
+ return nil
+}
+
+func unlock(f File) error {
+ return lock(f, syscall.LOCK_UN)
+}
diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go
new file mode 100644
index 0000000..647ee99
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go
@@ -0,0 +1,57 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package filelock
+
+import (
+ "internal/syscall/windows"
+ "io/fs"
+ "syscall"
+)
+
+type lockType uint32
+
+const (
+ readLock lockType = 0
+ writeLock lockType = windows.LOCKFILE_EXCLUSIVE_LOCK
+)
+
+const (
+ reserved = 0
+ allBytes = ^uint32(0)
+)
+
+func lock(f File, lt lockType) error {
+ // Per https://golang.org/issue/19098, “Programs currently expect the Fd
+ // method to return a handle that uses ordinary synchronous I/O.”
+ // However, LockFileEx still requires an OVERLAPPED structure,
+ // which contains the file offset of the beginning of the lock range.
+ // We want to lock the entire file, so we leave the offset as zero.
+ ol := new(syscall.Overlapped)
+
+ err := windows.LockFileEx(syscall.Handle(f.Fd()), uint32(lt), reserved, allBytes, allBytes, ol)
+ if err != nil {
+ return &fs.PathError{
+ Op: lt.String(),
+ Path: f.Name(),
+ Err: err,
+ }
+ }
+ return nil
+}
+
+func unlock(f File) error {
+ ol := new(syscall.Overlapped)
+ err := windows.UnlockFileEx(syscall.Handle(f.Fd()), reserved, allBytes, allBytes, ol)
+ if err != nil {
+ return &fs.PathError{
+ Op: "Unlock",
+ Path: f.Name(),
+ Err: err,
+ }
+ }
+ return nil
+}
diff --git a/src/cmd/go/internal/lockedfile/lockedfile.go b/src/cmd/go/internal/lockedfile/lockedfile.go
new file mode 100644
index 0000000..82e1a89
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/lockedfile.go
@@ -0,0 +1,187 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lockedfile creates and manipulates files whose contents should only
+// change atomically.
+package lockedfile
+
+import (
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "runtime"
+)
+
+// A File is a locked *os.File.
+//
+// Closing the file releases the lock.
+//
+// If the program exits while a file is locked, the operating system releases
+// the lock but may not do so promptly: callers must ensure that all locked
+// files are closed before exiting.
+type File struct {
+ osFile
+ closed bool
+}
+
+// osFile embeds a *os.File while keeping the pointer itself unexported.
+// (When we close a File, it must be the same file descriptor that we opened!)
+type osFile struct {
+ *os.File
+}
+
+// OpenFile is like os.OpenFile, but returns a locked file.
+// If flag includes os.O_WRONLY or os.O_RDWR, the file is write-locked;
+// otherwise, it is read-locked.
+func OpenFile(name string, flag int, perm fs.FileMode) (*File, error) {
+ var (
+ f = new(File)
+ err error
+ )
+ f.osFile.File, err = openFile(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+
+ // Although the operating system will drop locks for open files when the go
+ // command exits, we want to hold locks for as little time as possible, and we
+ // especially don't want to leave a file locked after we're done with it. Our
+ // Close method is what releases the locks, so use a finalizer to report
+ // missing Close calls on a best-effort basis.
+ runtime.SetFinalizer(f, func(f *File) {
+ panic(fmt.Sprintf("lockedfile.File %s became unreachable without a call to Close", f.Name()))
+ })
+
+ return f, nil
+}
+
+// Open is like os.Open, but returns a read-locked file.
+func Open(name string) (*File, error) {
+ return OpenFile(name, os.O_RDONLY, 0)
+}
+
+// Create is like os.Create, but returns a write-locked file.
+func Create(name string) (*File, error) {
+ return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+}
+
+// Edit creates the named file with mode 0666 (before umask),
+// but does not truncate existing contents.
+//
+// If Edit succeeds, methods on the returned File can be used for I/O.
+// The associated file descriptor has mode O_RDWR and the file is write-locked.
+func Edit(name string) (*File, error) {
+ return OpenFile(name, os.O_RDWR|os.O_CREATE, 0666)
+}
+
+// Close unlocks and closes the underlying file.
+//
+// Close may be called multiple times; all calls after the first will return a
+// non-nil error.
+func (f *File) Close() error {
+ if f.closed {
+ return &fs.PathError{
+ Op: "close",
+ Path: f.Name(),
+ Err: fs.ErrClosed,
+ }
+ }
+ f.closed = true
+
+ err := closeFile(f.osFile.File)
+ runtime.SetFinalizer(f, nil)
+ return err
+}
+
+// Read opens the named file with a read-lock and returns its contents.
+func Read(name string) ([]byte, error) {
+ f, err := Open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return io.ReadAll(f)
+}
+
+// Write opens the named file (creating it with the given permissions if needed),
+// then write-locks it and overwrites it with the given content.
+func Write(name string, content io.Reader, perm fs.FileMode) (err error) {
+ f, err := OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(f, content)
+ if closeErr := f.Close(); err == nil {
+ err = closeErr
+ }
+ return err
+}
+
+// Transform invokes t with the result of reading the named file, with its lock
+// still held.
+//
+// If t returns a nil error, Transform then writes the returned contents back to
+// the file, making a best effort to preserve existing contents on error.
+//
+// t must not modify the slice passed to it.
+func Transform(name string, t func([]byte) ([]byte, error)) (err error) {
+ f, err := Edit(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ old, err := io.ReadAll(f)
+ if err != nil {
+ return err
+ }
+
+ new, err := t(old)
+ if err != nil {
+ return err
+ }
+
+ if len(new) > len(old) {
+ // The overall file size is increasing, so write the tail first: if we're
+ // about to run out of space on the disk, we would rather detect that
+ // failure before we have overwritten the original contents.
+ if _, err := f.WriteAt(new[len(old):], int64(len(old))); err != nil {
+ // Make a best effort to remove the incomplete tail.
+ f.Truncate(int64(len(old)))
+ return err
+ }
+ }
+
+ // We're about to overwrite the old contents. In case of failure, make a best
+ // effort to roll back before we close the file.
+ defer func() {
+ if err != nil {
+ if _, err := f.WriteAt(old, 0); err == nil {
+ f.Truncate(int64(len(old)))
+ }
+ }
+ }()
+
+ if len(new) >= len(old) {
+ if _, err := f.WriteAt(new[:len(old)], 0); err != nil {
+ return err
+ }
+ } else {
+ if _, err := f.WriteAt(new, 0); err != nil {
+ return err
+ }
+ // The overall file size is decreasing, so shrink the file to its final size
+ // after writing. We do this after writing (instead of before) so that if
+ // the write fails, enough filesystem space will likely still be reserved
+ // to contain the previous contents.
+ if err := f.Truncate(int64(len(new))); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
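A short usage sketch of the package-level helpers above, again assuming a caller inside cmd/go; the path and contents are illustrative:

package main

import (
	"fmt"
	"log"
	"strings"

	"cmd/go/internal/lockedfile"
)

func main() {
	const path = "notes.txt"

	// Create (or truncate) the file under a write lock.
	if err := lockedfile.Write(path, strings.NewReader("one\n"), 0666); err != nil {
		log.Fatal(err)
	}

	// Append a line while holding the write lock; Transform preserves the old
	// contents on error. The callback must not modify the slice it is given.
	err := lockedfile.Transform(path, func(old []byte) ([]byte, error) {
		updated := append([]byte(nil), old...)
		return append(updated, "two\n"...), nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the result under a read lock.
	data, err := lockedfile.Read(path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(data))
}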
diff --git a/src/cmd/go/internal/lockedfile/lockedfile_filelock.go b/src/cmd/go/internal/lockedfile/lockedfile_filelock.go
new file mode 100644
index 0000000..1a677a7
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/lockedfile_filelock.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !plan9
+
+package lockedfile
+
+import (
+ "io/fs"
+ "os"
+
+ "cmd/go/internal/lockedfile/internal/filelock"
+)
+
+func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) {
+ // On BSD systems, we could add the O_SHLOCK or O_EXLOCK flag to the OpenFile
+ // call instead of locking separately, but we have to support separate locking
+ // calls for Linux and Windows anyway, so it's simpler to use that approach
+ // consistently.
+
+ f, err := os.OpenFile(name, flag&^os.O_TRUNC, perm)
+ if err != nil {
+ return nil, err
+ }
+
+ switch flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) {
+ case os.O_WRONLY, os.O_RDWR:
+ err = filelock.Lock(f)
+ default:
+ err = filelock.RLock(f)
+ }
+ if err != nil {
+ f.Close()
+ return nil, err
+ }
+
+ if flag&os.O_TRUNC == os.O_TRUNC {
+ if err := f.Truncate(0); err != nil {
+ // The documentation for os.O_TRUNC says “if possible, truncate file when
+ // opened”, but doesn't define “possible” (golang.org/issue/28699).
+ // We'll treat regular files (and symlinks to regular files) as “possible”
+ // and ignore errors for the rest.
+ if fi, statErr := f.Stat(); statErr != nil || fi.Mode().IsRegular() {
+ filelock.Unlock(f)
+ f.Close()
+ return nil, err
+ }
+ }
+ }
+
+ return f, nil
+}
+
+func closeFile(f *os.File) error {
+ // Since locking syscalls operate on file descriptors, we must unlock the file
+ // while the descriptor is still valid — that is, before the file is closed —
+ // and avoid unlocking files that are already closed.
+ err := filelock.Unlock(f)
+
+ if closeErr := f.Close(); err == nil {
+ err = closeErr
+ }
+ return err
+}
diff --git a/src/cmd/go/internal/lockedfile/lockedfile_plan9.go b/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
new file mode 100644
index 0000000..a2ce794
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
@@ -0,0 +1,94 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9
+
+package lockedfile
+
+import (
+ "io/fs"
+ "math/rand"
+ "os"
+ "strings"
+ "time"
+)
+
+// Opening an exclusive-use file returns an error.
+// The expected error strings are:
+//
+// - "open/create -- file is locked" (cwfs, kfs)
+// - "exclusive lock" (fossil)
+// - "exclusive use file already open" (ramfs)
+var lockedErrStrings = [...]string{
+ "file is locked",
+ "exclusive lock",
+ "exclusive use file already open",
+}
+
+// Even though plan9 doesn't support the Lock/RLock/Unlock functions to
+// manipulate already-open files, isLocked is still meaningful: os.OpenFile
+// itself may return errors that indicate that a file with the ModeExclusive bit
+// set is already open.
+func isLocked(err error) bool {
+ s := err.Error()
+
+ for _, frag := range lockedErrStrings {
+ if strings.Contains(s, frag) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) {
+ // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls.
+ //
+ // Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open
+ // for I/O by only one fid at a time across all clients of the server. If a
+ // second open is attempted, it draws an error.”
+ //
+ // So we can try to open a locked file, but if it fails we're on our own to
+ // figure out when it becomes available. We'll use exponential backoff with
+ // some jitter and an arbitrary limit of 500ms.
+
+ // If the file was unpacked or created by some other program, it might not
+ // have the ModeExclusive bit set. Set it before we call OpenFile, so that we
+ // can be confident that a successful OpenFile implies exclusive use.
+ if fi, err := os.Stat(name); err == nil {
+ if fi.Mode()&fs.ModeExclusive == 0 {
+ if err := os.Chmod(name, fi.Mode()|fs.ModeExclusive); err != nil {
+ return nil, err
+ }
+ }
+ } else if !os.IsNotExist(err) {
+ return nil, err
+ }
+
+ nextSleep := 1 * time.Millisecond
+ const maxSleep = 500 * time.Millisecond
+ for {
+ f, err := os.OpenFile(name, flag, perm|fs.ModeExclusive)
+ if err == nil {
+ return f, nil
+ }
+
+ if !isLocked(err) {
+ return nil, err
+ }
+
+ time.Sleep(nextSleep)
+
+ nextSleep += nextSleep
+ if nextSleep > maxSleep {
+ nextSleep = maxSleep
+ }
+ // Apply 10% jitter to avoid synchronizing collisions.
+ nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep))
+ }
+}
+
+func closeFile(f *os.File) error {
+ return f.Close()
+}
diff --git a/src/cmd/go/internal/lockedfile/lockedfile_test.go b/src/cmd/go/internal/lockedfile/lockedfile_test.go
new file mode 100644
index 0000000..8dea8f7
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/lockedfile_test.go
@@ -0,0 +1,286 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// js and wasip1 do not support inter-process file locking.
+//
+//go:build !js && !wasip1
+
+package lockedfile_test
+
+import (
+ "fmt"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "cmd/go/internal/lockedfile"
+)
+
+func mustTempDir(t *testing.T) (dir string, remove func()) {
+ t.Helper()
+
+ dir, err := os.MkdirTemp("", filepath.Base(t.Name()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ return dir, func() { os.RemoveAll(dir) }
+}
+
+const (
+ quiescent = 10 * time.Millisecond
+ probablyStillBlocked = 10 * time.Second
+)
+
+func mustBlock(t *testing.T, desc string, f func()) (wait func(*testing.T)) {
+ t.Helper()
+
+ done := make(chan struct{})
+ go func() {
+ f()
+ close(done)
+ }()
+
+ timer := time.NewTimer(quiescent)
+ defer timer.Stop()
+ select {
+ case <-done:
+ t.Fatalf("%s unexpectedly did not block", desc)
+ case <-timer.C:
+ }
+
+ return func(t *testing.T) {
+ logTimer := time.NewTimer(quiescent)
+ defer logTimer.Stop()
+
+ select {
+ case <-logTimer.C:
+ // We expect the operation to have unblocked by now,
+ // but maybe it's just slow. Write to the test log
+ // in case the test times out, but don't fail it.
+ t.Helper()
+ t.Logf("%s is unexpectedly still blocked after %v", desc, quiescent)
+
+ // Wait for the operation to actually complete, no matter how long it
+ // takes. If the test has deadlocked, this will cause the test to time out
+ // and dump goroutines.
+ <-done
+
+ case <-done:
+ }
+ }
+}
+
+func TestMutexExcludes(t *testing.T) {
+ t.Parallel()
+
+ dir, remove := mustTempDir(t)
+ defer remove()
+
+ path := filepath.Join(dir, "lock")
+
+ mu := lockedfile.MutexAt(path)
+ t.Logf("mu := MutexAt(_)")
+
+ unlock, err := mu.Lock()
+ if err != nil {
+ t.Fatalf("mu.Lock: %v", err)
+ }
+ t.Logf("unlock, _ := mu.Lock()")
+
+ mu2 := lockedfile.MutexAt(mu.Path)
+ t.Logf("mu2 := MutexAt(mu.Path)")
+
+ wait := mustBlock(t, "mu2.Lock()", func() {
+ unlock2, err := mu2.Lock()
+ if err != nil {
+ t.Errorf("mu2.Lock: %v", err)
+ return
+ }
+ t.Logf("unlock2, _ := mu2.Lock()")
+ t.Logf("unlock2()")
+ unlock2()
+ })
+
+ t.Logf("unlock()")
+ unlock()
+ wait(t)
+}
+
+func TestReadWaitsForLock(t *testing.T) {
+ t.Parallel()
+
+ dir, remove := mustTempDir(t)
+ defer remove()
+
+ path := filepath.Join(dir, "timestamp.txt")
+
+ f, err := lockedfile.Create(path)
+ if err != nil {
+ t.Fatalf("Create: %v", err)
+ }
+ defer f.Close()
+
+ const (
+ part1 = "part 1\n"
+ part2 = "part 2\n"
+ )
+ _, err = f.WriteString(part1)
+ if err != nil {
+ t.Fatalf("WriteString: %v", err)
+ }
+ t.Logf("WriteString(%q) = <nil>", part1)
+
+ wait := mustBlock(t, "Read", func() {
+ b, err := lockedfile.Read(path)
+ if err != nil {
+ t.Errorf("Read: %v", err)
+ return
+ }
+
+ const want = part1 + part2
+ got := string(b)
+ if got == want {
+ t.Logf("Read(_) = %q", got)
+ } else {
+ t.Errorf("Read(_) = %q, _; want %q", got, want)
+ }
+ })
+
+ _, err = f.WriteString(part2)
+ if err != nil {
+ t.Errorf("WriteString: %v", err)
+ } else {
+ t.Logf("WriteString(%q) = <nil>", part2)
+ }
+ f.Close()
+
+ wait(t)
+}
+
+func TestCanLockExistingFile(t *testing.T) {
+ t.Parallel()
+
+ dir, remove := mustTempDir(t)
+ defer remove()
+ path := filepath.Join(dir, "existing.txt")
+
+ if err := os.WriteFile(path, []byte("ok"), 0777); err != nil {
+ t.Fatalf("os.WriteFile: %v", err)
+ }
+
+ f, err := lockedfile.Edit(path)
+ if err != nil {
+ t.Fatalf("first Edit: %v", err)
+ }
+
+ wait := mustBlock(t, "Edit", func() {
+ other, err := lockedfile.Edit(path)
+ if err != nil {
+ t.Errorf("second Edit: %v", err)
+ }
+ other.Close()
+ })
+
+ f.Close()
+ wait(t)
+}
+
+// TestSpuriousEDEADLK verifies that the spurious EDEADLK reported in
+// https://golang.org/issue/32817 no longer occurs.
+func TestSpuriousEDEADLK(t *testing.T) {
+ // P.1 locks file A.
+ // Q.3 locks file B.
+ // Q.3 blocks on file A.
+ // P.2 blocks on file B. (Spurious EDEADLK occurs here.)
+ // P.1 unlocks file A.
+ // Q.3 unblocks and locks file A.
+ // Q.3 unlocks files A and B.
+ // P.2 unblocks and locks file B.
+ // P.2 unlocks file B.
+
+ testenv.MustHaveExec(t)
+
+ dirVar := t.Name() + "DIR"
+
+ if dir := os.Getenv(dirVar); dir != "" {
+ // Q.3 locks file B.
+ b, err := lockedfile.Edit(filepath.Join(dir, "B"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer b.Close()
+
+ if err := os.WriteFile(filepath.Join(dir, "locked"), []byte("ok"), 0666); err != nil {
+ t.Fatal(err)
+ }
+
+ // Q.3 blocks on file A.
+ a, err := lockedfile.Edit(filepath.Join(dir, "A"))
+ // Q.3 unblocks and locks file A.
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer a.Close()
+
+ // Q.3 unlocks files A and B.
+ return
+ }
+
+ dir, remove := mustTempDir(t)
+ defer remove()
+
+ // P.1 locks file A.
+ a, err := lockedfile.Edit(filepath.Join(dir, "A"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cmd := testenv.Command(t, os.Args[0], "-test.run="+t.Name())
+ cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir))
+
+ qDone := make(chan struct{})
+ waitQ := mustBlock(t, "Edit A and B in subprocess", func() {
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Errorf("%v:\n%s", err, out)
+ }
+ close(qDone)
+ })
+
+ // Wait until process Q has either failed or locked file B.
+ // Otherwise, P.2 might not block on file B as intended.
+locked:
+ for {
+ if _, err := os.Stat(filepath.Join(dir, "locked")); !os.IsNotExist(err) {
+ break locked
+ }
+ timer := time.NewTimer(1 * time.Millisecond)
+ select {
+ case <-qDone:
+ timer.Stop()
+ break locked
+ case <-timer.C:
+ }
+ }
+
+ waitP2 := mustBlock(t, "Edit B", func() {
+ // P.2 blocks on file B. (Spurious EDEADLK occurs here.)
+ b, err := lockedfile.Edit(filepath.Join(dir, "B"))
+ // P.2 unblocks and locks file B.
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ // P.2 unlocks file B.
+ b.Close()
+ })
+
+ // P.1 unlocks file A.
+ a.Close()
+
+ waitQ(t)
+ waitP2(t)
+}
diff --git a/src/cmd/go/internal/lockedfile/mutex.go b/src/cmd/go/internal/lockedfile/mutex.go
new file mode 100644
index 0000000..180a36c
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/mutex.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lockedfile
+
+import (
+ "fmt"
+ "os"
+ "sync"
+)
+
+// A Mutex provides mutual exclusion within and across processes by locking a
+// well-known file. Such a file generally guards some other part of the
+// filesystem: for example, a Mutex file in a directory might guard access to
+// the entire tree rooted in that directory.
+//
+// Mutex does not implement sync.Locker: unlike a sync.Mutex, a lockedfile.Mutex
+// can fail to lock (e.g. if there is a permission error in the filesystem).
+//
+// Like a sync.Mutex, a Mutex may be included as a field of a larger struct but
+// must not be copied after first use. The Path field must be set before first
+// use and must not be changed thereafter.
+type Mutex struct {
+ Path string // The path to the well-known lock file. Must be non-empty.
+ mu sync.Mutex // A redundant mutex. The race detector doesn't know about file locking, so in tests we may need to lock something that it understands.
+}
+
+// MutexAt returns a new Mutex with Path set to the given non-empty path.
+func MutexAt(path string) *Mutex {
+ if path == "" {
+ panic("lockedfile.MutexAt: path must be non-empty")
+ }
+ return &Mutex{Path: path}
+}
+
+func (mu *Mutex) String() string {
+ return fmt.Sprintf("lockedfile.Mutex(%s)", mu.Path)
+}
+
+// Lock attempts to lock the Mutex.
+//
+// If successful, Lock returns a non-nil unlock function: it is provided as a
+// return-value instead of a separate method to remind the caller to check the
+// accompanying error. (See https://golang.org/issue/20803.)
+func (mu *Mutex) Lock() (unlock func(), err error) {
+ if mu.Path == "" {
+ panic("lockedfile.Mutex: missing Path during Lock")
+ }
+
+ // We could use either O_RDWR or O_WRONLY here. If we choose O_RDWR and the
+ // file at mu.Path is write-only, the call to OpenFile will fail with a
+ // permission error. That's actually what we want: if we add an RLock method
+	// in the future, it should call OpenFile with O_RDONLY and will require the
+	// files to be readable, so we should not let the caller make any
+ // assumptions about Mutex working with write-only files.
+ f, err := OpenFile(mu.Path, os.O_RDWR|os.O_CREATE, 0666)
+ if err != nil {
+ return nil, err
+ }
+ mu.mu.Lock()
+
+ return func() {
+ mu.mu.Unlock()
+ f.Close()
+ }, nil
+}
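A usage sketch for Mutex, assuming a caller inside cmd/go; the lock-file path is illustrative and its directory is assumed to exist:

package main

import (
	"log"
	"path/filepath"

	"cmd/go/internal/lockedfile"
)

func main() {
	// Guard a directory tree with a well-known lock file inside it.
	mu := lockedfile.MutexAt(filepath.Join("cachedir", ".lock"))

	unlock, err := mu.Lock()
	if err != nil {
		log.Fatal(err) // e.g. a filesystem permission error
	}
	defer unlock()

	// ... read or modify the tree guarded by the mutex ...
}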
diff --git a/src/cmd/go/internal/lockedfile/transform_test.go b/src/cmd/go/internal/lockedfile/transform_test.go
new file mode 100644
index 0000000..f8b2802
--- /dev/null
+++ b/src/cmd/go/internal/lockedfile/transform_test.go
@@ -0,0 +1,105 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// js and wasip1 do not support inter-process file locking.
+//
+//go:build !js && !wasip1
+
+package lockedfile_test
+
+import (
+ "bytes"
+ "encoding/binary"
+ "math/rand"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "cmd/go/internal/lockedfile"
+)
+
+func isPowerOf2(x int) bool {
+ return x > 0 && x&(x-1) == 0
+}
+
+func roundDownToPowerOf2(x int) int {
+ if x <= 0 {
+ panic("nonpositive x")
+ }
+ bit := 1
+ for x != bit {
+ x = x &^ bit
+ bit <<= 1
+ }
+ return x
+}
+
+func TestTransform(t *testing.T) {
+ dir, remove := mustTempDir(t)
+ defer remove()
+ path := filepath.Join(dir, "blob.bin")
+
+ const maxChunkWords = 8 << 10
+ buf := make([]byte, 2*maxChunkWords*8)
+ for i := uint64(0); i < 2*maxChunkWords; i++ {
+ binary.LittleEndian.PutUint64(buf[i*8:], i)
+ }
+ if err := lockedfile.Write(path, bytes.NewReader(buf[:8]), 0666); err != nil {
+ t.Fatal(err)
+ }
+
+ var attempts int64 = 128
+ if !testing.Short() {
+ attempts *= 16
+ }
+ const parallel = 32
+
+ var sem = make(chan bool, parallel)
+
+ for n := attempts; n > 0; n-- {
+ sem <- true
+ go func() {
+ defer func() { <-sem }()
+
+ time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond)
+ chunkWords := roundDownToPowerOf2(rand.Intn(maxChunkWords) + 1)
+ offset := rand.Intn(chunkWords)
+
+ err := lockedfile.Transform(path, func(data []byte) (chunk []byte, err error) {
+ chunk = buf[offset*8 : (offset+chunkWords)*8]
+
+ if len(data)&^7 != len(data) {
+ t.Errorf("read %d bytes, but each write is an integer multiple of 8 bytes", len(data))
+ return chunk, nil
+ }
+
+ words := len(data) / 8
+ if !isPowerOf2(words) {
+ t.Errorf("read %d 8-byte words, but each write is a power-of-2 number of words", words)
+ return chunk, nil
+ }
+
+ u := binary.LittleEndian.Uint64(data)
+ for i := 1; i < words; i++ {
+ next := binary.LittleEndian.Uint64(data[i*8:])
+ if next != u+1 {
+ t.Errorf("wrote sequential integers, but read integer out of sequence at offset %d", i)
+ return chunk, nil
+ }
+ u = next
+ }
+
+ return chunk, nil
+ })
+
+ if err != nil {
+ t.Errorf("unexpected error from Transform: %v", err)
+ }
+ }()
+ }
+
+ for n := parallel; n > 0; n-- {
+ sem <- true
+ }
+}
diff --git a/src/cmd/go/internal/mmap/mmap.go b/src/cmd/go/internal/mmap/mmap.go
new file mode 100644
index 0000000..0cad9ca
--- /dev/null
+++ b/src/cmd/go/internal/mmap/mmap.go
@@ -0,0 +1,31 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This package is a lightly modified version of the mmap code
+// in github.com/google/codesearch/index.
+
+// The mmap package provides an abstraction for memory mapping files
+// on different platforms.
+package mmap
+
+import (
+ "os"
+)
+
+// Data is mmap'ed read-only data from a file.
+// The backing file is never closed, so Data
+// remains valid for the lifetime of the process.
+type Data struct {
+ f *os.File
+ Data []byte
+}
+
+// Mmap maps the given file into memory.
+func Mmap(file string) (Data, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return Data{}, err
+ }
+ return mmapFile(f)
+}
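A usage sketch for Mmap, assuming a caller inside cmd/go; the file name is illustrative:

package main

import (
	"fmt"
	"log"

	"cmd/go/internal/mmap"
)

func main() {
	data, err := mmap.Mmap("some-large-index.bin")
	if err != nil {
		log.Fatal(err)
	}
	// data.Data holds the file contents, mapped read-only (or fully read into
	// memory on platforms without mmap support), and stays valid for the
	// lifetime of the process.
	fmt.Println("mapped", len(data.Data), "bytes")
}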
diff --git a/src/cmd/go/internal/mmap/mmap_other.go b/src/cmd/go/internal/mmap/mmap_other.go
new file mode 100644
index 0000000..22e9395
--- /dev/null
+++ b/src/cmd/go/internal/mmap/mmap_other.go
@@ -0,0 +1,21 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (js && wasm) || wasip1 || plan9
+
+package mmap
+
+import (
+ "io"
+ "os"
+)
+
+// mmapFile on other systems doesn't mmap the file. It just reads everything.
+func mmapFile(f *os.File) (Data, error) {
+ b, err := io.ReadAll(f)
+ if err != nil {
+ return Data{}, err
+ }
+ return Data{f, b}, nil
+}
diff --git a/src/cmd/go/internal/mmap/mmap_unix.go b/src/cmd/go/internal/mmap/mmap_unix.go
new file mode 100644
index 0000000..53bcbb9
--- /dev/null
+++ b/src/cmd/go/internal/mmap/mmap_unix.go
@@ -0,0 +1,36 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package mmap
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+ "syscall"
+)
+
+func mmapFile(f *os.File) (Data, error) {
+ st, err := f.Stat()
+ if err != nil {
+ return Data{}, err
+ }
+ size := st.Size()
+ pagesize := int64(os.Getpagesize())
+ if int64(int(size+(pagesize-1))) != size+(pagesize-1) {
+ return Data{}, fmt.Errorf("%s: too large for mmap", f.Name())
+ }
+ n := int(size)
+ if n == 0 {
+ return Data{f, nil}, nil
+ }
+ mmapLength := int(((size + pagesize - 1) / pagesize) * pagesize) // round up to page size
+ data, err := syscall.Mmap(int(f.Fd()), 0, mmapLength, syscall.PROT_READ, syscall.MAP_SHARED)
+ if err != nil {
+ return Data{}, &fs.PathError{Op: "mmap", Path: f.Name(), Err: err}
+ }
+ return Data{f, data[:n]}, nil
+}
diff --git a/src/cmd/go/internal/mmap/mmap_windows.go b/src/cmd/go/internal/mmap/mmap_windows.go
new file mode 100644
index 0000000..1cf62fe
--- /dev/null
+++ b/src/cmd/go/internal/mmap/mmap_windows.go
@@ -0,0 +1,41 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mmap
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+
+ "internal/syscall/windows"
+)
+
+func mmapFile(f *os.File) (Data, error) {
+ st, err := f.Stat()
+ if err != nil {
+ return Data{}, err
+ }
+ size := st.Size()
+ if size == 0 {
+ return Data{f, nil}, nil
+ }
+ h, err := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, 0, 0, nil)
+ if err != nil {
+ return Data{}, fmt.Errorf("CreateFileMapping %s: %w", f.Name(), err)
+ }
+
+ addr, err := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0)
+ if err != nil {
+ return Data{}, fmt.Errorf("MapViewOfFile %s: %w", f.Name(), err)
+ }
+ var info windows.MemoryBasicInformation
+ err = windows.VirtualQuery(addr, &info, unsafe.Sizeof(info))
+ if err != nil {
+ return Data{}, fmt.Errorf("VirtualQuery %s: %w", f.Name(), err)
+ }
+ data := unsafe.Slice((*byte)(unsafe.Pointer(addr)), int(info.RegionSize))
+ return Data{f, data}, nil
+}
diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go
new file mode 100644
index 0000000..373acce
--- /dev/null
+++ b/src/cmd/go/internal/modcmd/download.go
@@ -0,0 +1,389 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modcmd
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "os"
+ "runtime"
+ "sync"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modfetch/codehost"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/toolchain"
+
+ "golang.org/x/mod/module"
+)
+
+var cmdDownload = &base.Command{
+ UsageLine: "go mod download [-x] [-json] [-reuse=old.json] [modules]",
+ Short: "download modules to local cache",
+ Long: `
+Download downloads the named modules, which can be module patterns selecting
+dependencies of the main module or module queries of the form path@version.
+
+With no arguments, download applies to the modules needed to build and test
+the packages in the main module: the modules explicitly required by the main
+module if it is at 'go 1.17' or higher, or all transitively-required modules
+if at 'go 1.16' or lower.
+
+The go command will automatically download modules as needed during ordinary
+execution. The "go mod download" command is useful mainly for pre-filling
+the local cache or computing the answers for a Go module proxy.
+
+By default, download writes nothing to standard output. It may print progress
+messages and errors to standard error.
+
+The -json flag causes download to print a sequence of JSON objects
+to standard output, describing each downloaded module (or failure),
+corresponding to this Go struct:
+
+ type Module struct {
+ Path string // module path
+ Query string // version query corresponding to this version
+ Version string // module version
+ Error string // error loading module
+ Info string // absolute path to cached .info file
+ GoMod string // absolute path to cached .mod file
+ Zip string // absolute path to cached .zip file
+ Dir string // absolute path to cached source root directory
+ Sum string // checksum for path, version (as in go.sum)
+ GoModSum string // checksum for go.mod (as in go.sum)
+ Origin any // provenance of module
+ Reuse bool // reuse of old module info is safe
+ }
+
+The -reuse flag accepts the name of a file containing the JSON output of a
+previous 'go mod download -json' invocation. The go command may use this
+file to determine that a module is unchanged since the previous invocation
+and avoid redownloading it. Modules that are not redownloaded will be marked
+in the new output by setting the Reuse field to true. Normally the module
+cache provides this kind of reuse automatically; the -reuse flag can be
+useful on systems that do not preserve the module cache.
+
+The -x flag causes download to print the commands download executes.
+
+See https://golang.org/ref/mod#go-mod-download for more about 'go mod download'.
+
+See https://golang.org/ref/mod#version-queries for more about version queries.
+ `,
+}
+
+var (
+ downloadJSON = cmdDownload.Flag.Bool("json", false, "")
+ downloadReuse = cmdDownload.Flag.String("reuse", "", "")
+)
+
+func init() {
+ cmdDownload.Run = runDownload // break init cycle
+
+ // TODO(jayconrod): https://golang.org/issue/35849 Apply -x to other 'go mod' commands.
+ cmdDownload.Flag.BoolVar(&cfg.BuildX, "x", false, "")
+ base.AddChdirFlag(&cmdDownload.Flag)
+ base.AddModCommonFlags(&cmdDownload.Flag)
+}
+
+// A ModuleJSON describes the result of go mod download.
+type ModuleJSON struct {
+ Path string `json:",omitempty"`
+ Version string `json:",omitempty"`
+ Query string `json:",omitempty"`
+ Error string `json:",omitempty"`
+ Info string `json:",omitempty"`
+ GoMod string `json:",omitempty"`
+ Zip string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ Sum string `json:",omitempty"`
+ GoModSum string `json:",omitempty"`
+
+ Origin *codehost.Origin `json:",omitempty"`
+ Reuse bool `json:",omitempty"`
+}
+
+func runDownload(ctx context.Context, cmd *base.Command, args []string) {
+ modload.InitWorkfile()
+
+ // Check whether modules are enabled and whether we're in a module.
+ modload.ForceUseModules = true
+ modload.ExplicitWriteGoMod = true
+ haveExplicitArgs := len(args) > 0
+
+ if modload.HasModRoot() || modload.WorkFilePath() != "" {
+ modload.LoadModFile(ctx) // to fill MainModules
+
+ if haveExplicitArgs {
+ for _, mainModule := range modload.MainModules.Versions() {
+ targetAtUpgrade := mainModule.Path + "@upgrade"
+ targetAtPatch := mainModule.Path + "@patch"
+ for _, arg := range args {
+ switch arg {
+ case mainModule.Path, targetAtUpgrade, targetAtPatch:
+ os.Stderr.WriteString("go: skipping download of " + arg + " that resolves to the main module\n")
+ }
+ }
+ }
+ } else if modload.WorkFilePath() != "" {
+ // TODO(#44435): Think about what the correct query is to download the
+ // right set of modules. Also see code review comment at
+ // https://go-review.googlesource.com/c/go/+/359794/comments/ce946a80_6cf53992.
+ args = []string{"all"}
+ } else {
+ mainModule := modload.MainModules.Versions()[0]
+ modFile := modload.MainModules.ModFile(mainModule)
+ if modFile.Go == nil || gover.Compare(modFile.Go.Version, gover.ExplicitIndirectVersion) < 0 {
+ if len(modFile.Require) > 0 {
+ args = []string{"all"}
+ }
+ } else {
+ // As of Go 1.17, the go.mod file explicitly requires every module
+ // that provides any package imported by the main module.
+ // 'go mod download' is typically run before testing packages in the
+ // main module, so by default we shouldn't download the others
+ // (which are presumed irrelevant to the packages in the main module).
+ // See https://golang.org/issue/44435.
+ //
+ // However, we also need to load the full module graph, to ensure that
+ // we have downloaded enough of the module graph to run 'go list all',
+ // 'go mod graph', and similar commands.
+ _, err := modload.LoadModGraph(ctx, "")
+ if err != nil {
+ // TODO(#64008): call base.Fatalf instead of toolchain.SwitchOrFatal
+ // here, since we can only reach this point with an outdated toolchain
+ // if the go.mod file is inconsistent.
+ toolchain.SwitchOrFatal(ctx, err)
+ }
+
+ for _, m := range modFile.Require {
+ args = append(args, m.Mod.Path)
+ }
+ }
+ }
+ }
+
+ if len(args) == 0 {
+ if modload.HasModRoot() {
+ os.Stderr.WriteString("go: no module dependencies to download\n")
+ } else {
+ base.Errorf("go: no modules specified (see 'go help mod download')")
+ }
+ base.Exit()
+ }
+
+ if *downloadReuse != "" && modload.HasModRoot() {
+ base.Fatalf("go mod download -reuse cannot be used inside a module")
+ }
+
+ var mods []*ModuleJSON
+ type token struct{}
+ sem := make(chan token, runtime.GOMAXPROCS(0))
+ infos, infosErr := modload.ListModules(ctx, args, 0, *downloadReuse)
+
+ // There is a bit of a chicken-and-egg problem here: ideally we need to know
+ // which Go version to switch to in order to download the requested modules,
+ // but until we have downloaded a module's go.mod file, the GoVersion field
+ // of its info struct is not yet populated.
+ //
+ // We also need to be careful to only print the info for each module once
+ // if the -json flag is set.
+ //
+ // In theory we could go through each module in the list, attempt to download
+ // its go.mod file, and record the maximum version (either from the file or
+ // from the resulting TooNewError), all before we try the actual full download
+ // of each module.
+ //
+ // For now, we go ahead and try all the downloads and collect the errors, and
+ // if any download failed due to a TooNewError, we switch toolchains and try
+ // again. Any downloads that already succeeded will still be in cache.
+ // That won't give optimal concurrency (we'll do two batches of concurrent
+ // downloads instead of all in one batch), and it might add a little overhead
+ // to look up the downloads from the first batch in the module cache when
+ // we see them again in the second batch. On the other hand, it's way simpler
+ // to implement, and not really any more expensive if the user is requesting
+ // no explicit arguments (their go.mod file should already list an appropriate
+ // toolchain version) or only one module (as is used by the Go Module Proxy).
+
+ if infosErr != nil {
+ var sw toolchain.Switcher
+ sw.Error(infosErr)
+ if sw.NeedSwitch() {
+ sw.Switch(ctx)
+ }
+ // Otherwise, wait to report infosErr until after we have downloaded
+ // what we can.
+ }
+
+ if !haveExplicitArgs && modload.WorkFilePath() == "" {
+ // 'go mod download' is sometimes run without arguments to pre-populate the
+ // module cache. In modules that aren't at go 1.17 or higher, it may fetch
+ // modules that aren't needed to build packages in the main module. This is
+ // usually not intended, so don't save sums for downloaded modules
+ // (golang.org/issue/45332). We do still fix inconsistencies in go.mod
+ // though.
+ //
+ // TODO(#64008): In the future, report an error if go.mod or go.sum need to
+ // be updated after loading the build list. This may require setting
+ // the mode to "mod" or "readonly" depending on haveExplicitArgs.
+ if err := modload.WriteGoMod(ctx, modload.WriteOpts{}); err != nil {
+ base.Fatal(err)
+ }
+ }
+
+ var downloadErrs sync.Map
+ for _, info := range infos {
+ if info.Replace != nil {
+ info = info.Replace
+ }
+ if info.Version == "" && info.Error == nil {
+ // main module or module replaced with file path.
+ // Nothing to download.
+ continue
+ }
+ m := &ModuleJSON{
+ Path: info.Path,
+ Version: info.Version,
+ Query: info.Query,
+ Reuse: info.Reuse,
+ Origin: info.Origin,
+ }
+ mods = append(mods, m)
+ if info.Error != nil {
+ m.Error = info.Error.Err
+ continue
+ }
+ if m.Reuse {
+ continue
+ }
+ sem <- token{}
+ go func() {
+ err := DownloadModule(ctx, m)
+ if err != nil {
+ downloadErrs.Store(m, err)
+ m.Error = err.Error()
+ }
+ <-sem
+ }()
+ }
+
+ // Fill semaphore channel to wait for goroutines to finish.
+ for n := cap(sem); n > 0; n-- {
+ sem <- token{}
+ }
+
+ // If there were explicit arguments
+ // (like 'go mod download golang.org/x/tools@latest'),
+ // check whether we need to upgrade the toolchain in order to download them.
+ //
+ // (If invoked without arguments, we expect the module graph to already
+ // be tidy and the go.mod file to declare a 'go' version that satisfies
+ // transitive requirements. If that invariant holds, then we should have
+ // already upgraded when we loaded the module graph, and should not need
+ // an additional check here. See https://go.dev/issue/45551.)
+ //
+ // We also allow upgrades if in a workspace because in workspace mode
+ // with no arguments we download the module pattern "all",
+ // which may include dependencies that are normally pruned out
+ // of the individual modules in the workspace.
+ if haveExplicitArgs || modload.WorkFilePath() != "" {
+ var sw toolchain.Switcher
+ // Add errors to the Switcher in deterministic order so that they will be
+ // logged deterministically.
+ for _, m := range mods {
+ if erri, ok := downloadErrs.Load(m); ok {
+ sw.Error(erri.(error))
+ }
+ }
+ // Only call sw.Switch if it will actually switch.
+ // Otherwise, we may want to write the errors as JSON
+ // (instead of using base.Error as sw.Switch would),
+ // and we may also have other errors to report from the
+ // initial infos returned by ListModules.
+ if sw.NeedSwitch() {
+ sw.Switch(ctx)
+ }
+ }
+
+ if *downloadJSON {
+ for _, m := range mods {
+ b, err := json.MarshalIndent(m, "", "\t")
+ if err != nil {
+ base.Fatal(err)
+ }
+ os.Stdout.Write(append(b, '\n'))
+ if m.Error != "" {
+ base.SetExitStatus(1)
+ }
+ }
+ } else {
+ for _, m := range mods {
+ if m.Error != "" {
+ base.Error(errors.New(m.Error))
+ }
+ }
+ base.ExitIfErrors()
+ }
+
+ // If there were explicit arguments, update go.mod and especially go.sum.
+ // 'go mod download mod@version' is a useful way to add a sum without using
+ // 'go get mod@version', which may have other side effects. We print this in
+ // some error message hints.
+ //
+ // If we're in workspace mode, update go.work.sum with checksums for all of
+ // the modules we downloaded that aren't already recorded. Since a requirement
+ // in one module may upgrade a dependency of another, we can't be sure that
+ // the import graph matches the import graph of any given module in isolation,
+ // so we may end up needing to load packages from modules that wouldn't
+ // otherwise be relevant.
+ //
+ // TODO(#44435): If we adjust the set of modules downloaded in workspace mode,
+ // we may also need to adjust the logic for saving checksums here.
+ //
+ // Don't save sums for 'go mod download' without arguments unless we're in
+ // workspace mode; see comment above.
+ if haveExplicitArgs || modload.WorkFilePath() != "" {
+ if err := modload.WriteGoMod(ctx, modload.WriteOpts{}); err != nil {
+ base.Error(err)
+ }
+ }
+
+ // If there was an error matching some of the requested packages, emit it now
+ // (after we've written the checksums for the modules that were downloaded
+ // successfully).
+ if infosErr != nil {
+ base.Error(infosErr)
+ }
+}
+
+// DownloadModule runs 'go mod download' for m.Path@m.Version,
+// recording the resulting file paths in m and returning any error encountered.
+func DownloadModule(ctx context.Context, m *ModuleJSON) error {
+ var err error
+ _, file, err := modfetch.InfoFile(ctx, m.Path, m.Version)
+ if err != nil {
+ return err
+ }
+ m.Info = file
+ m.GoMod, err = modfetch.GoModFile(ctx, m.Path, m.Version)
+ if err != nil {
+ return err
+ }
+ m.GoModSum, err = modfetch.GoModSum(ctx, m.Path, m.Version)
+ if err != nil {
+ return err
+ }
+ mod := module.Version{Path: m.Path, Version: m.Version}
+ m.Zip, err = modfetch.DownloadZip(ctx, mod)
+ if err != nil {
+ return err
+ }
+ m.Sum = modfetch.Sum(ctx, mod)
+ m.Dir, err = modfetch.Download(ctx, mod)
+ return err
+}
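
A minimal sketch (not part of this change) of consuming the stream of JSON objects that 'go mod download -json' writes to stdout. The struct below mirrors a subset of the ModuleJSON fields above; the subprocess invocation and the chosen fields are illustrative assumptions.

	package main

	import (
		"encoding/json"
		"fmt"
		"io"
		"log"
		"os/exec"
	)

	// downloadedModule mirrors a subset of the ModuleJSON fields above.
	type downloadedModule struct {
		Path    string
		Version string
		Error   string
		Dir     string
		Sum     string
	}

	func main() {
		cmd := exec.Command("go", "mod", "download", "-json")
		out, err := cmd.StdoutPipe()
		if err != nil {
			log.Fatal(err)
		}
		if err := cmd.Start(); err != nil {
			log.Fatal(err)
		}
		// The command prints one JSON object per module, so decode until EOF.
		dec := json.NewDecoder(out)
		for {
			var m downloadedModule
			if err := dec.Decode(&m); err == io.EOF {
				break
			} else if err != nil {
				log.Fatal(err)
			}
			if m.Error != "" {
				fmt.Printf("download failed: %s@%s: %s\n", m.Path, m.Version, m.Error)
				continue
			}
			fmt.Printf("%s@%s -> %s (%s)\n", m.Path, m.Version, m.Dir, m.Sum)
		}
		// Wait returns a non-nil error when the command exits with status 1,
		// e.g. because some module failed to download.
		if err := cmd.Wait(); err != nil {
			log.Println(err)
		}
	}
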
diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go
new file mode 100644
index 0000000..96bd608
--- /dev/null
+++ b/src/cmd/go/internal/modcmd/edit.go
@@ -0,0 +1,545 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go mod edit
+
+package modcmd
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modload"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+var cmdEdit = &base.Command{
+ UsageLine: "go mod edit [editing flags] [-fmt|-print|-json] [go.mod]",
+ Short: "edit go.mod from tools or scripts",
+ Long: `
+Edit provides a command-line interface for editing go.mod,
+for use primarily by tools or scripts. It reads only go.mod;
+it does not look up information about the modules involved.
+By default, edit reads and writes the go.mod file of the main module,
+but a different target file can be specified after the editing flags.
+
+The editing flags specify a sequence of editing operations.
+
+The -fmt flag reformats the go.mod file without making other changes.
+This reformatting is also implied by any other modifications that use or
+rewrite the go.mod file. The only time this flag is needed is if no other
+flags are specified, as in 'go mod edit -fmt'.
+
+The -module flag changes the module's path (the go.mod file's module line).
+
+The -require=path@version and -droprequire=path flags
+add and drop a requirement on the given module path and version.
+Note that -require overrides any existing requirements on path.
+These flags are mainly for tools that understand the module graph.
+Users should prefer 'go get path@version' or 'go get path@none',
+which make other go.mod adjustments as needed to satisfy
+constraints imposed by other modules.
+
+The -exclude=path@version and -dropexclude=path@version flags
+add and drop an exclusion for the given module path and version.
+Note that -exclude=path@version is a no-op if that exclusion already exists.
+
+The -replace=old[@v]=new[@v] flag adds a replacement of the given
+module path and version pair. If the @v in old@v is omitted, a
+replacement without a version on the left side is added, which applies
+to all versions of the old module path. If the @v in new@v is omitted,
+the new path should be a local module root directory, not a module
+path. Note that -replace overrides any redundant replacements for old[@v],
+so omitting @v will drop existing replacements for specific versions.
+
+The -dropreplace=old[@v] flag drops a replacement of the given
+module path and version pair. If the @v is omitted, a replacement without
+a version on the left side is dropped.
+
+The -retract=version and -dropretract=version flags add and drop a
+retraction on the given version. The version may be a single version
+like "v1.2.3" or a closed interval like "[v1.1.0,v1.1.9]". Note that
+-retract=version is a no-op if that retraction already exists.
+
+The -require, -droprequire, -exclude, -dropexclude, -replace,
+-dropreplace, -retract, and -dropretract editing flags may be repeated,
+and the changes are applied in the order given.
+
+The -go=version flag sets the expected Go language version.
+
+The -toolchain=name flag sets the Go toolchain to use.
+
+The -print flag prints the final go.mod in its text format instead of
+writing it back to go.mod.
+
+The -json flag prints the final go.mod file in JSON format instead of
+writing it back to go.mod. The JSON output corresponds to these Go types:
+
+ type Module struct {
+ Path string
+ Version string
+ }
+
+ type GoMod struct {
+ Module ModPath
+ Go string
+ Toolchain string
+ Require []Require
+ Exclude []Module
+ Replace []Replace
+ Retract []Retract
+ }
+
+ type ModPath struct {
+ Path string
+ Deprecated string
+ }
+
+ type Require struct {
+ Path string
+ Version string
+ Indirect bool
+ }
+
+ type Replace struct {
+ Old Module
+ New Module
+ }
+
+ type Retract struct {
+ Low string
+ High string
+ Rationale string
+ }
+
+Retract entries representing a single version (not an interval) will have
+the "Low" and "High" fields set to the same value.
+
+Note that this only describes the go.mod file itself, not other modules
+referred to indirectly. For the full set of modules available to a build,
+use 'go list -m -json all'.
+
+Edit also provides the -C, -n, and -x build flags.
+
+See https://golang.org/ref/mod#go-mod-edit for more about 'go mod edit'.
+ `,
+}
+
+var (
+ editFmt = cmdEdit.Flag.Bool("fmt", false, "")
+ editGo = cmdEdit.Flag.String("go", "", "")
+ editToolchain = cmdEdit.Flag.String("toolchain", "", "")
+ editJSON = cmdEdit.Flag.Bool("json", false, "")
+ editPrint = cmdEdit.Flag.Bool("print", false, "")
+ editModule = cmdEdit.Flag.String("module", "", "")
+ edits []func(*modfile.File) // edits specified in flags
+)
+
+type flagFunc func(string)
+
+func (f flagFunc) String() string { return "" }
+func (f flagFunc) Set(s string) error { f(s); return nil }
+
+func init() {
+ cmdEdit.Run = runEdit // break init cycle
+
+ cmdEdit.Flag.Var(flagFunc(flagRequire), "require", "")
+ cmdEdit.Flag.Var(flagFunc(flagDropRequire), "droprequire", "")
+ cmdEdit.Flag.Var(flagFunc(flagExclude), "exclude", "")
+ cmdEdit.Flag.Var(flagFunc(flagDropReplace), "dropreplace", "")
+ cmdEdit.Flag.Var(flagFunc(flagReplace), "replace", "")
+ cmdEdit.Flag.Var(flagFunc(flagDropExclude), "dropexclude", "")
+ cmdEdit.Flag.Var(flagFunc(flagRetract), "retract", "")
+ cmdEdit.Flag.Var(flagFunc(flagDropRetract), "dropretract", "")
+
+ base.AddBuildFlagsNX(&cmdEdit.Flag)
+ base.AddChdirFlag(&cmdEdit.Flag)
+ base.AddModCommonFlags(&cmdEdit.Flag)
+}
+
+func runEdit(ctx context.Context, cmd *base.Command, args []string) {
+ anyFlags := *editModule != "" ||
+ *editGo != "" ||
+ *editToolchain != "" ||
+ *editJSON ||
+ *editPrint ||
+ *editFmt ||
+ len(edits) > 0
+
+ if !anyFlags {
+ base.Fatalf("go: no flags specified (see 'go help mod edit').")
+ }
+
+ if *editJSON && *editPrint {
+ base.Fatalf("go: cannot use both -json and -print")
+ }
+
+ if len(args) > 1 {
+ base.Fatalf("go: too many arguments")
+ }
+ var gomod string
+ if len(args) == 1 {
+ gomod = args[0]
+ } else {
+ gomod = modload.ModFilePath()
+ }
+
+ if *editModule != "" {
+ if err := module.CheckImportPath(*editModule); err != nil {
+ base.Fatalf("go: invalid -module: %v", err)
+ }
+ }
+
+ if *editGo != "" && *editGo != "none" {
+ if !modfile.GoVersionRE.MatchString(*editGo) {
+ base.Fatalf(`go mod: invalid -go option; expecting something like "-go %s"`, gover.Local())
+ }
+ }
+ if *editToolchain != "" && *editToolchain != "none" {
+ if !modfile.ToolchainRE.MatchString(*editToolchain) {
+ base.Fatalf(`go mod: invalid -toolchain option; expecting something like "-toolchain go%s"`, gover.Local())
+ }
+ }
+
+ data, err := lockedfile.Read(gomod)
+ if err != nil {
+ base.Fatal(err)
+ }
+
+ modFile, err := modfile.Parse(gomod, data, nil)
+ if err != nil {
+ base.Fatalf("go: errors parsing %s:\n%s", base.ShortPath(gomod), err)
+ }
+
+ if *editModule != "" {
+ modFile.AddModuleStmt(*editModule)
+ }
+
+ if *editGo == "none" {
+ modFile.DropGoStmt()
+ } else if *editGo != "" {
+ if err := modFile.AddGoStmt(*editGo); err != nil {
+ base.Fatalf("go: internal error: %v", err)
+ }
+ }
+ if *editToolchain == "none" {
+ modFile.DropToolchainStmt()
+ } else if *editToolchain != "" {
+ if err := modFile.AddToolchainStmt(*editToolchain); err != nil {
+ base.Fatalf("go: internal error: %v", err)
+ }
+ }
+
+ if len(edits) > 0 {
+ for _, edit := range edits {
+ edit(modFile)
+ }
+ }
+ modFile.SortBlocks()
+ modFile.Cleanup() // clean file after edits
+
+ if *editJSON {
+ editPrintJSON(modFile)
+ return
+ }
+
+ out, err := modFile.Format()
+ if err != nil {
+ base.Fatal(err)
+ }
+
+ if *editPrint {
+ os.Stdout.Write(out)
+ return
+ }
+
+ // Make a best-effort attempt to acquire the side lock, only to exclude
+ // previous versions of the 'go' command from making simultaneous edits.
+ if unlock, err := modfetch.SideLock(ctx); err == nil {
+ defer unlock()
+ }
+
+ err = lockedfile.Transform(gomod, func(lockedData []byte) ([]byte, error) {
+ if !bytes.Equal(lockedData, data) {
+ return nil, errors.New("go.mod changed during editing; not overwriting")
+ }
+ return out, nil
+ })
+ if err != nil {
+ base.Fatal(err)
+ }
+}
+
+// parsePathVersion parses -flag=arg expecting arg to be path@version.
+func parsePathVersion(flag, arg string) (path, version string) {
+ before, after, found := strings.Cut(arg, "@")
+ if !found {
+ base.Fatalf("go: -%s=%s: need path@version", flag, arg)
+ }
+ path, version = strings.TrimSpace(before), strings.TrimSpace(after)
+ if err := module.CheckImportPath(path); err != nil {
+ base.Fatalf("go: -%s=%s: invalid path: %v", flag, arg, err)
+ }
+
+ if !allowedVersionArg(version) {
+ base.Fatalf("go: -%s=%s: invalid version %q", flag, arg, version)
+ }
+
+ return path, version
+}
+
+// parsePath parses -flag=arg expecting arg to be path (not path@version).
+func parsePath(flag, arg string) (path string) {
+ if strings.Contains(arg, "@") {
+ base.Fatalf("go: -%s=%s: need just path, not path@version", flag, arg)
+ }
+ path = arg
+ if err := module.CheckImportPath(path); err != nil {
+ base.Fatalf("go: -%s=%s: invalid path: %v", flag, arg, err)
+ }
+ return path
+}
+
+// parsePathVersionOptional parses path[@version], using adj to
+// describe any errors.
+func parsePathVersionOptional(adj, arg string, allowDirPath bool) (path, version string, err error) {
+ before, after, found := strings.Cut(arg, "@")
+ if !found {
+ path = arg
+ } else {
+ path, version = strings.TrimSpace(before), strings.TrimSpace(after)
+ }
+ if err := module.CheckImportPath(path); err != nil {
+ if !allowDirPath || !modfile.IsDirectoryPath(path) {
+ return path, version, fmt.Errorf("invalid %s path: %v", adj, err)
+ }
+ }
+ if path != arg && !allowedVersionArg(version) {
+ return path, version, fmt.Errorf("invalid %s version: %q", adj, version)
+ }
+ return path, version, nil
+}
+
+// parseVersionInterval parses a single version like "v1.2.3" or a closed
+// interval like "[v1.2.3,v1.4.5]". Note that a single version has the same
+// representation as an interval with equal upper and lower bounds: both
+// Low and High are set.
+func parseVersionInterval(arg string) (modfile.VersionInterval, error) {
+ if !strings.HasPrefix(arg, "[") {
+ if !allowedVersionArg(arg) {
+ return modfile.VersionInterval{}, fmt.Errorf("invalid version: %q", arg)
+ }
+ return modfile.VersionInterval{Low: arg, High: arg}, nil
+ }
+ if !strings.HasSuffix(arg, "]") {
+ return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg)
+ }
+ s := arg[1 : len(arg)-1]
+ before, after, found := strings.Cut(s, ",")
+ if !found {
+ return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg)
+ }
+ low := strings.TrimSpace(before)
+ high := strings.TrimSpace(after)
+ if !allowedVersionArg(low) || !allowedVersionArg(high) {
+ return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg)
+ }
+ return modfile.VersionInterval{Low: low, High: high}, nil
+}
+
+// allowedVersionArg returns whether a token may be used as a version in go.mod.
+// We don't call modfile.CheckPathVersion, because that insists on versions
+// being in semver form, but here we want to allow versions like "master" or
+// "1234abcdef", which the go command will resolve the next time it runs (or
+// during -fix). Even so, we need to make sure the version is a valid token.
+func allowedVersionArg(arg string) bool {
+ return !modfile.MustQuote(arg)
+}
+
+// flagRequire implements the -require flag.
+func flagRequire(arg string) {
+ path, version := parsePathVersion("require", arg)
+ edits = append(edits, func(f *modfile.File) {
+ if err := f.AddRequire(path, version); err != nil {
+ base.Fatalf("go: -require=%s: %v", arg, err)
+ }
+ })
+}
+
+// flagDropRequire implements the -droprequire flag.
+func flagDropRequire(arg string) {
+ path := parsePath("droprequire", arg)
+ edits = append(edits, func(f *modfile.File) {
+ if err := f.DropRequire(path); err != nil {
+ base.Fatalf("go: -droprequire=%s: %v", arg, err)
+ }
+ })
+}
+
+// flagExclude implements the -exclude flag.
+func flagExclude(arg string) {
+ path, version := parsePathVersion("exclude", arg)
+ edits = append(edits, func(f *modfile.File) {
+ if err := f.AddExclude(path, version); err != nil {
+ base.Fatalf("go: -exclude=%s: %v", arg, err)
+ }
+ })
+}
+
+// flagDropExclude implements the -dropexclude flag.
+func flagDropExclude(arg string) {
+ path, version := parsePathVersion("dropexclude", arg)
+ edits = append(edits, func(f *modfile.File) {
+ if err := f.DropExclude(path, version); err != nil {
+ base.Fatalf("go: -dropexclude=%s: %v", arg, err)
+ }
+ })
+}
+
+// flagReplace implements the -replace flag.
+func flagReplace(arg string) {
+ before, after, found := strings.Cut(arg, "=")
+ if !found {
+ base.Fatalf("go: -replace=%s: need old[@v]=new[@w] (missing =)", arg)
+ }
+ old, new := strings.TrimSpace(before), strings.TrimSpace(after)
+ if strings.HasPrefix(new, ">") {
+ base.Fatalf("go: -replace=%s: separator between old and new is =, not =>", arg)
+ }
+ oldPath, oldVersion, err := parsePathVersionOptional("old", old, false)
+ if err != nil {
+ base.Fatalf("go: -replace=%s: %v", arg, err)
+ }
+ newPath, newVersion, err := parsePathVersionOptional("new", new, true)
+ if err != nil {
+ base.Fatalf("go: -replace=%s: %v", arg, err)
+ }
+ if newPath == new && !modfile.IsDirectoryPath(new) {
+ base.Fatalf("go: -replace=%s: unversioned new path must be local directory", arg)
+ }
+
+ edits = append(edits, func(f *modfile.File) {
+ if err := f.AddReplace(oldPath, oldVersion, newPath, newVersion); err != nil {
+ base.Fatalf("go: -replace=%s: %v", arg, err)
+ }
+ })
+}
+
+// flagDropReplace implements the -dropreplace flag.
+func flagDropReplace(arg string) {
+ path, version, err := parsePathVersionOptional("old", arg, true)
+ if err != nil {
+ base.Fatalf("go: -dropreplace=%s: %v", arg, err)
+ }
+ edits = append(edits, func(f *modfile.File) {
+ if err := f.DropReplace(path, version); err != nil {
+ base.Fatalf("go: -dropreplace=%s: %v", arg, err)
+ }
+ })
+}
+
+// flagRetract implements the -retract flag.
+func flagRetract(arg string) {
+ vi, err := parseVersionInterval(arg)
+ if err != nil {
+ base.Fatalf("go: -retract=%s: %v", arg, err)
+ }
+ edits = append(edits, func(f *modfile.File) {
+ if err := f.AddRetract(vi, ""); err != nil {
+ base.Fatalf("go: -retract=%s: %v", arg, err)
+ }
+ })
+}
+
+// flagDropRetract implements the -dropretract flag.
+func flagDropRetract(arg string) {
+ vi, err := parseVersionInterval(arg)
+ if err != nil {
+ base.Fatalf("go: -dropretract=%s: %v", arg, err)
+ }
+ edits = append(edits, func(f *modfile.File) {
+ if err := f.DropRetract(vi); err != nil {
+ base.Fatalf("go: -dropretract=%s: %v", arg, err)
+ }
+ })
+}
+
+// fileJSON is the -json output data structure.
+type fileJSON struct {
+ Module editModuleJSON
+ Go string `json:",omitempty"`
+ Toolchain string `json:",omitempty"`
+ Require []requireJSON
+ Exclude []module.Version
+ Replace []replaceJSON
+ Retract []retractJSON
+}
+
+type editModuleJSON struct {
+ Path string
+ Deprecated string `json:",omitempty"`
+}
+
+type requireJSON struct {
+ Path string
+ Version string `json:",omitempty"`
+ Indirect bool `json:",omitempty"`
+}
+
+type replaceJSON struct {
+ Old module.Version
+ New module.Version
+}
+
+type retractJSON struct {
+ Low string `json:",omitempty"`
+ High string `json:",omitempty"`
+ Rationale string `json:",omitempty"`
+}
+
+// editPrintJSON prints the -json output.
+func editPrintJSON(modFile *modfile.File) {
+ var f fileJSON
+ if modFile.Module != nil {
+ f.Module = editModuleJSON{
+ Path: modFile.Module.Mod.Path,
+ Deprecated: modFile.Module.Deprecated,
+ }
+ }
+ if modFile.Go != nil {
+ f.Go = modFile.Go.Version
+ }
+ if modFile.Toolchain != nil {
+ f.Toolchain = modFile.Toolchain.Name
+ }
+ for _, r := range modFile.Require {
+ f.Require = append(f.Require, requireJSON{Path: r.Mod.Path, Version: r.Mod.Version, Indirect: r.Indirect})
+ }
+ for _, x := range modFile.Exclude {
+ f.Exclude = append(f.Exclude, x.Mod)
+ }
+ for _, r := range modFile.Replace {
+ f.Replace = append(f.Replace, replaceJSON{r.Old, r.New})
+ }
+ for _, r := range modFile.Retract {
+ f.Retract = append(f.Retract, retractJSON{r.Low, r.High, r.Rationale})
+ }
+ data, err := json.MarshalIndent(&f, "", "\t")
+ if err != nil {
+ base.Fatalf("go: internal error: %v", err)
+ }
+ data = append(data, '\n')
+ os.Stdout.Write(data)
+}
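
A minimal sketch of consuming 'go mod edit -json' output using the GoMod, Module, Require, and Replace shapes documented in the help text above. The anonymous struct layout and the printed fields are illustrative assumptions.

	package main

	import (
		"encoding/json"
		"fmt"
		"log"
		"os/exec"
	)

	// mod mirrors the Module type from the help text above.
	type mod struct {
		Path    string
		Version string
	}

	// goMod mirrors a subset of the GoMod type from the help text above.
	type goMod struct {
		Module    struct{ Path, Deprecated string }
		Go        string
		Toolchain string
		Require   []struct {
			Path     string
			Version  string
			Indirect bool
		}
		Exclude []mod
		Replace []struct{ Old, New mod }
	}

	func main() {
		out, err := exec.Command("go", "mod", "edit", "-json").Output()
		if err != nil {
			log.Fatal(err)
		}
		var f goMod
		if err := json.Unmarshal(out, &f); err != nil {
			log.Fatal(err)
		}
		fmt.Println("module:", f.Module.Path, "go:", f.Go)
		for _, r := range f.Require {
			fmt.Println("require:", r.Path, r.Version, "indirect:", r.Indirect)
		}
	}
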
diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go
new file mode 100644
index 0000000..172c1dd
--- /dev/null
+++ b/src/cmd/go/internal/modcmd/graph.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go mod graph
+
+package modcmd
+
+import (
+ "bufio"
+ "context"
+ "os"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/toolchain"
+
+ "golang.org/x/mod/module"
+)
+
+var cmdGraph = &base.Command{
+ UsageLine: "go mod graph [-go=version] [-x]",
+ Short: "print module requirement graph",
+ Long: `
+Graph prints the module requirement graph (with replacements applied)
+in text form. Each line in the output has two space-separated fields: a module
+and one of its requirements. Each module is identified as a string of the form
+path@version, except for the main module, which has no @version suffix.
+
+The -go flag causes graph to report the module graph as loaded by the
+given Go version, instead of the version indicated by the 'go' directive
+in the go.mod file.
+
+The -x flag causes graph to print the commands graph executes.
+
+See https://golang.org/ref/mod#go-mod-graph for more about 'go mod graph'.
+ `,
+ Run: runGraph,
+}
+
+var (
+ graphGo goVersionFlag
+)
+
+func init() {
+ cmdGraph.Flag.Var(&graphGo, "go", "")
+ cmdGraph.Flag.BoolVar(&cfg.BuildX, "x", false, "")
+ base.AddChdirFlag(&cmdGraph.Flag)
+ base.AddModCommonFlags(&cmdGraph.Flag)
+}
+
+func runGraph(ctx context.Context, cmd *base.Command, args []string) {
+ modload.InitWorkfile()
+
+ if len(args) > 0 {
+ base.Fatalf("go: 'go mod graph' accepts no arguments")
+ }
+ modload.ForceUseModules = true
+ modload.RootMode = modload.NeedRoot
+
+ goVersion := graphGo.String()
+ if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 {
+ toolchain.SwitchOrFatal(ctx, &gover.TooNewError{
+ What: "-go flag",
+ GoVersion: goVersion,
+ })
+ }
+
+ mg, err := modload.LoadModGraph(ctx, goVersion)
+ if err != nil {
+ base.Fatal(err)
+ }
+
+ w := bufio.NewWriter(os.Stdout)
+ defer w.Flush()
+
+ format := func(m module.Version) {
+ w.WriteString(m.Path)
+ if m.Version != "" {
+ w.WriteString("@")
+ w.WriteString(m.Version)
+ }
+ }
+
+ mg.WalkBreadthFirst(func(m module.Version) {
+ reqs, _ := mg.RequiredBy(m)
+ for _, r := range reqs {
+ format(m)
+ w.WriteByte(' ')
+ format(r)
+ w.WriteByte('\n')
+ }
+ })
+}
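
A minimal sketch of parsing 'go mod graph' output as described above: each line holds two space-separated fields, a module and one of its requirements, with the main module carrying no @version suffix. The adjacency-map aggregation is an illustrative assumption.

	package main

	import (
		"bufio"
		"bytes"
		"fmt"
		"log"
		"os/exec"
		"strings"
	)

	func main() {
		out, err := exec.Command("go", "mod", "graph").Output()
		if err != nil {
			log.Fatal(err)
		}
		requires := make(map[string][]string) // module -> its requirements
		sc := bufio.NewScanner(bytes.NewReader(out))
		for sc.Scan() {
			from, to, ok := strings.Cut(sc.Text(), " ")
			if !ok {
				continue
			}
			requires[from] = append(requires[from], to)
		}
		if err := sc.Err(); err != nil {
			log.Fatal(err)
		}
		for m, reqs := range requires {
			fmt.Printf("%s requires %d module(s)\n", m, len(reqs))
		}
	}
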
diff --git a/src/cmd/go/internal/modcmd/init.go b/src/cmd/go/internal/modcmd/init.go
new file mode 100644
index 0000000..e4be73f
--- /dev/null
+++ b/src/cmd/go/internal/modcmd/init.go
@@ -0,0 +1,52 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go mod init
+
+package modcmd
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/modload"
+ "context"
+)
+
+var cmdInit = &base.Command{
+ UsageLine: "go mod init [module-path]",
+ Short: "initialize new module in current directory",
+ Long: `
+Init initializes and writes a new go.mod file in the current directory, in
+effect creating a new module rooted at the current directory. The go.mod file
+must not already exist.
+
+Init accepts one optional argument, the module path for the new module. If the
+module path argument is omitted, init will attempt to infer the module path
+using import comments in .go files, vendoring tool configuration files (like
+Gopkg.lock), and the current directory (if in GOPATH).
+
+If a configuration file for a vendoring tool is present, init will attempt to
+import module requirements from it.
+
+See https://golang.org/ref/mod#go-mod-init for more about 'go mod init'.
+`,
+ Run: runInit,
+}
+
+func init() {
+ base.AddChdirFlag(&cmdInit.Flag)
+ base.AddModCommonFlags(&cmdInit.Flag)
+}
+
+func runInit(ctx context.Context, cmd *base.Command, args []string) {
+ if len(args) > 1 {
+ base.Fatalf("go: 'go mod init' accepts at most one argument")
+ }
+ var modPath string
+ if len(args) == 1 {
+ modPath = args[0]
+ }
+
+ modload.ForceUseModules = true
+ modload.CreateModFile(ctx, modPath) // does all the hard work
+}
diff --git a/src/cmd/go/internal/modcmd/mod.go b/src/cmd/go/internal/modcmd/mod.go
new file mode 100644
index 0000000..125ba33
--- /dev/null
+++ b/src/cmd/go/internal/modcmd/mod.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package modcmd implements the “go mod” command.
+package modcmd
+
+import (
+ "cmd/go/internal/base"
+)
+
+var CmdMod = &base.Command{
+ UsageLine: "go mod",
+ Short: "module maintenance",
+ Long: `Go mod provides access to operations on modules.
+
+Note that support for modules is built into all the go commands,
+not just 'go mod'. For example, day-to-day adding, removing, upgrading,
+and downgrading of dependencies should be done using 'go get'.
+See 'go help modules' for an overview of module functionality.
+ `,
+
+ Commands: []*base.Command{
+ cmdDownload,
+ cmdEdit,
+ cmdGraph,
+ cmdInit,
+ cmdTidy,
+ cmdVendor,
+ cmdVerify,
+ cmdWhy,
+ },
+}
diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go
new file mode 100644
index 0000000..36be926
--- /dev/null
+++ b/src/cmd/go/internal/modcmd/tidy.go
@@ -0,0 +1,139 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go mod tidy
+
+package modcmd
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/toolchain"
+ "context"
+ "fmt"
+
+ "golang.org/x/mod/modfile"
+)
+
+var cmdTidy = &base.Command{
+ UsageLine: "go mod tidy [-e] [-v] [-x] [-go=version] [-compat=version]",
+ Short: "add missing and remove unused modules",
+ Long: `
+Tidy makes sure go.mod matches the source code in the module.
+It adds any missing modules necessary to build the current module's
+packages and dependencies, and it removes unused modules that
+don't provide any relevant packages. It also adds any missing entries
+to go.sum and removes any unnecessary ones.
+
+The -v flag causes tidy to print information about removed modules
+to standard error.
+
+The -e flag causes tidy to attempt to proceed despite errors
+encountered while loading packages.
+
+The -go flag causes tidy to update the 'go' directive in the go.mod
+file to the given version, which may change which module dependencies
+are retained as explicit requirements in the go.mod file.
+(Go versions 1.17 and higher retain more requirements in order to
+support lazy module loading.)
+
+The -compat flag preserves any additional checksums needed for the
+'go' command from the indicated major Go release to successfully load
+the module graph, and causes tidy to error out if that version of the
+'go' command would load any imported package from a different module
+version. By default, tidy acts as if the -compat flag were set to the
+version prior to the one indicated by the 'go' directive in the go.mod
+file.
+
+The -x flag causes tidy to print the commands download executes.
+
+See https://golang.org/ref/mod#go-mod-tidy for more about 'go mod tidy'.
+ `,
+ Run: runTidy,
+}
+
+var (
+ tidyE bool // if true, report errors but proceed anyway.
+ tidyGo goVersionFlag // go version to write to the tidied go.mod file (toggles lazy loading)
+ tidyCompat goVersionFlag // go version for which the tidied go.mod and go.sum files should be “compatible”
+)
+
+func init() {
+ cmdTidy.Flag.BoolVar(&cfg.BuildV, "v", false, "")
+ cmdTidy.Flag.BoolVar(&cfg.BuildX, "x", false, "")
+ cmdTidy.Flag.BoolVar(&tidyE, "e", false, "")
+ cmdTidy.Flag.Var(&tidyGo, "go", "")
+ cmdTidy.Flag.Var(&tidyCompat, "compat", "")
+ base.AddChdirFlag(&cmdTidy.Flag)
+ base.AddModCommonFlags(&cmdTidy.Flag)
+}
+
+// A goVersionFlag is a flag.Value representing a supported Go version.
+//
+// (Note that the -go argument to 'go mod edit' is *not* a goVersionFlag.
+// It intentionally allows newer-than-supported versions as arguments.)
+type goVersionFlag struct {
+ v string
+}
+
+func (f *goVersionFlag) String() string { return f.v }
+func (f *goVersionFlag) Get() any { return f.v }
+
+func (f *goVersionFlag) Set(s string) error {
+ if s != "" {
+ latest := gover.Local()
+ if !modfile.GoVersionRE.MatchString(s) {
+ return fmt.Errorf("expecting a Go version like %q", latest)
+ }
+ if gover.Compare(s, latest) > 0 {
+ return fmt.Errorf("maximum supported Go version is %s", latest)
+ }
+ }
+
+ f.v = s
+ return nil
+}
+
+func runTidy(ctx context.Context, cmd *base.Command, args []string) {
+ if len(args) > 0 {
+ base.Fatalf("go: 'go mod tidy' accepts no arguments")
+ }
+
+ // Tidy aims to make 'go test' reproducible for any package in 'all', so we
+ // need to include test dependencies. For modules that specify go 1.15 or
+ // earlier this is a no-op (because 'all' saturates transitive test
+ // dependencies).
+ //
+ // However, with lazy loading (go 1.16+) 'all' includes only the packages that
+ // are transitively imported by the main module, not the test dependencies of
+ // those packages. In order to make 'go test' reproducible for the packages
+ // that are in 'all' but outside of the main module, we must explicitly
+ // request that their test dependencies be included.
+ modload.ForceUseModules = true
+ modload.RootMode = modload.NeedRoot
+
+ goVersion := tidyGo.String()
+ if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 {
+ toolchain.SwitchOrFatal(ctx, &gover.TooNewError{
+ What: "-go flag",
+ GoVersion: goVersion,
+ })
+ }
+
+ modload.LoadPackages(ctx, modload.PackageOpts{
+ TidyGoVersion: tidyGo.String(),
+ Tags: imports.AnyTags(),
+ Tidy: true,
+ TidyCompatibleVersion: tidyCompat.String(),
+ VendorModulesInGOROOTSrc: true,
+ ResolveMissingImports: true,
+ LoadTests: true,
+ AllowErrors: tidyE,
+ SilenceMissingStdImports: true,
+ Switcher: new(toolchain.Switcher),
+ }, "all")
+}
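
A minimal sketch of a flag.Value in the spirit of goVersionFlag above, for code outside cmd/go that wants a similar '-go=version' flag. The regular expression is a stand-in assumption, not modfile.GoVersionRE, and the upper-bound check against gover.Local() is omitted.

	package main

	import (
		"flag"
		"fmt"
		"regexp"
	)

	// versionFlag is a flag.Value that accepts "" or a version like "1.21",
	// loosely modeled on goVersionFlag above.
	type versionFlag struct{ v string }

	// versionRE is a stand-in pattern, not modfile.GoVersionRE.
	var versionRE = regexp.MustCompile(`^[1-9][0-9]*\.[0-9]+(\.[0-9]+)?$`)

	func (f *versionFlag) String() string { return f.v }

	func (f *versionFlag) Set(s string) error {
		if s != "" && !versionRE.MatchString(s) {
			return fmt.Errorf("expecting a Go version like %q", "1.21")
		}
		f.v = s
		return nil
	}

	func main() {
		var goVer versionFlag
		flag.Var(&goVer, "go", "assumed Go version")
		flag.Parse()
		fmt.Println("-go =", goVer.String())
	}
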
diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go
new file mode 100644
index 0000000..1a0d69e
--- /dev/null
+++ b/src/cmd/go/internal/modcmd/vendor.go
@@ -0,0 +1,431 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modcmd
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "go/build"
+ "io"
+ "io/fs"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/load"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/str"
+
+ "golang.org/x/mod/module"
+)
+
+var cmdVendor = &base.Command{
+ UsageLine: "go mod vendor [-e] [-v] [-o outdir]",
+ Short: "make vendored copy of dependencies",
+ Long: `
+Vendor resets the main module's vendor directory to include all packages
+needed to build and test all the main module's packages.
+It does not include test code for vendored packages.
+
+The -v flag causes vendor to print the names of vendored
+modules and packages to standard error.
+
+The -e flag causes vendor to attempt to proceed despite errors
+encountered while loading packages.
+
+The -o flag causes vendor to create the vendor directory at the given
+path instead of "vendor". The go command can only use a vendor directory
+named "vendor" within the module root directory, so this flag is
+primarily useful for other tools.
+
+See https://golang.org/ref/mod#go-mod-vendor for more about 'go mod vendor'.
+ `,
+ Run: runVendor,
+}
+
+var vendorE bool // if true, report errors but proceed anyway
+var vendorO string // if set, overrides the default output directory
+
+func init() {
+ cmdVendor.Flag.BoolVar(&cfg.BuildV, "v", false, "")
+ cmdVendor.Flag.BoolVar(&vendorE, "e", false, "")
+ cmdVendor.Flag.StringVar(&vendorO, "o", "", "")
+ base.AddChdirFlag(&cmdVendor.Flag)
+ base.AddModCommonFlags(&cmdVendor.Flag)
+}
+
+func runVendor(ctx context.Context, cmd *base.Command, args []string) {
+ if len(args) != 0 {
+ base.Fatalf("go: 'go mod vendor' accepts no arguments")
+ }
+ modload.ForceUseModules = true
+ modload.RootMode = modload.NeedRoot
+
+ loadOpts := modload.PackageOpts{
+ Tags: imports.AnyTags(),
+ VendorModulesInGOROOTSrc: true,
+ ResolveMissingImports: true,
+ UseVendorAll: true,
+ AllowErrors: vendorE,
+ SilenceMissingStdImports: true,
+ }
+ _, pkgs := modload.LoadPackages(ctx, loadOpts, "all")
+
+ var vdir string
+ switch {
+ case filepath.IsAbs(vendorO):
+ vdir = vendorO
+ case vendorO != "":
+ vdir = filepath.Join(base.Cwd(), vendorO)
+ default:
+ vdir = filepath.Join(modload.VendorDir())
+ }
+ if err := os.RemoveAll(vdir); err != nil {
+ base.Fatal(err)
+ }
+
+ modpkgs := make(map[module.Version][]string)
+ for _, pkg := range pkgs {
+ m := modload.PackageModule(pkg)
+ if m.Path == "" || m.Version == "" && modload.MainModules.Contains(m.Path) {
+ continue
+ }
+ modpkgs[m] = append(modpkgs[m], pkg)
+ }
+
+ includeAllReplacements := false
+ includeGoVersions := false
+ isExplicit := map[module.Version]bool{}
+ if gv := modload.ModFile().Go; gv != nil {
+ if gover.Compare(gv.Version, "1.14") >= 0 {
+ // If the Go version is at least 1.14, annotate all explicit 'require' and
+ // 'replace' targets found in the go.mod file so that we can perform a
+ // stronger consistency check when -mod=vendor is set.
+ for _, r := range modload.ModFile().Require {
+ isExplicit[r.Mod] = true
+ }
+ includeAllReplacements = true
+ }
+ if gover.Compare(gv.Version, "1.17") >= 0 {
+ // If the Go version is at least 1.17, annotate all modules with their
+ // 'go' version directives.
+ includeGoVersions = true
+ }
+ }
+
+ var vendorMods []module.Version
+ for m := range isExplicit {
+ vendorMods = append(vendorMods, m)
+ }
+ for m := range modpkgs {
+ if !isExplicit[m] {
+ vendorMods = append(vendorMods, m)
+ }
+ }
+ gover.ModSort(vendorMods)
+
+ var (
+ buf bytes.Buffer
+ w io.Writer = &buf
+ )
+ if cfg.BuildV {
+ w = io.MultiWriter(&buf, os.Stderr)
+ }
+
+ for _, m := range vendorMods {
+ replacement := modload.Replacement(m)
+ line := moduleLine(m, replacement)
+ io.WriteString(w, line)
+
+ goVersion := ""
+ if includeGoVersions {
+ goVersion = modload.ModuleInfo(ctx, m.Path).GoVersion
+ }
+ switch {
+ case isExplicit[m] && goVersion != "":
+ fmt.Fprintf(w, "## explicit; go %s\n", goVersion)
+ case isExplicit[m]:
+ io.WriteString(w, "## explicit\n")
+ case goVersion != "":
+ fmt.Fprintf(w, "## go %s\n", goVersion)
+ }
+
+ pkgs := modpkgs[m]
+ sort.Strings(pkgs)
+ for _, pkg := range pkgs {
+ fmt.Fprintf(w, "%s\n", pkg)
+ vendorPkg(vdir, pkg)
+ }
+ }
+
+ if includeAllReplacements {
+ // Record unused and wildcard replacements at the end of the modules.txt file:
+ // without access to the complete build list, the consumer of the vendor
+ // directory can't otherwise determine that those replacements had no effect.
+ for _, r := range modload.ModFile().Replace {
+ if len(modpkgs[r.Old]) > 0 {
+ // We already recorded this replacement in the entry for the replaced
+ // module, along with the packages it provides.
+ continue
+ }
+
+ line := moduleLine(r.Old, r.New)
+ buf.WriteString(line)
+ if cfg.BuildV {
+ os.Stderr.WriteString(line)
+ }
+ }
+ }
+
+ if buf.Len() == 0 {
+ fmt.Fprintf(os.Stderr, "go: no dependencies to vendor\n")
+ return
+ }
+
+ if err := os.MkdirAll(vdir, 0777); err != nil {
+ base.Fatal(err)
+ }
+
+ if err := os.WriteFile(filepath.Join(vdir, "modules.txt"), buf.Bytes(), 0666); err != nil {
+ base.Fatal(err)
+ }
+}
+
+func moduleLine(m, r module.Version) string {
+ b := new(strings.Builder)
+ b.WriteString("# ")
+ b.WriteString(m.Path)
+ if m.Version != "" {
+ b.WriteString(" ")
+ b.WriteString(m.Version)
+ }
+ if r.Path != "" {
+ if str.HasFilePathPrefix(filepath.Clean(r.Path), "vendor") {
+ base.Fatalf("go: replacement path %s inside vendor directory", r.Path)
+ }
+ b.WriteString(" => ")
+ b.WriteString(r.Path)
+ if r.Version != "" {
+ b.WriteString(" ")
+ b.WriteString(r.Version)
+ }
+ }
+ b.WriteString("\n")
+ return b.String()
+}
+
+func vendorPkg(vdir, pkg string) {
+ src, realPath, _ := modload.Lookup("", false, pkg)
+ if src == "" {
+ base.Errorf("internal error: no pkg for %s\n", pkg)
+ return
+ }
+ if realPath != pkg {
+ // TODO(#26904): Revisit whether this behavior still makes sense.
+ // This should actually be impossible today, because the import map is the
+ // identity function for packages outside of the standard library.
+ //
+ // Part of the purpose of the vendor directory is to allow the packages in
+ // the module to continue to build in GOPATH mode, and GOPATH-mode users
+ // won't know about replacement aliasing. How important is it to maintain
+ // compatibility?
+ fmt.Fprintf(os.Stderr, "warning: %s imported as both %s and %s; making two copies.\n", realPath, realPath, pkg)
+ }
+
+ copiedFiles := make(map[string]bool)
+ dst := filepath.Join(vdir, pkg)
+ copyDir(dst, src, matchPotentialSourceFile, copiedFiles)
+ if m := modload.PackageModule(realPath); m.Path != "" {
+ copyMetadata(m.Path, realPath, dst, src, copiedFiles)
+ }
+
+ ctx := build.Default
+ ctx.UseAllFiles = true
+ bp, err := ctx.ImportDir(src, build.IgnoreVendor)
+ // Because UseAllFiles is set on the build.Context, it's possible to get
+ // a MultiplePackageError on an otherwise valid package: the package could
+ // have different names for GOOS=windows and GOOS=mac, for example. On the
+ // other hand, if there's a NoGoError, the package might have source files
+ // specifying "// +build ignore"; those packages should be skipped because
+ // embeds from ignored files can't be used.
+ // TODO(#42504): Find a better way to avoid errors from ImportDir. We'll
+ // need to figure this out when we switch to PackagesAndErrors as per the
+ // TODO above.
+ var multiplePackageError *build.MultiplePackageError
+ var noGoError *build.NoGoError
+ if err != nil {
+ if errors.As(err, &noGoError) {
+ return // No source files in this package are built. Skip embeds in ignored files.
+ } else if !errors.As(err, &multiplePackageError) { // multiplePackageErrors are OK, but others are not.
+ base.Fatalf("internal error: failed to find embedded files of %s: %v\n", pkg, err)
+ }
+ }
+ embedPatterns := str.StringList(bp.EmbedPatterns, bp.TestEmbedPatterns, bp.XTestEmbedPatterns)
+ embeds, err := load.ResolveEmbed(bp.Dir, embedPatterns)
+ if err != nil {
+ base.Fatal(err)
+ }
+ for _, embed := range embeds {
+ embedDst := filepath.Join(dst, embed)
+ if copiedFiles[embedDst] {
+ continue
+ }
+
+ // Copy the file as is done by copyDir below.
+ r, err := os.Open(filepath.Join(src, embed))
+ if err != nil {
+ base.Fatal(err)
+ }
+ if err := os.MkdirAll(filepath.Dir(embedDst), 0777); err != nil {
+ base.Fatal(err)
+ }
+ w, err := os.Create(embedDst)
+ if err != nil {
+ base.Fatal(err)
+ }
+ if _, err := io.Copy(w, r); err != nil {
+ base.Fatal(err)
+ }
+ r.Close()
+ if err := w.Close(); err != nil {
+ base.Fatal(err)
+ }
+ }
+}
+
+type metakey struct {
+ modPath string
+ dst string
+}
+
+var copiedMetadata = make(map[metakey]bool)
+
+// copyMetadata copies metadata files from parents of src to parents of dst,
+// stopping after processing the src parent for modPath.
+func copyMetadata(modPath, pkg, dst, src string, copiedFiles map[string]bool) {
+ for parent := 0; ; parent++ {
+ if copiedMetadata[metakey{modPath, dst}] {
+ break
+ }
+ copiedMetadata[metakey{modPath, dst}] = true
+ if parent > 0 {
+ copyDir(dst, src, matchMetadata, copiedFiles)
+ }
+ if modPath == pkg {
+ break
+ }
+ pkg = path.Dir(pkg)
+ dst = filepath.Dir(dst)
+ src = filepath.Dir(src)
+ }
+}
+
+// metaPrefixes is the list of metadata file prefixes.
+// Vendoring copies metadata files from parents of copied directories.
+// Note that this list could be arbitrarily extended, and it is longer
+// in other tools (such as godep or dep). By using this limited set of
+// prefixes and also insisting on capitalized file names, we are trying
+// to nudge people toward more agreement on the naming
+// and also trying to avoid false positives.
+var metaPrefixes = []string{
+ "AUTHORS",
+ "CONTRIBUTORS",
+ "COPYLEFT",
+ "COPYING",
+ "COPYRIGHT",
+ "LEGAL",
+ "LICENSE",
+ "NOTICE",
+ "PATENTS",
+}
+
+// matchMetadata reports whether info is a metadata file.
+func matchMetadata(dir string, info fs.DirEntry) bool {
+ name := info.Name()
+ for _, p := range metaPrefixes {
+ if strings.HasPrefix(name, p) {
+ return true
+ }
+ }
+ return false
+}
+
+// matchPotentialSourceFile reports whether info may be relevant to a build operation.
+func matchPotentialSourceFile(dir string, info fs.DirEntry) bool {
+ if strings.HasSuffix(info.Name(), "_test.go") {
+ return false
+ }
+ if info.Name() == "go.mod" || info.Name() == "go.sum" {
+ if gv := modload.ModFile().Go; gv != nil && gover.Compare(gv.Version, "1.17") >= 0 {
+ // As of Go 1.17, we strip go.mod and go.sum files from dependency modules.
+ // Otherwise, 'go' commands invoked within the vendor subtree may misidentify
+ // an arbitrary directory within the vendor tree as a module root.
+ // (See https://golang.org/issue/42970.)
+ return false
+ }
+ }
+ if strings.HasSuffix(info.Name(), ".go") {
+ f, err := fsys.Open(filepath.Join(dir, info.Name()))
+ if err != nil {
+ base.Fatal(err)
+ }
+ defer f.Close()
+
+ content, err := imports.ReadImports(f, false, nil)
+ if err == nil && !imports.ShouldBuild(content, imports.AnyTags()) {
+ // The file is explicitly tagged "ignore", so it can't affect the build.
+ // Leave it out.
+ return false
+ }
+ return true
+ }
+
+ // We don't know anything about this file, so optimistically assume that it is
+ // needed.
+ return true
+}
+
+// copyDir copies all regular files satisfying match(info) from src to dst.
+func copyDir(dst, src string, match func(dir string, info fs.DirEntry) bool, copiedFiles map[string]bool) {
+ files, err := os.ReadDir(src)
+ if err != nil {
+ base.Fatal(err)
+ }
+ if err := os.MkdirAll(dst, 0777); err != nil {
+ base.Fatal(err)
+ }
+ for _, file := range files {
+ if file.IsDir() || !file.Type().IsRegular() || !match(src, file) {
+ continue
+ }
+ copiedFiles[file.Name()] = true
+ r, err := os.Open(filepath.Join(src, file.Name()))
+ if err != nil {
+ base.Fatal(err)
+ }
+ dstPath := filepath.Join(dst, file.Name())
+ copiedFiles[dstPath] = true
+ w, err := os.Create(dstPath)
+ if err != nil {
+ base.Fatal(err)
+ }
+ if _, err := io.Copy(w, r); err != nil {
+ base.Fatal(err)
+ }
+ r.Close()
+ if err := w.Close(); err != nil {
+ base.Fatal(err)
+ }
+ }
+}
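
A minimal sketch of reading the vendor/modules.txt annotations that runVendor and moduleLine write: '# path [version] [=> replacement [version]]' lines, '## explicit' / '## go 1.x' markers, and one vendored package path per following line. The grouping shown is an illustrative assumption.

	package main

	import (
		"bufio"
		"fmt"
		"log"
		"os"
		"strings"
	)

	func main() {
		f, err := os.Open("vendor/modules.txt")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		current := ""
		pkgs := make(map[string][]string) // module -> vendored packages
		sc := bufio.NewScanner(f)
		for sc.Scan() {
			line := sc.Text()
			switch {
			case strings.HasPrefix(line, "## "):
				// "## explicit", "## go 1.x", or "## explicit; go 1.x" markers.
			case strings.HasPrefix(line, "# "):
				// "# path [version] [=> new [version]]" as written by moduleLine.
				fields := strings.Fields(strings.TrimPrefix(line, "# "))
				if len(fields) > 0 {
					current = fields[0]
				}
			case line != "" && current != "":
				pkgs[current] = append(pkgs[current], line)
			}
		}
		if err := sc.Err(); err != nil {
			log.Fatal(err)
		}
		for m, ps := range pkgs {
			fmt.Printf("%s: %d package(s)\n", m, len(ps))
		}
	}
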
diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go
new file mode 100644
index 0000000..d07f730
--- /dev/null
+++ b/src/cmd/go/internal/modcmd/verify.go
@@ -0,0 +1,143 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modcmd
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "runtime"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modload"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/sumdb/dirhash"
+)
+
+var cmdVerify = &base.Command{
+ UsageLine: "go mod verify",
+ Short: "verify dependencies have expected content",
+ Long: `
+Verify checks that the dependencies of the current module,
+which are stored in a local downloaded source cache, have not been
+modified since being downloaded. If all the modules are unmodified,
+verify prints "all modules verified." Otherwise it reports which
+modules have been changed and causes 'go mod' to exit with a
+non-zero status.
+
+See https://golang.org/ref/mod#go-mod-verify for more about 'go mod verify'.
+ `,
+ Run: runVerify,
+}
+
+func init() {
+ base.AddChdirFlag(&cmdVerify.Flag)
+ base.AddModCommonFlags(&cmdVerify.Flag)
+}
+
+func runVerify(ctx context.Context, cmd *base.Command, args []string) {
+ modload.InitWorkfile()
+
+ if len(args) != 0 {
+ // NOTE(rsc): Could take a module pattern.
+ base.Fatalf("go: verify takes no arguments")
+ }
+ modload.ForceUseModules = true
+ modload.RootMode = modload.NeedRoot
+
+ // Only verify up to GOMAXPROCS zips at once.
+ type token struct{}
+ sem := make(chan token, runtime.GOMAXPROCS(0))
+
+ mg, err := modload.LoadModGraph(ctx, "")
+ if err != nil {
+ base.Fatal(err)
+ }
+ mods := mg.BuildList()
+ // Use a slice of result channels, so that the output is deterministic.
+ errsChans := make([]<-chan []error, len(mods))
+
+ for i, mod := range mods {
+ sem <- token{}
+ errsc := make(chan []error, 1)
+ errsChans[i] = errsc
+ mod := mod // use a copy to avoid data races
+ go func() {
+ errsc <- verifyMod(ctx, mod)
+ <-sem
+ }()
+ }
+
+ ok := true
+ for _, errsc := range errsChans {
+ errs := <-errsc
+ for _, err := range errs {
+ base.Errorf("%s", err)
+ ok = false
+ }
+ }
+ if ok {
+ fmt.Printf("all modules verified\n")
+ }
+}
+
+func verifyMod(ctx context.Context, mod module.Version) []error {
+ if gover.IsToolchain(mod.Path) {
+ // "go" and "toolchain" have no disk footprint; nothing to verify.
+ return nil
+ }
+ if modload.MainModules.Contains(mod.Path) {
+ return nil
+ }
+ var errs []error
+ zip, zipErr := modfetch.CachePath(ctx, mod, "zip")
+ if zipErr == nil {
+ _, zipErr = os.Stat(zip)
+ }
+ dir, dirErr := modfetch.DownloadDir(ctx, mod)
+ data, err := os.ReadFile(zip + "hash")
+ if err != nil {
+ if zipErr != nil && errors.Is(zipErr, fs.ErrNotExist) &&
+ dirErr != nil && errors.Is(dirErr, fs.ErrNotExist) {
+ // Nothing downloaded yet. Nothing to verify.
+ return nil
+ }
+ errs = append(errs, fmt.Errorf("%s %s: missing ziphash: %v", mod.Path, mod.Version, err))
+ return errs
+ }
+ h := string(bytes.TrimSpace(data))
+
+ if zipErr != nil && errors.Is(zipErr, fs.ErrNotExist) {
+ // ok
+ } else {
+ hZ, err := dirhash.HashZip(zip, dirhash.DefaultHash)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("%s %s: %v", mod.Path, mod.Version, err))
+ return errs
+ } else if hZ != h {
+ errs = append(errs, fmt.Errorf("%s %s: zip has been modified (%v)", mod.Path, mod.Version, zip))
+ }
+ }
+ if dirErr != nil && errors.Is(dirErr, fs.ErrNotExist) {
+ // ok
+ } else {
+ hD, err := dirhash.HashDir(dir, mod.Path+"@"+mod.Version, dirhash.DefaultHash)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("%s %s: %v", mod.Path, mod.Version, err))
+ return errs
+ }
+ if hD != h {
+ errs = append(errs, fmt.Errorf("%s %s: dir has been modified (%v)", mod.Path, mod.Version, dir))
+ }
+ }
+ return errs
+}
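
A minimal sketch of the ziphash check performed by verifyMod, calling golang.org/x/mod/sumdb/dirhash directly. The cache path is a placeholder assumption; the real command derives it via modfetch.CachePath.

	package main

	import (
		"bytes"
		"fmt"
		"log"
		"os"

		"golang.org/x/mod/sumdb/dirhash"
	)

	func main() {
		// Placeholder path; the real command derives it via modfetch.CachePath.
		zip := "/path/to/cache/download/example.com/m/@v/v1.0.0.zip"
		want, err := os.ReadFile(zip + "hash") // the adjacent .ziphash file
		if err != nil {
			log.Fatal(err)
		}
		got, err := dirhash.HashZip(zip, dirhash.DefaultHash)
		if err != nil {
			log.Fatal(err)
		}
		if got != string(bytes.TrimSpace(want)) {
			fmt.Println("zip has been modified")
			os.Exit(1)
		}
		fmt.Println("zip verified")
	}
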
diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go
new file mode 100644
index 0000000..198672d
--- /dev/null
+++ b/src/cmd/go/internal/modcmd/why.go
@@ -0,0 +1,143 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modcmd
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/modload"
+)
+
+var cmdWhy = &base.Command{
+ UsageLine: "go mod why [-m] [-vendor] packages...",
+ Short: "explain why packages or modules are needed",
+ Long: `
+Why shows a shortest path in the import graph from the main module to
+each of the listed packages. If the -m flag is given, why treats the
+arguments as a list of modules and finds a path to any package in each
+of the modules.
+
+By default, why queries the graph of packages matched by "go list all",
+which includes tests for reachable packages. The -vendor flag causes why
+to exclude tests of dependencies.
+
+The output is a sequence of stanzas, one for each package or module
+name on the command line, separated by blank lines. Each stanza begins
+with a comment line "# package" or "# module" giving the target
+package or module. Subsequent lines give a path through the import
+graph, one package per line. If the package or module is not
+referenced from the main module, the stanza will display a single
+parenthesized note indicating that fact.
+
+For example:
+
+ $ go mod why golang.org/x/text/language golang.org/x/text/encoding
+ # golang.org/x/text/language
+ rsc.io/quote
+ rsc.io/sampler
+ golang.org/x/text/language
+
+ # golang.org/x/text/encoding
+ (main module does not need package golang.org/x/text/encoding)
+ $
+
+See https://golang.org/ref/mod#go-mod-why for more about 'go mod why'.
+ `,
+}
+
+var (
+ whyM = cmdWhy.Flag.Bool("m", false, "")
+ whyVendor = cmdWhy.Flag.Bool("vendor", false, "")
+)
+
+func init() {
+ cmdWhy.Run = runWhy // break init cycle
+ base.AddChdirFlag(&cmdWhy.Flag)
+ base.AddModCommonFlags(&cmdWhy.Flag)
+}
+
+func runWhy(ctx context.Context, cmd *base.Command, args []string) {
+ modload.InitWorkfile()
+ modload.ForceUseModules = true
+ modload.RootMode = modload.NeedRoot
+ modload.ExplicitWriteGoMod = true // don't write go.mod in ListModules
+
+ loadOpts := modload.PackageOpts{
+ Tags: imports.AnyTags(),
+ VendorModulesInGOROOTSrc: true,
+ LoadTests: !*whyVendor,
+ SilencePackageErrors: true,
+ UseVendorAll: *whyVendor,
+ }
+
+ if *whyM {
+ for _, arg := range args {
+ if strings.Contains(arg, "@") {
+ base.Fatalf("go: %s: 'go mod why' requires a module path, not a version query", arg)
+ }
+ }
+
+ mods, err := modload.ListModules(ctx, args, 0, "")
+ if err != nil {
+ base.Fatal(err)
+ }
+
+ byModule := make(map[string][]string)
+ _, pkgs := modload.LoadPackages(ctx, loadOpts, "all")
+ for _, path := range pkgs {
+ m := modload.PackageModule(path)
+ if m.Path != "" {
+ byModule[m.Path] = append(byModule[m.Path], path)
+ }
+ }
+ sep := ""
+ for _, m := range mods {
+ best := ""
+ bestDepth := 1000000000
+ for _, path := range byModule[m.Path] {
+ d := modload.WhyDepth(path)
+ if d > 0 && d < bestDepth {
+ best = path
+ bestDepth = d
+ }
+ }
+ why := modload.Why(best)
+ if why == "" {
+ vendoring := ""
+ if *whyVendor {
+ vendoring = " to vendor"
+ }
+ why = "(main module does not need" + vendoring + " module " + m.Path + ")\n"
+ }
+ fmt.Printf("%s# %s\n%s", sep, m.Path, why)
+ sep = "\n"
+ }
+ } else {
+ // Resolve to packages.
+ matches, _ := modload.LoadPackages(ctx, loadOpts, args...)
+
+ modload.LoadPackages(ctx, loadOpts, "all") // rebuild graph, from main module (not from named packages)
+
+ sep := ""
+ for _, m := range matches {
+ for _, path := range m.Pkgs {
+ why := modload.Why(path)
+ if why == "" {
+ vendoring := ""
+ if *whyVendor {
+ vendoring = " to vendor"
+ }
+ why = "(main module does not need" + vendoring + " package " + path + ")\n"
+ }
+ fmt.Printf("%s# %s\n%s", sep, path, why)
+ sep = "\n"
+ }
+ }
+ }
+}
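
A minimal sketch of splitting 'go mod why' output into the stanzas described in the help text above: one per target, separated by blank lines, each beginning with a "# target" line. The target module is an arbitrary example.

	package main

	import (
		"fmt"
		"log"
		"os/exec"
		"strings"
	)

	func main() {
		// golang.org/x/text is an arbitrary example module.
		out, err := exec.Command("go", "mod", "why", "-m", "golang.org/x/text").Output()
		if err != nil {
			log.Fatal(err)
		}
		// Stanzas are separated by blank lines; each begins with a "# target" line.
		for _, stanza := range strings.Split(strings.TrimSpace(string(out)), "\n\n") {
			lines := strings.Split(stanza, "\n")
			fmt.Printf("%s: %d line(s) of import path\n",
				strings.TrimPrefix(lines[0], "# "), len(lines)-1)
		}
	}
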
diff --git a/src/cmd/go/internal/modconv/convert.go b/src/cmd/go/internal/modconv/convert.go
new file mode 100644
index 0000000..9c861f8
--- /dev/null
+++ b/src/cmd/go/internal/modconv/convert.go
@@ -0,0 +1,105 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "sort"
+ "strings"
+
+ "cmd/go/internal/base"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
+)
+
+// ConvertLegacyConfig converts legacy config to modfile.
+// The file argument is slash-delimited.
+func ConvertLegacyConfig(f *modfile.File, file string, data []byte, queryPackage func(path, rev string) (module.Version, error)) error {
+ i := strings.LastIndex(file, "/")
+ j := -2
+ if i >= 0 {
+ j = strings.LastIndex(file[:i], "/")
+ }
+ convert := Converters[file[i+1:]]
+ if convert == nil && j != -2 {
+ convert = Converters[file[j+1:]]
+ }
+ if convert == nil {
+ return fmt.Errorf("unknown legacy config file %s", file)
+ }
+ mf, err := convert(file, data)
+ if err != nil {
+ return fmt.Errorf("parsing %s: %v", file, err)
+ }
+
+ // Convert requirements block, which may use raw SHA1 hashes as versions,
+ // to valid semver requirement list, respecting major versions.
+ versions := make([]module.Version, len(mf.Require))
+ replace := make(map[string]*modfile.Replace)
+
+ for _, r := range mf.Replace {
+ replace[r.New.Path] = r
+ replace[r.Old.Path] = r
+ }
+
+ type token struct{}
+ sem := make(chan token, runtime.GOMAXPROCS(0))
+ for i, r := range mf.Require {
+ m := r.Mod
+ if m.Path == "" {
+ continue
+ }
+ if re, ok := replace[m.Path]; ok {
+ m = re.New
+ }
+ sem <- token{}
+ go func(i int, m module.Version) {
+ defer func() { <-sem }()
+ version, err := queryPackage(m.Path, m.Version)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "go: converting %s: stat %s@%s: %v\n", base.ShortPath(file), m.Path, m.Version, err)
+ return
+ }
+
+ versions[i] = version
+ }(i, m)
+ }
+ // Fill semaphore channel to wait for all tasks to finish.
+ for n := cap(sem); n > 0; n-- {
+ sem <- token{}
+ }
+
+ need := map[string]string{}
+ for _, v := range versions {
+ if v.Path == "" {
+ continue
+ }
+ // Don't use semver.Max here; need to preserve +incompatible suffix.
+ if needv, ok := need[v.Path]; !ok || semver.Compare(needv, v.Version) < 0 {
+ need[v.Path] = v.Version
+ }
+ }
+ paths := make([]string, 0, len(need))
+ for path := range need {
+ paths = append(paths, path)
+ }
+ sort.Strings(paths)
+ for _, path := range paths {
+ if re, ok := replace[path]; ok {
+ err := f.AddReplace(re.Old.Path, re.Old.Version, path, need[path])
+ if err != nil {
+ return fmt.Errorf("add replace: %v", err)
+ }
+ }
+ f.AddNewRequire(path, need[path], false)
+ }
+
+ f.Cleanup()
+ return nil
+}
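
The conversion above bounds concurrency with a buffered channel used as a counting semaphore: each goroutine takes a token before starting, returns it when done, and refilling the channel to capacity afterwards serves as the wait. A minimal, self-contained sketch of that pattern (not part of the patch; the workload is a placeholder):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	type token struct{}
	// At most GOMAXPROCS tasks run at once.
	sem := make(chan token, runtime.GOMAXPROCS(0))

	results := make([]int, 10)
	for i := range results {
		sem <- token{} // acquire a slot; blocks when the channel is full
		go func(i int) {
			defer func() { <-sem }() // release the slot when done
			results[i] = i * i       // each goroutine writes only its own element
		}(i)
	}

	// Wait for all tasks: once cap(sem) tokens can be parked again,
	// every goroutine has released its slot.
	for n := cap(sem); n > 0; n-- {
		sem <- token{}
	}
	fmt.Println(results)
}
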
diff --git a/src/cmd/go/internal/modconv/dep.go b/src/cmd/go/internal/modconv/dep.go
new file mode 100644
index 0000000..9bea761
--- /dev/null
+++ b/src/cmd/go/internal/modconv/dep.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import (
+ "fmt"
+ "internal/lazyregexp"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
+)
+
+func ParseGopkgLock(file string, data []byte) (*modfile.File, error) {
+ type pkg struct {
+ Path string
+ Version string
+ Source string
+ }
+ mf := new(modfile.File)
+ var list []pkg
+ var r *pkg
+ for lineno, line := range strings.Split(string(data), "\n") {
+ lineno++
+ if i := strings.Index(line, "#"); i >= 0 {
+ line = line[:i]
+ }
+ line = strings.TrimSpace(line)
+ if line == "[[projects]]" {
+ list = append(list, pkg{})
+ r = &list[len(list)-1]
+ continue
+ }
+ if strings.HasPrefix(line, "[") {
+ r = nil
+ continue
+ }
+ if r == nil {
+ continue
+ }
+ before, after, found := strings.Cut(line, "=")
+ if !found {
+ continue
+ }
+ key := strings.TrimSpace(before)
+ val := strings.TrimSpace(after)
+ if len(val) >= 2 && val[0] == '"' && val[len(val)-1] == '"' {
+ q, err := strconv.Unquote(val) // Go unquoting, but close enough for now
+ if err != nil {
+ return nil, fmt.Errorf("%s:%d: invalid quoted string: %v", file, lineno, err)
+ }
+ val = q
+ }
+ switch key {
+ case "name":
+ r.Path = val
+ case "source":
+ r.Source = val
+ case "revision", "version":
+ // Note: key "version" should take priority over "revision",
+ // and it does, because dep writes toml keys in alphabetical order,
+ // so we see version (if present) second.
+ if key == "version" {
+ if !semver.IsValid(val) || semver.Canonical(val) != val {
+ break
+ }
+ }
+ r.Version = val
+ }
+ }
+ for _, r := range list {
+ if r.Path == "" || r.Version == "" {
+ return nil, fmt.Errorf("%s: empty [[projects]] stanza (%s)", file, r.Path)
+ }
+ mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: r.Path, Version: r.Version}})
+
+ if r.Source != "" {
+ // Convert "source" to import path, such as
+ // git@test.com:x/y.git and https://test.com/x/y.git.
+ // We get "test.com/x/y" at last.
+ source, err := decodeSource(r.Source)
+ if err != nil {
+ return nil, err
+ }
+ old := module.Version{Path: r.Path, Version: r.Version}
+ new := module.Version{Path: source, Version: r.Version}
+ mf.Replace = append(mf.Replace, &modfile.Replace{Old: old, New: new})
+ }
+ }
+ return mf, nil
+}
+
+var scpSyntaxReg = lazyregexp.New(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`)
+
+func decodeSource(source string) (string, error) {
+ var u *url.URL
+ var p string
+ if m := scpSyntaxReg.FindStringSubmatch(source); m != nil {
+ // Match SCP-like syntax and convert it to a URL.
+ // Eg, "git@github.com:user/repo" becomes
+ // "ssh://git@github.com/user/repo".
+ u = &url.URL{
+ Scheme: "ssh",
+ User: url.User(m[1]),
+ Host: m[2],
+ Path: "/" + m[3],
+ }
+ } else {
+ var err error
+ u, err = url.Parse(source)
+ if err != nil {
+ return "", fmt.Errorf("%q is not a valid URI", source)
+ }
+ }
+
+ // If no scheme was passed, then the entire path will have been put into
+ // u.Path. Either way, construct the normalized path correctly.
+ if u.Host == "" {
+ p = source
+ } else {
+ p = path.Join(u.Host, u.Path)
+ }
+ p = strings.TrimSuffix(p, ".git")
+ p = strings.TrimSuffix(p, ".hg")
+ return p, nil
+}
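
decodeSource above normalizes both SCP-style and URL-style sources to a plain host/path prefix before trimming a trailing .git or .hg. A sketch of a table-driven test that could sit alongside dep.go inside this package (the inputs below are illustrative):

package modconv

import "testing"

// Illustrative cases for decodeSource: SCP-style and URL-style sources
// normalize to the same host/path form; a scheme-less source passes through.
func TestDecodeSourceSketch(t *testing.T) {
	cases := []struct{ in, want string }{
		{"git@github.com:user/repo.git", "github.com/user/repo"},
		{"https://github.com/user/repo.git", "github.com/user/repo"},
		{"example.com/user/repo", "example.com/user/repo"},
	}
	for _, c := range cases {
		got, err := decodeSource(c.in)
		if err != nil {
			t.Fatalf("decodeSource(%q): %v", c.in, err)
		}
		if got != c.want {
			t.Errorf("decodeSource(%q) = %q, want %q", c.in, got, c.want)
		}
	}
}
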
diff --git a/src/cmd/go/internal/modconv/glide.go b/src/cmd/go/internal/modconv/glide.go
new file mode 100644
index 0000000..d1de3f7
--- /dev/null
+++ b/src/cmd/go/internal/modconv/glide.go
@@ -0,0 +1,41 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import (
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+func ParseGlideLock(file string, data []byte) (*modfile.File, error) {
+ mf := new(modfile.File)
+ imports := false
+ name := ""
+ for _, line := range strings.Split(string(data), "\n") {
+ if line == "" {
+ continue
+ }
+ if strings.HasPrefix(line, "imports:") {
+ imports = true
+ } else if line[0] != '-' && line[0] != ' ' && line[0] != '\t' {
+ imports = false
+ }
+ if !imports {
+ continue
+ }
+ if strings.HasPrefix(line, "- name:") {
+ name = strings.TrimSpace(line[len("- name:"):])
+ }
+ if strings.HasPrefix(line, " version:") {
+ version := strings.TrimSpace(line[len(" version:"):])
+ if name != "" && version != "" {
+ mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: name, Version: version}})
+ }
+ }
+ }
+ return mf, nil
+}
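
ParseGlideLock scans only the imports: section, pairing each "- name:" line with the two-space-indented "  version:" line that follows it (as in the dockerman.glide testdata further below). A minimal in-package test sketch; the lock-file fragment and revision are made up:

package modconv

import "testing"

// A "- name:" line followed by an indented "  version:" line yields one
// require entry; everything outside the imports: section is ignored.
func TestParseGlideLockSketch(t *testing.T) {
	data := []byte(`imports:
- name: github.com/example/dep
  version: 0123456789abcdef0123456789abcdef01234567
devImports: []
`)
	mf, err := ParseGlideLock("glide.lock", data)
	if err != nil {
		t.Fatal(err)
	}
	if len(mf.Require) != 1 ||
		mf.Require[0].Mod.Path != "github.com/example/dep" ||
		mf.Require[0].Mod.Version != "0123456789abcdef0123456789abcdef01234567" {
		t.Errorf("unexpected requirements: %+v", mf.Require)
	}
}
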
diff --git a/src/cmd/go/internal/modconv/glock.go b/src/cmd/go/internal/modconv/glock.go
new file mode 100644
index 0000000..b8dc204
--- /dev/null
+++ b/src/cmd/go/internal/modconv/glock.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import (
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+func ParseGLOCKFILE(file string, data []byte) (*modfile.File, error) {
+ mf := new(modfile.File)
+ for _, line := range strings.Split(string(data), "\n") {
+ f := strings.Fields(line)
+ if len(f) >= 2 && f[0] != "cmd" {
+ mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: f[0], Version: f[1]}})
+ }
+ }
+ return mf, nil
+}
diff --git a/src/cmd/go/internal/modconv/godeps.go b/src/cmd/go/internal/modconv/godeps.go
new file mode 100644
index 0000000..09c0fa3
--- /dev/null
+++ b/src/cmd/go/internal/modconv/godeps.go
@@ -0,0 +1,30 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import (
+ "encoding/json"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+func ParseGodepsJSON(file string, data []byte) (*modfile.File, error) {
+ var cfg struct {
+ ImportPath string
+ Deps []struct {
+ ImportPath string
+ Rev string
+ }
+ }
+ if err := json.Unmarshal(data, &cfg); err != nil {
+ return nil, err
+ }
+ mf := new(modfile.File)
+ for _, d := range cfg.Deps {
+ mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: d.ImportPath, Version: d.Rev}})
+ }
+ return mf, nil
+}
diff --git a/src/cmd/go/internal/modconv/modconv.go b/src/cmd/go/internal/modconv/modconv.go
new file mode 100644
index 0000000..dc06072
--- /dev/null
+++ b/src/cmd/go/internal/modconv/modconv.go
@@ -0,0 +1,19 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import "golang.org/x/mod/modfile"
+
+var Converters = map[string]func(string, []byte) (*modfile.File, error){
+ "GLOCKFILE": ParseGLOCKFILE,
+ "Godeps/Godeps.json": ParseGodepsJSON,
+ "Gopkg.lock": ParseGopkgLock,
+ "dependencies.tsv": ParseDependenciesTSV,
+ "glide.lock": ParseGlideLock,
+ "vendor.conf": ParseVendorConf,
+ "vendor.yml": ParseVendorYML,
+ "vendor/manifest": ParseVendorManifest,
+ "vendor/vendor.json": ParseVendorJSON,
+}
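
ConvertLegacyConfig (in convert.go above) looks up a converter first by the file's base name and then by its last two path elements, which is how the nested keys "Godeps/Godeps.json" and "vendor/vendor.json" are matched. A standalone sketch of that two-step lookup, using a stand-in key set rather than the real Converters map:

package main

import (
	"fmt"
	"strings"
)

// keys mirrors the shape of the Converters map; only the lookup matters here.
var keys = map[string]bool{
	"GLOCKFILE":          true,
	"Gopkg.lock":         true,
	"Godeps/Godeps.json": true,
	"vendor/vendor.json": true,
}

// converterKey applies the same match as ConvertLegacyConfig:
// try the base name first, then the last two path elements.
func converterKey(file string) string {
	i := strings.LastIndex(file, "/")
	j := -2
	if i >= 0 {
		j = strings.LastIndex(file[:i], "/")
	}
	if keys[file[i+1:]] { // when i == -1 this is the whole name
		return file[i+1:]
	}
	if j != -2 && keys[file[j+1:]] {
		return file[j+1:]
	}
	return ""
}

func main() {
	fmt.Println(converterKey("GLOCKFILE"))               // GLOCKFILE (no slash: whole name)
	fmt.Println(converterKey("repo/Gopkg.lock"))         // Gopkg.lock (base name)
	fmt.Println(converterKey("repo/vendor/vendor.json")) // vendor/vendor.json (last two elements)
}
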
diff --git a/src/cmd/go/internal/modconv/modconv_test.go b/src/cmd/go/internal/modconv/modconv_test.go
new file mode 100644
index 0000000..750525d
--- /dev/null
+++ b/src/cmd/go/internal/modconv/modconv_test.go
@@ -0,0 +1,69 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+var extMap = map[string]string{
+ ".dep": "Gopkg.lock",
+ ".glide": "glide.lock",
+ ".glock": "GLOCKFILE",
+ ".godeps": "Godeps/Godeps.json",
+ ".tsv": "dependencies.tsv",
+ ".vconf": "vendor.conf",
+ ".vjson": "vendor/vendor.json",
+ ".vyml": "vendor.yml",
+ ".vmanifest": "vendor/manifest",
+}
+
+func Test(t *testing.T) {
+ tests, _ := filepath.Glob("testdata/*")
+ if len(tests) == 0 {
+ t.Fatalf("no tests found")
+ }
+ for _, test := range tests {
+ file := filepath.Base(test)
+ ext := filepath.Ext(file)
+ if ext == ".out" {
+ continue
+ }
+ t.Run(file, func(t *testing.T) {
+ if extMap[ext] == "" {
+ t.Fatal("unknown extension")
+ }
+ if Converters[extMap[ext]] == nil {
+ t.Fatalf("Converters[%q] == nil", extMap[ext])
+ }
+ data, err := os.ReadFile(test)
+ if err != nil {
+ t.Fatal(err)
+ }
+ out, err := Converters[extMap[ext]](test, data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want, err := os.ReadFile(test[:len(test)-len(ext)] + ".out")
+ if err != nil {
+ t.Error(err)
+ }
+ var buf bytes.Buffer
+ for _, r := range out.Require {
+ fmt.Fprintf(&buf, "%s %s\n", r.Mod.Path, r.Mod.Version)
+ }
+ for _, r := range out.Replace {
+ fmt.Fprintf(&buf, "replace: %s %s %s %s\n", r.Old.Path, r.Old.Version, r.New.Path, r.New.Version)
+ }
+ if !bytes.Equal(buf.Bytes(), want) {
+ t.Errorf("have:\n%s\nwant:\n%s", buf.Bytes(), want)
+ }
+ })
+ }
+}
diff --git a/src/cmd/go/internal/modconv/testdata/cockroach.glock b/src/cmd/go/internal/modconv/testdata/cockroach.glock
new file mode 100644
index 0000000..221c8ac
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/cockroach.glock
@@ -0,0 +1,41 @@
+cmd github.com/cockroachdb/c-protobuf/cmd/protoc
+cmd github.com/cockroachdb/yacc
+cmd github.com/gogo/protobuf/protoc-gen-gogo
+cmd github.com/golang/lint/golint
+cmd github.com/jteeuwen/go-bindata/go-bindata
+cmd github.com/kisielk/errcheck
+cmd github.com/robfig/glock
+cmd github.com/tebeka/go2xunit
+cmd golang.org/x/tools/cmd/goimports
+cmd golang.org/x/tools/cmd/stringer
+github.com/agtorre/gocolorize f42b554bf7f006936130c9bb4f971afd2d87f671
+github.com/biogo/store e1f74b3c58befe661feed7fa4cf52436de753128
+github.com/cockroachdb/c-lz4 6e71f140a365017bbe0904710007f8725fd3f809
+github.com/cockroachdb/c-protobuf 0f9ab7b988ca7474cf76b9a961ab03c0552abcb3
+github.com/cockroachdb/c-rocksdb 7fc876fe79b96de0e25069c9ae27e6444637bd54
+github.com/cockroachdb/c-snappy 618733f9e5bab8463b9049117a335a7a1bfc9fd5
+github.com/cockroachdb/yacc 572e006f8e6b0061ebda949d13744f5108389514
+github.com/coreos/etcd 18ecc297bc913bed6fc093d66b1fa22020dba7dc
+github.com/docker/docker 7374852be9def787921aea2ca831771982badecf
+github.com/elazarl/go-bindata-assetfs 3dcc96556217539f50599357fb481ac0dc7439b9
+github.com/gogo/protobuf 98e73e511a62a9c232152f94999112c80142a813
+github.com/golang/lint 7b7f4364ff76043e6c3610281525fabc0d90f0e4
+github.com/google/btree cc6329d4279e3f025a53a83c397d2339b5705c45
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/jteeuwen/go-bindata dce55d09e24ac40a6e725c8420902b86554f8046
+github.com/julienschmidt/httprouter 6aacfd5ab513e34f7e64ea9627ab9670371b34e7
+github.com/kisielk/errcheck 50b84cf7fa18ee2985b8c63ba3de5edd604b9259
+github.com/kisielk/gotool d678387370a2eb9b5b0a33218bc8c9d8de15b6be
+github.com/lib/pq a8d8d01c4f91602f876bf5aa210274e8203a6b45
+github.com/montanaflynn/stats 44fb56da2a2a67d394dec0e18a82dd316f192529
+github.com/peterh/liner 1bb0d1c1a25ed393d8feb09bab039b2b1b1fbced
+github.com/robfig/glock cb3c3ec56de988289cab7bbd284eddc04dfee6c9
+github.com/samalba/dockerclient 12570e600d71374233e5056ba315f657ced496c7
+github.com/spf13/cobra 66816bcd0378e248c613e3c443c020f544c28804
+github.com/spf13/pflag 67cbc198fd11dab704b214c1e629a97af392c085
+github.com/tebeka/go2xunit d45000af2242dd0e7b8c7b07d82a1068adc5fd40
+golang.org/x/crypto cc04154d65fb9296747569b107cfd05380b1ea3e
+golang.org/x/net 8bfde94a845cb31000de3266ac83edbda58dab09
+golang.org/x/text d4cc1b1e16b49d6dafc4982403b40fe89c512cd5
+golang.org/x/tools d02228d1857b9f49cd0252788516ff5584266eb6
+gopkg.in/yaml.v1 9f9df34309c04878acc86042b16630b0f696e1de
diff --git a/src/cmd/go/internal/modconv/testdata/cockroach.out b/src/cmd/go/internal/modconv/testdata/cockroach.out
new file mode 100644
index 0000000..30cdbb7
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/cockroach.out
@@ -0,0 +1,31 @@
+github.com/agtorre/gocolorize f42b554bf7f006936130c9bb4f971afd2d87f671
+github.com/biogo/store e1f74b3c58befe661feed7fa4cf52436de753128
+github.com/cockroachdb/c-lz4 6e71f140a365017bbe0904710007f8725fd3f809
+github.com/cockroachdb/c-protobuf 0f9ab7b988ca7474cf76b9a961ab03c0552abcb3
+github.com/cockroachdb/c-rocksdb 7fc876fe79b96de0e25069c9ae27e6444637bd54
+github.com/cockroachdb/c-snappy 618733f9e5bab8463b9049117a335a7a1bfc9fd5
+github.com/cockroachdb/yacc 572e006f8e6b0061ebda949d13744f5108389514
+github.com/coreos/etcd 18ecc297bc913bed6fc093d66b1fa22020dba7dc
+github.com/docker/docker 7374852be9def787921aea2ca831771982badecf
+github.com/elazarl/go-bindata-assetfs 3dcc96556217539f50599357fb481ac0dc7439b9
+github.com/gogo/protobuf 98e73e511a62a9c232152f94999112c80142a813
+github.com/golang/lint 7b7f4364ff76043e6c3610281525fabc0d90f0e4
+github.com/google/btree cc6329d4279e3f025a53a83c397d2339b5705c45
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/jteeuwen/go-bindata dce55d09e24ac40a6e725c8420902b86554f8046
+github.com/julienschmidt/httprouter 6aacfd5ab513e34f7e64ea9627ab9670371b34e7
+github.com/kisielk/errcheck 50b84cf7fa18ee2985b8c63ba3de5edd604b9259
+github.com/kisielk/gotool d678387370a2eb9b5b0a33218bc8c9d8de15b6be
+github.com/lib/pq a8d8d01c4f91602f876bf5aa210274e8203a6b45
+github.com/montanaflynn/stats 44fb56da2a2a67d394dec0e18a82dd316f192529
+github.com/peterh/liner 1bb0d1c1a25ed393d8feb09bab039b2b1b1fbced
+github.com/robfig/glock cb3c3ec56de988289cab7bbd284eddc04dfee6c9
+github.com/samalba/dockerclient 12570e600d71374233e5056ba315f657ced496c7
+github.com/spf13/cobra 66816bcd0378e248c613e3c443c020f544c28804
+github.com/spf13/pflag 67cbc198fd11dab704b214c1e629a97af392c085
+github.com/tebeka/go2xunit d45000af2242dd0e7b8c7b07d82a1068adc5fd40
+golang.org/x/crypto cc04154d65fb9296747569b107cfd05380b1ea3e
+golang.org/x/net 8bfde94a845cb31000de3266ac83edbda58dab09
+golang.org/x/text d4cc1b1e16b49d6dafc4982403b40fe89c512cd5
+golang.org/x/tools d02228d1857b9f49cd0252788516ff5584266eb6
+gopkg.in/yaml.v1 9f9df34309c04878acc86042b16630b0f696e1de
diff --git a/src/cmd/go/internal/modconv/testdata/dockermachine.godeps b/src/cmd/go/internal/modconv/testdata/dockermachine.godeps
new file mode 100644
index 0000000..a551002
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/dockermachine.godeps
@@ -0,0 +1,159 @@
+{
+ "ImportPath": "github.com/docker/machine",
+ "GoVersion": "go1.4.2",
+ "Deps": [
+ {
+ "ImportPath": "code.google.com/p/goauth2/oauth",
+ "Comment": "weekly-56",
+ "Rev": "afe77d958c701557ec5dc56f6936fcc194d15520"
+ },
+ {
+ "ImportPath": "github.com/MSOpenTech/azure-sdk-for-go",
+ "Comment": "v1.1-17-g515f3ec",
+ "Rev": "515f3ec74ce6a5b31e934cefae997c97bd0a1b1e"
+ },
+ {
+ "ImportPath": "github.com/cenkalti/backoff",
+ "Rev": "9831e1e25c874e0a0601b6dc43641071414eec7a"
+ },
+ {
+ "ImportPath": "github.com/codegangsta/cli",
+ "Comment": "1.2.0-64-ge1712f3",
+ "Rev": "e1712f381785e32046927f64a7c86fe569203196"
+ },
+ {
+ "ImportPath": "github.com/digitalocean/godo",
+ "Comment": "v0.5.0",
+ "Rev": "5478aae80694de1d2d0e02c386bbedd201266234"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/dockerversion",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/engine",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/archive",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/fileutils",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/ioutils",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/mflag",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/parsers",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/pools",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/promise",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/system",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/term",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/timeutils",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/units",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/version",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar",
+ "Comment": "v1.5.0",
+ "Rev": "a8a31eff10544860d2188dddabdee4d727545796"
+ },
+ {
+ "ImportPath": "github.com/docker/libtrust",
+ "Rev": "c54fbb67c1f1e68d7d6f8d2ad7c9360404616a41"
+ },
+ {
+ "ImportPath": "github.com/google/go-querystring/query",
+ "Rev": "30f7a39f4a218feb5325f3aebc60c32a572a8274"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/mapstructure",
+ "Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
+ },
+ {
+ "ImportPath": "github.com/rackspace/gophercloud",
+ "Comment": "v1.0.0-558-ce0f487",
+ "Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
+ },
+ {
+ "ImportPath": "github.com/skarademir/naturalsort",
+ "Rev": "983d4d86054d80f91fd04dd62ec52c1d078ce403"
+ },
+ {
+ "ImportPath": "github.com/smartystreets/go-aws-auth",
+ "Rev": "1f0db8c0ee6362470abe06a94e3385927ed72a4b"
+ },
+ {
+ "ImportPath": "github.com/stretchr/testify/assert",
+ "Rev": "e4ec8152c15fc46bd5056ce65997a07c7d415325"
+ },
+ {
+ "ImportPath": "github.com/pyr/egoscale/src/egoscale",
+ "Rev": "bbaa67324aeeacc90430c1fe0a9c620d3929512e"
+ },
+ {
+ "ImportPath": "github.com/tent/http-link-go",
+ "Rev": "ac974c61c2f990f4115b119354b5e0b47550e888"
+ },
+ {
+ "ImportPath": "github.com/vmware/govcloudair",
+ "Comment": "v0.0.2",
+ "Rev": "66a23eaabc61518f91769939ff541886fe1dceef"
+ },
+ {
+ "ImportPath": "golang.org/x/crypto/ssh",
+ "Rev": "1fbbd62cfec66bd39d91e97749579579d4d3037e"
+ },
+ {
+ "ImportPath": "google.golang.org/api/compute/v1",
+ "Rev": "aa91ac681e18e52b1a0dfe29b9d8354e88c0dcf5"
+ },
+ {
+ "ImportPath": "google.golang.org/api/googleapi",
+ "Rev": "aa91ac681e18e52b1a0dfe29b9d8354e88c0dcf5"
+ }
+ ]
+}
diff --git a/src/cmd/go/internal/modconv/testdata/dockermachine.out b/src/cmd/go/internal/modconv/testdata/dockermachine.out
new file mode 100644
index 0000000..0b39cea
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/dockermachine.out
@@ -0,0 +1,33 @@
+code.google.com/p/goauth2/oauth afe77d958c701557ec5dc56f6936fcc194d15520
+github.com/MSOpenTech/azure-sdk-for-go 515f3ec74ce6a5b31e934cefae997c97bd0a1b1e
+github.com/cenkalti/backoff 9831e1e25c874e0a0601b6dc43641071414eec7a
+github.com/codegangsta/cli e1712f381785e32046927f64a7c86fe569203196
+github.com/digitalocean/godo 5478aae80694de1d2d0e02c386bbedd201266234
+github.com/docker/docker/dockerversion a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/engine a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/archive a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/fileutils a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/ioutils a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/mflag a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/parsers a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/pools a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/promise a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/system a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/term a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/timeutils a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/units a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/pkg/version a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar a8a31eff10544860d2188dddabdee4d727545796
+github.com/docker/libtrust c54fbb67c1f1e68d7d6f8d2ad7c9360404616a41
+github.com/google/go-querystring/query 30f7a39f4a218feb5325f3aebc60c32a572a8274
+github.com/mitchellh/mapstructure 740c764bc6149d3f1806231418adb9f52c11bcbf
+github.com/rackspace/gophercloud ce0f487f6747ab43c4e4404722df25349385bebd
+github.com/skarademir/naturalsort 983d4d86054d80f91fd04dd62ec52c1d078ce403
+github.com/smartystreets/go-aws-auth 1f0db8c0ee6362470abe06a94e3385927ed72a4b
+github.com/stretchr/testify/assert e4ec8152c15fc46bd5056ce65997a07c7d415325
+github.com/pyr/egoscale/src/egoscale bbaa67324aeeacc90430c1fe0a9c620d3929512e
+github.com/tent/http-link-go ac974c61c2f990f4115b119354b5e0b47550e888
+github.com/vmware/govcloudair 66a23eaabc61518f91769939ff541886fe1dceef
+golang.org/x/crypto/ssh 1fbbd62cfec66bd39d91e97749579579d4d3037e
+google.golang.org/api/compute/v1 aa91ac681e18e52b1a0dfe29b9d8354e88c0dcf5
+google.golang.org/api/googleapi aa91ac681e18e52b1a0dfe29b9d8354e88c0dcf5
diff --git a/src/cmd/go/internal/modconv/testdata/dockerman.glide b/src/cmd/go/internal/modconv/testdata/dockerman.glide
new file mode 100644
index 0000000..5ec765a
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/dockerman.glide
@@ -0,0 +1,52 @@
+hash: ead3ea293a6143fe41069ebec814bf197d8c43a92cc7666b1f7e21a419b46feb
+updated: 2016-06-20T21:53:35.420817456Z
+imports:
+- name: github.com/BurntSushi/toml
+ version: f0aeabca5a127c4078abb8c8d64298b147264b55
+- name: github.com/cpuguy83/go-md2man
+ version: a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa
+ subpackages:
+ - md2man
+- name: github.com/fsnotify/fsnotify
+ version: 30411dbcefb7a1da7e84f75530ad3abe4011b4f8
+- name: github.com/hashicorp/hcl
+ version: da486364306ed66c218be9b7953e19173447c18b
+ subpackages:
+ - hcl/ast
+ - hcl/parser
+ - hcl/token
+ - json/parser
+ - hcl/scanner
+ - hcl/strconv
+ - json/scanner
+ - json/token
+- name: github.com/inconshreveable/mousetrap
+ version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+- name: github.com/magiconair/properties
+ version: c265cfa48dda6474e208715ca93e987829f572f8
+- name: github.com/mitchellh/mapstructure
+ version: d2dd0262208475919e1a362f675cfc0e7c10e905
+- name: github.com/russross/blackfriday
+ version: 1d6b8e9301e720b08a8938b8c25c018285885438
+- name: github.com/shurcooL/sanitized_anchor_name
+ version: 10ef21a441db47d8b13ebcc5fd2310f636973c77
+- name: github.com/spf13/cast
+ version: 27b586b42e29bec072fe7379259cc719e1289da6
+- name: github.com/spf13/jwalterweatherman
+ version: 33c24e77fb80341fe7130ee7c594256ff08ccc46
+- name: github.com/spf13/pflag
+ version: dabebe21bf790f782ea4c7bbd2efc430de182afd
+- name: github.com/spf13/viper
+ version: c1ccc378a054ea8d4e38d8c67f6938d4760b53dd
+- name: golang.org/x/sys
+ version: 62bee037599929a6e9146f29d10dd5208c43507d
+ subpackages:
+ - unix
+- name: gopkg.in/yaml.v2
+ version: a83829b6f1293c91addabc89d0571c246397bbf4
+- name: github.com/spf13/cobra
+ repo: https://github.com/dnephin/cobra
+ subpackages:
+ - doc
+ version: v1.3
+devImports: []
diff --git a/src/cmd/go/internal/modconv/testdata/dockerman.out b/src/cmd/go/internal/modconv/testdata/dockerman.out
new file mode 100644
index 0000000..5e6370b
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/dockerman.out
@@ -0,0 +1,16 @@
+github.com/BurntSushi/toml f0aeabca5a127c4078abb8c8d64298b147264b55
+github.com/cpuguy83/go-md2man a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa
+github.com/fsnotify/fsnotify 30411dbcefb7a1da7e84f75530ad3abe4011b4f8
+github.com/hashicorp/hcl da486364306ed66c218be9b7953e19173447c18b
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/magiconair/properties c265cfa48dda6474e208715ca93e987829f572f8
+github.com/mitchellh/mapstructure d2dd0262208475919e1a362f675cfc0e7c10e905
+github.com/russross/blackfriday 1d6b8e9301e720b08a8938b8c25c018285885438
+github.com/shurcooL/sanitized_anchor_name 10ef21a441db47d8b13ebcc5fd2310f636973c77
+github.com/spf13/cast 27b586b42e29bec072fe7379259cc719e1289da6
+github.com/spf13/jwalterweatherman 33c24e77fb80341fe7130ee7c594256ff08ccc46
+github.com/spf13/pflag dabebe21bf790f782ea4c7bbd2efc430de182afd
+github.com/spf13/viper c1ccc378a054ea8d4e38d8c67f6938d4760b53dd
+golang.org/x/sys 62bee037599929a6e9146f29d10dd5208c43507d
+gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
+github.com/spf13/cobra v1.3
diff --git a/src/cmd/go/internal/modconv/testdata/govmomi.out b/src/cmd/go/internal/modconv/testdata/govmomi.out
new file mode 100644
index 0000000..188c458
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/govmomi.out
@@ -0,0 +1,5 @@
+github.com/davecgh/go-xdr/xdr2 4930550ba2e22f87187498acfd78348b15f4e7a8
+github.com/google/uuid 6a5e28554805e78ea6141142aba763936c4761c0
+github.com/kr/pretty 2ee9d7453c02ef7fa518a83ae23644eb8872186a
+github.com/kr/pty 95d05c1eef33a45bd58676b6ce28d105839b8d0b
+github.com/vmware/vmw-guestinfo 25eff159a728be87e103a0b8045e08273f4dbec4
diff --git a/src/cmd/go/internal/modconv/testdata/govmomi.vmanifest b/src/cmd/go/internal/modconv/testdata/govmomi.vmanifest
new file mode 100644
index 0000000..b89e4ab
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/govmomi.vmanifest
@@ -0,0 +1,46 @@
+{
+ "version": 0,
+ "dependencies": [
+ {
+ "importpath": "github.com/davecgh/go-xdr/xdr2",
+ "repository": "https://github.com/rasky/go-xdr",
+ "vcs": "git",
+ "revision": "4930550ba2e22f87187498acfd78348b15f4e7a8",
+ "branch": "improvements",
+ "path": "/xdr2",
+ "notests": true
+ },
+ {
+ "importpath": "github.com/google/uuid",
+ "repository": "https://github.com/google/uuid",
+ "vcs": "git",
+ "revision": "6a5e28554805e78ea6141142aba763936c4761c0",
+ "branch": "master",
+ "notests": true
+ },
+ {
+ "importpath": "github.com/kr/pretty",
+ "repository": "https://github.com/dougm/pretty",
+ "vcs": "git",
+ "revision": "2ee9d7453c02ef7fa518a83ae23644eb8872186a",
+ "branch": "govmomi",
+ "notests": true
+ },
+ {
+ "importpath": "github.com/kr/pty",
+ "repository": "https://github.com/kr/pty",
+ "vcs": "git",
+ "revision": "95d05c1eef33a45bd58676b6ce28d105839b8d0b",
+ "branch": "master",
+ "notests": true
+ },
+ {
+ "importpath": "github.com/vmware/vmw-guestinfo",
+ "repository": "https://github.com/vmware/vmw-guestinfo",
+ "vcs": "git",
+ "revision": "25eff159a728be87e103a0b8045e08273f4dbec4",
+ "branch": "master",
+ "notests": true
+ }
+ ]
+}
diff --git a/src/cmd/go/internal/modconv/testdata/juju.out b/src/cmd/go/internal/modconv/testdata/juju.out
new file mode 100644
index 0000000..c2430b1
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/juju.out
@@ -0,0 +1,106 @@
+github.com/Azure/azure-sdk-for-go 902d95d9f311ae585ee98cfd18f418b467d60d5a
+github.com/Azure/go-autorest 6f40a8acfe03270d792cb8155e2942c09d7cff95
+github.com/ajstarks/svgo 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518
+github.com/altoros/gosigma 31228935eec685587914528585da4eb9b073c76d
+github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
+github.com/bmizerany/pat c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c
+github.com/coreos/go-systemd 7b2428fec40033549c68f54e26e89e7ca9a9ce31
+github.com/dgrijalva/jwt-go 01aeca54ebda6e0fbfafd0a524d234159c05ec20
+github.com/dustin/go-humanize 145fabdb1ab757076a70a886d092a3af27f66f4c
+github.com/godbus/dbus 32c6cc29c14570de4cf6d7e7737d68fb2d01ad15
+github.com/golang/protobuf 4bd1920723d7b7c925de087aa32e2187708897f7
+github.com/google/go-querystring 9235644dd9e52eeae6fa48efd539fdc351a0af53
+github.com/gorilla/schema 08023a0215e7fc27a9aecd8b8c50913c40019478
+github.com/gorilla/websocket 804cb600d06b10672f2fbc0a336a7bee507a428e
+github.com/gosuri/uitable 36ee7e946282a3fb1cfecd476ddc9b35d8847e42
+github.com/joyent/gocommon ade826b8b54e81a779ccb29d358a45ba24b7809c
+github.com/joyent/gosdc 2f11feadd2d9891e92296a1077c3e2e56939547d
+github.com/joyent/gosign 0da0d5f1342065321c97812b1f4ac0c2b0bab56c
+github.com/juju/ansiterm b99631de12cf04a906c1d4e4ec54fb86eae5863d
+github.com/juju/blobstore 06056004b3d7b54bbb7984d830c537bad00fec21
+github.com/juju/bundlechanges 7725027b95e0d54635e0fb11efc2debdcdf19f75
+github.com/juju/cmd 9425a576247f348b9b40afe3b60085de63470de5
+github.com/juju/description d3742c23561884cd7d759ef7142340af1d22cab0
+github.com/juju/errors 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3
+github.com/juju/gnuflag 4e76c56581859c14d9d87e1ddbe29e1c0f10195f
+github.com/juju/go4 40d72ab9641a2a8c36a9c46a51e28367115c8e59
+github.com/juju/gojsonpointer afe8b77aa08f272b49e01b82de78510c11f61500
+github.com/juju/gojsonreference f0d24ac5ee330baa21721cdff56d45e4ee42628e
+github.com/juju/gojsonschema e1ad140384f254c82f89450d9a7c8dd38a632838
+github.com/juju/gomaasapi cfbc096bd45f276c17a391efc4db710b60ae3ad7
+github.com/juju/httpprof 14bf14c307672fd2456bdbf35d19cf0ccd3cf565
+github.com/juju/httprequest 266fd1e9debf09c037a63f074d099a2da4559ece
+github.com/juju/idmclient 4dc25171f675da4206b71695d3fd80e519ad05c1
+github.com/juju/jsonschema a0ef8b74ebcffeeff9fc374854deb4af388f037e
+github.com/juju/loggo 21bc4c63e8b435779a080e39e592969b7b90b889
+github.com/juju/mempool 24974d6c264fe5a29716e7d56ea24c4bd904b7cc
+github.com/juju/mutex 59c26ee163447c5c57f63ff71610d433862013de
+github.com/juju/persistent-cookiejar 5243747bf8f2d0897f6c7a52799327dc97d585e8
+github.com/juju/pubsub 9dcaca7eb4340dbf685aa7b3ad4cc4f8691a33d4
+github.com/juju/replicaset 6b5becf2232ce76656ea765d8d915d41755a1513
+github.com/juju/retry 62c62032529169c7ec02fa48f93349604c345e1f
+github.com/juju/rfc ebdbbdb950cd039a531d15cdc2ac2cbd94f068ee
+github.com/juju/romulus 98d6700423d63971f10ca14afea9ecf2b9b99f0f
+github.com/juju/schema 075de04f9b7d7580d60a1e12a0b3f50bb18e6998
+github.com/juju/terms-client 9b925afd677234e4146dde3cb1a11e187cbed64e
+github.com/juju/testing fce9bc4ebf7a77310c262ac4884e03b778eae06a
+github.com/juju/txn 28898197906200d603394d8e4ce537436529f1c5
+github.com/juju/usso 68a59c96c178fbbad65926e7f93db50a2cd14f33
+github.com/juju/utils 9f8aeb9b09e2d8c769be8317ccfa23f7eec62c26
+github.com/juju/version 1f41e27e54f21acccf9b2dddae063a782a8a7ceb
+github.com/juju/webbrowser 54b8c57083b4afb7dc75da7f13e2967b2606a507
+github.com/juju/xml eb759a627588d35166bc505fceb51b88500e291e
+github.com/juju/zip f6b1e93fa2e29a1d7d49b566b2b51efb060c982a
+github.com/julienschmidt/httprouter 77a895ad01ebc98a4dc95d8355bc825ce80a56f6
+github.com/lestrrat/go-jspointer f4881e611bdbe9fb413a7780721ef8400a1f2341
+github.com/lestrrat/go-jsref e452c7b5801d1c6494c9e7e0cbc7498c0f88dfd1
+github.com/lestrrat/go-jsschema b09d7650b822d2ea3dc83d5091a5e2acd8330051
+github.com/lestrrat/go-jsval b1258a10419fe0693f7b35ad65cd5074bc0ba1e5
+github.com/lestrrat/go-pdebug 2e6eaaa5717f81bda41d27070d3c966f40a1e75f
+github.com/lestrrat/go-structinfo f74c056fe41f860aa6264478c664a6fff8a64298
+github.com/lunixbochs/vtclean 4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36
+github.com/lxc/lxd 23da0234979fa6299565b91b529a6dbeb42ee36d
+github.com/masterzen/azure-sdk-for-go ee4f0065d00cd12b542f18f5bc45799e88163b12
+github.com/masterzen/simplexml 4572e39b1ab9fe03ee513ce6fc7e289e98482190
+github.com/masterzen/winrm 7a535cd943fccaeed196718896beec3fb51aff41
+github.com/masterzen/xmlpath 13f4951698adc0fa9c1dda3e275d489a24201161
+github.com/mattn/go-colorable ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8
+github.com/mattn/go-isatty 66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8
+github.com/mattn/go-runewidth d96d1bd051f2bd9e7e43d602782b37b93b1b5666
+github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
+github.com/nu7hatch/gouuid 179d4d0c4d8d407a32af483c2354df1d2c91e6c3
+github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
+github.com/prometheus/client_golang 575f371f7862609249a1be4c9145f429fe065e32
+github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+github.com/prometheus/common dd586c1c5abb0be59e60f942c22af711a2008cb4
+github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
+github.com/rogpeppe/fastuuid 6724a57986aff9bff1a1770e9347036def7c89f6
+github.com/vmware/govmomi c0c7ce63df7edd78e713257b924c89d9a2dac119
+golang.org/x/crypto 8e06e8ddd9629eb88639aba897641bff8031f1d3
+golang.org/x/net ea47fc708ee3e20177f3ca3716217c4ab75942cb
+golang.org/x/oauth2 11c60b6f71a6ad48ed6f93c65fa4c6f9b1b5b46a
+golang.org/x/sys 7a6e5648d140666db5d920909e082ca00a87ba2c
+golang.org/x/text 2910a502d2bf9e43193af9d68ca516529614eed3
+google.golang.org/api 0d3983fb069cb6651353fc44c5cb604e263f2a93
+google.golang.org/cloud f20d6dcccb44ed49de45ae3703312cb46e627db1
+gopkg.in/amz.v3 8c3190dff075bf5442c9eedbf8f8ed6144a099e7
+gopkg.in/check.v1 4f90aeace3a26ad7021961c297b22c42160c7b25
+gopkg.in/errgo.v1 442357a80af5c6bf9b6d51ae791a39c3421004f3
+gopkg.in/goose.v1 ac43167b647feacdd9a1e34ee81e574551bc748d
+gopkg.in/ini.v1 776aa739ce9373377cd16f526cdf06cb4c89b40f
+gopkg.in/juju/blobstore.v2 51fa6e26128d74e445c72d3a91af555151cc3654
+gopkg.in/juju/charm.v6-unstable 83771c4919d6810bce5b7e63f46bea5fbfed0b93
+gopkg.in/juju/charmrepo.v2-unstable e79aa298df89ea887c9bffec46063c24bfb730f7
+gopkg.in/juju/charmstore.v5-unstable fd1eef3002fc6b6daff5e97efab6f5056d22dcc7
+gopkg.in/juju/environschema.v1 7359fc7857abe2b11b5b3e23811a9c64cb6b01e0
+gopkg.in/juju/jujusvg.v2 d82160011935ef79fc7aca84aba2c6f74700fe75
+gopkg.in/juju/names.v2 0847c26d322a121e52614f969fb82eae2820c715
+gopkg.in/juju/worker.v1 6965b9d826717287bb002e02d1fd4d079978083e
+gopkg.in/macaroon-bakery.v1 469b44e6f1f9479e115c8ae879ef80695be624d5
+gopkg.in/macaroon.v1 ab3940c6c16510a850e1c2dd628b919f0f3f1464
+gopkg.in/mgo.v2 f2b6f6c918c452ad107eec89615f074e3bd80e33
+gopkg.in/natefinch/lumberjack.v2 514cbda263a734ae8caac038dadf05f8f3f9f738
+gopkg.in/natefinch/npipe.v2 c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6
+gopkg.in/retry.v1 c09f6b86ba4d5d2cf5bdf0665364aec9fd4815db
+gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
+gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
diff --git a/src/cmd/go/internal/modconv/testdata/juju.tsv b/src/cmd/go/internal/modconv/testdata/juju.tsv
new file mode 100644
index 0000000..0bddcef
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/juju.tsv
@@ -0,0 +1,106 @@
+github.com/Azure/azure-sdk-for-go git 902d95d9f311ae585ee98cfd18f418b467d60d5a 2016-07-20T05:16:58Z
+github.com/Azure/go-autorest git 6f40a8acfe03270d792cb8155e2942c09d7cff95 2016-07-19T23:14:56Z
+github.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z
+github.com/altoros/gosigma git 31228935eec685587914528585da4eb9b073c76d 2015-04-08T14:52:32Z
+github.com/beorn7/perks git 3ac7bf7a47d159a033b107610db8a1b6575507a4 2016-02-29T21:34:45Z
+github.com/bmizerany/pat git c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c 2016-02-17T10:32:42Z
+github.com/coreos/go-systemd git 7b2428fec40033549c68f54e26e89e7ca9a9ce31 2016-02-02T21:14:25Z
+github.com/dgrijalva/jwt-go git 01aeca54ebda6e0fbfafd0a524d234159c05ec20 2016-07-05T20:30:06Z
+github.com/dustin/go-humanize git 145fabdb1ab757076a70a886d092a3af27f66f4c 2014-12-28T07:11:48Z
+github.com/godbus/dbus git 32c6cc29c14570de4cf6d7e7737d68fb2d01ad15 2016-05-06T22:25:50Z
+github.com/golang/protobuf git 4bd1920723d7b7c925de087aa32e2187708897f7 2016-11-09T07:27:36Z
+github.com/google/go-querystring git 9235644dd9e52eeae6fa48efd539fdc351a0af53 2016-04-01T23:30:42Z
+github.com/gorilla/schema git 08023a0215e7fc27a9aecd8b8c50913c40019478 2016-04-26T23:15:12Z
+github.com/gorilla/websocket git 804cb600d06b10672f2fbc0a336a7bee507a428e 2017-02-14T17:41:18Z
+github.com/gosuri/uitable git 36ee7e946282a3fb1cfecd476ddc9b35d8847e42 2016-04-04T20:39:58Z
+github.com/joyent/gocommon git ade826b8b54e81a779ccb29d358a45ba24b7809c 2016-03-20T19:31:33Z
+github.com/joyent/gosdc git 2f11feadd2d9891e92296a1077c3e2e56939547d 2014-05-24T00:08:15Z
+github.com/joyent/gosign git 0da0d5f1342065321c97812b1f4ac0c2b0bab56c 2014-05-24T00:07:34Z
+github.com/juju/ansiterm git b99631de12cf04a906c1d4e4ec54fb86eae5863d 2016-09-07T23:45:32Z
+github.com/juju/blobstore git 06056004b3d7b54bbb7984d830c537bad00fec21 2015-07-29T11:18:58Z
+github.com/juju/bundlechanges git 7725027b95e0d54635e0fb11efc2debdcdf19f75 2016-12-15T16:06:52Z
+github.com/juju/cmd git 9425a576247f348b9b40afe3b60085de63470de5 2017-03-20T01:37:09Z
+github.com/juju/description git d3742c23561884cd7d759ef7142340af1d22cab0 2017-03-20T07:46:40Z
+github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z
+github.com/juju/gnuflag git 4e76c56581859c14d9d87e1ddbe29e1c0f10195f 2016-08-09T16:52:14Z
+github.com/juju/go4 git 40d72ab9641a2a8c36a9c46a51e28367115c8e59 2016-02-22T16:32:58Z
+github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z
+github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z
+github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z
+github.com/juju/gomaasapi git cfbc096bd45f276c17a391efc4db710b60ae3ad7 2017-02-27T07:51:07Z
+github.com/juju/httpprof git 14bf14c307672fd2456bdbf35d19cf0ccd3cf565 2014-12-17T16:00:36Z
+github.com/juju/httprequest git 266fd1e9debf09c037a63f074d099a2da4559ece 2016-10-06T15:09:09Z
+github.com/juju/idmclient git 4dc25171f675da4206b71695d3fd80e519ad05c1 2017-02-09T16:27:49Z
+github.com/juju/jsonschema git a0ef8b74ebcffeeff9fc374854deb4af388f037e 2016-11-02T18:19:19Z
+github.com/juju/loggo git 21bc4c63e8b435779a080e39e592969b7b90b889 2017-02-22T12:20:47Z
+github.com/juju/mempool git 24974d6c264fe5a29716e7d56ea24c4bd904b7cc 2016-02-05T10:49:27Z
+github.com/juju/mutex git 59c26ee163447c5c57f63ff71610d433862013de 2016-06-17T01:09:07Z
+github.com/juju/persistent-cookiejar git 5243747bf8f2d0897f6c7a52799327dc97d585e8 2016-11-15T13:33:28Z
+github.com/juju/pubsub git 9dcaca7eb4340dbf685aa7b3ad4cc4f8691a33d4 2016-07-28T03:00:34Z
+github.com/juju/replicaset git 6b5becf2232ce76656ea765d8d915d41755a1513 2016-11-25T16:08:49Z
+github.com/juju/retry git 62c62032529169c7ec02fa48f93349604c345e1f 2015-10-29T02:48:21Z
+github.com/juju/rfc git ebdbbdb950cd039a531d15cdc2ac2cbd94f068ee 2016-07-11T02:42:13Z
+github.com/juju/romulus git 98d6700423d63971f10ca14afea9ecf2b9b99f0f 2017-01-23T14:29:29Z
+github.com/juju/schema git 075de04f9b7d7580d60a1e12a0b3f50bb18e6998 2016-04-20T04:42:03Z
+github.com/juju/terms-client git 9b925afd677234e4146dde3cb1a11e187cbed64e 2016-08-09T13:19:00Z
+github.com/juju/testing git fce9bc4ebf7a77310c262ac4884e03b778eae06a 2017-02-22T09:01:19Z
+github.com/juju/txn git 28898197906200d603394d8e4ce537436529f1c5 2016-11-16T04:07:55Z
+github.com/juju/usso git 68a59c96c178fbbad65926e7f93db50a2cd14f33 2016-04-01T10:44:24Z
+github.com/juju/utils git 9f8aeb9b09e2d8c769be8317ccfa23f7eec62c26 2017-02-15T08:19:00Z
+github.com/juju/version git 1f41e27e54f21acccf9b2dddae063a782a8a7ceb 2016-10-31T05:19:06Z
+github.com/juju/webbrowser git 54b8c57083b4afb7dc75da7f13e2967b2606a507 2016-03-09T14:36:29Z
+github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z
+github.com/juju/zip git f6b1e93fa2e29a1d7d49b566b2b51efb060c982a 2016-02-05T10:52:21Z
+github.com/julienschmidt/httprouter git 77a895ad01ebc98a4dc95d8355bc825ce80a56f6 2015-10-13T22:55:20Z
+github.com/lestrrat/go-jspointer git f4881e611bdbe9fb413a7780721ef8400a1f2341 2016-02-29T02:13:54Z
+github.com/lestrrat/go-jsref git e452c7b5801d1c6494c9e7e0cbc7498c0f88dfd1 2016-06-01T01:32:40Z
+github.com/lestrrat/go-jsschema git b09d7650b822d2ea3dc83d5091a5e2acd8330051 2016-09-03T13:19:57Z
+github.com/lestrrat/go-jsval git b1258a10419fe0693f7b35ad65cd5074bc0ba1e5 2016-10-12T04:57:17Z
+github.com/lestrrat/go-pdebug git 2e6eaaa5717f81bda41d27070d3c966f40a1e75f 2016-08-17T06:33:33Z
+github.com/lestrrat/go-structinfo git f74c056fe41f860aa6264478c664a6fff8a64298 2016-03-08T13:11:05Z
+github.com/lunixbochs/vtclean git 4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36 2016-01-25T03:51:06Z
+github.com/lxc/lxd git 23da0234979fa6299565b91b529a6dbeb42ee36d 2017-02-16T05:29:42Z
+github.com/masterzen/azure-sdk-for-go git ee4f0065d00cd12b542f18f5bc45799e88163b12 2016-10-14T13:56:28Z
+github.com/masterzen/simplexml git 4572e39b1ab9fe03ee513ce6fc7e289e98482190 2016-06-08T18:30:07Z
+github.com/masterzen/winrm git 7a535cd943fccaeed196718896beec3fb51aff41 2016-10-14T15:10:40Z
+github.com/masterzen/xmlpath git 13f4951698adc0fa9c1dda3e275d489a24201161 2014-02-18T18:59:01Z
+github.com/mattn/go-colorable git ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8 2016-07-31T23:54:17Z
+github.com/mattn/go-isatty git 66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8 2016-08-06T12:27:52Z
+github.com/mattn/go-runewidth git d96d1bd051f2bd9e7e43d602782b37b93b1b5666 2015-11-18T07:21:59Z
+github.com/matttproud/golang_protobuf_extensions git c12348ce28de40eed0136aa2b644d0ee0650e56c 2016-04-24T11:30:07Z
+github.com/nu7hatch/gouuid git 179d4d0c4d8d407a32af483c2354df1d2c91e6c3 2013-12-21T20:05:32Z
+github.com/pkg/errors git 839d9e913e063e28dfd0e6c7b7512793e0a48be9 2016-10-02T05:25:12Z
+github.com/prometheus/client_golang git 575f371f7862609249a1be4c9145f429fe065e32 2016-11-24T15:57:32Z
+github.com/prometheus/client_model git fa8ad6fec33561be4280a8f0514318c79d7f6cb6 2015-02-12T10:17:44Z
+github.com/prometheus/common git dd586c1c5abb0be59e60f942c22af711a2008cb4 2016-05-03T22:05:32Z
+github.com/prometheus/procfs git abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 2016-04-11T19:08:41Z
+github.com/rogpeppe/fastuuid git 6724a57986aff9bff1a1770e9347036def7c89f6 2015-01-06T09:32:20Z
+github.com/vmware/govmomi git c0c7ce63df7edd78e713257b924c89d9a2dac119 2016-06-30T15:37:42Z
+golang.org/x/crypto git 8e06e8ddd9629eb88639aba897641bff8031f1d3 2016-09-22T17:06:29Z
+golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z
+golang.org/x/oauth2 git 11c60b6f71a6ad48ed6f93c65fa4c6f9b1b5b46a 2015-03-25T02:00:22Z
+golang.org/x/sys git 7a6e5648d140666db5d920909e082ca00a87ba2c 2017-02-01T05:12:45Z
+golang.org/x/text git 2910a502d2bf9e43193af9d68ca516529614eed3 2016-07-26T16:48:57Z
+google.golang.org/api git 0d3983fb069cb6651353fc44c5cb604e263f2a93 2014-12-10T23:51:26Z
+google.golang.org/cloud git f20d6dcccb44ed49de45ae3703312cb46e627db1 2015-03-19T22:36:35Z
+gopkg.in/amz.v3 git 8c3190dff075bf5442c9eedbf8f8ed6144a099e7 2016-12-15T13:08:49Z
+gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z
+gopkg.in/errgo.v1 git 442357a80af5c6bf9b6d51ae791a39c3421004f3 2016-12-22T12:58:16Z
+gopkg.in/goose.v1 git ac43167b647feacdd9a1e34ee81e574551bc748d 2017-02-15T01:56:23Z
+gopkg.in/ini.v1 git 776aa739ce9373377cd16f526cdf06cb4c89b40f 2016-02-22T23:24:41Z
+gopkg.in/juju/blobstore.v2 git 51fa6e26128d74e445c72d3a91af555151cc3654 2016-01-25T02:37:03Z
+gopkg.in/juju/charm.v6-unstable git 83771c4919d6810bce5b7e63f46bea5fbfed0b93 2016-10-03T20:31:18Z
+gopkg.in/juju/charmrepo.v2-unstable git e79aa298df89ea887c9bffec46063c24bfb730f7 2016-11-17T15:25:28Z
+gopkg.in/juju/charmstore.v5-unstable git fd1eef3002fc6b6daff5e97efab6f5056d22dcc7 2016-09-16T10:09:07Z
+gopkg.in/juju/environschema.v1 git 7359fc7857abe2b11b5b3e23811a9c64cb6b01e0 2015-11-04T11:58:10Z
+gopkg.in/juju/jujusvg.v2 git d82160011935ef79fc7aca84aba2c6f74700fe75 2016-06-09T10:52:15Z
+gopkg.in/juju/names.v2 git 0847c26d322a121e52614f969fb82eae2820c715 2016-11-02T13:43:03Z
+gopkg.in/juju/worker.v1 git 6965b9d826717287bb002e02d1fd4d079978083e 2017-03-08T00:24:58Z
+gopkg.in/macaroon-bakery.v1 git 469b44e6f1f9479e115c8ae879ef80695be624d5 2016-06-22T12:14:21Z
+gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z
+gopkg.in/mgo.v2 git f2b6f6c918c452ad107eec89615f074e3bd80e33 2016-08-18T01:52:18Z
+gopkg.in/natefinch/lumberjack.v2 git 514cbda263a734ae8caac038dadf05f8f3f9f738 2016-01-25T11:17:49Z
+gopkg.in/natefinch/npipe.v2 git c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6 2016-06-21T03:49:01Z
+gopkg.in/retry.v1 git c09f6b86ba4d5d2cf5bdf0665364aec9fd4815db 2016-10-25T18:14:30Z
+gopkg.in/tomb.v1 git dd632973f1e7218eb1089048e0798ec9ae7dceb8 2014-10-24T13:56:13Z
+gopkg.in/yaml.v2 git a3f3340b5840cee44f372bddb5880fcbc419b46a 2017-02-08T14:18:51Z
diff --git a/src/cmd/go/internal/modconv/testdata/moby.out b/src/cmd/go/internal/modconv/testdata/moby.out
new file mode 100644
index 0000000..2cb2e05
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/moby.out
@@ -0,0 +1,105 @@
+github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
+github.com/Microsoft/hcsshim v0.6.5
+github.com/Microsoft/go-winio v0.4.5
+github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
+github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
+github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609
+github.com/gorilla/context v1.1
+github.com/gorilla/mux v1.1
+github.com/Microsoft/opengcs v0.3.4
+github.com/kr/pty 5cf931ef8f
+github.com/mattn/go-shellwords v1.0.3
+github.com/sirupsen/logrus v1.0.3
+github.com/tchap/go-patricia v2.2.6
+github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
+golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
+golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
+github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
+golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
+github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
+github.com/pmezard/go-difflib v1.0.0
+github.com/gotestyourself/gotestyourself v1.1.0
+github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
+github.com/imdario/mergo 0.2.1
+golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
+github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
+github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
+github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
+github.com/docker/libnetwork 68f1039f172434709a4550fe92e3e058406c74ce
+github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
+github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
+github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
+github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
+github.com/hashicorp/memberlist v0.1.0
+github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
+github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
+github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
+github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
+github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
+github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
+github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
+github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
+github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
+github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
+github.com/coreos/etcd v3.2.1
+github.com/coreos/go-semver v0.2.0
+github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
+github.com/hashicorp/consul v0.5.2
+github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
+github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
+github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
+github.com/vbatts/tar-split v0.10.1
+github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
+github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
+github.com/pborman/uuid v1.0
+google.golang.org/grpc v1.3.0
+github.com/opencontainers/runc 0351df1c5a66838d0c392b4ac4cf9450de844e2d
+github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
+github.com/opencontainers/runtime-spec v1.0.0
+github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
+github.com/coreos/go-systemd v4
+github.com/godbus/dbus v4.0.0
+github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
+github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
+github.com/Graylog2/go-gelf v2
+github.com/fluent/fluent-logger-golang v1.2.1
+github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972
+github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
+github.com/fsnotify/fsnotify v1.4.2
+github.com/aws/aws-sdk-go v1.4.22
+github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0
+github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
+github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf
+golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0
+google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823
+cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525
+github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
+google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
+github.com/containerd/containerd 06b9cb35161009dcb7123345749fef02f7cea8e0
+github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
+github.com/docker/swarmkit 872861d2ae46958af7ead1d5fffb092c73afbaf0
+github.com/gogo/protobuf v0.4
+github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
+github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
+golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
+golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
+github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
+github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
+github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
+github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
+github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e
+github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
+github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
+github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
+github.com/matttproud/golang_protobuf_extensions v1.0.0
+github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
+github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
+github.com/spf13/cobra v1.5.1
+github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c
+github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
+github.com/opencontainers/selinux v1.0.0-rc1
diff --git a/src/cmd/go/internal/modconv/testdata/moby.vconf b/src/cmd/go/internal/modconv/testdata/moby.vconf
new file mode 100644
index 0000000..53b90d1
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/moby.vconf
@@ -0,0 +1,149 @@
+# the following lines are in sorted order, FYI
+github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
+github.com/Microsoft/hcsshim v0.6.5
+github.com/Microsoft/go-winio v0.4.5
+github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
+github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
+github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
+github.com/gorilla/context v1.1
+github.com/gorilla/mux v1.1
+github.com/Microsoft/opengcs v0.3.4
+github.com/kr/pty 5cf931ef8f
+github.com/mattn/go-shellwords v1.0.3
+github.com/sirupsen/logrus v1.0.3
+github.com/tchap/go-patricia v2.2.6
+github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
+golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
+golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
+github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
+golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
+github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
+github.com/pmezard/go-difflib v1.0.0
+github.com/gotestyourself/gotestyourself v1.1.0
+
+github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
+github.com/imdario/mergo 0.2.1
+golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
+
+github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
+github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
+github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
+
+#get libnetwork packages
+github.com/docker/libnetwork 68f1039f172434709a4550fe92e3e058406c74ce
+github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
+github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
+github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
+github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
+github.com/hashicorp/memberlist v0.1.0
+github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
+github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
+github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
+github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
+github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
+github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
+github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
+github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
+github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
+github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
+github.com/coreos/etcd v3.2.1
+github.com/coreos/go-semver v0.2.0
+github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
+github.com/hashicorp/consul v0.5.2
+github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
+github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
+
+# get graph and distribution packages
+github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
+github.com/vbatts/tar-split v0.10.1
+github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
+
+# get go-zfs packages
+github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
+github.com/pborman/uuid v1.0
+
+google.golang.org/grpc v1.3.0
+
+# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
+github.com/opencontainers/runc 0351df1c5a66838d0c392b4ac4cf9450de844e2d
+github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
+github.com/opencontainers/runtime-spec v1.0.0
+
+github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
+
+# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json)
+github.com/coreos/go-systemd v4
+github.com/godbus/dbus v4.0.0
+github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
+github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
+
+# gelf logging driver deps
+github.com/Graylog2/go-gelf v2
+
+github.com/fluent/fluent-logger-golang v1.2.1
+# fluent-logger-golang deps
+github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972
+github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
+
+# fsnotify
+github.com/fsnotify/fsnotify v1.4.2
+
+# awslogs deps
+github.com/aws/aws-sdk-go v1.4.22
+github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0
+github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
+
+# logentries
+github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf
+
+# gcplogs deps
+golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0
+google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823
+cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525
+github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
+google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
+
+# containerd
+github.com/containerd/containerd 06b9cb35161009dcb7123345749fef02f7cea8e0
+github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
+
+# cluster
+github.com/docker/swarmkit 872861d2ae46958af7ead1d5fffb092c73afbaf0
+github.com/gogo/protobuf v0.4
+github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
+github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
+golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
+golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
+github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
+github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
+github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
+github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
+github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e
+github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
+github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
+github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
+github.com/matttproud/golang_protobuf_extensions v1.0.0
+github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
+github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
+
+# cli
+github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git
+github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
+
+# metrics
+github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
+
+github.com/opencontainers/selinux v1.0.0-rc1
+
+# archive/tar
+# mkdir -p ./vendor/archive
+# git clone git://github.com/tonistiigi/go-1.git ./go
+# git --git-dir ./go/.git --work-tree ./go checkout revert-prefix-ignore
+# cp -a go/src/archive/tar ./vendor/archive/tar
+# rm -rf ./go
+# vndr
diff --git a/src/cmd/go/internal/modconv/testdata/panicparse.out b/src/cmd/go/internal/modconv/testdata/panicparse.out
new file mode 100644
index 0000000..8830033
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/panicparse.out
@@ -0,0 +1,8 @@
+github.com/kr/pretty 737b74a46c4bf788349f72cb256fed10aea4d0ac
+github.com/kr/text 7cafcd837844e784b526369c9bce262804aebc60
+github.com/maruel/ut a9c9f15ccfa6f8b90182a53df32f4745586fbae3
+github.com/mattn/go-colorable 9056b7a9f2d1f2d96498d6d146acd1f9d5ed3d59
+github.com/mattn/go-isatty 56b76bdf51f7708750eac80fa38b952bb9f32639
+github.com/mgutz/ansi c286dcecd19ff979eeb73ea444e479b903f2cfcb
+github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
+golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
diff --git a/src/cmd/go/internal/modconv/testdata/panicparse.vyml b/src/cmd/go/internal/modconv/testdata/panicparse.vyml
new file mode 100644
index 0000000..ff3d43f
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/panicparse.vyml
@@ -0,0 +1,17 @@
+vendors:
+- path: github.com/kr/pretty
+ rev: 737b74a46c4bf788349f72cb256fed10aea4d0ac
+- path: github.com/kr/text
+ rev: 7cafcd837844e784b526369c9bce262804aebc60
+- path: github.com/maruel/ut
+ rev: a9c9f15ccfa6f8b90182a53df32f4745586fbae3
+- path: github.com/mattn/go-colorable
+ rev: 9056b7a9f2d1f2d96498d6d146acd1f9d5ed3d59
+- path: github.com/mattn/go-isatty
+ rev: 56b76bdf51f7708750eac80fa38b952bb9f32639
+- path: github.com/mgutz/ansi
+ rev: c286dcecd19ff979eeb73ea444e479b903f2cfcb
+- path: github.com/pmezard/go-difflib
+ rev: 792786c7400a136282c1664665ae0a8db921c6c2
+- path: golang.org/x/sys
+ rev: a646d33e2ee3172a661fc09bca23bb4889a41bc8
diff --git a/src/cmd/go/internal/modconv/testdata/prometheus.out b/src/cmd/go/internal/modconv/testdata/prometheus.out
new file mode 100644
index 0000000..d11b8ec
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/prometheus.out
@@ -0,0 +1,258 @@
+cloud.google.com/go/compute/metadata c589d0c9f0d81640c518354c7bcae77d99820aa3
+cloud.google.com/go/internal c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/Azure/azure-sdk-for-go/arm/compute bd73d950fa4440dae889bd9917bff7cef539f86e
+github.com/Azure/azure-sdk-for-go/arm/network bd73d950fa4440dae889bd9917bff7cef539f86e
+github.com/Azure/go-autorest/autorest 8a25372bbfec739b8719a9e3987400d15ef9e179
+github.com/Azure/go-autorest/autorest/azure 8a25372bbfec739b8719a9e3987400d15ef9e179
+github.com/Azure/go-autorest/autorest/date 8a25372bbfec739b8719a9e3987400d15ef9e179
+github.com/Azure/go-autorest/autorest/to 8a25372bbfec739b8719a9e3987400d15ef9e179
+github.com/Azure/go-autorest/autorest/validation 8a25372bbfec739b8719a9e3987400d15ef9e179
+github.com/PuerkitoBio/purell c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/PuerkitoBio/urlesc c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/asaskevich/govalidator 7b3beb6df3c42abd3509abfc3bcacc0fbfb7c877
+github.com/aws/aws-sdk-go/aws 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/awserr 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/awsutil 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/client 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/client/metadata 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/corehandlers 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/credentials 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/credentials/endpointcreds 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/credentials/stscreds 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/defaults 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/ec2metadata 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/request 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/session 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/aws/signer/v4 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/private/endpoints 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/private/protocol 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/private/protocol/ec2query 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/private/protocol/query 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/private/protocol/query/queryutil 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/private/protocol/rest 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/private/waiter 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/service/ec2 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/aws/aws-sdk-go/service/sts 707203bc55114ed114446bf57949c5c211d8b7c0
+github.com/beorn7/perks/quantile 3ac7bf7a47d159a033b107610db8a1b6575507a4
+github.com/blang/semver c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/coreos/go-oidc/http c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/coreos/go-oidc/jose c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/coreos/go-oidc/key c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/coreos/go-oidc/oauth2 c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/coreos/go-oidc/oidc c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/coreos/pkg/health c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/coreos/pkg/httputil c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/coreos/pkg/timeutil c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/davecgh/go-spew/spew c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/dgrijalva/jwt-go 9ed569b5d1ac936e6494082958d63a6aa4fff99a
+github.com/docker/distribution/digest c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/docker/distribution/reference c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/emicklei/go-restful c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/emicklei/go-restful/log c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/emicklei/go-restful/swagger c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/ghodss/yaml c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/go-ini/ini 6e4869b434bd001f6983749881c7ead3545887d8
+github.com/go-openapi/jsonpointer c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/go-openapi/jsonreference c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/go-openapi/spec c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/go-openapi/swag c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/gogo/protobuf/proto c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/gogo/protobuf/sortkeys c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/golang/glog c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/golang/protobuf/proto 98fa357170587e470c5f27d3c3ea0947b71eb455
+github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
+github.com/google/gofuzz c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/hashicorp/consul/api daacc4be8bee214e3fc4b32a6dd385f5ef1b4c36
+github.com/hashicorp/go-cleanhttp ad28ea4487f05916463e2423a55166280e8254b5
+github.com/hashicorp/serf/coordinate 1d4fa605f6ff3ed628d7ae5eda7c0e56803e72a5
+github.com/influxdb/influxdb/client 291aaeb9485b43b16875c238482b2f7d0a22a13b
+github.com/influxdb/influxdb/tsdb 291aaeb9485b43b16875c238482b2f7d0a22a13b
+github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
+github.com/jonboulle/clockwork c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/juju/ratelimit c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/julienschmidt/httprouter 109e267447e95ad1bb48b758e40dd7453eb7b039
+github.com/mailru/easyjson/buffer c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/mailru/easyjson/jlexer c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/mailru/easyjson/jwriter c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/matttproud/golang_protobuf_extensions/pbutil fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
+github.com/miekg/dns 58f52c57ce9df13460ac68200cef30a008b9c468
+github.com/pborman/uuid c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/pmezard/go-difflib/difflib d77da356e56a7428ad25149ca77381849a6a5232
+github.com/prometheus/client_golang/prometheus c5b7fccd204277076155f10851dad72b76a49317
+github.com/prometheus/client_model/go fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+github.com/prometheus/common/expfmt 85637ea67b04b5c3bb25e671dacded2977f8f9f6
+github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg 85637ea67b04b5c3bb25e671dacded2977f8f9f6
+github.com/prometheus/common/log 85637ea67b04b5c3bb25e671dacded2977f8f9f6
+github.com/prometheus/common/model 85637ea67b04b5c3bb25e671dacded2977f8f9f6
+github.com/prometheus/common/route 85637ea67b04b5c3bb25e671dacded2977f8f9f6
+github.com/prometheus/common/version 85637ea67b04b5c3bb25e671dacded2977f8f9f6
+github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
+github.com/samuel/go-zookeeper/zk 177002e16a0061912f02377e2dd8951a8b3551bc
+github.com/spf13/pflag c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/stretchr/testify/assert d77da356e56a7428ad25149ca77381849a6a5232
+github.com/stretchr/testify/require d77da356e56a7428ad25149ca77381849a6a5232
+github.com/syndtr/goleveldb/leveldb 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/syndtr/goleveldb/leveldb/cache 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/syndtr/goleveldb/leveldb/comparer 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/syndtr/goleveldb/leveldb/errors 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/syndtr/goleveldb/leveldb/filter 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/syndtr/goleveldb/leveldb/iterator 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/syndtr/goleveldb/leveldb/journal 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/syndtr/goleveldb/leveldb/memdb 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/syndtr/goleveldb/leveldb/opt 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/syndtr/goleveldb/leveldb/storage 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/syndtr/goleveldb/leveldb/table 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/syndtr/goleveldb/leveldb/util 6b4daa5362b502898ddf367c5c11deb9e7a5c727
+github.com/ugorji/go/codec c589d0c9f0d81640c518354c7bcae77d99820aa3
+github.com/vaughan0/go-ini a98ad7ee00ec53921f08832bc06ecf7fd600e6a1
+golang.org/x/net/context b336a971b799939dd16ae9b1df8334cb8b977c4d
+golang.org/x/net/context/ctxhttp b336a971b799939dd16ae9b1df8334cb8b977c4d
+golang.org/x/net/http2 c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/net/http2/hpack c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/net/idna c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/net/internal/timeseries 6250b412798208e6c90b03b7c4f226de5aa299e2
+golang.org/x/net/lex/httplex c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/net/netutil bc3663df0ac92f928d419e31e0d2af22e683a5a2
+golang.org/x/oauth2 65a8d08c6292395d47053be10b3c5e91960def76
+golang.org/x/oauth2/google 65a8d08c6292395d47053be10b3c5e91960def76
+golang.org/x/oauth2/internal 65a8d08c6292395d47053be10b3c5e91960def76
+golang.org/x/oauth2/jws 65a8d08c6292395d47053be10b3c5e91960def76
+golang.org/x/oauth2/jwt 65a8d08c6292395d47053be10b3c5e91960def76
+golang.org/x/sys/unix c200b10b5d5e122be351b67af224adc6128af5bf
+golang.org/x/sys/windows c200b10b5d5e122be351b67af224adc6128af5bf
+golang.org/x/sys/windows/registry c200b10b5d5e122be351b67af224adc6128af5bf
+golang.org/x/sys/windows/svc/eventlog c200b10b5d5e122be351b67af224adc6128af5bf
+golang.org/x/text/cases c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/text/internal/tag c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/text/language c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/text/runes c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/text/secure/bidirule c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/text/secure/precis c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/text/transform c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/text/unicode/bidi c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/text/unicode/norm c589d0c9f0d81640c518354c7bcae77d99820aa3
+golang.org/x/text/width c589d0c9f0d81640c518354c7bcae77d99820aa3
+google.golang.org/api/compute/v1 63ade871fd3aec1225809d496e81ec91ab76ea29
+google.golang.org/api/gensupport 63ade871fd3aec1225809d496e81ec91ab76ea29
+google.golang.org/api/googleapi 63ade871fd3aec1225809d496e81ec91ab76ea29
+google.golang.org/api/googleapi/internal/uritemplates 63ade871fd3aec1225809d496e81ec91ab76ea29
+google.golang.org/appengine 267c27e7492265b84fc6719503b14a1e17975d79
+google.golang.org/appengine/internal 267c27e7492265b84fc6719503b14a1e17975d79
+google.golang.org/appengine/internal/app_identity 267c27e7492265b84fc6719503b14a1e17975d79
+google.golang.org/appengine/internal/base 267c27e7492265b84fc6719503b14a1e17975d79
+google.golang.org/appengine/internal/datastore 267c27e7492265b84fc6719503b14a1e17975d79
+google.golang.org/appengine/internal/log 267c27e7492265b84fc6719503b14a1e17975d79
+google.golang.org/appengine/internal/modules 267c27e7492265b84fc6719503b14a1e17975d79
+google.golang.org/appengine/internal/remote_api 4f7eeb5305a4ba1966344836ba4af9996b7b4e05
+google.golang.org/appengine/internal/urlfetch 267c27e7492265b84fc6719503b14a1e17975d79
+google.golang.org/appengine/urlfetch 267c27e7492265b84fc6719503b14a1e17975d79
+google.golang.org/cloud/compute/metadata 0a83eba2cadb60eb22123673c8fb6fca02b03c94
+google.golang.org/cloud/internal 0a83eba2cadb60eb22123673c8fb6fca02b03c94
+gopkg.in/fsnotify.v1 30411dbcefb7a1da7e84f75530ad3abe4011b4f8
+gopkg.in/inf.v0 c589d0c9f0d81640c518354c7bcae77d99820aa3
+gopkg.in/yaml.v2 7ad95dd0798a40da1ccdff6dff35fd177b5edf40
+k8s.io/client-go/1.5/discovery c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes/typed/apps/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes/typed/authentication/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes/typed/authorization/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes/typed/autoscaling/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes/typed/batch/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes/typed/certificates/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes/typed/core/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes/typed/extensions/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes/typed/policy/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes/typed/rbac/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/kubernetes/typed/storage/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/api c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/api/errors c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/api/install c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/api/meta c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/api/meta/metatypes c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/api/resource c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/api/unversioned c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/api/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/api/validation/path c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apimachinery c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apimachinery/announced c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apimachinery/registered c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/apps c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/apps/install c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/apps/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/authentication c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/authentication/install c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/authentication/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/authorization c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/authorization/install c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/authorization/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/autoscaling c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/autoscaling/install c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/autoscaling/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/batch c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/batch/install c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/batch/v1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/batch/v2alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/certificates c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/certificates/install c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/certificates/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/extensions c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/extensions/install c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/extensions/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/policy c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/policy/install c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/policy/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/rbac c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/rbac/install c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/rbac/v1alpha1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/storage c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/storage/install c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/apis/storage/v1beta1 c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/auth/user c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/conversion c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/conversion/queryparams c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/fields c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/genericapiserver/openapi/common c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/labels c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/runtime c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/runtime/serializer c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/runtime/serializer/json c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/runtime/serializer/protobuf c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/runtime/serializer/recognizer c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/runtime/serializer/streaming c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/runtime/serializer/versioning c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/selection c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/third_party/forked/golang/reflect c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/types c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/cert c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/clock c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/errors c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/flowcontrol c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/framer c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/integer c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/intstr c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/json c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/labels c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/net c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/parsers c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/rand c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/runtime c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/sets c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/uuid c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/validation c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/validation/field c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/wait c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/util/yaml c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/version c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/watch c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/pkg/watch/versioned c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/plugin/pkg/client/auth c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/plugin/pkg/client/auth/gcp c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/plugin/pkg/client/auth/oidc c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/rest c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/tools/cache c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/tools/clientcmd/api c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/tools/metrics c589d0c9f0d81640c518354c7bcae77d99820aa3
+k8s.io/client-go/1.5/transport c589d0c9f0d81640c518354c7bcae77d99820aa3
diff --git a/src/cmd/go/internal/modconv/testdata/prometheus.vjson b/src/cmd/go/internal/modconv/testdata/prometheus.vjson
new file mode 100644
index 0000000..648bec4
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/prometheus.vjson
@@ -0,0 +1,1605 @@
+{
+ "comment": "",
+ "ignore": "test appengine",
+ "package": [
+ {
+ "checksumSHA1": "Cslv4/ITyQmgjSUhNXFu8q5bqOU=",
+ "origin": "k8s.io/client-go/1.5/vendor/cloud.google.com/go/compute/metadata",
+ "path": "cloud.google.com/go/compute/metadata",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "hiJXjkFEGy+sDFf6O58Ocdy9Rnk=",
+ "origin": "k8s.io/client-go/1.5/vendor/cloud.google.com/go/internal",
+ "path": "cloud.google.com/go/internal",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "oIt4tXgFYnZJBsCac1BQLnTWALM=",
+ "path": "github.com/Azure/azure-sdk-for-go/arm/compute",
+ "revision": "bd73d950fa4440dae889bd9917bff7cef539f86e",
+ "revisionTime": "2016-10-28T18:31:11Z"
+ },
+ {
+ "checksumSHA1": "QKi6LiSyD5GnRK8ExpMgZl4XiMI=",
+ "path": "github.com/Azure/azure-sdk-for-go/arm/network",
+ "revision": "bd73d950fa4440dae889bd9917bff7cef539f86e",
+ "revisionTime": "2016-10-28T18:31:11Z"
+ },
+ {
+ "checksumSHA1": "eVSHe6GIHj9/ziFrQLZ1SC7Nn6k=",
+ "path": "github.com/Azure/go-autorest/autorest",
+ "revision": "8a25372bbfec739b8719a9e3987400d15ef9e179",
+ "revisionTime": "2016-10-25T18:07:34Z"
+ },
+ {
+ "checksumSHA1": "0sYi0JprevG/PZjtMbOh8h0pt0g=",
+ "path": "github.com/Azure/go-autorest/autorest/azure",
+ "revision": "8a25372bbfec739b8719a9e3987400d15ef9e179",
+ "revisionTime": "2016-10-25T18:07:34Z"
+ },
+ {
+ "checksumSHA1": "q9Qz8PAxK5FTOZwgYKe5Lj38u4c=",
+ "path": "github.com/Azure/go-autorest/autorest/date",
+ "revision": "8a25372bbfec739b8719a9e3987400d15ef9e179",
+ "revisionTime": "2016-10-25T18:07:34Z"
+ },
+ {
+ "checksumSHA1": "Ev8qCsbFjDlMlX0N2tYAhYQFpUc=",
+ "path": "github.com/Azure/go-autorest/autorest/to",
+ "revision": "8a25372bbfec739b8719a9e3987400d15ef9e179",
+ "revisionTime": "2016-10-25T18:07:34Z"
+ },
+ {
+ "checksumSHA1": "oBixceM+55gdk47iff8DSEIh3po=",
+ "path": "github.com/Azure/go-autorest/autorest/validation",
+ "revision": "8a25372bbfec739b8719a9e3987400d15ef9e179",
+ "revisionTime": "2016-10-25T18:07:34Z"
+ },
+ {
+ "checksumSHA1": "IatnluZB5jTVUncMN134e4VOV34=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/PuerkitoBio/purell",
+ "path": "github.com/PuerkitoBio/purell",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "E/Tz8z0B/gaR551g+XqPKAhcteM=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/PuerkitoBio/urlesc",
+ "path": "github.com/PuerkitoBio/urlesc",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "BdLdZP/C2uOO3lqk9X3NCKFpXa4=",
+ "path": "github.com/asaskevich/govalidator",
+ "revision": "7b3beb6df3c42abd3509abfc3bcacc0fbfb7c877",
+ "revisionTime": "2016-10-01T16:31:30Z"
+ },
+ {
+ "checksumSHA1": "WNfR3yhLjRC5/uccgju/bwrdsxQ=",
+ "path": "github.com/aws/aws-sdk-go/aws",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=",
+ "path": "github.com/aws/aws-sdk-go/aws/awserr",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "+q4vdl3l1Wom8K1wfIpJ4jlFsbY=",
+ "path": "github.com/aws/aws-sdk-go/aws/awsutil",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "/232RBWA3KnT7U+wciPS2+wmvR0=",
+ "path": "github.com/aws/aws-sdk-go/aws/client",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=",
+ "path": "github.com/aws/aws-sdk-go/aws/client/metadata",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "c1N3Loy3AS9zD+m5CzpPNAED39U=",
+ "path": "github.com/aws/aws-sdk-go/aws/corehandlers",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "zu5C95rmCZff6NYZb62lEaT5ibE=",
+ "path": "github.com/aws/aws-sdk-go/aws/credentials",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "KQiUK/zr3mqnAXD7x/X55/iNme0=",
+ "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=",
+ "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "4Ipx+5xN0gso+cENC2MHMWmQlR4=",
+ "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "DwhFsNluCFEwqzyp3hbJR3q2Wqs=",
+ "path": "github.com/aws/aws-sdk-go/aws/defaults",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "8E0fEBUJY/1lJOyVxzTxMGQGInk=",
+ "path": "github.com/aws/aws-sdk-go/aws/ec2metadata",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "5Ac22YMTBmrX/CXaEIXzWljr8UY=",
+ "path": "github.com/aws/aws-sdk-go/aws/request",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "eOo6evLMAxQfo7Qkc5/h5euN1Sw=",
+ "path": "github.com/aws/aws-sdk-go/aws/session",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "diXvBs1LRC0RJ9WK6sllWKdzC04=",
+ "path": "github.com/aws/aws-sdk-go/aws/signer/v4",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "Esab5F8KswqkTdB4TtjSvZgs56k=",
+ "path": "github.com/aws/aws-sdk-go/private/endpoints",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/query",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "5xzix1R8prUyWxgLnzUQoxTsfik=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "TW/7U+/8ormL7acf6z2rv2hDD+s=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/rest",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "eUEkjyMPAuekKBE4ou+nM9tXEas=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=",
+ "path": "github.com/aws/aws-sdk-go/private/waiter",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "6h4tJ9wVtbYb9wG4srtUxyPoAYM=",
+ "path": "github.com/aws/aws-sdk-go/service/ec2",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "ouwhxcAsIYQ6oJbMRdLW/Ys/iyg=",
+ "path": "github.com/aws/aws-sdk-go/service/sts",
+ "revision": "707203bc55114ed114446bf57949c5c211d8b7c0",
+ "revisionTime": "2016-11-02T21:59:28Z"
+ },
+ {
+ "checksumSHA1": "4QnLdmB1kG3N+KlDd1N+G9TWAGQ=",
+ "path": "github.com/beorn7/perks/quantile",
+ "revision": "3ac7bf7a47d159a033b107610db8a1b6575507a4",
+ "revisionTime": "2016-02-29T21:34:45Z"
+ },
+ {
+ "checksumSHA1": "n+s4YwtzpMWW5Rt0dEaQa7NHDGQ=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/blang/semver",
+ "path": "github.com/blang/semver",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "Z2AOGSmDKKvI6nuxa+UPjQWpIeM=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/go-oidc/http",
+ "path": "github.com/coreos/go-oidc/http",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "8yvt1xKCgNwuuavJdxRnvaIjrIc=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/go-oidc/jose",
+ "path": "github.com/coreos/go-oidc/jose",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "zhXKrWBSSJLqZxVE/Xsw0M9ynFQ=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/go-oidc/key",
+ "path": "github.com/coreos/go-oidc/key",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "bkW0mnXvmHQwHprW/6wrbpP7lAk=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/go-oidc/oauth2",
+ "path": "github.com/coreos/go-oidc/oauth2",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "E1x2k5FdhJ+dzFrh3kCmC6aJfVw=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/go-oidc/oidc",
+ "path": "github.com/coreos/go-oidc/oidc",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "O0UMBRCOD9ItMayDqLQ2MJEjkVE=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/pkg/health",
+ "path": "github.com/coreos/pkg/health",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "74vyZz/d49FZXMbFaHOfCGvSLj0=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/pkg/httputil",
+ "path": "github.com/coreos/pkg/httputil",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "etBdQ0LN6ojGunfvUt6B5C3FNrQ=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/coreos/pkg/timeutil",
+ "path": "github.com/coreos/pkg/timeutil",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "SdSd7pyjONWWTHc5XE3AhglLo34=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/davecgh/go-spew/spew",
+ "path": "github.com/davecgh/go-spew/spew",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "2Fy1Y6Z3lRRX1891WF/+HT4XS2I=",
+ "path": "github.com/dgrijalva/jwt-go",
+ "revision": "9ed569b5d1ac936e6494082958d63a6aa4fff99a",
+ "revisionTime": "2016-11-01T19:39:35Z"
+ },
+ {
+ "checksumSHA1": "f1wARLDzsF/JoyN01yoxXEwFIp8=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/docker/distribution/digest",
+ "path": "github.com/docker/distribution/digest",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "PzXRTLmmqWXxmDqdIXLcRYBma18=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/docker/distribution/reference",
+ "path": "github.com/docker/distribution/reference",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "1vQR+ZyudsjKio6RNKmWhwzGTb0=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/emicklei/go-restful",
+ "path": "github.com/emicklei/go-restful",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "3xWz4fZ9xW+CfADpYoPFcZCYJ4E=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/emicklei/go-restful/log",
+ "path": "github.com/emicklei/go-restful/log",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "J7CtF9gIs2yH9A7lPQDDrhYxiRk=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/emicklei/go-restful/swagger",
+ "path": "github.com/emicklei/go-restful/swagger",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "ww7LVo7jNJ1o6sfRcromEHKyY+o=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/ghodss/yaml",
+ "path": "github.com/ghodss/yaml",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "cVyhKIRI2gQrgpn5qrBeAqErmWM=",
+ "path": "github.com/go-ini/ini",
+ "revision": "6e4869b434bd001f6983749881c7ead3545887d8",
+ "revisionTime": "2016-08-27T06:11:18Z"
+ },
+ {
+ "checksumSHA1": "NaZnW0tKj/b0k5WzcMD0twrLbrE=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/jsonpointer",
+ "path": "github.com/go-openapi/jsonpointer",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "3LJXjMDxPY+veIqzQtiAvK3hXnY=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/jsonreference",
+ "path": "github.com/go-openapi/jsonreference",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "faeB3fny260hQ/gEfEXa1ZQTGtk=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/spec",
+ "path": "github.com/go-openapi/spec",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "wGpZwJ5HZtReou8A3WEV1Gdxs6k=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/swag",
+ "path": "github.com/go-openapi/swag",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "BIyZQL97iG7mzZ2UMR3XpiXbZdc=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/gogo/protobuf/proto",
+ "path": "github.com/gogo/protobuf/proto",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "e6cMbpJj41MpihS5eP4SIliRBK4=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/gogo/protobuf/sortkeys",
+ "path": "github.com/gogo/protobuf/sortkeys",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "URsJa4y/sUUw/STmbeYx9EKqaYE=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/golang/glog",
+ "path": "github.com/golang/glog",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "yDh5kmmr0zEF1r+rvYqbZcR7iLs=",
+ "path": "github.com/golang/protobuf/proto",
+ "revision": "98fa357170587e470c5f27d3c3ea0947b71eb455",
+ "revisionTime": "2016-10-12T20:53:35Z"
+ },
+ {
+ "checksumSHA1": "2a/SsTUBMKtcM6VtpbdPGO+c6c8=",
+ "path": "github.com/golang/snappy",
+ "revision": "d9eb7a3d35ec988b8585d4a0068e462c27d28380",
+ "revisionTime": "2016-05-29T05:00:41Z"
+ },
+ {
+ "checksumSHA1": "/yFfUp3tGt6cK22UVzbq8SjPDCU=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/google/gofuzz",
+ "path": "github.com/google/gofuzz",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "LclVLJYrBi03PBjsVPpgoMbUDQ8=",
+ "path": "github.com/hashicorp/consul/api",
+ "revision": "daacc4be8bee214e3fc4b32a6dd385f5ef1b4c36",
+ "revisionTime": "2016-10-28T04:06:46Z"
+ },
+ {
+ "checksumSHA1": "Uzyon2091lmwacNsl1hCytjhHtg=",
+ "path": "github.com/hashicorp/go-cleanhttp",
+ "revision": "ad28ea4487f05916463e2423a55166280e8254b5",
+ "revisionTime": "2016-04-07T17:41:26Z"
+ },
+ {
+ "checksumSHA1": "E3Xcanc9ouQwL+CZGOUyA/+giLg=",
+ "path": "github.com/hashicorp/serf/coordinate",
+ "revision": "1d4fa605f6ff3ed628d7ae5eda7c0e56803e72a5",
+ "revisionTime": "2016-10-07T00:41:22Z"
+ },
+ {
+ "path": "github.com/influxdb/influxdb/client",
+ "revision": "291aaeb9485b43b16875c238482b2f7d0a22a13b",
+ "revisionTime": "2015-09-16T14:41:53+02:00"
+ },
+ {
+ "path": "github.com/influxdb/influxdb/tsdb",
+ "revision": "291aaeb9485b43b16875c238482b2f7d0a22a13b",
+ "revisionTime": "2015-09-16T14:41:53+02:00"
+ },
+ {
+ "checksumSHA1": "0ZrwvB6KoGPj2PoDNSEJwxQ6Mog=",
+ "path": "github.com/jmespath/go-jmespath",
+ "revision": "bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d",
+ "revisionTime": "2016-08-03T19:07:31Z"
+ },
+ {
+ "checksumSHA1": "9ZVOEbIXnTuYpVqce4en8rwlkPE=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/jonboulle/clockwork",
+ "path": "github.com/jonboulle/clockwork",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "gA95N2LM2hEJLoqrTPaFsSWDJ2Y=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/juju/ratelimit",
+ "path": "github.com/juju/ratelimit",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "Farach1xcmsQYrhiUfkwF2rbIaE=",
+ "path": "github.com/julienschmidt/httprouter",
+ "revision": "109e267447e95ad1bb48b758e40dd7453eb7b039",
+ "revisionTime": "2015-09-05T19:25:33+02:00"
+ },
+ {
+ "checksumSHA1": "urY45++NYCue4nh4k8OjUFnIGfU=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/mailru/easyjson/buffer",
+ "path": "github.com/mailru/easyjson/buffer",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "yTDKAM4KBgOvXRsZC50zg0OChvM=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/mailru/easyjson/jlexer",
+ "path": "github.com/mailru/easyjson/jlexer",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "4+d+6rhM1pei6lBguhqSEW7LaXs=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/mailru/easyjson/jwriter",
+ "path": "github.com/mailru/easyjson/jwriter",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "Q2vw4HZBbnU8BLFt8VrzStwqSJg=",
+ "path": "github.com/matttproud/golang_protobuf_extensions/pbutil",
+ "revision": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a",
+ "revisionTime": "2015-04-06T19:39:34+02:00"
+ },
+ {
+ "checksumSHA1": "Wahi4g/9XiHhSLAJ+8jskg71PCU=",
+ "path": "github.com/miekg/dns",
+ "revision": "58f52c57ce9df13460ac68200cef30a008b9c468",
+ "revisionTime": "2016-10-18T06:08:08Z"
+ },
+ {
+ "checksumSHA1": "3YJklSuzSE1Rt8A+2dhiWSmf/fw=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/pborman/uuid",
+ "path": "github.com/pborman/uuid",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "zKKp5SZ3d3ycKe4EKMNT0BqAWBw=",
+ "origin": "github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib",
+ "path": "github.com/pmezard/go-difflib/difflib",
+ "revision": "d77da356e56a7428ad25149ca77381849a6a5232",
+ "revisionTime": "2016-06-15T09:26:46Z"
+ },
+ {
+ "checksumSHA1": "KkB+77Ziom7N6RzSbyUwYGrmDeU=",
+ "path": "github.com/prometheus/client_golang/prometheus",
+ "revision": "c5b7fccd204277076155f10851dad72b76a49317",
+ "revisionTime": "2016-08-17T15:48:24Z"
+ },
+ {
+ "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=",
+ "path": "github.com/prometheus/client_model/go",
+ "revision": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6",
+ "revisionTime": "2015-02-12T10:17:44Z"
+ },
+ {
+ "checksumSHA1": "mHyjbJ3BWOfUV6q9f5PBt0gaY1k=",
+ "path": "github.com/prometheus/common/expfmt",
+ "revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
+ "revisionTime": "2016-10-02T21:02:34Z"
+ },
+ {
+ "checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=",
+ "path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
+ "revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
+ "revisionTime": "2016-10-02T21:02:34Z"
+ },
+ {
+ "checksumSHA1": "UU6hIfhVjnAYDADQEfE/3T7Ddm8=",
+ "path": "github.com/prometheus/common/log",
+ "revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
+ "revisionTime": "2016-10-02T21:02:34Z"
+ },
+ {
+ "checksumSHA1": "nFie+rxcX5WdIv1diZ+fu3aj6lE=",
+ "path": "github.com/prometheus/common/model",
+ "revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
+ "revisionTime": "2016-10-02T21:02:34Z"
+ },
+ {
+ "checksumSHA1": "QQKJYoGcY10nIHxhBEHwjwUZQzk=",
+ "path": "github.com/prometheus/common/route",
+ "revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
+ "revisionTime": "2016-10-02T21:02:34Z"
+ },
+ {
+ "checksumSHA1": "91KYK0SpvkaMJJA2+BcxbVnyRO0=",
+ "path": "github.com/prometheus/common/version",
+ "revision": "85637ea67b04b5c3bb25e671dacded2977f8f9f6",
+ "revisionTime": "2016-10-02T21:02:34Z"
+ },
+ {
+ "checksumSHA1": "W218eJZPXJG783fUr/z6IaAZyes=",
+ "path": "github.com/prometheus/procfs",
+ "revision": "abf152e5f3e97f2fafac028d2cc06c1feb87ffa5",
+ "revisionTime": "2016-04-11T19:08:41Z"
+ },
+ {
+ "checksumSHA1": "+49Vr4Me28p3cR+gxX5SUQHbbas=",
+ "path": "github.com/samuel/go-zookeeper/zk",
+ "revision": "177002e16a0061912f02377e2dd8951a8b3551bc",
+ "revisionTime": "2015-08-17T10:50:50-07:00"
+ },
+ {
+ "checksumSHA1": "YuPBOVkkE3uuBh4RcRUTF0n+frs=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/spf13/pflag",
+ "path": "github.com/spf13/pflag",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "iydUphwYqZRq3WhstEdGsbvBAKs=",
+ "path": "github.com/stretchr/testify/assert",
+ "revision": "d77da356e56a7428ad25149ca77381849a6a5232",
+ "revisionTime": "2016-06-15T09:26:46Z"
+ },
+ {
+ "checksumSHA1": "P9FJpir2c4G5PA46qEkaWy3l60U=",
+ "path": "github.com/stretchr/testify/require",
+ "revision": "d77da356e56a7428ad25149ca77381849a6a5232",
+ "revisionTime": "2016-06-15T09:26:46Z"
+ },
+ {
+ "checksumSHA1": "VhcnDY37sYAnL8WjfYQN9YYl+W4=",
+ "path": "github.com/syndtr/goleveldb/leveldb",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "EKIow7XkgNdWvR/982ffIZxKG8Y=",
+ "path": "github.com/syndtr/goleveldb/leveldb/cache",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "5KPgnvCPlR0ysDAqo6jApzRQ3tw=",
+ "path": "github.com/syndtr/goleveldb/leveldb/comparer",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "1DRAxdlWzS4U0xKN/yQ/fdNN7f0=",
+ "path": "github.com/syndtr/goleveldb/leveldb/errors",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "eqKeD6DS7eNCtxVYZEHHRKkyZrw=",
+ "path": "github.com/syndtr/goleveldb/leveldb/filter",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "8dXuAVIsbtaMiGGuHjzGR6Ny/5c=",
+ "path": "github.com/syndtr/goleveldb/leveldb/iterator",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "gJY7bRpELtO0PJpZXgPQ2BYFJ88=",
+ "path": "github.com/syndtr/goleveldb/leveldb/journal",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "j+uaQ6DwJ50dkIdfMQu1TXdlQcY=",
+ "path": "github.com/syndtr/goleveldb/leveldb/memdb",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "UmQeotV+m8/FduKEfLOhjdp18rs=",
+ "path": "github.com/syndtr/goleveldb/leveldb/opt",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "/Wvv9HeJTN9UUjdjwUlz7X4ioIo=",
+ "path": "github.com/syndtr/goleveldb/leveldb/storage",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "JTJA+u8zk7EXy1UUmpFPNGvtO2A=",
+ "path": "github.com/syndtr/goleveldb/leveldb/table",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "4zil8Gwg8VPkDn1YzlgCvtukJFU=",
+ "path": "github.com/syndtr/goleveldb/leveldb/util",
+ "revision": "6b4daa5362b502898ddf367c5c11deb9e7a5c727",
+ "revisionTime": "2016-10-11T05:00:08Z"
+ },
+ {
+ "checksumSHA1": "f6Aew+ZA+HBAXCw6/xTST3mB0Lw=",
+ "origin": "k8s.io/client-go/1.5/vendor/github.com/ugorji/go/codec",
+ "path": "github.com/ugorji/go/codec",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "sFD8LpJPQtWLwGda3edjf5mNUbs=",
+ "path": "github.com/vaughan0/go-ini",
+ "revision": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1",
+ "revisionTime": "2013-09-23T16:52:12+02:00"
+ },
+ {
+ "checksumSHA1": "9jjO5GjLa0XF/nfWihF02RoH4qc=",
+ "path": "golang.org/x/net/context",
+ "revision": "b336a971b799939dd16ae9b1df8334cb8b977c4d",
+ "revisionTime": "2016-10-27T19:58:04Z"
+ },
+ {
+ "checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=",
+ "path": "golang.org/x/net/context/ctxhttp",
+ "revision": "b336a971b799939dd16ae9b1df8334cb8b977c4d",
+ "revisionTime": "2016-10-27T19:58:04Z"
+ },
+ {
+ "checksumSHA1": "SPYGC6DQrH9jICccUsOfbvvhB4g=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/net/http2",
+ "path": "golang.org/x/net/http2",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "EYNaHp7XdLWRydUCE0amEkKAtgk=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/net/http2/hpack",
+ "path": "golang.org/x/net/http2/hpack",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "gXiSniT8fevWOVPVKopYgrdzi60=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/net/idna",
+ "path": "golang.org/x/net/idna",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "/k7k6eJDkxXx6K9Zpo/OwNm58XM=",
+ "path": "golang.org/x/net/internal/timeseries",
+ "revision": "6250b412798208e6c90b03b7c4f226de5aa299e2",
+ "revisionTime": "2016-08-24T22:20:41Z"
+ },
+ {
+ "checksumSHA1": "yhndhWXMs/VSEDLks4dNyFMQStA=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/net/lex/httplex",
+ "path": "golang.org/x/net/lex/httplex",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "7WASrg0PEueWDDRHkFhEEN6Qrms=",
+ "path": "golang.org/x/net/netutil",
+ "revision": "bc3663df0ac92f928d419e31e0d2af22e683a5a2",
+ "revisionTime": "2016-06-21T20:48:10Z"
+ },
+ {
+ "checksumSHA1": "mktBVED98G2vv+OKcSgtnFVZC1Y=",
+ "path": "golang.org/x/oauth2",
+ "revision": "65a8d08c6292395d47053be10b3c5e91960def76",
+ "revisionTime": "2016-06-07T03:33:14Z"
+ },
+ {
+ "checksumSHA1": "2rk6lthfQa5Rfydj8j7+dilKGbo=",
+ "path": "golang.org/x/oauth2/google",
+ "revision": "65a8d08c6292395d47053be10b3c5e91960def76",
+ "revisionTime": "2016-06-07T03:33:14Z"
+ },
+ {
+ "checksumSHA1": "W/GiDqzsagBnR7/yEvxatMhUDBs=",
+ "path": "golang.org/x/oauth2/internal",
+ "revision": "65a8d08c6292395d47053be10b3c5e91960def76",
+ "revisionTime": "2016-06-07T03:33:14Z"
+ },
+ {
+ "checksumSHA1": "CPTYHWrVL4jA0B1IuC0hvgcE2AQ=",
+ "path": "golang.org/x/oauth2/jws",
+ "revision": "65a8d08c6292395d47053be10b3c5e91960def76",
+ "revisionTime": "2016-06-07T03:33:14Z"
+ },
+ {
+ "checksumSHA1": "xifBSq0Pn6pIoPA/o3tyzq8X4Ds=",
+ "path": "golang.org/x/oauth2/jwt",
+ "revision": "65a8d08c6292395d47053be10b3c5e91960def76",
+ "revisionTime": "2016-06-07T03:33:14Z"
+ },
+ {
+ "checksumSHA1": "aVgPDgwY3/t4J/JOw9H3FVMHqh0=",
+ "path": "golang.org/x/sys/unix",
+ "revision": "c200b10b5d5e122be351b67af224adc6128af5bf",
+ "revisionTime": "2016-10-22T18:22:21Z"
+ },
+ {
+ "checksumSHA1": "fpW2dhGFC6SrVzipJx7fjg2DIH8=",
+ "path": "golang.org/x/sys/windows",
+ "revision": "c200b10b5d5e122be351b67af224adc6128af5bf",
+ "revisionTime": "2016-10-22T18:22:21Z"
+ },
+ {
+ "checksumSHA1": "PjYlbMS0ttyZYlaevvjA/gV3g1c=",
+ "path": "golang.org/x/sys/windows/registry",
+ "revision": "c200b10b5d5e122be351b67af224adc6128af5bf",
+ "revisionTime": "2016-10-22T18:22:21Z"
+ },
+ {
+ "checksumSHA1": "uVlUSSKplihZG7N+QJ6fzDZ4Kh8=",
+ "path": "golang.org/x/sys/windows/svc/eventlog",
+ "revision": "c200b10b5d5e122be351b67af224adc6128af5bf",
+ "revisionTime": "2016-10-22T18:22:21Z"
+ },
+ {
+ "checksumSHA1": "QQpKbWuqvhmxVr/hfEYdWzzcXRM=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/cases",
+ "path": "golang.org/x/text/cases",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "iAsGo/kxvnwILbJVUCd0ZcqZO/Q=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/internal/tag",
+ "path": "golang.org/x/text/internal/tag",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "mQ6PCGHY7K0oPjKbYD8wsTjm/P8=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/language",
+ "path": "golang.org/x/text/language",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "WpeH2TweiuiZAQVTJNO5vyZAQQA=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/runes",
+ "path": "golang.org/x/text/runes",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "1VjEPyjdi0xOiIN/Alkqiad/B/c=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/secure/bidirule",
+ "path": "golang.org/x/text/secure/bidirule",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "FcK7VslktIAWj5jnWVnU2SesBq0=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/secure/precis",
+ "path": "golang.org/x/text/secure/precis",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "nwlu7UTwYbCj9l5f3a7t2ROwNzM=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/transform",
+ "path": "golang.org/x/text/transform",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "nWJ9R1+Xw41f/mM3b7BYtv77CfI=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/unicode/bidi",
+ "path": "golang.org/x/text/unicode/bidi",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "BAZ96wCGUj6HdY9sG60Yw09KWA4=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/unicode/norm",
+ "path": "golang.org/x/text/unicode/norm",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "AZMILKWqLP99UilLgbGZ+uzIVrM=",
+ "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/width",
+ "path": "golang.org/x/text/width",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "AjdmRXf0fiy6Bec9mNlsGsmZi1k=",
+ "path": "google.golang.org/api/compute/v1",
+ "revision": "63ade871fd3aec1225809d496e81ec91ab76ea29",
+ "revisionTime": "2016-05-31T06:42:46Z"
+ },
+ {
+ "checksumSHA1": "OtsMVXY89Hc/bBXdDp84atFQawM=",
+ "path": "google.golang.org/api/gensupport",
+ "revision": "63ade871fd3aec1225809d496e81ec91ab76ea29",
+ "revisionTime": "2016-05-31T06:42:46Z"
+ },
+ {
+ "checksumSHA1": "yQREK/OWrz9PLljbr127+xFk6J0=",
+ "path": "google.golang.org/api/googleapi",
+ "revision": "63ade871fd3aec1225809d496e81ec91ab76ea29",
+ "revisionTime": "2016-05-31T06:42:46Z"
+ },
+ {
+ "checksumSHA1": "ii4ET3JHk3vkMUEcg+9t/1RZSUU=",
+ "path": "google.golang.org/api/googleapi/internal/uritemplates",
+ "revision": "63ade871fd3aec1225809d496e81ec91ab76ea29",
+ "revisionTime": "2016-05-31T06:42:46Z"
+ },
+ {
+ "checksumSHA1": "N3KZEuQ9O1QwJXcCJbe7Czwroo4=",
+ "path": "google.golang.org/appengine",
+ "revision": "267c27e7492265b84fc6719503b14a1e17975d79",
+ "revisionTime": "2016-06-21T05:59:22Z"
+ },
+ {
+ "checksumSHA1": "G9Xp1ScdsfcKsw+PcWunivRRP3o=",
+ "path": "google.golang.org/appengine/internal",
+ "revision": "267c27e7492265b84fc6719503b14a1e17975d79",
+ "revisionTime": "2016-06-21T05:59:22Z"
+ },
+ {
+ "checksumSHA1": "x6Thdfyasqd68dWZWqzWWeIfAfI=",
+ "path": "google.golang.org/appengine/internal/app_identity",
+ "revision": "267c27e7492265b84fc6719503b14a1e17975d79",
+ "revisionTime": "2016-06-21T05:59:22Z"
+ },
+ {
+ "checksumSHA1": "TsNO8P0xUlLNyh3Ic/tzSp/fDWM=",
+ "path": "google.golang.org/appengine/internal/base",
+ "revision": "267c27e7492265b84fc6719503b14a1e17975d79",
+ "revisionTime": "2016-06-21T05:59:22Z"
+ },
+ {
+ "checksumSHA1": "5QsV5oLGSfKZqTCVXP6NRz5T4Tw=",
+ "path": "google.golang.org/appengine/internal/datastore",
+ "revision": "267c27e7492265b84fc6719503b14a1e17975d79",
+ "revisionTime": "2016-06-21T05:59:22Z"
+ },
+ {
+ "checksumSHA1": "Gep2T9zmVYV8qZfK2gu3zrmG6QE=",
+ "path": "google.golang.org/appengine/internal/log",
+ "revision": "267c27e7492265b84fc6719503b14a1e17975d79",
+ "revisionTime": "2016-06-21T05:59:22Z"
+ },
+ {
+ "checksumSHA1": "eLZVX1EHLclFtQnjDIszsdyWRHo=",
+ "path": "google.golang.org/appengine/internal/modules",
+ "revision": "267c27e7492265b84fc6719503b14a1e17975d79",
+ "revisionTime": "2016-06-21T05:59:22Z"
+ },
+ {
+ "checksumSHA1": "a1XY7rz3BieOVqVI2Et6rKiwQCk=",
+ "path": "google.golang.org/appengine/internal/remote_api",
+ "revision": "4f7eeb5305a4ba1966344836ba4af9996b7b4e05",
+ "revisionTime": "2016-08-19T23:33:10Z"
+ },
+ {
+ "checksumSHA1": "QtAbHtHmDzcf6vOV9eqlCpKgjiw=",
+ "path": "google.golang.org/appengine/internal/urlfetch",
+ "revision": "267c27e7492265b84fc6719503b14a1e17975d79",
+ "revisionTime": "2016-06-21T05:59:22Z"
+ },
+ {
+ "checksumSHA1": "akOV9pYnCbcPA8wJUutSQVibdyg=",
+ "path": "google.golang.org/appengine/urlfetch",
+ "revision": "267c27e7492265b84fc6719503b14a1e17975d79",
+ "revisionTime": "2016-06-21T05:59:22Z"
+ },
+ {
+ "checksumSHA1": "Wp8g9MHRmK8SwcyGVCoGtPx+5Lo=",
+ "path": "google.golang.org/cloud/compute/metadata",
+ "revision": "0a83eba2cadb60eb22123673c8fb6fca02b03c94",
+ "revisionTime": "2016-06-21T15:59:29Z"
+ },
+ {
+ "checksumSHA1": "U7dGDNwEHORvJFMoNSXErKE7ITg=",
+ "path": "google.golang.org/cloud/internal",
+ "revision": "0a83eba2cadb60eb22123673c8fb6fca02b03c94",
+ "revisionTime": "2016-06-21T15:59:29Z"
+ },
+ {
+ "checksumSHA1": "JfVmsMwyeeepbdw4q4wpN07BuFg=",
+ "path": "gopkg.in/fsnotify.v1",
+ "revision": "30411dbcefb7a1da7e84f75530ad3abe4011b4f8",
+ "revisionTime": "2016-04-12T13:37:56Z"
+ },
+ {
+ "checksumSHA1": "pfQwQtWlFezJq0Viroa/L+v+yDM=",
+ "origin": "k8s.io/client-go/1.5/vendor/gopkg.in/inf.v0",
+ "path": "gopkg.in/inf.v0",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "KgT+peLCcuh0/m2mpoOZXuxXmwc=",
+ "path": "gopkg.in/yaml.v2",
+ "revision": "7ad95dd0798a40da1ccdff6dff35fd177b5edf40",
+ "revisionTime": "2015-06-24T11:29:02+01:00"
+ },
+ {
+ "checksumSHA1": "st0Nbu4zwLcP3mz03lDOJVZtn8Y=",
+ "path": "k8s.io/client-go/1.5/discovery",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "S+OzpkipMb46LGZoWuveqSLAcoM=",
+ "path": "k8s.io/client-go/1.5/kubernetes",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "yCBn8ig1TUMrk+ljtK0nDr7E5Vo=",
+ "path": "k8s.io/client-go/1.5/kubernetes/typed/apps/v1alpha1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "ZRnUz5NrpvJsXAjtnRdEv5UYhSI=",
+ "path": "k8s.io/client-go/1.5/kubernetes/typed/authentication/v1beta1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "TY55Np20olmPMzXgfVlIUIyqv04=",
+ "path": "k8s.io/client-go/1.5/kubernetes/typed/authorization/v1beta1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "FRByJsFff/6lPH20FtJPaK1NPWI=",
+ "path": "k8s.io/client-go/1.5/kubernetes/typed/autoscaling/v1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "3Cy2as7HnQ2FDcvpNbatpFWx0P4=",
+ "path": "k8s.io/client-go/1.5/kubernetes/typed/batch/v1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "RUKywApIbSLLsfkYxXzifh7HIvs=",
+ "path": "k8s.io/client-go/1.5/kubernetes/typed/certificates/v1alpha1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "4+Lsxu+sYgzsS2JOHP7CdrZLSKc=",
+ "path": "k8s.io/client-go/1.5/kubernetes/typed/core/v1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "H8jzevN03YUfmf2krJt0qj2P9sU=",
+ "path": "k8s.io/client-go/1.5/kubernetes/typed/extensions/v1beta1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "hrpA6xxtwj3oMcQbFxI2cDhO2ZA=",
+ "path": "k8s.io/client-go/1.5/kubernetes/typed/policy/v1alpha1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "B2+F12NeMwrOHvHK2ALyEcr3UGA=",
+ "path": "k8s.io/client-go/1.5/kubernetes/typed/rbac/v1alpha1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "h2eSNUym87RWPlez7UKujShwrUQ=",
+ "path": "k8s.io/client-go/1.5/kubernetes/typed/storage/v1beta1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "+oIykJ3A0wYjAWbbrGo0jNnMLXw=",
+ "path": "k8s.io/client-go/1.5/pkg/api",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "UsUsIdhuy5Ej2vI0hbmSsrimoaQ=",
+ "path": "k8s.io/client-go/1.5/pkg/api/errors",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "Eo6LLHFqG6YznIAKr2mVjuqUj6k=",
+ "path": "k8s.io/client-go/1.5/pkg/api/install",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "dYznkLcCEai21z1dX8kZY7uDsck=",
+ "path": "k8s.io/client-go/1.5/pkg/api/meta",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "b06esG4xMj/YNFD85Lqq00cx+Yo=",
+ "path": "k8s.io/client-go/1.5/pkg/api/meta/metatypes",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "L9svak1yut0Mx8r9VLDOwpqZzBk=",
+ "path": "k8s.io/client-go/1.5/pkg/api/resource",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "m7jGshKDLH9kdokfa6MwAqzxRQk=",
+ "path": "k8s.io/client-go/1.5/pkg/api/unversioned",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "iI6s5WAexr1PEfqrbvuscB+oVik=",
+ "path": "k8s.io/client-go/1.5/pkg/api/v1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "ikac34qI/IkTWHnfi8pPl9irPyo=",
+ "path": "k8s.io/client-go/1.5/pkg/api/validation/path",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "MJyygSPp8N6z+7SPtcROz4PEwas=",
+ "path": "k8s.io/client-go/1.5/pkg/apimachinery",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "EGb4IcSTQ1VXCmX0xcyG5GpWId8=",
+ "path": "k8s.io/client-go/1.5/pkg/apimachinery/announced",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "vhSyuINHQhCsDKTyBmvJT1HzDHI=",
+ "path": "k8s.io/client-go/1.5/pkg/apimachinery/registered",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "rXeBnwLg8ZFe6m5/Ki7tELVBYDk=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/apps",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "KzHaG858KV1tBh5cuLInNcm+G5s=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/apps/install",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "fynWdchlRbPaxuST2oGDKiKLTqE=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/apps/v1alpha1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "hreIYssoH4Ef/+Aglpitn3GNLR4=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/authentication",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "EgUqJH4CqB9vXVg6T8II2OEt5LE=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/authentication/install",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "Z3DKgomzRPGcBv/8hlL6pfnIpXI=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/authentication/v1beta1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "GpuScB2Z+NOT4WIQg1mVvVSDUts=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/authorization",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "+u3UD+HY9lBH+PFi/2B4W564JEw=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/authorization/install",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "zIFzgWjmlWNLHGHMpCpDCvoLtKY=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/authorization/v1beta1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "tdpzQFQyVkt5kCLTvtKTVqT+maE=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/autoscaling",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "nb6LbYGS5tv8H8Ovptg6M7XuDZ4=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/autoscaling/install",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "DNb1/nl/5RDdckRrJoXBRagzJXs=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/autoscaling/v1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "4bLhH2vNl5l4Qp6MjLhWyWVAPE0=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/batch",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "RpAAEynmxlvOlLLZK1KEUQRnYzk=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/batch/install",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "uWJ2BHmjL/Gq4FFlNkqiN6vvPyM=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/batch/v1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "mHWt/p724dKeP1vqLtWQCye7zaE=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/batch/v2alpha1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "6dJ1dGfXkB3A42TOtMaY/rvv4N8=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/certificates",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "Bkrhm6HbFYANwtzUE8eza9SWBk0=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/certificates/install",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "nRRPIBQ5O3Ad24kscNtK+gPC+fk=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/certificates/v1alpha1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "KUMhoaOg9GXHN/aAVvSLO18SgqU=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/extensions",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "eSo2VhNAYtesvmpEPqn05goW4LY=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/extensions/install",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "DunWIPrCC5iGMWzkaaugMOxD+hg=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/extensions/v1beta1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "rVGYi2ko0E7vL5OZSMYX+NAGPYw=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/policy",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "llJHd2H0LzABGB6BcletzIHnexo=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/policy/install",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "j44bqyY13ldnuCtysYE8nRkMD7o=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/policy/v1alpha1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "vT7rFxowcKMTYc55mddePqUFRgE=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/rbac",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "r1MzUXsG+Zyn30aU8I5R5dgrJPA=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/rbac/install",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "aNfO8xn8VDO3fM9CpVCe6EIB+GA=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/rbac/v1alpha1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "rQCxrbisCXmj2wymlYG63kcTL9I=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/storage",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "wZyxh5nt5Eh6kF7YNAIYukKWWy0=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/storage/install",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "P8ANOt/I4Cs3QtjVXWmDA/gpQdg=",
+ "path": "k8s.io/client-go/1.5/pkg/apis/storage/v1beta1",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "qnVPwzvNLz2mmr3BXdU9qIhQXXU=",
+ "path": "k8s.io/client-go/1.5/pkg/auth/user",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "KrIchxhapSs242yAy8yrTS1XlZo=",
+ "path": "k8s.io/client-go/1.5/pkg/conversion",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "weZqKFcOhcnF47eDDHXzluCKSF0=",
+ "path": "k8s.io/client-go/1.5/pkg/conversion/queryparams",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "T3EMfyXZX5939/OOQ1JU+Nmbk4k=",
+ "path": "k8s.io/client-go/1.5/pkg/fields",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "2v11s3EBH8UBl2qfImT29tQN2kM=",
+ "path": "k8s.io/client-go/1.5/pkg/genericapiserver/openapi/common",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "GvBlph6PywK3zguou/T9kKNNdoQ=",
+ "path": "k8s.io/client-go/1.5/pkg/labels",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "Vtrgy827r0rWzIAgvIWY4flu740=",
+ "path": "k8s.io/client-go/1.5/pkg/runtime",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "SEcZqRATexhgHvDn+eHvMc07UJs=",
+ "path": "k8s.io/client-go/1.5/pkg/runtime/serializer",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "qzYKG9YZSj8l/W1QVTOrGAry/BM=",
+ "path": "k8s.io/client-go/1.5/pkg/runtime/serializer/json",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "F7h+8zZ0JPLYkac4KgSVljguBE4=",
+ "path": "k8s.io/client-go/1.5/pkg/runtime/serializer/protobuf",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "CvySOL8C85e3y7EWQ+Au4cwUZJM=",
+ "path": "k8s.io/client-go/1.5/pkg/runtime/serializer/recognizer",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "eCitoKeIun+lJzYFhAfdSIIicSM=",
+ "path": "k8s.io/client-go/1.5/pkg/runtime/serializer/streaming",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "kVWvZuLGltJ4YqQsiaCLRRLDDK0=",
+ "path": "k8s.io/client-go/1.5/pkg/runtime/serializer/versioning",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "m51+LAeQ9RK1KHX+l2iGcwbVCKs=",
+ "path": "k8s.io/client-go/1.5/pkg/selection",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "dp4IWcC3U6a0HeOdVCDQWODWCbw=",
+ "path": "k8s.io/client-go/1.5/pkg/third_party/forked/golang/reflect",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "ER898XJD1ox4d71gKZD8TLtTSpM=",
+ "path": "k8s.io/client-go/1.5/pkg/types",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "BVdXtnLDlmBQksRPfHOIG+qdeVg=",
+ "path": "k8s.io/client-go/1.5/pkg/util",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "nnh8Sa4dCupxRI4bbKaozGp1d/A=",
+ "path": "k8s.io/client-go/1.5/pkg/util/cert",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "S32d5uduNlwouM8+mIz+ALpliUQ=",
+ "path": "k8s.io/client-go/1.5/pkg/util/clock",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "Y6rWC0TUw2/uUeUjJ7kazyEUzBQ=",
+ "path": "k8s.io/client-go/1.5/pkg/util/errors",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "C7IfEAdCOePw3/IraaZCNXuYXLw=",
+ "path": "k8s.io/client-go/1.5/pkg/util/flowcontrol",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "EuslQHnhBSRXaWimYqLEqhMPV48=",
+ "path": "k8s.io/client-go/1.5/pkg/util/framer",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "ByO18NbZwiifFr8qtLyfJAHXguA=",
+ "path": "k8s.io/client-go/1.5/pkg/util/integer",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "ww+RfsoIlUBDwThg2oqC5QVz33Y=",
+ "path": "k8s.io/client-go/1.5/pkg/util/intstr",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "7E8f8dLlXW7u6r9sggMjvB4HEiw=",
+ "path": "k8s.io/client-go/1.5/pkg/util/json",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "d0pFZxMJG9j95acNmaIM1l+X+QU=",
+ "path": "k8s.io/client-go/1.5/pkg/util/labels",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "wCN7u1lE+25neM9jXeI7aE8EAfk=",
+ "path": "k8s.io/client-go/1.5/pkg/util/net",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "g+kBkxcb+tYmFtRRly+VE+JAIfw=",
+ "path": "k8s.io/client-go/1.5/pkg/util/parsers",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "S4wUnE5VkaWWrkLbgPL/1oNLJ4g=",
+ "path": "k8s.io/client-go/1.5/pkg/util/rand",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "8j9c2PqTKybtnymXbStNYRexRj8=",
+ "path": "k8s.io/client-go/1.5/pkg/util/runtime",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "aAz4e8hLGs0+ZAz1TdA5tY/9e1A=",
+ "path": "k8s.io/client-go/1.5/pkg/util/sets",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "P/fwh6QZ5tsjVyHTaASDWL3WaGs=",
+ "path": "k8s.io/client-go/1.5/pkg/util/uuid",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "P9Bq/1qbF4SvnN9HyCTRpbUz7sQ=",
+ "path": "k8s.io/client-go/1.5/pkg/util/validation",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "D0JIEjlP69cuPOZEdsSKeFgsnI8=",
+ "path": "k8s.io/client-go/1.5/pkg/util/validation/field",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "T7ba8t8i+BtgClMgL+aMZM94fcI=",
+ "path": "k8s.io/client-go/1.5/pkg/util/wait",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "6RCTv/KDiw7as4KeyrgU3XrUSQI=",
+ "path": "k8s.io/client-go/1.5/pkg/util/yaml",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "OwKlsSeKtz1FBVC9cQ5gWRL5pKc=",
+ "path": "k8s.io/client-go/1.5/pkg/version",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "Oil9WGw/dODbpBopn6LWQGS3DYg=",
+ "path": "k8s.io/client-go/1.5/pkg/watch",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "r5alnRCbLaPsbTeJjjTVn/bt6uw=",
+ "path": "k8s.io/client-go/1.5/pkg/watch/versioned",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "X1+ltyfHui/XCwDupXIf39+9gWQ=",
+ "path": "k8s.io/client-go/1.5/plugin/pkg/client/auth",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "KYy+js37AS0ZT08g5uBr1ZoMPmE=",
+ "path": "k8s.io/client-go/1.5/plugin/pkg/client/auth/gcp",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "wQ9G5++lbQpejqCzGHo037N3YcY=",
+ "path": "k8s.io/client-go/1.5/plugin/pkg/client/auth/oidc",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "ABe8YfZVEDoRpAUqp2BKP8o1VIA=",
+ "path": "k8s.io/client-go/1.5/rest",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "Gbe0Vs9hkI7X5hhbXUuWdRFffSI=",
+ "path": "k8s.io/client-go/1.5/tools/cache",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "K/oOznXABjqSS1c2Fs407c5F8KA=",
+ "path": "k8s.io/client-go/1.5/tools/clientcmd/api",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "c1PQ4WJRfpA9BYcFHW2+46hu5IE=",
+ "path": "k8s.io/client-go/1.5/tools/metrics",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ },
+ {
+ "checksumSHA1": "e4W2q+6wvjejv3V0UCI1mewTTro=",
+ "path": "k8s.io/client-go/1.5/transport",
+ "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
+ "revisionTime": "2016-09-30T00:14:02Z"
+ }
+ ],
+ "rootPath": "github.com/prometheus/prometheus"
+}
diff --git a/src/cmd/go/internal/modconv/testdata/traefik.dep b/src/cmd/go/internal/modconv/testdata/traefik.dep
new file mode 100644
index 0000000..8510f0f
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/traefik.dep
@@ -0,0 +1,79 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+[[projects]]
+ name = "github.com/Nvveen/Gotty"
+ packages = ["."]
+ revision = "a8b993ba6abdb0e0c12b0125c603323a71c7790c"
+ source = "github.com/ijc25/Gotty"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/OpenDNS/vegadns2client"
+ packages = ["."]
+ revision = "a3fa4a771d87bda2514a90a157e1fed1b6897d2e"
+
+[[projects]]
+ name = "github.com/PuerkitoBio/purell"
+ packages = ["."]
+ revision = "8a290539e2e8629dbc4e6bad948158f790ec31f4"
+ version = "v1.0.0"
+
+[[projects]]
+ name = "github.com/PuerkitoBio/urlesc"
+ packages = ["."]
+ revision = "5bd2802263f21d8788851d5305584c82a5c75d7e"
+
+[[projects]]
+ name = "github.com/Shopify/sarama"
+ packages = ["."]
+ revision = "70f6a705d4a17af059acbc6946fb2bd30762acd7"
+
+[[projects]]
+ name = "github.com/VividCortex/gohistogram"
+ packages = ["."]
+ revision = "51564d9861991fb0ad0f531c99ef602d0f9866e6"
+ version = "v1.0.0"
+
+[[projects]]
+ branch = "containous-fork"
+ name = "github.com/abbot/go-http-auth"
+ packages = ["."]
+ revision = "65b0cdae8d7fe5c05c7430e055938ef6d24a66c9"
+ source = "github.com/containous/go-http-auth"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/abronan/valkeyrie"
+ packages = [
+ ".",
+ "store",
+ "store/boltdb",
+ "store/consul",
+ "store/etcd/v2",
+ "store/etcd/v3",
+ "store/zookeeper"
+ ]
+ revision = "063d875e3c5fd734fa2aa12fac83829f62acfc70"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/mesosphere/mesos-dns"
+ packages = [
+ "detect",
+ "errorutil",
+ "logging",
+ "models",
+ "records",
+ "records/labels",
+ "records/state",
+ "util"
+ ]
+ revision = "b47dc4c19f215e98da687b15b4c64e70f629bea5"
+ source = "git@github.com:containous/mesos-dns.git"
+
+ [[projects]]
+ name = "gopkg.in/fsnotify.v1"
+ packages = ["."]
+ revision = "629574ca2a5df945712d3079857300b5e4da0236"
+ source = "github.com/fsnotify/fsnotify"
+ version = "v1.4.2" \ No newline at end of file
diff --git a/src/cmd/go/internal/modconv/testdata/traefik.out b/src/cmd/go/internal/modconv/testdata/traefik.out
new file mode 100644
index 0000000..5054295
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/traefik.out
@@ -0,0 +1,14 @@
+github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c
+github.com/OpenDNS/vegadns2client a3fa4a771d87bda2514a90a157e1fed1b6897d2e
+github.com/PuerkitoBio/purell v1.0.0
+github.com/PuerkitoBio/urlesc 5bd2802263f21d8788851d5305584c82a5c75d7e
+github.com/Shopify/sarama 70f6a705d4a17af059acbc6946fb2bd30762acd7
+github.com/VividCortex/gohistogram v1.0.0
+github.com/abbot/go-http-auth 65b0cdae8d7fe5c05c7430e055938ef6d24a66c9
+github.com/abronan/valkeyrie 063d875e3c5fd734fa2aa12fac83829f62acfc70
+github.com/mesosphere/mesos-dns b47dc4c19f215e98da687b15b4c64e70f629bea5
+gopkg.in/fsnotify.v1 v1.4.2
+replace: github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c github.com/ijc25/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c
+replace: github.com/abbot/go-http-auth 65b0cdae8d7fe5c05c7430e055938ef6d24a66c9 github.com/containous/go-http-auth 65b0cdae8d7fe5c05c7430e055938ef6d24a66c9
+replace: github.com/mesosphere/mesos-dns b47dc4c19f215e98da687b15b4c64e70f629bea5 github.com/containous/mesos-dns b47dc4c19f215e98da687b15b4c64e70f629bea5
+replace: gopkg.in/fsnotify.v1 v1.4.2 github.com/fsnotify/fsnotify v1.4.2
diff --git a/src/cmd/go/internal/modconv/testdata/upspin.dep b/src/cmd/go/internal/modconv/testdata/upspin.dep
new file mode 100644
index 0000000..be77bcb
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/upspin.dep
@@ -0,0 +1,57 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ branch = "master"
+ name = "bazil.org/fuse"
+ packages = [".","fs","fuseutil"]
+ revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/NYTimes/gziphandler"
+ packages = ["."]
+ revision = "97ae7fbaf81620fe97840685304a78a306a39c64"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/golang/protobuf"
+ packages = ["proto"]
+ revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/russross/blackfriday"
+ packages = ["."]
+ revision = "6d1ef893fcb01b4f50cb6e57ed7df3e2e627b6b2"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/crypto"
+ packages = ["acme","acme/autocert","hkdf"]
+ revision = "13931e22f9e72ea58bb73048bc752b48c6d4d4ac"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/net"
+ packages = ["context"]
+ revision = "4b14673ba32bee7f5ac0f990a48f033919fd418b"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/text"
+ packages = ["cases","internal","internal/gen","internal/tag","internal/triegen","internal/ucd","language","runes","secure/bidirule","secure/precis","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
+ revision = "6eab0e8f74e86c598ec3b6fad4888e0c11482d48"
+
+[[projects]]
+ branch = "v2"
+ name = "gopkg.in/yaml.v2"
+ packages = ["."]
+ revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "2246e647ba1c78b0b9f948f9fb072fff1467284fb138709c063e99736f646b90"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/src/cmd/go/internal/modconv/testdata/upspin.out b/src/cmd/go/internal/modconv/testdata/upspin.out
new file mode 100644
index 0000000..00597db
--- /dev/null
+++ b/src/cmd/go/internal/modconv/testdata/upspin.out
@@ -0,0 +1,8 @@
+bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748
+github.com/NYTimes/gziphandler 97ae7fbaf81620fe97840685304a78a306a39c64
+github.com/golang/protobuf 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9
+github.com/russross/blackfriday 6d1ef893fcb01b4f50cb6e57ed7df3e2e627b6b2
+golang.org/x/crypto 13931e22f9e72ea58bb73048bc752b48c6d4d4ac
+golang.org/x/net 4b14673ba32bee7f5ac0f990a48f033919fd418b
+golang.org/x/text 6eab0e8f74e86c598ec3b6fad4888e0c11482d48
+gopkg.in/yaml.v2 eb3733d160e74a9c7e442f435eb3bea458e1d19f
diff --git a/src/cmd/go/internal/modconv/tsv.go b/src/cmd/go/internal/modconv/tsv.go
new file mode 100644
index 0000000..4649579
--- /dev/null
+++ b/src/cmd/go/internal/modconv/tsv.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import (
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+func ParseDependenciesTSV(file string, data []byte) (*modfile.File, error) {
+ mf := new(modfile.File)
+ for _, line := range strings.Split(string(data), "\n") {
+ f := strings.Split(line, "\t")
+ if len(f) >= 3 {
+ mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: f[0], Version: f[2]}})
+ }
+ }
+ return mf, nil
+}
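
ParseDependenciesTSV above handles the tab-separated dependencies.tsv format: one dependency per line, with the import path in the first field and the version in the third. Below is a minimal standalone sketch of the same field selection; the sample lines and the meaning of the other columns are assumptions, and the snippet deliberately avoids the internal modconv and golang.org/x/mod packages so that it runs on its own.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical dependencies.tsv content: path <TAB> vcs <TAB> version <TAB> timestamp.
	data := "github.com/example/errs\tgit\tv0.8.0\t2016-09-29T01:00:00Z\n" +
		"golang.org/x/net\tgit\t4b14673ba32b\t2017-01-14T00:00:00Z\n"
	for _, line := range strings.Split(data, "\n") {
		f := strings.Split(line, "\t")
		if len(f) >= 3 {
			// Same selection as ParseDependenciesTSV: f[0] is the path, f[2] the version.
			fmt.Printf("require %s %s\n", f[0], f[2])
		}
	}
}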
diff --git a/src/cmd/go/internal/modconv/vconf.go b/src/cmd/go/internal/modconv/vconf.go
new file mode 100644
index 0000000..9bad2ba
--- /dev/null
+++ b/src/cmd/go/internal/modconv/vconf.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import (
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+func ParseVendorConf(file string, data []byte) (*modfile.File, error) {
+ mf := new(modfile.File)
+ for _, line := range strings.Split(string(data), "\n") {
+ if i := strings.Index(line, "#"); i >= 0 {
+ line = line[:i]
+ }
+ f := strings.Fields(line)
+ if len(f) >= 2 {
+ mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: f[0], Version: f[1]}})
+ }
+ }
+ return mf, nil
+}
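
ParseVendorConf above reads the line-oriented vendor.conf format (as written by tools such as vndr): everything after '#' is a comment, and the first two whitespace-separated fields of each remaining line are the import path and the pinned revision. A self-contained sketch of the same per-line handling, on made-up input:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical vendor.conf content: "path revision [repository]", with # comments.
	data := `# pinned dependencies
github.com/example/logging v1.0.3
golang.org/x/sys 0123456789abcdef0123 # placeholder revision
`
	for _, line := range strings.Split(data, "\n") {
		if i := strings.Index(line, "#"); i >= 0 {
			line = line[:i] // drop the comment, as ParseVendorConf does
		}
		if f := strings.Fields(line); len(f) >= 2 {
			fmt.Printf("require %s %s\n", f[0], f[1])
		}
	}
}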
diff --git a/src/cmd/go/internal/modconv/vjson.go b/src/cmd/go/internal/modconv/vjson.go
new file mode 100644
index 0000000..1bd025c
--- /dev/null
+++ b/src/cmd/go/internal/modconv/vjson.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import (
+ "encoding/json"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+func ParseVendorJSON(file string, data []byte) (*modfile.File, error) {
+ var cfg struct {
+ Package []struct {
+ Path string
+ Revision string
+ }
+ }
+ if err := json.Unmarshal(data, &cfg); err != nil {
+ return nil, err
+ }
+ mf := new(modfile.File)
+ for _, d := range cfg.Package {
+ mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: d.Path, Version: d.Revision}})
+ }
+ return mf, nil
+}
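
ParseVendorJSON above decodes the govendor vendor/vendor.json shape used by the prometheus testdata earlier in this change: a "package" array whose entries carry "path" and "revision", while keys such as "checksumSHA1" and "revisionTime" are ignored. A self-contained sketch of the same decoding, trimmed down to two entries taken from that testdata:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Two entries copied from the vendor.json testdata above; only path and
	// revision are read, matching the anonymous struct in ParseVendorJSON.
	data := []byte(`{
		"package": [
			{"path": "golang.org/x/sys/unix", "revision": "c200b10b5d5e122be351b67af224adc6128af5bf"},
			{"path": "gopkg.in/yaml.v2", "revision": "7ad95dd0798a40da1ccdff6dff35fd177b5edf40"}
		]
	}`)
	var cfg struct {
		Package []struct {
			Path     string
			Revision string
		}
	}
	if err := json.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	for _, d := range cfg.Package {
		fmt.Printf("require %s %s\n", d.Path, d.Revision)
	}
}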
diff --git a/src/cmd/go/internal/modconv/vmanifest.go b/src/cmd/go/internal/modconv/vmanifest.go
new file mode 100644
index 0000000..bcf0008
--- /dev/null
+++ b/src/cmd/go/internal/modconv/vmanifest.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import (
+ "encoding/json"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+func ParseVendorManifest(file string, data []byte) (*modfile.File, error) {
+ var cfg struct {
+ Dependencies []struct {
+ ImportPath string
+ Revision string
+ }
+ }
+ if err := json.Unmarshal(data, &cfg); err != nil {
+ return nil, err
+ }
+ mf := new(modfile.File)
+ for _, d := range cfg.Dependencies {
+ mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: d.ImportPath, Version: d.Revision}})
+ }
+ return mf, nil
+}
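
ParseVendorManifest above handles the gvt/gb-vendor vendor/manifest layout, which is the same idea under different key names: a "dependencies" array whose entries carry "importpath" and "revision". A self-contained sketch with a hypothetical entry (the extra keys are only there to show that they are ignored):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical vendor/manifest content; only importpath and revision are
	// read, matching the anonymous struct in ParseVendorManifest.
	data := []byte(`{
		"version": 0,
		"dependencies": [
			{
				"importpath": "github.com/example/errs",
				"repository": "https://github.com/example/errs",
				"revision": "0123456789abcdef0123456789abcdef01234567",
				"branch": "master"
			}
		]
	}`)
	var cfg struct {
		Dependencies []struct {
			ImportPath string
			Revision   string
		}
	}
	if err := json.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	for _, d := range cfg.Dependencies {
		fmt.Printf("require %s %s\n", d.ImportPath, d.Revision)
	}
}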
diff --git a/src/cmd/go/internal/modconv/vyml.go b/src/cmd/go/internal/modconv/vyml.go
new file mode 100644
index 0000000..cfa4194
--- /dev/null
+++ b/src/cmd/go/internal/modconv/vyml.go
@@ -0,0 +1,41 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modconv
+
+import (
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+func ParseVendorYML(file string, data []byte) (*modfile.File, error) {
+ mf := new(modfile.File)
+ vendors := false
+ path := ""
+ for _, line := range strings.Split(string(data), "\n") {
+ if line == "" {
+ continue
+ }
+ if strings.HasPrefix(line, "vendors:") {
+ vendors = true
+ } else if line[0] != '-' && line[0] != ' ' && line[0] != '\t' {
+ vendors = false
+ }
+ if !vendors {
+ continue
+ }
+ if strings.HasPrefix(line, "- path:") {
+ path = strings.TrimSpace(line[len("- path:"):])
+ }
+ if strings.HasPrefix(line, " rev:") {
+ rev := strings.TrimSpace(line[len(" rev:"):])
+ if path != "" && rev != "" {
+ mf.Require = append(mf.Require, &modfile.Require{Mod: module.Version{Path: path, Version: rev}})
+ }
+ }
+ }
+ return mf, nil
+}
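
ParseVendorYML above does not pull in a YAML library; it scans for the top-level "vendors:" section and pairs each "- path:" line with the indented "  rev:" line that follows. The sketch below shows the input shape it expects, with hypothetical entries; the vendors-section gating is omitted for brevity.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical vendor.yml entries (the format written by govend); the parser
	// keys off the literal prefixes "- path:" and "  rev:" rather than general
	// YAML structure.
	lines := []string{
		"vendors:",
		"- path: github.com/example/dep",
		"  rev: 0123456789abcdef0123456789abcdef01234567",
		"- path: gopkg.in/example.v1",
		"  rev: v1.4.2",
	}
	path := ""
	for _, line := range lines {
		if strings.HasPrefix(line, "- path:") {
			path = strings.TrimSpace(line[len("- path:"):])
		}
		if strings.HasPrefix(line, "  rev:") {
			fmt.Printf("require %s %s\n", path, strings.TrimSpace(line[len("  rev:"):]))
		}
	}
}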
diff --git a/src/cmd/go/internal/modfetch/bootstrap.go b/src/cmd/go/internal/modfetch/bootstrap.go
new file mode 100644
index 0000000..e23669f
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/bootstrap.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cmd_go_bootstrap
+
+package modfetch
+
+import "golang.org/x/mod/module"
+
+func useSumDB(mod module.Version) bool {
+ return false
+}
+
+func lookupSumDB(mod module.Version) (string, []string, error) {
+ panic("bootstrap")
+}
diff --git a/src/cmd/go/internal/modfetch/cache.go b/src/cmd/go/internal/modfetch/cache.go
new file mode 100644
index 0000000..5a727c6
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/cache.go
@@ -0,0 +1,815 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfetch
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/modfetch/codehost"
+ "cmd/go/internal/par"
+ "cmd/go/internal/robustio"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
+)
+
+func cacheDir(ctx context.Context, path string) (string, error) {
+ if err := checkCacheDir(ctx); err != nil {
+ return "", err
+ }
+ enc, err := module.EscapePath(path)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(cfg.GOMODCACHE, "cache/download", enc, "/@v"), nil
+}
+
+func CachePath(ctx context.Context, m module.Version, suffix string) (string, error) {
+ if gover.IsToolchain(m.Path) {
+ return "", ErrToolchain
+ }
+ dir, err := cacheDir(ctx, m.Path)
+ if err != nil {
+ return "", err
+ }
+ if !gover.ModIsValid(m.Path, m.Version) {
+ return "", fmt.Errorf("non-semver module version %q", m.Version)
+ }
+ if module.CanonicalVersion(m.Version) != m.Version {
+ return "", fmt.Errorf("non-canonical module version %q", m.Version)
+ }
+ encVer, err := module.EscapeVersion(m.Version)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(dir, encVer+"."+suffix), nil
+}
+
+// DownloadDir returns the directory to which m should have been downloaded.
+// An error will be returned if the module path or version cannot be escaped.
+// An error satisfying errors.Is(err, fs.ErrNotExist) will be returned
+// along with the directory if the directory does not exist or if the directory
+// is not completely populated.
+func DownloadDir(ctx context.Context, m module.Version) (string, error) {
+ if gover.IsToolchain(m.Path) {
+ return "", ErrToolchain
+ }
+ if err := checkCacheDir(ctx); err != nil {
+ return "", err
+ }
+ enc, err := module.EscapePath(m.Path)
+ if err != nil {
+ return "", err
+ }
+ if !gover.ModIsValid(m.Path, m.Version) {
+ return "", fmt.Errorf("non-semver module version %q", m.Version)
+ }
+ if module.CanonicalVersion(m.Version) != m.Version {
+ return "", fmt.Errorf("non-canonical module version %q", m.Version)
+ }
+ encVer, err := module.EscapeVersion(m.Version)
+ if err != nil {
+ return "", err
+ }
+
+ // Check whether the directory itself exists.
+ dir := filepath.Join(cfg.GOMODCACHE, enc+"@"+encVer)
+ if fi, err := os.Stat(dir); os.IsNotExist(err) {
+ return dir, err
+ } else if err != nil {
+ return dir, &DownloadDirPartialError{dir, err}
+ } else if !fi.IsDir() {
+ return dir, &DownloadDirPartialError{dir, errors.New("not a directory")}
+ }
+
+ // Check if a .partial file exists. This is created at the beginning of
+ // a download and removed after the zip is extracted.
+ partialPath, err := CachePath(ctx, m, "partial")
+ if err != nil {
+ return dir, err
+ }
+ if _, err := os.Stat(partialPath); err == nil {
+ return dir, &DownloadDirPartialError{dir, errors.New("not completely extracted")}
+ } else if !os.IsNotExist(err) {
+ return dir, err
+ }
+
+ // Check if a .ziphash file exists. It should be created before the
+ // zip is extracted, but if it was deleted (by another program?), we need
+ // to re-calculate it. Note that checkMod will repopulate the ziphash
+ // file if it doesn't exist, but if the module is excluded by checks
+ // through GONOSUMDB or GOPRIVATE, that check and repopulation won't happen.
+ ziphashPath, err := CachePath(ctx, m, "ziphash")
+ if err != nil {
+ return dir, err
+ }
+ if _, err := os.Stat(ziphashPath); os.IsNotExist(err) {
+ return dir, &DownloadDirPartialError{dir, errors.New("ziphash file is missing")}
+ } else if err != nil {
+ return dir, err
+ }
+ return dir, nil
+}
+
+// DownloadDirPartialError is returned by DownloadDir if a module directory
+// exists but was not completely populated.
+//
+// DownloadDirPartialError is equivalent to fs.ErrNotExist.
+type DownloadDirPartialError struct {
+ Dir string
+ Err error
+}
+
+func (e *DownloadDirPartialError) Error() string { return fmt.Sprintf("%s: %v", e.Dir, e.Err) }
+func (e *DownloadDirPartialError) Is(err error) bool { return err == fs.ErrNotExist }
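
// Illustrative sketch, not part of the original patch: because
// DownloadDirPartialError reports Is(fs.ErrNotExist), callers can treat a
// missing module directory and a partially extracted one the same way. The
// helper below is hypothetical and only demonstrates the intended error
// handling around DownloadDir.
func needsDownload(ctx context.Context, m module.Version) (bool, error) {
	if _, err := DownloadDir(ctx, m); err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return true, nil // absent or incompletely extracted: fetch it again
		}
		return false, err // some other cache problem; report it
	}
	return false, nil // already fully extracted
}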
+
+// lockVersion locks a file within the module cache that guards the downloading
+// and extraction of the zipfile for the given module version.
+func lockVersion(ctx context.Context, mod module.Version) (unlock func(), err error) {
+ path, err := CachePath(ctx, mod, "lock")
+ if err != nil {
+ return nil, err
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
+ return nil, err
+ }
+ return lockedfile.MutexAt(path).Lock()
+}
+
+// SideLock locks a file within the module cache that previously guarded
+// edits to files outside the cache, such as go.sum and go.mod files in the
+// user's working directory.
+// If err is nil, the caller MUST eventually call the unlock function.
+func SideLock(ctx context.Context) (unlock func(), err error) {
+ if err := checkCacheDir(ctx); err != nil {
+ return nil, err
+ }
+
+ path := filepath.Join(cfg.GOMODCACHE, "cache", "lock")
+ if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
+ return nil, fmt.Errorf("failed to create cache directory: %w", err)
+ }
+
+ return lockedfile.MutexAt(path).Lock()
+}
+
+// A cachingRepo is a cache around an underlying Repo,
+// avoiding redundant calls to ModulePath, Versions, Stat, Latest, and GoMod (but not CheckReuse or Zip).
+// It is also safe for simultaneous use by multiple goroutines
+// (so that it can be returned from Lookup multiple times).
+// It serializes calls to the underlying Repo.
+type cachingRepo struct {
+ path string
+ versionsCache par.ErrCache[string, *Versions]
+ statCache par.ErrCache[string, *RevInfo]
+ latestCache par.ErrCache[struct{}, *RevInfo]
+ gomodCache par.ErrCache[string, []byte]
+
+ once sync.Once
+ initRepo func(context.Context) (Repo, error)
+ r Repo
+}
+
+func newCachingRepo(ctx context.Context, path string, initRepo func(context.Context) (Repo, error)) *cachingRepo {
+ return &cachingRepo{
+ path: path,
+ initRepo: initRepo,
+ }
+}
+
+func (r *cachingRepo) repo(ctx context.Context) Repo {
+ r.once.Do(func() {
+ var err error
+ r.r, err = r.initRepo(ctx)
+ if err != nil {
+ r.r = errRepo{r.path, err}
+ }
+ })
+ return r.r
+}
+
+func (r *cachingRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error {
+ return r.repo(ctx).CheckReuse(ctx, old)
+}
+
+func (r *cachingRepo) ModulePath() string {
+ return r.path
+}
+
+func (r *cachingRepo) Versions(ctx context.Context, prefix string) (*Versions, error) {
+ v, err := r.versionsCache.Do(prefix, func() (*Versions, error) {
+ return r.repo(ctx).Versions(ctx, prefix)
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ return &Versions{
+ Origin: v.Origin,
+ List: append([]string(nil), v.List...),
+ }, nil
+}
+
+type cachedInfo struct {
+ info *RevInfo
+ err error
+}
+
+func (r *cachingRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) {
+ if gover.IsToolchain(r.path) {
+ // Skip disk cache; the underlying golang.org/toolchain repo is cached instead.
+ return r.repo(ctx).Stat(ctx, rev)
+ }
+ info, err := r.statCache.Do(rev, func() (*RevInfo, error) {
+ file, info, err := readDiskStat(ctx, r.path, rev)
+ if err == nil {
+ return info, err
+ }
+
+ info, err = r.repo(ctx).Stat(ctx, rev)
+ if err == nil {
+ // If we resolved, say, 1234abcde to v0.0.0-20180604122334-1234abcdef78,
+ // then save the information under the proper version, for future use.
+ if info.Version != rev {
+ file, _ = CachePath(ctx, module.Version{Path: r.path, Version: info.Version}, "info")
+ r.statCache.Do(info.Version, func() (*RevInfo, error) {
+ return info, nil
+ })
+ }
+
+ if err := writeDiskStat(ctx, file, info); err != nil {
+ fmt.Fprintf(os.Stderr, "go: writing stat cache: %v\n", err)
+ }
+ }
+ return info, err
+ })
+ if info != nil {
+ copy := *info
+ info = &copy
+ }
+ return info, err
+}
+
+func (r *cachingRepo) Latest(ctx context.Context) (*RevInfo, error) {
+ if gover.IsToolchain(r.path) {
+ // Skip disk cache; the underlying golang.org/toolchain repo is cached instead.
+ return r.repo(ctx).Latest(ctx)
+ }
+ info, err := r.latestCache.Do(struct{}{}, func() (*RevInfo, error) {
+ info, err := r.repo(ctx).Latest(ctx)
+
+ // Save info for likely future Stat call.
+ if err == nil {
+ r.statCache.Do(info.Version, func() (*RevInfo, error) {
+ return info, nil
+ })
+ if file, _, err := readDiskStat(ctx, r.path, info.Version); err != nil {
+ writeDiskStat(ctx, file, info)
+ }
+ }
+
+ return info, err
+ })
+ if info != nil {
+ copy := *info
+ info = &copy
+ }
+ return info, err
+}
+
+func (r *cachingRepo) GoMod(ctx context.Context, version string) ([]byte, error) {
+ if gover.IsToolchain(r.path) {
+ // Skip disk cache; the underlying golang.org/toolchain repo is cached instead.
+ return r.repo(ctx).GoMod(ctx, version)
+ }
+ text, err := r.gomodCache.Do(version, func() ([]byte, error) {
+ file, text, err := readDiskGoMod(ctx, r.path, version)
+ if err == nil {
+ // Note: readDiskGoMod already called checkGoMod.
+ return text, nil
+ }
+
+ text, err = r.repo(ctx).GoMod(ctx, version)
+ if err == nil {
+ if err := checkGoMod(r.path, version, text); err != nil {
+ return text, err
+ }
+ if err := writeDiskGoMod(ctx, file, text); err != nil {
+ fmt.Fprintf(os.Stderr, "go: writing go.mod cache: %v\n", err)
+ }
+ }
+ return text, err
+ })
+ if err != nil {
+ return nil, err
+ }
+ return append([]byte(nil), text...), nil
+}
+
+func (r *cachingRepo) Zip(ctx context.Context, dst io.Writer, version string) error {
+ if gover.IsToolchain(r.path) {
+ return ErrToolchain
+ }
+ return r.repo(ctx).Zip(ctx, dst, version)
+}
+
+// InfoFile is like Lookup(ctx, path).Stat(version) but also returns the name of the file
+// containing the cached information.
+func InfoFile(ctx context.Context, path, version string) (*RevInfo, string, error) {
+ if !gover.ModIsValid(path, version) {
+ return nil, "", fmt.Errorf("invalid version %q", version)
+ }
+
+ if file, info, err := readDiskStat(ctx, path, version); err == nil {
+ return info, file, nil
+ }
+
+ var info *RevInfo
+ var err2info map[error]*RevInfo
+ err := TryProxies(func(proxy string) error {
+ i, err := Lookup(ctx, proxy, path).Stat(ctx, version)
+ if err == nil {
+ info = i
+ } else {
+ if err2info == nil {
+ err2info = make(map[error]*RevInfo)
+ }
+ err2info[err] = i
+ }
+ return err
+ })
+ if err != nil {
+ return err2info[err], "", err
+ }
+
+ // Stat should have populated the disk cache for us.
+ file, err := CachePath(ctx, module.Version{Path: path, Version: version}, "info")
+ if err != nil {
+ return nil, "", err
+ }
+ return info, file, nil
+}
+
+// GoMod is like Lookup(ctx, path).GoMod(rev) but avoids the
+// repository path resolution in Lookup if the result is
+// already cached on local disk.
+func GoMod(ctx context.Context, path, rev string) ([]byte, error) {
+ // Convert commit hash to pseudo-version
+ // to increase cache hit rate.
+ if !gover.ModIsValid(path, rev) {
+ if _, info, err := readDiskStat(ctx, path, rev); err == nil {
+ rev = info.Version
+ } else {
+ if errors.Is(err, statCacheErr) {
+ return nil, err
+ }
+ err := TryProxies(func(proxy string) error {
+ info, err := Lookup(ctx, proxy, path).Stat(ctx, rev)
+ if err == nil {
+ rev = info.Version
+ }
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ _, data, err := readDiskGoMod(ctx, path, rev)
+ if err == nil {
+ return data, nil
+ }
+
+ err = TryProxies(func(proxy string) (err error) {
+ data, err = Lookup(ctx, proxy, path).GoMod(ctx, rev)
+ return err
+ })
+ return data, err
+}
+
+// GoModFile is like GoMod but returns the name of the file containing
+// the cached information.
+func GoModFile(ctx context.Context, path, version string) (string, error) {
+ if !gover.ModIsValid(path, version) {
+ return "", fmt.Errorf("invalid version %q", version)
+ }
+ if _, err := GoMod(ctx, path, version); err != nil {
+ return "", err
+ }
+ // GoMod should have populated the disk cache for us.
+ file, err := CachePath(ctx, module.Version{Path: path, Version: version}, "mod")
+ if err != nil {
+ return "", err
+ }
+ return file, nil
+}
+
+// GoModSum returns the go.sum entry for the module version's go.mod file.
+// (That is, it returns the entry listed in go.sum as "path version/go.mod".)
+func GoModSum(ctx context.Context, path, version string) (string, error) {
+ if !gover.ModIsValid(path, version) {
+ return "", fmt.Errorf("invalid version %q", version)
+ }
+ data, err := GoMod(ctx, path, version)
+ if err != nil {
+ return "", err
+ }
+ sum, err := goModSum(data)
+ if err != nil {
+ return "", err
+ }
+ return sum, nil
+}
+
+var errNotCached = fmt.Errorf("not in cache")
+
+// readDiskStat reads a cached stat result from disk,
+// returning the name of the cache file and the result.
+// If the read fails, the caller can use
+// writeDiskStat(file, info) to write a new cache entry.
+func readDiskStat(ctx context.Context, path, rev string) (file string, info *RevInfo, err error) {
+ if gover.IsToolchain(path) {
+ return "", nil, errNotCached
+ }
+ file, data, err := readDiskCache(ctx, path, rev, "info")
+ if err != nil {
+ // If the cache already contains a pseudo-version with the given hash, we
+ // would previously return that pseudo-version without checking upstream.
+ // However, that produced an unfortunate side-effect: if the author added a
+ // tag to the repository, 'go get' would not pick up the effect of that new
+ // tag on the existing commits, and 'go' commands that referred to those
+ // commits would use the previous name instead of the new one.
+ //
+ // That's especially problematic if the original pseudo-version starts with
+ // v0.0.0-, as was the case for all pseudo-versions during vgo development,
+ // since a v0.0.0- pseudo-version has lower precedence than pretty much any
+ // tagged version.
+ //
+ // In practice, we're only looking up by hash during initial conversion of a
+ // legacy config and during an explicit 'go get', and a little extra latency
+ // for those operations seems worth the benefit of picking up more accurate
+ // versions.
+ //
+ // Fall back to this resolution scheme only if the GOPROXY setting prohibits
+ // us from resolving upstream tags.
+ if cfg.GOPROXY == "off" {
+ if file, info, err := readDiskStatByHash(ctx, path, rev); err == nil {
+ return file, info, nil
+ }
+ }
+ return file, nil, err
+ }
+ info = new(RevInfo)
+ if err := json.Unmarshal(data, info); err != nil {
+ return file, nil, errNotCached
+ }
+ // The disk might have stale .info files that have Name and Short fields set.
+ // We want to canonicalize to .info files with those fields omitted.
+ // Remarshal and update the cache file if needed.
+ data2, err := json.Marshal(info)
+ if err == nil && !bytes.Equal(data2, data) {
+ writeDiskCache(ctx, file, data)
+ }
+ return file, info, nil
+}
+
+// readDiskStatByHash is a fallback for readDiskStat for the case
+// where rev is a commit hash instead of a proper semantic version.
+// In that case, we look for a cached pseudo-version that matches
+// the commit hash. If we find one, we use it.
+// This matters most for converting legacy package management
+// configs, when we are often looking up commits by full hash.
+// Without this check we'd be doing network I/O to the remote repo
+// just to find out about a commit we already know about
+// (and have cached under its pseudo-version).
+func readDiskStatByHash(ctx context.Context, path, rev string) (file string, info *RevInfo, err error) {
+ if gover.IsToolchain(path) {
+ return "", nil, errNotCached
+ }
+ if cfg.GOMODCACHE == "" {
+ // Do not download to current directory.
+ return "", nil, errNotCached
+ }
+
+ if !codehost.AllHex(rev) || len(rev) < 12 {
+ return "", nil, errNotCached
+ }
+ rev = rev[:12]
+ cdir, err := cacheDir(ctx, path)
+ if err != nil {
+ return "", nil, errNotCached
+ }
+ dir, err := os.Open(cdir)
+ if err != nil {
+ return "", nil, errNotCached
+ }
+ names, err := dir.Readdirnames(-1)
+ dir.Close()
+ if err != nil {
+ return "", nil, errNotCached
+ }
+
+ // A given commit hash may map to more than one pseudo-version,
+ // depending on which tags are present on the repository.
+ // Take the highest such version.
+ var maxVersion string
+ suffix := "-" + rev + ".info"
+ err = errNotCached
+ for _, name := range names {
+ if strings.HasSuffix(name, suffix) {
+ v := strings.TrimSuffix(name, ".info")
+ if module.IsPseudoVersion(v) && semver.Compare(v, maxVersion) > 0 {
+ maxVersion = v
+ file, info, err = readDiskStat(ctx, path, strings.TrimSuffix(name, ".info"))
+ }
+ }
+ }
+ return file, info, err
+}
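
// Illustrative note, not part of the original patch: the scan above relies on
// the cache naming convention "<pseudo-version>.info", so every candidate file
// name ends in "-" plus the 12-character hash prefix plus ".info". For a
// hypothetical commit 0123456789abcdef..., both
//
//	v0.0.0-20180604122334-0123456789ab.info
//	v1.2.1-0.20180604122334-0123456789ab.info
//
// match the "-0123456789ab.info" suffix, and semver.Compare keeps the second,
// higher pseudo-version.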
+
+// oldVgoPrefix is the prefix in the old auto-generated cached go.mod files.
+// We stopped trying to auto-generate the go.mod files. Now we use a trivial
+// go.mod with only a module line, and we've dropped the version prefix
+// entirely. If we see a version prefix, that means we're looking at an old copy
+// and should ignore it.
+var oldVgoPrefix = []byte("//vgo 0.0.")
+
+// readDiskGoMod reads a cached go.mod file from disk,
+// returning the name of the cache file and the result.
+// If the read fails, the caller can use
+// writeDiskGoMod(file, data) to write a new cache entry.
+func readDiskGoMod(ctx context.Context, path, rev string) (file string, data []byte, err error) {
+ if gover.IsToolchain(path) {
+ return "", nil, errNotCached
+ }
+ file, data, err = readDiskCache(ctx, path, rev, "mod")
+
+ // If the file has an old auto-conversion prefix, pretend it's not there.
+ if bytes.HasPrefix(data, oldVgoPrefix) {
+ err = errNotCached
+ data = nil
+ }
+
+ if err == nil {
+ if err := checkGoMod(path, rev, data); err != nil {
+ return "", nil, err
+ }
+ }
+
+ return file, data, err
+}
+
+// readDiskCache is the generic "read from a cache file" implementation.
+// It takes the revision and an identifying suffix for the kind of data being cached.
+// It returns the name of the cache file and the content of the file.
+// If the read fails, the caller can use
+// writeDiskCache(file, data) to write a new cache entry.
+func readDiskCache(ctx context.Context, path, rev, suffix string) (file string, data []byte, err error) {
+ if gover.IsToolchain(path) {
+ return "", nil, errNotCached
+ }
+ file, err = CachePath(ctx, module.Version{Path: path, Version: rev}, suffix)
+ if err != nil {
+ return "", nil, errNotCached
+ }
+ data, err = robustio.ReadFile(file)
+ if err != nil {
+ return file, nil, errNotCached
+ }
+ return file, data, nil
+}
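+
+// Editor's note, an assumption about layout rather than a guarantee from this
+// file: CachePath is expected to resolve to the usual download-cache location,
+//
+//	$GOMODCACHE/cache/download/<escaped module path>/@v/<version>.<suffix>
+//
+// so, for example, readDiskCache(ctx, "example.com/m", "v1.2.3", "mod") would
+// read $GOMODCACHE/cache/download/example.com/m/@v/v1.2.3.mod.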
+
+// writeDiskStat writes a stat result cache entry.
+// The file name must have been returned by a previous call to readDiskStat.
+func writeDiskStat(ctx context.Context, file string, info *RevInfo) error {
+ if file == "" {
+ return nil
+ }
+
+ if info.Origin != nil {
+ // Clean the origin information, which might have too many
+ // validation criteria, for example if we are saving the result of
+ // m@master as m@pseudo-version.
+ clean := *info
+ info = &clean
+ o := *info.Origin
+ info.Origin = &o
+
+ // Tags never matter if you are starting with a semver version,
+ // as we would be when finding this cache entry.
+ o.TagSum = ""
+ o.TagPrefix = ""
+		// Ref doesn't matter if you have a pseudo-version.
+ if module.IsPseudoVersion(info.Version) {
+ o.Ref = ""
+ }
+ }
+
+ js, err := json.Marshal(info)
+ if err != nil {
+ return err
+ }
+ return writeDiskCache(ctx, file, js)
+}
+
+// writeDiskGoMod writes a go.mod cache entry.
+// The file name must have been returned by a previous call to readDiskGoMod.
+func writeDiskGoMod(ctx context.Context, file string, text []byte) error {
+ return writeDiskCache(ctx, file, text)
+}
+
+// writeDiskCache is the generic "write to a cache file" implementation.
+// The file must have been returned by a previous call to readDiskCache.
+func writeDiskCache(ctx context.Context, file string, data []byte) error {
+ if file == "" {
+ return nil
+ }
+ // Make sure directory for file exists.
+ if err := os.MkdirAll(filepath.Dir(file), 0777); err != nil {
+ return err
+ }
+
+ // Write the file to a temporary location, and then rename it to its final
+ // path to reduce the likelihood of a corrupt file existing at that final path.
+ f, err := tempFile(ctx, filepath.Dir(file), filepath.Base(file), 0666)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ // Only call os.Remove on f.Name() if we failed to rename it: otherwise,
+ // some other process may have created a new file with the same name after
+ // the rename completed.
+ if err != nil {
+ f.Close()
+ os.Remove(f.Name())
+ }
+ }()
+
+ if _, err := f.Write(data); err != nil {
+ return err
+ }
+ if err := f.Close(); err != nil {
+ return err
+ }
+ if err := robustio.Rename(f.Name(), file); err != nil {
+ return err
+ }
+
+ if strings.HasSuffix(file, ".mod") {
+ rewriteVersionList(ctx, filepath.Dir(file))
+ }
+ return nil
+}
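+
+// Editor's illustrative sketch of the same write-to-temp-then-rename pattern,
+// simplified (no locking, no version-list rewriting, no robustio retries, and
+// plain os calls instead of the helpers above):
+//
+//	tmp, err := os.CreateTemp(filepath.Dir(file), filepath.Base(file)+".*.tmp")
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := tmp.Write(data); err != nil {
+//		tmp.Close()
+//		os.Remove(tmp.Name())
+//		return err
+//	}
+//	if err := tmp.Close(); err != nil {
+//		os.Remove(tmp.Name())
+//		return err
+//	}
+//	// Readers only ever observe the old file or the complete new one.
+//	return os.Rename(tmp.Name(), file)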
+
+// tempFile creates a new temporary file with given permission bits.
+func tempFile(ctx context.Context, dir, prefix string, perm fs.FileMode) (f *os.File, err error) {
+ for i := 0; i < 10000; i++ {
+ name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+".tmp")
+ f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm)
+ if os.IsExist(err) {
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ continue
+ }
+ break
+ }
+ return
+}
+
+// rewriteVersionList rewrites the version list in dir
+// after a new *.mod file has been written.
+func rewriteVersionList(ctx context.Context, dir string) (err error) {
+ if filepath.Base(dir) != "@v" {
+ base.Fatalf("go: internal error: misuse of rewriteVersionList")
+ }
+
+ listFile := filepath.Join(dir, "list")
+
+	// Lock the list file while writing to it to try to avoid corruption.
+	// Under rare circumstances (for instance, if the system loses power in the
+	// middle of a write) corrupt data may still be written. This is not a
+	// problem for the go command itself, but it may be an issue if the cache
+	// is being served by a GOPROXY HTTP server. The corruption will be corrected
+	// the next time a new version of the module is fetched and the file is rewritten.
+ // TODO(matloob): golang.org/issue/43313 covers adding a go mod verify
+ // command that removes module versions that fail checksums. It should also
+ // remove list files that are detected to be corrupt.
+ f, err := lockedfile.Edit(listFile)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := f.Close(); cerr != nil && err == nil {
+ err = cerr
+ }
+ }()
+ infos, err := os.ReadDir(dir)
+ if err != nil {
+ return err
+ }
+ var list []string
+ for _, info := range infos {
+ // We look for *.mod files on the theory that if we can't supply
+ // the .mod file then there's no point in listing that version,
+ // since it's unusable. (We can have *.info without *.mod.)
+ // We don't require *.zip files on the theory that for code only
+ // involved in module graph construction, many *.zip files
+ // will never be requested.
+ name := info.Name()
+ if v, found := strings.CutSuffix(name, ".mod"); found {
+ if v != "" && module.CanonicalVersion(v) == v {
+ list = append(list, v)
+ }
+ }
+ }
+ semver.Sort(list)
+
+ var buf bytes.Buffer
+ for _, v := range list {
+ buf.WriteString(v)
+ buf.WriteString("\n")
+ }
+	if fi, err := f.Stat(); err == nil && int(fi.Size()) == buf.Len() {
+		old := make([]byte, buf.Len()+1)
+		if n, err := f.ReadAt(old, 0); err == io.EOF && n == buf.Len() && bytes.Equal(buf.Bytes(), old[:n]) {
+			return nil // No edit needed.
+		}
+	}
+ // Remove existing contents, so that when we truncate to the actual size it will zero-fill,
+ // and we will be able to detect (some) incomplete writes as files containing trailing NUL bytes.
+ if err := f.Truncate(0); err != nil {
+ return err
+ }
+ // Reserve the final size and zero-fill.
+ if err := f.Truncate(int64(buf.Len())); err != nil {
+ return err
+ }
+ // Write the actual contents. If this fails partway through,
+ // the remainder of the file should remain as zeroes.
+ if _, err := f.Write(buf.Bytes()); err != nil {
+ f.Truncate(0)
+ return err
+ }
+
+ return nil
+}
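+
+// Editor's illustrative example (versions hypothetical): after a successful
+// rewrite, the @v/list file holds the sorted canonical versions that have a
+// cached .mod file, one per line:
+//
+//	v1.0.0
+//	v1.0.1-pre
+//	v1.1.0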
+
+var (
+ statCacheOnce sync.Once
+ statCacheErr error
+)
+
+// checkCacheDir checks that the directory specified by GOMODCACHE is an
+// absolute path and exists, creating it if necessary. An error is returned
+// if the directory cannot be used.
+func checkCacheDir(ctx context.Context) error {
+ if cfg.GOMODCACHE == "" {
+ // modload.Init exits if GOPATH[0] is empty, and cfg.GOMODCACHE
+ // is set to GOPATH[0]/pkg/mod if GOMODCACHE is empty, so this should never happen.
+ return fmt.Errorf("module cache not found: neither GOMODCACHE nor GOPATH is set")
+ }
+ if !filepath.IsAbs(cfg.GOMODCACHE) {
+		return fmt.Errorf("GOMODCACHE entry is relative; must be absolute path: %q", cfg.GOMODCACHE)
+ }
+
+ // os.Stat is slow on Windows, so we only call it once to prevent unnecessary
+ // I/O every time this function is called.
+ statCacheOnce.Do(func() {
+ fi, err := os.Stat(cfg.GOMODCACHE)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ statCacheErr = fmt.Errorf("could not create module cache: %w", err)
+ return
+ }
+ if err := os.MkdirAll(cfg.GOMODCACHE, 0777); err != nil {
+ statCacheErr = fmt.Errorf("could not create module cache: %w", err)
+ return
+ }
+ return
+ }
+ if !fi.IsDir() {
+ statCacheErr = fmt.Errorf("could not create module cache: %q is not a directory", cfg.GOMODCACHE)
+ return
+ }
+ })
+ return statCacheErr
+}
diff --git a/src/cmd/go/internal/modfetch/cache_test.go b/src/cmd/go/internal/modfetch/cache_test.go
new file mode 100644
index 0000000..6aada66
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/cache_test.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfetch
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestWriteDiskCache(t *testing.T) {
+ ctx := context.Background()
+
+ tmpdir, err := os.MkdirTemp("", "go-writeCache-test-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ err = writeDiskCache(ctx, filepath.Join(tmpdir, "file"), []byte("data"))
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/src/cmd/go/internal/modfetch/codehost/codehost.go b/src/cmd/go/internal/modfetch/codehost/codehost.go
new file mode 100644
index 0000000..ca57762
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/codehost/codehost.go
@@ -0,0 +1,390 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package codehost defines the interface implemented by a code hosting source,
+// along with support code for use by implementations.
+package codehost
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/str"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
+)
+
+// Downloaded size limits.
+const (
+ MaxGoMod = 16 << 20 // maximum size of go.mod file
+ MaxLICENSE = 16 << 20 // maximum size of LICENSE file
+ MaxZipFile = 500 << 20 // maximum size of downloaded zip file
+)
+
+// A Repo represents a code hosting source.
+// Typical implementations include local version control repositories,
+// remote version control servers, and code hosting sites.
+//
+// A Repo must be safe for simultaneous use by multiple goroutines,
+// and callers must not modify returned values, which may be cached and shared.
+type Repo interface {
+ // CheckReuse checks whether the old origin information
+ // remains up to date. If so, whatever cached object it was
+ // taken from can be reused.
+	// The subdir gives the subdirectory name where the module root is expected
+	// to be found: "" for the repository root, or "sub/dir" for a subdirectory
+	// (no trailing slash).
+ CheckReuse(ctx context.Context, old *Origin, subdir string) error
+
+	// Tags lists all tags with the given prefix.
+ Tags(ctx context.Context, prefix string) (*Tags, error)
+
+ // Stat returns information about the revision rev.
+ // A revision can be any identifier known to the underlying service:
+ // commit hash, branch, tag, and so on.
+ Stat(ctx context.Context, rev string) (*RevInfo, error)
+
+ // Latest returns the latest revision on the default branch,
+ // whatever that means in the underlying implementation.
+ Latest(ctx context.Context) (*RevInfo, error)
+
+ // ReadFile reads the given file in the file tree corresponding to revision rev.
+ // It should refuse to read more than maxSize bytes.
+ //
+ // If the requested file does not exist it should return an error for which
+ // os.IsNotExist(err) returns true.
+ ReadFile(ctx context.Context, rev, file string, maxSize int64) (data []byte, err error)
+
+ // ReadZip downloads a zip file for the subdir subdirectory
+ // of the given revision to a new file in a given temporary directory.
+ // It should refuse to read more than maxSize bytes.
+ // It returns a ReadCloser for a streamed copy of the zip file.
+ // All files in the zip file are expected to be
+ // nested in a single top-level directory, whose name is not specified.
+ ReadZip(ctx context.Context, rev, subdir string, maxSize int64) (zip io.ReadCloser, err error)
+
+ // RecentTag returns the most recent tag on rev or one of its predecessors
+ // with the given prefix. allowed may be used to filter out unwanted versions.
+ RecentTag(ctx context.Context, rev, prefix string, allowed func(tag string) bool) (tag string, err error)
+
+ // DescendsFrom reports whether rev or any of its ancestors has the given tag.
+ //
+ // DescendsFrom must return true for any tag returned by RecentTag for the
+ // same revision.
+ DescendsFrom(ctx context.Context, rev, tag string) (bool, error)
+}
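+
+// Editor's illustrative usage sketch (error handling elided; NewRepo is the
+// constructor used elsewhere in this package, and the size limits are the
+// constants above):
+//
+//	r, err := NewRepo(ctx, "git", "https://example.com/repo")
+//	info, err := r.Stat(ctx, "v1.2.3")                         // tag -> commit
+//	mod, err := r.ReadFile(ctx, info.Name, "go.mod", MaxGoMod) // single file
+//	zr, err := r.ReadZip(ctx, info.Name, "", MaxZipFile)       // full tree as zip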
+
+// An Origin describes the provenance of a given repo method result.
+// It can be passed to CheckReuse (usually in a different go command invocation)
+// to see whether the result remains up-to-date.
+type Origin struct {
+ VCS string `json:",omitempty"` // "git" etc
+ URL string `json:",omitempty"` // URL of repository
+ Subdir string `json:",omitempty"` // subdirectory in repo
+
+ // If TagSum is non-empty, then the resolution of this module version
+ // depends on the set of tags present in the repo, specifically the tags
+ // of the form TagPrefix + a valid semver version.
+ // If the matching repo tags and their commit hashes still hash to TagSum,
+ // the Origin is still valid (at least as far as the tags are concerned).
+ // The exact checksum is up to the Repo implementation; see (*gitRepo).Tags.
+ TagPrefix string `json:",omitempty"`
+ TagSum string `json:",omitempty"`
+
+ // If Ref is non-empty, then the resolution of this module version
+ // depends on Ref resolving to the revision identified by Hash.
+ // If Ref still resolves to Hash, the Origin is still valid (at least as far as Ref is concerned).
+ // For Git, the Ref is a full ref like "refs/heads/main" or "refs/tags/v1.2.3",
+ // and the Hash is the Git object hash the ref maps to.
+ // Other VCS might choose differently, but the idea is that Ref is the name
+ // with a mutable meaning while Hash is a name with an immutable meaning.
+ Ref string `json:",omitempty"`
+ Hash string `json:",omitempty"`
+
+ // If RepoSum is non-empty, then the resolution of this module version
+ // failed due to the repo being available but the version not being present.
+ // This depends on the entire state of the repo, which RepoSum summarizes.
+ // For Git, this is a hash of all the refs and their hashes.
+ RepoSum string `json:",omitempty"`
+}
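+
+// Editor's illustrative example (values hypothetical): with the omitempty tags
+// above, an Origin recorded for a tag-list lookup of a module in subdirectory
+// "api" might marshal as
+//
+//	{
+//		"VCS": "git",
+//		"URL": "https://example.com/repo",
+//		"TagPrefix": "api/",
+//		"TagSum": "t1:<base64 of SHA-256 over the matching tags>"
+//	}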
+
+// Checkable reports whether the Origin contains anything that can be checked.
+// If not, the Origin is purely informational and should fail a CheckReuse call.
+func (o *Origin) Checkable() bool {
+ return o.TagSum != "" || o.Ref != "" || o.Hash != "" || o.RepoSum != ""
+}
+
+// ClearCheckable clears the Origin enough to make Checkable return false.
+func (o *Origin) ClearCheckable() {
+ o.TagSum = ""
+ o.TagPrefix = ""
+ o.Ref = ""
+ o.Hash = ""
+ o.RepoSum = ""
+}
+
+// A Tags describes the available tags in a code repository.
+type Tags struct {
+ Origin *Origin
+ List []Tag
+}
+
+// A Tag describes a single tag in a code repository.
+type Tag struct {
+ Name string
+ Hash string // content hash identifying tag's content, if available
+}
+
+// isOriginTag reports whether tag should be preserved
+// in the Tags method's Origin calculation.
+// We can safely ignore tags that look like pseudo-versions,
+// because ../coderepo.go's (*codeRepo).Versions ignores them too.
+// We can also ignore non-semver tags, but we have to include semver
+// tags with extra suffixes, because the pseudo-version base finder uses them.
+func isOriginTag(tag string) bool {
+ // modfetch.(*codeRepo).Versions uses Canonical == tag,
+ // but pseudo-version calculation has a weaker condition that
+ // the canonical is a prefix of the tag.
+ // Include those too, so that if any new one appears, we'll invalidate the cache entry.
+ // This will lead to spurious invalidation of version list results,
+ // but tags of this form being created should be fairly rare
+ // (and invalidate pseudo-version results anyway).
+ c := semver.Canonical(tag)
+ return c != "" && strings.HasPrefix(tag, c) && !module.IsPseudoVersion(tag)
+}
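+
+// Editor's examples, derived from the rule above:
+//
+//	isOriginTag("v1.2.3")                             // true: canonical semver tag
+//	isOriginTag("v2.0.1+incompatible")                // true: canonical "v2.0.1" is a prefix
+//	isOriginTag("v0.0.0-20180417194322-ede458df7cd0") // false: pseudo-version
+//	isOriginTag("release-1.0")                        // false: not semver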
+
+// A RevInfo describes a single revision in a source code repository.
+type RevInfo struct {
+ Origin *Origin
+ Name string // complete ID in underlying repository
+ Short string // shortened ID, for use in pseudo-version
+ Version string // version used in lookup
+ Time time.Time // commit time
+ Tags []string // known tags for commit
+}
+
+// UnknownRevisionError is an error equivalent to fs.ErrNotExist, but for a
+// revision rather than a file.
+type UnknownRevisionError struct {
+ Rev string
+}
+
+func (e *UnknownRevisionError) Error() string {
+ return "unknown revision " + e.Rev
+}
+func (UnknownRevisionError) Is(err error) bool {
+ return err == fs.ErrNotExist
+}
+
+// ErrNoCommits is an error equivalent to fs.ErrNotExist indicating that a given
+// repository or module contains no commits.
+var ErrNoCommits error = noCommitsError{}
+
+type noCommitsError struct{}
+
+func (noCommitsError) Error() string {
+ return "no commits"
+}
+func (noCommitsError) Is(err error) bool {
+ return err == fs.ErrNotExist
+}
+
+// AllHex reports whether the revision rev is entirely lower-case hexadecimal digits.
+func AllHex(rev string) bool {
+ for i := 0; i < len(rev); i++ {
+ c := rev[i]
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+// ShortenSHA1 shortens a SHA1 hash (40 hex digits) to the canonical length
+// used in pseudo-versions (12 hex digits).
+func ShortenSHA1(rev string) string {
+ if AllHex(rev) && len(rev) == 40 {
+ return rev[:12]
+ }
+ return rev
+}
+
+// WorkDir returns the name of the cached work directory to use for the
+// given repository type and name.
+func WorkDir(ctx context.Context, typ, name string) (dir, lockfile string, err error) {
+ if cfg.GOMODCACHE == "" {
+		return "", "", fmt.Errorf("neither GOPATH nor GOMODCACHE is set")
+ }
+
+ // We name the work directory for the SHA256 hash of the type and name.
+ // We intentionally avoid the actual name both because of possible
+ // conflicts with valid file system paths and because we want to ensure
+ // that one checkout is never nested inside another. That nesting has
+ // led to security problems in the past.
+ if strings.Contains(typ, ":") {
+ return "", "", fmt.Errorf("codehost.WorkDir: type cannot contain colon")
+ }
+ key := typ + ":" + name
+ dir = filepath.Join(cfg.GOMODCACHE, "cache/vcs", fmt.Sprintf("%x", sha256.Sum256([]byte(key))))
+
+ xLog, buildX := cfg.BuildXWriter(ctx)
+ if buildX {
+ fmt.Fprintf(xLog, "mkdir -p %s # %s %s\n", filepath.Dir(dir), typ, name)
+ }
+ if err := os.MkdirAll(filepath.Dir(dir), 0777); err != nil {
+ return "", "", err
+ }
+
+ lockfile = dir + ".lock"
+ if buildX {
+ fmt.Fprintf(xLog, "# lock %s\n", lockfile)
+ }
+
+ unlock, err := lockedfile.MutexAt(lockfile).Lock()
+ if err != nil {
+ return "", "", fmt.Errorf("codehost.WorkDir: can't find or create lock file: %v", err)
+ }
+ defer unlock()
+
+ data, err := os.ReadFile(dir + ".info")
+ info, err2 := os.Stat(dir)
+ if err == nil && err2 == nil && info.IsDir() {
+ // Info file and directory both already exist: reuse.
+ have := strings.TrimSuffix(string(data), "\n")
+ if have != key {
+ return "", "", fmt.Errorf("%s exists with wrong content (have %q want %q)", dir+".info", have, key)
+ }
+ if buildX {
+ fmt.Fprintf(xLog, "# %s for %s %s\n", dir, typ, name)
+ }
+ return dir, lockfile, nil
+ }
+
+ // Info file or directory missing. Start from scratch.
+ if xLog != nil {
+ fmt.Fprintf(xLog, "mkdir -p %s # %s %s\n", dir, typ, name)
+ }
+ os.RemoveAll(dir)
+ if err := os.MkdirAll(dir, 0777); err != nil {
+ return "", "", err
+ }
+ if err := os.WriteFile(dir+".info", []byte(key), 0666); err != nil {
+ os.RemoveAll(dir)
+ return "", "", err
+ }
+ return dir, lockfile, nil
+}
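+
+// Editor's illustrative example (URL hypothetical): for a Git repository the
+// caller passes typ == gitWorkDirType, so the work directory is keyed by
+// "git3:<remote URL>", e.g.
+//
+//	key := "git3:https://example.com/repo"
+//	dir := filepath.Join(cfg.GOMODCACHE, "cache/vcs",
+//		fmt.Sprintf("%x", sha256.Sum256([]byte(key))))
+//	// plus dir+".lock" and dir+".info" alongside it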
+
+type RunError struct {
+ Cmd string
+ Err error
+ Stderr []byte
+ HelpText string
+}
+
+func (e *RunError) Error() string {
+ text := e.Cmd + ": " + e.Err.Error()
+ stderr := bytes.TrimRight(e.Stderr, "\n")
+ if len(stderr) > 0 {
+ text += ":\n\t" + strings.ReplaceAll(string(stderr), "\n", "\n\t")
+ }
+ if len(e.HelpText) > 0 {
+ text += "\n" + e.HelpText
+ }
+ return text
+}
+
+var dirLock sync.Map
+
+// Run runs the command line in the given directory
+// (an empty dir means the current directory).
+// It returns the standard output and, for a non-zero exit,
+// a *RunError indicating the command, exit status, and standard error.
+// Standard error is unavailable for commands that exit successfully.
+func Run(ctx context.Context, dir string, cmdline ...any) ([]byte, error) {
+ return RunWithStdin(ctx, dir, nil, cmdline...)
+}
+
+// bashQuoter escapes characters that have special meaning in double-quoted strings in the bash shell.
+// See https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html.
+var bashQuoter = strings.NewReplacer(`"`, `\"`, `$`, `\$`, "`", "\\`", `\`, `\\`)
+
+func RunWithStdin(ctx context.Context, dir string, stdin io.Reader, cmdline ...any) ([]byte, error) {
+ if dir != "" {
+ muIface, ok := dirLock.Load(dir)
+ if !ok {
+ muIface, _ = dirLock.LoadOrStore(dir, new(sync.Mutex))
+ }
+ mu := muIface.(*sync.Mutex)
+ mu.Lock()
+ defer mu.Unlock()
+ }
+
+ cmd := str.StringList(cmdline...)
+ if os.Getenv("TESTGOVCS") == "panic" {
+ panic(fmt.Sprintf("use of vcs: %v", cmd))
+ }
+ if xLog, ok := cfg.BuildXWriter(ctx); ok {
+ text := new(strings.Builder)
+ if dir != "" {
+ text.WriteString("cd ")
+ text.WriteString(dir)
+ text.WriteString("; ")
+ }
+ for i, arg := range cmd {
+ if i > 0 {
+ text.WriteByte(' ')
+ }
+ switch {
+ case strings.ContainsAny(arg, "'"):
+ // Quote args that could be mistaken for quoted args.
+ text.WriteByte('"')
+ text.WriteString(bashQuoter.Replace(arg))
+ text.WriteByte('"')
+ case strings.ContainsAny(arg, "$`\\*?[\"\t\n\v\f\r \u0085\u00a0"):
+ // Quote args that contain special characters, glob patterns, or spaces.
+ text.WriteByte('\'')
+ text.WriteString(arg)
+ text.WriteByte('\'')
+ default:
+ text.WriteString(arg)
+ }
+ }
+ fmt.Fprintf(xLog, "%s\n", text)
+ start := time.Now()
+ defer func() {
+ fmt.Fprintf(xLog, "%.3fs # %s\n", time.Since(start).Seconds(), text)
+ }()
+ }
+ // TODO: Impose limits on command output size.
+ // TODO: Set environment to get English error messages.
+ var stderr bytes.Buffer
+ var stdout bytes.Buffer
+ c := exec.CommandContext(ctx, cmd[0], cmd[1:]...)
+ c.Cancel = func() error { return c.Process.Signal(os.Interrupt) }
+ c.Dir = dir
+ c.Stdin = stdin
+ c.Stderr = &stderr
+ c.Stdout = &stdout
+	// For Git commands, manually supply GIT_DIR so Git works with safe.bareRepository=explicit set.
+	// This is a no-op for other commands.
+ c.Env = append(c.Environ(), "GIT_DIR="+dir)
+ err := c.Run()
+ if err != nil {
+ err = &RunError{Cmd: strings.Join(cmd, " ") + " in " + dir, Stderr: stderr.Bytes(), Err: err}
+ }
+ return stdout.Bytes(), err
+}
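+
+// Editor's illustrative example (path hypothetical): a typical call from this
+// package,
+//
+//	out, err := Run(ctx, r.dir, "git", "ls-remote", "-q", r.remote)
+//
+// is logged under -x roughly as
+//
+//	cd /path/to/workdir; git ls-remote -q origin
+//
+// and, on failure, surfaces as a *RunError whose message includes the command
+// and the indented contents of standard error.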
diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go
new file mode 100644
index 0000000..d1a18a8
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/codehost/git.go
@@ -0,0 +1,915 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package codehost
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/par"
+ "cmd/go/internal/web"
+
+ "golang.org/x/mod/semver"
+)
+
+// LocalGitRepo is like Repo but accepts both Git remote references
+// and paths to repositories on the local file system.
+func LocalGitRepo(ctx context.Context, remote string) (Repo, error) {
+ return newGitRepoCached(ctx, remote, true)
+}
+
+// A notExistError wraps another error to retain its original text
+// but makes it opaquely equivalent to fs.ErrNotExist.
+type notExistError struct {
+ err error
+}
+
+func (e notExistError) Error() string { return e.err.Error() }
+func (notExistError) Is(err error) bool { return err == fs.ErrNotExist }
+
+const gitWorkDirType = "git3"
+
+var gitRepoCache par.ErrCache[gitCacheKey, Repo]
+
+type gitCacheKey struct {
+ remote string
+ localOK bool
+}
+
+func newGitRepoCached(ctx context.Context, remote string, localOK bool) (Repo, error) {
+ return gitRepoCache.Do(gitCacheKey{remote, localOK}, func() (Repo, error) {
+ return newGitRepo(ctx, remote, localOK)
+ })
+}
+
+func newGitRepo(ctx context.Context, remote string, localOK bool) (Repo, error) {
+ r := &gitRepo{remote: remote}
+ if strings.Contains(remote, "://") {
+ // This is a remote path.
+ var err error
+ r.dir, r.mu.Path, err = WorkDir(ctx, gitWorkDirType, r.remote)
+ if err != nil {
+ return nil, err
+ }
+
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return nil, err
+ }
+ defer unlock()
+
+ if _, err := os.Stat(filepath.Join(r.dir, "objects")); err != nil {
+ if _, err := Run(ctx, r.dir, "git", "init", "--bare"); err != nil {
+ os.RemoveAll(r.dir)
+ return nil, err
+ }
+ // We could just say git fetch https://whatever later,
+ // but this lets us say git fetch origin instead, which
+ // is a little nicer. More importantly, using a named remote
+ // avoids a problem with Git LFS. See golang.org/issue/25605.
+ if _, err := Run(ctx, r.dir, "git", "remote", "add", "origin", "--", r.remote); err != nil {
+ os.RemoveAll(r.dir)
+ return nil, err
+ }
+ if runtime.GOOS == "windows" {
+ // Git for Windows by default does not support paths longer than
+ // MAX_PATH (260 characters) because that may interfere with navigation
+ // in some Windows programs. However, cmd/go should be able to handle
+ // long paths just fine, and we expect people to use 'go clean' to
+ // manipulate the module cache, so it should be harmless to set here,
+ // and in some cases may be necessary in order to download modules with
+ // long branch names.
+ //
+ // See https://github.com/git-for-windows/git/wiki/Git-cannot-create-a-file-or-directory-with-a-long-path.
+ if _, err := Run(ctx, r.dir, "git", "config", "core.longpaths", "true"); err != nil {
+ os.RemoveAll(r.dir)
+ return nil, err
+ }
+ }
+ }
+ r.remoteURL = r.remote
+ r.remote = "origin"
+ } else {
+ // Local path.
+ // Disallow colon (not in ://) because sometimes
+ // that's rcp-style host:path syntax and sometimes it's not (c:\work).
+ // The go command has always insisted on URL syntax for ssh.
+ if strings.Contains(remote, ":") {
+ return nil, fmt.Errorf("git remote cannot use host:path syntax")
+ }
+ if !localOK {
+ return nil, fmt.Errorf("git remote must not be local directory")
+ }
+ r.local = true
+ info, err := os.Stat(remote)
+ if err != nil {
+ return nil, err
+ }
+ if !info.IsDir() {
+ return nil, fmt.Errorf("%s exists but is not a directory", remote)
+ }
+ r.dir = remote
+ r.mu.Path = r.dir + ".lock"
+ }
+ return r, nil
+}
+
+type gitRepo struct {
+ ctx context.Context
+
+ remote, remoteURL string
+ local bool
+ dir string
+
+ mu lockedfile.Mutex // protects fetchLevel and git repo state
+
+ fetchLevel int
+
+ statCache par.ErrCache[string, *RevInfo]
+
+ refsOnce sync.Once
+ // refs maps branch and tag refs (e.g., "HEAD", "refs/heads/master")
+ // to commits (e.g., "37ffd2e798afde829a34e8955b716ab730b2a6d6")
+ refs map[string]string
+ refsErr error
+
+ localTagsOnce sync.Once
+ localTags map[string]bool
+}
+
+const (
+ // How much have we fetched into the git repo (in this process)?
+ fetchNone = iota // nothing yet
+ fetchSome // shallow fetches of individual hashes
+ fetchAll // "fetch -t origin": get all remote branches and tags
+)
+
+// loadLocalTags loads tag references from the local git cache
+// into the map r.localTags.
+// Should only be called once, via r.localTagsOnce.Do(func() { r.loadLocalTags(ctx) }).
+func (r *gitRepo) loadLocalTags(ctx context.Context) {
+	// List only the tags already present in the local cache directory.
+	// Tags are assumed not to change meaning, so any tag found here can be
+	// resolved later without network I/O.
+ out, err := Run(ctx, r.dir, "git", "tag", "-l")
+ if err != nil {
+ return
+ }
+
+ r.localTags = make(map[string]bool)
+ for _, line := range strings.Split(string(out), "\n") {
+ if line != "" {
+ r.localTags[line] = true
+ }
+ }
+}
+
+func (r *gitRepo) CheckReuse(ctx context.Context, old *Origin, subdir string) error {
+ if old == nil {
+ return fmt.Errorf("missing origin")
+ }
+ if old.VCS != "git" || old.URL != r.remoteURL {
+ return fmt.Errorf("origin moved from %v %q to %v %q", old.VCS, old.URL, "git", r.remoteURL)
+ }
+ if old.Subdir != subdir {
+ return fmt.Errorf("origin moved from %v %q %q to %v %q %q", old.VCS, old.URL, old.Subdir, "git", r.remoteURL, subdir)
+ }
+
+ // Note: Can have Hash with no Ref and no TagSum and no RepoSum,
+ // meaning the Hash simply has to remain in the repo.
+ // In that case we assume it does in the absence of any real way to check.
+ // But if neither Hash nor TagSum is present, we have nothing to check,
+ // which we take to mean we didn't record enough information to be sure.
+ if old.Hash == "" && old.TagSum == "" && old.RepoSum == "" {
+ return fmt.Errorf("non-specific origin")
+ }
+
+ r.loadRefs(ctx)
+ if r.refsErr != nil {
+ return r.refsErr
+ }
+
+ if old.Ref != "" {
+ hash, ok := r.refs[old.Ref]
+ if !ok {
+ return fmt.Errorf("ref %q deleted", old.Ref)
+ }
+ if hash != old.Hash {
+ return fmt.Errorf("ref %q moved from %s to %s", old.Ref, old.Hash, hash)
+ }
+ }
+ if old.TagSum != "" {
+ tags, err := r.Tags(ctx, old.TagPrefix)
+ if err != nil {
+ return err
+ }
+ if tags.Origin.TagSum != old.TagSum {
+ return fmt.Errorf("tags changed")
+ }
+ }
+ if old.RepoSum != "" {
+ if r.repoSum(r.refs) != old.RepoSum {
+ return fmt.Errorf("refs changed")
+ }
+ }
+ return nil
+}
+
+// loadRefs loads heads and tags references from the remote into the map r.refs.
+// The result is cached in memory.
+func (r *gitRepo) loadRefs(ctx context.Context) (map[string]string, error) {
+ r.refsOnce.Do(func() {
+ // The git protocol sends all known refs and ls-remote filters them on the client side,
+ // so we might as well record both heads and tags in one shot.
+ // Most of the time we only care about tags but sometimes we care about heads too.
+ release, err := base.AcquireNet()
+ if err != nil {
+ r.refsErr = err
+ return
+ }
+ out, gitErr := Run(ctx, r.dir, "git", "ls-remote", "-q", r.remote)
+ release()
+
+ if gitErr != nil {
+ if rerr, ok := gitErr.(*RunError); ok {
+ if bytes.Contains(rerr.Stderr, []byte("fatal: could not read Username")) {
+ rerr.HelpText = "Confirm the import path was entered correctly.\nIf this is a private repository, see https://golang.org/doc/faq#git_https for additional information."
+ }
+ }
+
+ // If the remote URL doesn't exist at all, ideally we should treat the whole
+ // repository as nonexistent by wrapping the error in a notExistError.
+ // For HTTP and HTTPS, that's easy to detect: we'll try to fetch the URL
+ // ourselves and see what code it serves.
+ if u, err := url.Parse(r.remoteURL); err == nil && (u.Scheme == "http" || u.Scheme == "https") {
+ if _, err := web.GetBytes(u); errors.Is(err, fs.ErrNotExist) {
+ gitErr = notExistError{gitErr}
+ }
+ }
+
+ r.refsErr = gitErr
+ return
+ }
+
+ refs := make(map[string]string)
+ for _, line := range strings.Split(string(out), "\n") {
+ f := strings.Fields(line)
+ if len(f) != 2 {
+ continue
+ }
+ if f[1] == "HEAD" || strings.HasPrefix(f[1], "refs/heads/") || strings.HasPrefix(f[1], "refs/tags/") {
+ refs[f[1]] = f[0]
+ }
+ }
+ for ref, hash := range refs {
+ if k, found := strings.CutSuffix(ref, "^{}"); found { // record unwrapped annotated tag as value of tag
+ refs[k] = hash
+ delete(refs, ref)
+ }
+ }
+ r.refs = refs
+ })
+ return r.refs, r.refsErr
+}
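+
+// Editor's illustrative example (hashes hypothetical): ls-remote output has
+// the form "<hash>\t<ref>". An annotated tag appears twice, once as the tag
+// object and once peeled ("^{}"); the loop above keeps the peeled commit hash
+// under the plain tag name:
+//
+//	aaaa... refs/heads/master     -> refs["refs/heads/master"] = "aaaa..."
+//	bbbb... refs/tags/v1.0.0      (annotated tag object, overwritten below)
+//	cccc... refs/tags/v1.0.0^{}   -> refs["refs/tags/v1.0.0"] = "cccc..."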
+
+func (r *gitRepo) Tags(ctx context.Context, prefix string) (*Tags, error) {
+ refs, err := r.loadRefs(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ tags := &Tags{
+ Origin: &Origin{
+ VCS: "git",
+ URL: r.remoteURL,
+ TagPrefix: prefix,
+ },
+ List: []Tag{},
+ }
+ for ref, hash := range refs {
+ if !strings.HasPrefix(ref, "refs/tags/") {
+ continue
+ }
+ tag := ref[len("refs/tags/"):]
+ if !strings.HasPrefix(tag, prefix) {
+ continue
+ }
+ tags.List = append(tags.List, Tag{tag, hash})
+ }
+ sort.Slice(tags.List, func(i, j int) bool {
+ return tags.List[i].Name < tags.List[j].Name
+ })
+
+ dir := prefix[:strings.LastIndex(prefix, "/")+1]
+ h := sha256.New()
+ for _, tag := range tags.List {
+ if isOriginTag(strings.TrimPrefix(tag.Name, dir)) {
+ fmt.Fprintf(h, "%q %s\n", tag.Name, tag.Hash)
+ }
+ }
+ tags.Origin.TagSum = "t1:" + base64.StdEncoding.EncodeToString(h.Sum(nil))
+ return tags, nil
+}
+
+// repoSum returns a checksum of the entire repo state,
+// which can be checked (as Origin.RepoSum) to cache
+// the absence of a specific module version.
+// The caller must supply refs, the result of a successful r.loadRefs.
+func (r *gitRepo) repoSum(refs map[string]string) string {
+ var list []string
+ for ref := range refs {
+ list = append(list, ref)
+ }
+ sort.Strings(list)
+ h := sha256.New()
+ for _, ref := range list {
+ fmt.Fprintf(h, "%q %s\n", ref, refs[ref])
+ }
+ return "r1:" + base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+// unknownRevisionInfo returns a RevInfo containing an Origin containing a RepoSum of refs,
+// for use when returning an UnknownRevisionError.
+func (r *gitRepo) unknownRevisionInfo(refs map[string]string) *RevInfo {
+ return &RevInfo{
+ Origin: &Origin{
+ VCS: "git",
+ URL: r.remoteURL,
+ RepoSum: r.repoSum(refs),
+ },
+ }
+}
+
+func (r *gitRepo) Latest(ctx context.Context) (*RevInfo, error) {
+ refs, err := r.loadRefs(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if refs["HEAD"] == "" {
+ return nil, ErrNoCommits
+ }
+ statInfo, err := r.Stat(ctx, refs["HEAD"])
+ if err != nil {
+ return nil, err
+ }
+
+ // Stat may return cached info, so make a copy to modify here.
+ info := new(RevInfo)
+ *info = *statInfo
+ info.Origin = new(Origin)
+ if statInfo.Origin != nil {
+ *info.Origin = *statInfo.Origin
+ }
+ info.Origin.Ref = "HEAD"
+ info.Origin.Hash = refs["HEAD"]
+
+ return info, nil
+}
+
+// findRef finds some ref name for the given hash,
+// for use when the server requires giving a ref instead of a hash.
+// There may be multiple ref names for a given hash,
+// in which case this returns some name - it doesn't matter which.
+func (r *gitRepo) findRef(ctx context.Context, hash string) (ref string, ok bool) {
+ refs, err := r.loadRefs(ctx)
+ if err != nil {
+ return "", false
+ }
+ for ref, h := range refs {
+ if h == hash {
+ return ref, true
+ }
+ }
+ return "", false
+}
+
+// minHashDigits is the minimum number of digits to require
+// before accepting a hex digit sequence as potentially identifying
+// a specific commit in a git repo. (Of course, users can always
+// specify more digits, and many will paste in all 40 digits,
+// but many of git's commands default to printing short hashes
+// as 7 digits.)
+const minHashDigits = 7
+
+// stat stats the given rev in the local repository,
+// or else it fetches more info from the remote repository and tries again.
+func (r *gitRepo) stat(ctx context.Context, rev string) (info *RevInfo, err error) {
+ if r.local {
+ return r.statLocal(ctx, rev, rev)
+ }
+
+ // Fast path: maybe rev is a hash we already have locally.
+ didStatLocal := false
+ if len(rev) >= minHashDigits && len(rev) <= 40 && AllHex(rev) {
+ if info, err := r.statLocal(ctx, rev, rev); err == nil {
+ return info, nil
+ }
+ didStatLocal = true
+ }
+
+ // Maybe rev is a tag we already have locally.
+ // (Note that we're excluding branches, which can be stale.)
+ r.localTagsOnce.Do(func() { r.loadLocalTags(ctx) })
+ if r.localTags[rev] {
+ return r.statLocal(ctx, rev, "refs/tags/"+rev)
+ }
+
+ // Maybe rev is the name of a tag or branch on the remote server.
+ // Or maybe it's the prefix of a hash of a named ref.
+ // Try to resolve to both a ref (git name) and full (40-hex-digit) commit hash.
+ refs, err := r.loadRefs(ctx)
+ if err != nil {
+ return nil, err
+ }
+	// If loadRefs failed, we have already returned above: without the remote
+	// refs we cannot map a tag name, branch name, or hash prefix to a commit.
+ var ref, hash string
+ if refs["refs/tags/"+rev] != "" {
+ ref = "refs/tags/" + rev
+ hash = refs[ref]
+ // Keep rev as is: tags are assumed not to change meaning.
+ } else if refs["refs/heads/"+rev] != "" {
+ ref = "refs/heads/" + rev
+ hash = refs[ref]
+ rev = hash // Replace rev, because meaning of refs/heads/foo can change.
+ } else if rev == "HEAD" && refs["HEAD"] != "" {
+ ref = "HEAD"
+ hash = refs[ref]
+ rev = hash // Replace rev, because meaning of HEAD can change.
+ } else if len(rev) >= minHashDigits && len(rev) <= 40 && AllHex(rev) {
+ // At the least, we have a hash prefix we can look up after the fetch below.
+ // Maybe we can map it to a full hash using the known refs.
+ prefix := rev
+ // Check whether rev is prefix of known ref hash.
+ for k, h := range refs {
+ if strings.HasPrefix(h, prefix) {
+ if hash != "" && hash != h {
+ // Hash is an ambiguous hash prefix.
+ // More information will not change that.
+ return nil, fmt.Errorf("ambiguous revision %s", rev)
+ }
+ if ref == "" || ref > k { // Break ties deterministically when multiple refs point at same hash.
+ ref = k
+ }
+ rev = h
+ hash = h
+ }
+ }
+ if hash == "" && len(rev) == 40 { // Didn't find a ref, but rev is a full hash.
+ hash = rev
+ }
+ } else {
+ return r.unknownRevisionInfo(refs), &UnknownRevisionError{Rev: rev}
+ }
+
+ defer func() {
+ if info != nil {
+ info.Origin.Hash = info.Name
+ // There's a ref = hash below; don't write that hash down as Origin.Ref.
+ if ref != info.Origin.Hash {
+ info.Origin.Ref = ref
+ }
+ }
+ }()
+
+ // Protect r.fetchLevel and the "fetch more and more" sequence.
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return nil, err
+ }
+ defer unlock()
+
+ // Perhaps r.localTags did not have the ref when we loaded local tags,
+ // but we've since done fetches that pulled down the hash we need
+ // (or already have the hash we need, just without its tag).
+ // Either way, try a local stat before falling back to network I/O.
+ if !didStatLocal {
+ if info, err := r.statLocal(ctx, rev, hash); err == nil {
+ if after, found := strings.CutPrefix(ref, "refs/tags/"); found {
+ // Make sure tag exists, so it will be in localTags next time the go command is run.
+ Run(ctx, r.dir, "git", "tag", after, hash)
+ }
+ return info, nil
+ }
+ }
+
+ // If we know a specific commit we need and its ref, fetch it.
+ // We do NOT fetch arbitrary hashes (when we don't know the ref)
+ // because we want to avoid ever importing a commit that isn't
+ // reachable from refs/tags/* or refs/heads/* or HEAD.
+ // Both Gerrit and GitHub expose every CL/PR as a named ref,
+ // and we don't want those commits masquerading as being real
+ // pseudo-versions in the main repo.
+ if r.fetchLevel <= fetchSome && ref != "" && hash != "" && !r.local {
+ r.fetchLevel = fetchSome
+ var refspec string
+ if ref != "" && ref != "HEAD" {
+ // If we do know the ref name, save the mapping locally
+ // so that (if it is a tag) it can show up in localTags
+ // on a future call. Also, some servers refuse to allow
+ // full hashes in ref specs, so prefer a ref name if known.
+ refspec = ref + ":" + ref
+ } else {
+ // Fetch the hash but give it a local name (refs/dummy),
+ // because that triggers the fetch behavior of creating any
+ // other known remote tags for the hash. We never use
+ // refs/dummy (it's not refs/tags/dummy) and it will be
+ // overwritten in the next command, and that's fine.
+ ref = hash
+ refspec = hash + ":refs/dummy"
+ }
+
+ release, err := base.AcquireNet()
+ if err != nil {
+ return nil, err
+ }
+ _, err = Run(ctx, r.dir, "git", "fetch", "-f", "--depth=1", r.remote, refspec)
+ release()
+
+ if err == nil {
+ return r.statLocal(ctx, rev, ref)
+ }
+ // Don't try to be smart about parsing the error.
+ // It's too complex and varies too much by git version.
+ // No matter what went wrong, fall back to a complete fetch.
+ }
+
+ // Last resort.
+ // Fetch all heads and tags and hope the hash we want is in the history.
+ if err := r.fetchRefsLocked(ctx); err != nil {
+ return nil, err
+ }
+
+ return r.statLocal(ctx, rev, rev)
+}
+
+// fetchRefsLocked fetches all heads and tags from the origin, along with the
+// ancestors of those commits.
+//
+// We only fetch heads and tags, not arbitrary other commits: we don't want to
+// pull in off-branch commits (such as rejected GitHub pull requests) that the
+// server may be willing to provide. (See the comments within the stat method
+// for more detail.)
+//
+// fetchRefsLocked requires that r.mu remain locked for the duration of the call.
+func (r *gitRepo) fetchRefsLocked(ctx context.Context) error {
+ if r.fetchLevel < fetchAll {
+ // NOTE: To work around a bug affecting Git clients up to at least 2.23.0
+ // (2019-08-16), we must first expand the set of local refs, and only then
+ // unshallow the repository as a separate fetch operation. (See
+ // golang.org/issue/34266 and
+ // https://github.com/git/git/blob/4c86140027f4a0d2caaa3ab4bd8bfc5ce3c11c8a/transport.c#L1303-L1309.)
+
+ release, err := base.AcquireNet()
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ if _, err := Run(ctx, r.dir, "git", "fetch", "-f", r.remote, "refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"); err != nil {
+ return err
+ }
+
+ if _, err := os.Stat(filepath.Join(r.dir, "shallow")); err == nil {
+ if _, err := Run(ctx, r.dir, "git", "fetch", "--unshallow", "-f", r.remote); err != nil {
+ return err
+ }
+ }
+
+ r.fetchLevel = fetchAll
+ }
+ return nil
+}
+
+// statLocal returns a new RevInfo describing rev in the local git repository.
+// It uses version as info.Version.
+func (r *gitRepo) statLocal(ctx context.Context, version, rev string) (*RevInfo, error) {
+ out, err := Run(ctx, r.dir, "git", "-c", "log.showsignature=false", "log", "--no-decorate", "-n1", "--format=format:%H %ct %D", rev, "--")
+ if err != nil {
+ // Return info with Origin.RepoSum if possible to allow caching of negative lookup.
+ var info *RevInfo
+ if refs, err := r.loadRefs(ctx); err == nil {
+ info = r.unknownRevisionInfo(refs)
+ }
+ return info, &UnknownRevisionError{Rev: rev}
+ }
+ f := strings.Fields(string(out))
+ if len(f) < 2 {
+ return nil, fmt.Errorf("unexpected response from git log: %q", out)
+ }
+ hash := f[0]
+ if strings.HasPrefix(hash, version) {
+ version = hash // extend to full hash
+ }
+ t, err := strconv.ParseInt(f[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid time from git log: %q", out)
+ }
+
+ info := &RevInfo{
+ Origin: &Origin{
+ VCS: "git",
+ URL: r.remoteURL,
+ Hash: hash,
+ },
+ Name: hash,
+ Short: ShortenSHA1(hash),
+ Time: time.Unix(t, 0).UTC(),
+ Version: hash,
+ }
+ if !strings.HasPrefix(hash, rev) {
+ info.Origin.Ref = rev
+ }
+
+ // Add tags. Output looks like:
+ // ede458df7cd0fdca520df19a33158086a8a68e81 1523994202 HEAD -> master, tag: v1.2.4-annotated, tag: v1.2.3, origin/master, origin/HEAD
+ for i := 2; i < len(f); i++ {
+ if f[i] == "tag:" {
+ i++
+ if i < len(f) {
+ info.Tags = append(info.Tags, strings.TrimSuffix(f[i], ","))
+ }
+ }
+ }
+ sort.Strings(info.Tags)
+
+	// info.Version was set to the commit hash above.
+	// Prefer the caller's suggested version if it appears in the tag list
+	// (this filters out branch names and HEAD).
+ for _, tag := range info.Tags {
+ if version == tag {
+ info.Version = version
+ }
+ }
+
+ return info, nil
+}
+
+func (r *gitRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) {
+ if rev == "latest" {
+ return r.Latest(ctx)
+ }
+ return r.statCache.Do(rev, func() (*RevInfo, error) {
+ return r.stat(ctx, rev)
+ })
+}
+
+func (r *gitRepo) ReadFile(ctx context.Context, rev, file string, maxSize int64) ([]byte, error) {
+ // TODO: Could use git cat-file --batch.
+ info, err := r.Stat(ctx, rev) // download rev into local git repo
+ if err != nil {
+ return nil, err
+ }
+ out, err := Run(ctx, r.dir, "git", "cat-file", "blob", info.Name+":"+file)
+ if err != nil {
+ return nil, fs.ErrNotExist
+ }
+ return out, nil
+}
+
+func (r *gitRepo) RecentTag(ctx context.Context, rev, prefix string, allowed func(tag string) bool) (tag string, err error) {
+ info, err := r.Stat(ctx, rev)
+ if err != nil {
+ return "", err
+ }
+ rev = info.Name // expand hash prefixes
+
+ // describe sets tag and err using 'git for-each-ref' and reports whether the
+ // result is definitive.
+ describe := func() (definitive bool) {
+ var out []byte
+ out, err = Run(ctx, r.dir, "git", "for-each-ref", "--format", "%(refname)", "refs/tags", "--merged", rev)
+ if err != nil {
+ return true
+ }
+
+		// Prefixed tags aren't valid semver tags, so strip the prefix before comparing,
+		// but only consider tags that carry the correct prefix.
+ var highest string
+ for _, line := range strings.Split(string(out), "\n") {
+ line = strings.TrimSpace(line)
+			// Git does support lstrip in the for-each-ref format, but it was added in v2.13.0.
+			// Stripping here instead keeps support for Git v2.7.0.
+ if !strings.HasPrefix(line, "refs/tags/") {
+ continue
+ }
+ line = line[len("refs/tags/"):]
+
+ if !strings.HasPrefix(line, prefix) {
+ continue
+ }
+ if !allowed(line) {
+ continue
+ }
+
+ semtag := line[len(prefix):]
+ if semver.Compare(semtag, highest) > 0 {
+ highest = semtag
+ }
+ }
+
+ if highest != "" {
+ tag = prefix + highest
+ }
+
+ return tag != "" && !AllHex(tag)
+ }
+
+ if describe() {
+ return tag, err
+ }
+
+ // Git didn't find a version tag preceding the requested rev.
+ // See whether any plausible tag exists.
+ tags, err := r.Tags(ctx, prefix+"v")
+ if err != nil {
+ return "", err
+ }
+ if len(tags.List) == 0 {
+ return "", nil
+ }
+
+	// There are plausible tags, but we don't know if rev is a descendant of any of them.
+ // Fetch the history to find out.
+
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return "", err
+ }
+ defer unlock()
+
+ if err := r.fetchRefsLocked(ctx); err != nil {
+ return "", err
+ }
+
+ // If we've reached this point, we have all of the commits that are reachable
+ // from all heads and tags.
+ //
+ // The only refs we should be missing are those that are no longer reachable
+ // (or never were reachable) from any branch or tag, including the master
+ // branch, and we don't want to resolve them anyway (they're probably
+ // unreachable for a reason).
+ //
+ // Try one last time in case some other goroutine fetched rev while we were
+ // waiting on the lock.
+ describe()
+ return tag, err
+}
+
+func (r *gitRepo) DescendsFrom(ctx context.Context, rev, tag string) (bool, error) {
+ // The "--is-ancestor" flag was added to "git merge-base" in version 1.8.0, so
+ // this won't work with Git 1.7.1. According to golang.org/issue/28550, cmd/go
+ // already doesn't work with Git 1.7.1, so at least it's not a regression.
+ //
+ // git merge-base --is-ancestor exits with status 0 if rev is an ancestor, or
+ // 1 if not.
+ _, err := Run(ctx, r.dir, "git", "merge-base", "--is-ancestor", "--", tag, rev)
+
+ // Git reports "is an ancestor" with exit code 0 and "not an ancestor" with
+ // exit code 1.
+ // Unfortunately, if we've already fetched rev with a shallow history, git
+ // merge-base has been observed to report a false-negative, so don't stop yet
+ // even if the exit code is 1!
+ if err == nil {
+ return true, nil
+ }
+
+ // See whether the tag and rev even exist.
+ tags, err := r.Tags(ctx, tag)
+ if err != nil {
+ return false, err
+ }
+ if len(tags.List) == 0 {
+ return false, nil
+ }
+
+ // NOTE: r.stat is very careful not to fetch commits that we shouldn't know
+ // about, like rejected GitHub pull requests, so don't try to short-circuit
+ // that here.
+ if _, err = r.stat(ctx, rev); err != nil {
+ return false, err
+ }
+
+ // Now fetch history so that git can search for a path.
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return false, err
+ }
+ defer unlock()
+
+ if r.fetchLevel < fetchAll {
+ // Fetch the complete history for all refs and heads. It would be more
+ // efficient to only fetch the history from rev to tag, but that's much more
+ // complicated, and any kind of shallow fetch is fairly likely to trigger
+ // bugs in JGit servers and/or the go command anyway.
+ if err := r.fetchRefsLocked(ctx); err != nil {
+ return false, err
+ }
+ }
+
+ _, err = Run(ctx, r.dir, "git", "merge-base", "--is-ancestor", "--", tag, rev)
+ if err == nil {
+ return true, nil
+ }
+ if ee, ok := err.(*RunError).Err.(*exec.ExitError); ok && ee.ExitCode() == 1 {
+ return false, nil
+ }
+ return false, err
+}
+
+func (r *gitRepo) ReadZip(ctx context.Context, rev, subdir string, maxSize int64) (zip io.ReadCloser, err error) {
+ // TODO: Use maxSize or drop it.
+ args := []string{}
+ if subdir != "" {
+ args = append(args, "--", subdir)
+ }
+ info, err := r.Stat(ctx, rev) // download rev into local git repo
+ if err != nil {
+ return nil, err
+ }
+
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return nil, err
+ }
+ defer unlock()
+
+ if err := ensureGitAttributes(r.dir); err != nil {
+ return nil, err
+ }
+
+ // Incredibly, git produces different archives depending on whether
+ // it is running on a Windows system or not, in an attempt to normalize
+ // text file line endings. Setting -c core.autocrlf=input means only
+ // translate files on the way into the repo, not on the way out (archive).
+ // The -c core.eol=lf should be unnecessary but set it anyway.
+ archive, err := Run(ctx, r.dir, "git", "-c", "core.autocrlf=input", "-c", "core.eol=lf", "archive", "--format=zip", "--prefix=prefix/", info.Name, args)
+ if err != nil {
+ if bytes.Contains(err.(*RunError).Stderr, []byte("did not match any files")) {
+ return nil, fs.ErrNotExist
+ }
+ return nil, err
+ }
+
+ return io.NopCloser(bytes.NewReader(archive)), nil
+}
+
+// ensureGitAttributes makes sure export-subst and export-ignore features are
+// disabled for this repo. This is intended to be run prior to running git
+// archive so that zip files are generated that produce consistent ziphashes
+// for a given revision, independent of variables such as git version and the
+// size of the repo.
+//
+// See: https://github.com/golang/go/issues/27153
+func ensureGitAttributes(repoDir string) (err error) {
+ const attr = "\n* -export-subst -export-ignore\n"
+
+ d := repoDir + "/info"
+ p := d + "/attributes"
+
+ if err := os.MkdirAll(d, 0755); err != nil {
+ return err
+ }
+
+ f, err := os.OpenFile(p, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ closeErr := f.Close()
+ if closeErr != nil {
+ err = closeErr
+ }
+ }()
+
+ b, err := io.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ if !bytes.HasSuffix(b, []byte(attr)) {
+ _, err := f.WriteString(attr)
+ return err
+ }
+
+ return nil
+}
diff --git a/src/cmd/go/internal/modfetch/codehost/git_test.go b/src/cmd/go/internal/modfetch/codehost/git_test.go
new file mode 100644
index 0000000..328ab5b
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/codehost/git_test.go
@@ -0,0 +1,801 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package codehost
+
+import (
+ "archive/zip"
+ "bytes"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/vcweb/vcstest"
+ "context"
+ "flag"
+ "internal/testenv"
+ "io"
+ "io/fs"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if err := testMain(m); err != nil {
+ log.Fatal(err)
+ }
+}
+
+var gitrepo1, hgrepo1, vgotest1 string
+
+var altRepos = func() []string {
+ return []string{
+ "localGitRepo",
+ hgrepo1,
+ }
+}
+
+// TODO: Convert gitrepo1 to svn, bzr, fossil and add tests.
+// For now, at least the hgrepo1 tests check the general vcs.go logic.
+
+// localGitRepo is like gitrepo1 but allows archive access
+// (although that doesn't really matter after CL 120041),
+// and has a file:// URL instead of http:// or https://
+// (which might still matter).
+var localGitRepo string
+
+// localGitURL initializes the repo in localGitRepo and returns its URL.
+func localGitURL(t testing.TB) string {
+ testenv.MustHaveExecPath(t, "git")
+ if runtime.GOOS == "android" && strings.HasSuffix(testenv.Builder(), "-corellium") {
+ testenv.SkipFlaky(t, 59940)
+ }
+
+ localGitURLOnce.Do(func() {
+ // Clone gitrepo1 into a local directory.
+ // If we use a file:// URL to access the local directory,
+ // then git starts up all the usual protocol machinery,
+ // which will let us test remote git archive invocations.
+ _, localGitURLErr = Run(context.Background(), "", "git", "clone", "--mirror", gitrepo1, localGitRepo)
+ if localGitURLErr != nil {
+ return
+ }
+ _, localGitURLErr = Run(context.Background(), localGitRepo, "git", "config", "daemon.uploadarch", "true")
+ })
+
+ if localGitURLErr != nil {
+ t.Fatal(localGitURLErr)
+ }
+ // Convert absolute path to file URL. LocalGitRepo will not accept
+ // Windows absolute paths because they look like a host:path remote.
+ // TODO(golang.org/issue/32456): use url.FromFilePath when implemented.
+ if strings.HasPrefix(localGitRepo, "/") {
+ return "file://" + localGitRepo
+ } else {
+ return "file:///" + filepath.ToSlash(localGitRepo)
+ }
+}
+
+var (
+ localGitURLOnce sync.Once
+ localGitURLErr error
+)
+
+func testMain(m *testing.M) (err error) {
+ cfg.BuildX = testing.Verbose()
+
+ srv, err := vcstest.NewServer()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if closeErr := srv.Close(); err == nil {
+ err = closeErr
+ }
+ }()
+
+ gitrepo1 = srv.HTTP.URL + "/git/gitrepo1"
+ hgrepo1 = srv.HTTP.URL + "/hg/hgrepo1"
+ vgotest1 = srv.HTTP.URL + "/git/vgotest1"
+
+ dir, err := os.MkdirTemp("", "gitrepo-test-")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if rmErr := os.RemoveAll(dir); err == nil {
+ err = rmErr
+ }
+ }()
+
+ localGitRepo = filepath.Join(dir, "gitrepo2")
+
+ // Redirect the module cache to a fresh directory to avoid crosstalk, and make
+ // it read/write so that the test can still clean it up easily when done.
+ cfg.GOMODCACHE = filepath.Join(dir, "modcache")
+ cfg.ModCacheRW = true
+
+ m.Run()
+ return nil
+}
+
+func testContext(t testing.TB) context.Context {
+ w := newTestWriter(t)
+ return cfg.WithBuildXWriter(context.Background(), w)
+}
+
+// A testWriter is an io.Writer that writes to a test's log.
+//
+// The writer batches written data until the last byte of a write is a newline
+// character, then flushes the batched data as a single call to Logf.
+// Any remaining unflushed data is logged during Cleanup.
+type testWriter struct {
+ t testing.TB
+
+ mu sync.Mutex
+ buf bytes.Buffer
+}
+
+func newTestWriter(t testing.TB) *testWriter {
+ w := &testWriter{t: t}
+
+ t.Cleanup(func() {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if b := w.buf.Bytes(); len(b) > 0 {
+ w.t.Logf("%s", b)
+ w.buf.Reset()
+ }
+ })
+
+ return w
+}
+
+func (w *testWriter) Write(p []byte) (int, error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ n, err := w.buf.Write(p)
+ if b := w.buf.Bytes(); len(b) > 0 && b[len(b)-1] == '\n' {
+ w.t.Logf("%s", b)
+ w.buf.Reset()
+ }
+ return n, err
+}
+
+func testRepo(ctx context.Context, t *testing.T, remote string) (Repo, error) {
+ if remote == "localGitRepo" {
+ return LocalGitRepo(ctx, localGitURL(t))
+ }
+ vcsName := "git"
+ for _, k := range []string{"hg"} {
+ if strings.Contains(remote, "/"+k+"/") {
+ vcsName = k
+ }
+ }
+ if testing.Short() && vcsName == "hg" {
+ t.Skipf("skipping hg test in short mode: hg is slow")
+ }
+ testenv.MustHaveExecPath(t, vcsName)
+ if runtime.GOOS == "android" && strings.HasSuffix(testenv.Builder(), "-corellium") {
+ testenv.SkipFlaky(t, 59940)
+ }
+ return NewRepo(ctx, vcsName, remote)
+}
+
+func TestTags(t *testing.T) {
+ t.Parallel()
+
+ type tagsTest struct {
+ repo string
+ prefix string
+ tags []Tag
+ }
+
+ runTest := func(tt tagsTest) func(*testing.T) {
+ return func(t *testing.T) {
+ t.Parallel()
+ ctx := testContext(t)
+
+ r, err := testRepo(ctx, t, tt.repo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tags, err := r.Tags(ctx, tt.prefix)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tags == nil || !reflect.DeepEqual(tags.List, tt.tags) {
+ t.Errorf("Tags(%q): incorrect tags\nhave %v\nwant %v", tt.prefix, tags, tt.tags)
+ }
+ }
+ }
+
+ for _, tt := range []tagsTest{
+ {gitrepo1, "xxx", []Tag{}},
+ {gitrepo1, "", []Tag{
+ {"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ {"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ {"v2.0.1", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
+ {"v2.0.2", "9d02800338b8a55be062c838d1f02e0c5780b9eb"},
+ {"v2.3", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
+ }},
+ {gitrepo1, "v", []Tag{
+ {"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ {"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ {"v2.0.1", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
+ {"v2.0.2", "9d02800338b8a55be062c838d1f02e0c5780b9eb"},
+ {"v2.3", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
+ }},
+ {gitrepo1, "v1", []Tag{
+ {"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ {"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ }},
+ {gitrepo1, "2", []Tag{}},
+ } {
+ t.Run(path.Base(tt.repo)+"/"+tt.prefix, runTest(tt))
+ if tt.repo == gitrepo1 {
+ // Clear hashes.
+ clearTags := []Tag{}
+ for _, tag := range tt.tags {
+ clearTags = append(clearTags, Tag{tag.Name, ""})
+ }
+ tags := tt.tags
+ for _, tt.repo = range altRepos() {
+ if strings.Contains(tt.repo, "Git") {
+ tt.tags = tags
+ } else {
+ tt.tags = clearTags
+ }
+ t.Run(path.Base(tt.repo)+"/"+tt.prefix, runTest(tt))
+ }
+ }
+ }
+}
+
+func TestLatest(t *testing.T) {
+ t.Parallel()
+
+ type latestTest struct {
+ repo string
+ info *RevInfo
+ }
+ runTest := func(tt latestTest) func(*testing.T) {
+ return func(t *testing.T) {
+ t.Parallel()
+ ctx := testContext(t)
+
+ r, err := testRepo(ctx, t, tt.repo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ info, err := r.Latest(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(info, tt.info) {
+ if !reflect.DeepEqual(info.Tags, tt.info.Tags) {
+ testenv.SkipFlaky(t, 56881)
+ }
+ t.Errorf("Latest: incorrect info\nhave %+v (origin %+v)\nwant %+v (origin %+v)", info, info.Origin, tt.info, tt.info.Origin)
+ }
+ }
+ }
+
+ for _, tt := range []latestTest{
+ {
+ gitrepo1,
+ &RevInfo{
+ Origin: &Origin{
+ VCS: "git",
+ URL: gitrepo1,
+ Ref: "HEAD",
+ Hash: "ede458df7cd0fdca520df19a33158086a8a68e81",
+ },
+ Name: "ede458df7cd0fdca520df19a33158086a8a68e81",
+ Short: "ede458df7cd0",
+ Version: "ede458df7cd0fdca520df19a33158086a8a68e81",
+ Time: time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC),
+ Tags: []string{"v1.2.3", "v1.2.4-annotated"},
+ },
+ },
+ {
+ hgrepo1,
+ &RevInfo{
+ Origin: &Origin{
+ VCS: "hg",
+ URL: hgrepo1,
+ Hash: "18518c07eb8ed5c80221e997e518cccaa8c0c287",
+ },
+ Name: "18518c07eb8ed5c80221e997e518cccaa8c0c287",
+ Short: "18518c07eb8e",
+ Version: "18518c07eb8ed5c80221e997e518cccaa8c0c287",
+ Time: time.Date(2018, 6, 27, 16, 16, 30, 0, time.UTC),
+ },
+ },
+ } {
+ t.Run(path.Base(tt.repo), runTest(tt))
+ if tt.repo == gitrepo1 {
+ tt.repo = "localGitRepo"
+ info := *tt.info
+ tt.info = &info
+ o := *info.Origin
+ info.Origin = &o
+ o.URL = localGitURL(t)
+ t.Run(path.Base(tt.repo), runTest(tt))
+ }
+ }
+}
+
+func TestReadFile(t *testing.T) {
+ t.Parallel()
+
+ type readFileTest struct {
+ repo string
+ rev string
+ file string
+ err string
+ data string
+ }
+ runTest := func(tt readFileTest) func(*testing.T) {
+ return func(t *testing.T) {
+ t.Parallel()
+ ctx := testContext(t)
+
+ r, err := testRepo(ctx, t, tt.repo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := r.ReadFile(ctx, tt.rev, tt.file, 100)
+ if err != nil {
+ if tt.err == "" {
+ t.Fatalf("ReadFile: unexpected error %v", err)
+ }
+ if !strings.Contains(err.Error(), tt.err) {
+ t.Fatalf("ReadFile: wrong error %q, want %q", err, tt.err)
+ }
+ if len(data) != 0 {
+ t.Errorf("ReadFile: non-empty data %q with error %v", data, err)
+ }
+ return
+ }
+ if tt.err != "" {
+ t.Fatalf("ReadFile: no error, wanted %v", tt.err)
+ }
+ if string(data) != tt.data {
+ t.Errorf("ReadFile: incorrect data\nhave %q\nwant %q", data, tt.data)
+ }
+ }
+ }
+
+ for _, tt := range []readFileTest{
+ {
+ repo: gitrepo1,
+ rev: "latest",
+ file: "README",
+ data: "",
+ },
+ {
+ repo: gitrepo1,
+ rev: "v2",
+ file: "another.txt",
+ data: "another\n",
+ },
+ {
+ repo: gitrepo1,
+ rev: "v2.3.4",
+ file: "another.txt",
+ err: fs.ErrNotExist.Error(),
+ },
+ } {
+ t.Run(path.Base(tt.repo)+"/"+tt.rev+"/"+tt.file, runTest(tt))
+ if tt.repo == gitrepo1 {
+ for _, tt.repo = range altRepos() {
+ t.Run(path.Base(tt.repo)+"/"+tt.rev+"/"+tt.file, runTest(tt))
+ }
+ }
+ }
+}
+
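+ // A zipFile describes a file within a zip archive by name and size.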
+type zipFile struct {
+ name string
+ size int64
+}
+
+func TestReadZip(t *testing.T) {
+ t.Parallel()
+
+ type readZipTest struct {
+ repo string
+ rev string
+ subdir string
+ err string
+ files map[string]uint64
+ }
+ runTest := func(tt readZipTest) func(*testing.T) {
+ return func(t *testing.T) {
+ t.Parallel()
+ ctx := testContext(t)
+
+ r, err := testRepo(ctx, t, tt.repo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ rc, err := r.ReadZip(ctx, tt.rev, tt.subdir, 100000)
+ if err != nil {
+ if tt.err == "" {
+ t.Fatalf("ReadZip: unexpected error %v", err)
+ }
+ if !strings.Contains(err.Error(), tt.err) {
+ t.Fatalf("ReadZip: wrong error %q, want %q", err, tt.err)
+ }
+ if rc != nil {
+ t.Errorf("ReadZip: non-nil io.ReadCloser with error %v", err)
+ }
+ return
+ }
+ defer rc.Close()
+ if tt.err != "" {
+ t.Fatalf("ReadZip: no error, wanted %v", tt.err)
+ }
+ zipdata, err := io.ReadAll(rc)
+ if err != nil {
+ t.Fatal(err)
+ }
+ z, err := zip.NewReader(bytes.NewReader(zipdata), int64(len(zipdata)))
+ if err != nil {
+ t.Fatalf("ReadZip: cannot read zip file: %v", err)
+ }
+ have := make(map[string]bool)
+ for _, f := range z.File {
+ size, ok := tt.files[f.Name]
+ if !ok {
+ t.Errorf("ReadZip: unexpected file %s", f.Name)
+ continue
+ }
+ have[f.Name] = true
+ if size != ^uint64(0) && f.UncompressedSize64 != size {
+ t.Errorf("ReadZip: file %s has unexpected size %d != %d", f.Name, f.UncompressedSize64, size)
+ }
+ }
+ for name := range tt.files {
+ if !have[name] {
+ t.Errorf("ReadZip: missing file %s", name)
+ }
+ }
+ }
+ }
+
+ for _, tt := range []readZipTest{
+ {
+ repo: gitrepo1,
+ rev: "v2.3.4",
+ subdir: "",
+ files: map[string]uint64{
+ "prefix/": 0,
+ "prefix/README": 0,
+ "prefix/v2": 3,
+ },
+ },
+ {
+ repo: hgrepo1,
+ rev: "v2.3.4",
+ subdir: "",
+ files: map[string]uint64{
+ "prefix/.hg_archival.txt": ^uint64(0),
+ "prefix/README": 0,
+ "prefix/v2": 3,
+ },
+ },
+
+ {
+ repo: gitrepo1,
+ rev: "v2",
+ subdir: "",
+ files: map[string]uint64{
+ "prefix/": 0,
+ "prefix/README": 0,
+ "prefix/v2": 3,
+ "prefix/another.txt": 8,
+ "prefix/foo.txt": 13,
+ },
+ },
+ {
+ repo: hgrepo1,
+ rev: "v2",
+ subdir: "",
+ files: map[string]uint64{
+ "prefix/.hg_archival.txt": ^uint64(0),
+ "prefix/README": 0,
+ "prefix/v2": 3,
+ "prefix/another.txt": 8,
+ "prefix/foo.txt": 13,
+ },
+ },
+
+ {
+ repo: gitrepo1,
+ rev: "v3",
+ subdir: "",
+ files: map[string]uint64{
+ "prefix/": 0,
+ "prefix/v3/": 0,
+ "prefix/v3/sub/": 0,
+ "prefix/v3/sub/dir/": 0,
+ "prefix/v3/sub/dir/file.txt": 16,
+ "prefix/README": 0,
+ },
+ },
+ {
+ repo: hgrepo1,
+ rev: "v3",
+ subdir: "",
+ files: map[string]uint64{
+ "prefix/.hg_archival.txt": ^uint64(0),
+ "prefix/.hgtags": 405,
+ "prefix/v3/sub/dir/file.txt": 16,
+ "prefix/README": 0,
+ },
+ },
+
+ {
+ repo: gitrepo1,
+ rev: "v3",
+ subdir: "v3/sub/dir",
+ files: map[string]uint64{
+ "prefix/": 0,
+ "prefix/v3/": 0,
+ "prefix/v3/sub/": 0,
+ "prefix/v3/sub/dir/": 0,
+ "prefix/v3/sub/dir/file.txt": 16,
+ },
+ },
+ {
+ repo: hgrepo1,
+ rev: "v3",
+ subdir: "v3/sub/dir",
+ files: map[string]uint64{
+ "prefix/v3/sub/dir/file.txt": 16,
+ },
+ },
+
+ {
+ repo: gitrepo1,
+ rev: "v3",
+ subdir: "v3/sub",
+ files: map[string]uint64{
+ "prefix/": 0,
+ "prefix/v3/": 0,
+ "prefix/v3/sub/": 0,
+ "prefix/v3/sub/dir/": 0,
+ "prefix/v3/sub/dir/file.txt": 16,
+ },
+ },
+ {
+ repo: hgrepo1,
+ rev: "v3",
+ subdir: "v3/sub",
+ files: map[string]uint64{
+ "prefix/v3/sub/dir/file.txt": 16,
+ },
+ },
+
+ {
+ repo: gitrepo1,
+ rev: "aaaaaaaaab",
+ subdir: "",
+ err: "unknown revision",
+ },
+ {
+ repo: hgrepo1,
+ rev: "aaaaaaaaab",
+ subdir: "",
+ err: "unknown revision",
+ },
+
+ {
+ repo: vgotest1,
+ rev: "submod/v1.0.4",
+ subdir: "submod",
+ files: map[string]uint64{
+ "prefix/": 0,
+ "prefix/submod/": 0,
+ "prefix/submod/go.mod": 53,
+ "prefix/submod/pkg/": 0,
+ "prefix/submod/pkg/p.go": 31,
+ },
+ },
+ } {
+ t.Run(path.Base(tt.repo)+"/"+tt.rev+"/"+tt.subdir, runTest(tt))
+ if tt.repo == gitrepo1 {
+ tt.repo = "localGitRepo"
+ t.Run(path.Base(tt.repo)+"/"+tt.rev+"/"+tt.subdir, runTest(tt))
+ }
+ }
+}
+
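+ // hgmap maps commit hashes in gitrepo1 to the corresponding commit hashes in
+ // hgrepo1, which was converted from the same history.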
+var hgmap = map[string]string{
+ "HEAD": "41964ddce1180313bdc01d0a39a2813344d6261d", // not tip due to bad hgrepo1 conversion
+ "9d02800338b8a55be062c838d1f02e0c5780b9eb": "8f49ee7a6ddcdec6f0112d9dca48d4a2e4c3c09e",
+ "76a00fb249b7f93091bc2c89a789dab1fc1bc26f": "88fde824ec8b41a76baa16b7e84212cee9f3edd0",
+ "ede458df7cd0fdca520df19a33158086a8a68e81": "41964ddce1180313bdc01d0a39a2813344d6261d",
+ "97f6aa59c81c623494825b43d39e445566e429a4": "c0cbbfb24c7c3c50c35c7b88e7db777da4ff625d",
+}
+
+func TestStat(t *testing.T) {
+ t.Parallel()
+
+ type statTest struct {
+ repo string
+ rev string
+ err string
+ info *RevInfo
+ }
+ runTest := func(tt statTest) func(*testing.T) {
+ return func(t *testing.T) {
+ t.Parallel()
+ ctx := testContext(t)
+
+ r, err := testRepo(ctx, t, tt.repo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ info, err := r.Stat(ctx, tt.rev)
+ if err != nil {
+ if tt.err == "" {
+ t.Fatalf("Stat: unexpected error %v", err)
+ }
+ if !strings.Contains(err.Error(), tt.err) {
+ t.Fatalf("Stat: wrong error %q, want %q", err, tt.err)
+ }
+ if info != nil && info.Origin == nil {
+ t.Errorf("Stat: non-nil info with nil Origin with error %q", err)
+ }
+ return
+ }
+ info.Origin = nil // TestLatest and ../../../testdata/script/reuse_git.txt test Origin well enough
+ if !reflect.DeepEqual(info, tt.info) {
+ if !reflect.DeepEqual(info.Tags, tt.info.Tags) {
+ testenv.SkipFlaky(t, 56881)
+ }
+ t.Errorf("Stat: incorrect info\nhave %+v\nwant %+v", *info, *tt.info)
+ }
+ }
+ }
+
+ for _, tt := range []statTest{
+ {
+ repo: gitrepo1,
+ rev: "HEAD",
+ info: &RevInfo{
+ Name: "ede458df7cd0fdca520df19a33158086a8a68e81",
+ Short: "ede458df7cd0",
+ Version: "ede458df7cd0fdca520df19a33158086a8a68e81",
+ Time: time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC),
+ Tags: []string{"v1.2.3", "v1.2.4-annotated"},
+ },
+ },
+ {
+ repo: gitrepo1,
+ rev: "v2", // branch
+ info: &RevInfo{
+ Name: "9d02800338b8a55be062c838d1f02e0c5780b9eb",
+ Short: "9d02800338b8",
+ Version: "9d02800338b8a55be062c838d1f02e0c5780b9eb",
+ Time: time.Date(2018, 4, 17, 20, 00, 32, 0, time.UTC),
+ Tags: []string{"v2.0.2"},
+ },
+ },
+ {
+ repo: gitrepo1,
+ rev: "v2.3.4", // badly-named branch (semver should be a tag)
+ info: &RevInfo{
+ Name: "76a00fb249b7f93091bc2c89a789dab1fc1bc26f",
+ Short: "76a00fb249b7",
+ Version: "76a00fb249b7f93091bc2c89a789dab1fc1bc26f",
+ Time: time.Date(2018, 4, 17, 19, 45, 48, 0, time.UTC),
+ Tags: []string{"v2.0.1", "v2.3"},
+ },
+ },
+ {
+ repo: gitrepo1,
+ rev: "v2.3", // badly-named tag (we only respect full semver v2.3.0)
+ info: &RevInfo{
+ Name: "76a00fb249b7f93091bc2c89a789dab1fc1bc26f",
+ Short: "76a00fb249b7",
+ Version: "v2.3",
+ Time: time.Date(2018, 4, 17, 19, 45, 48, 0, time.UTC),
+ Tags: []string{"v2.0.1", "v2.3"},
+ },
+ },
+ {
+ repo: gitrepo1,
+ rev: "v1.2.3", // tag
+ info: &RevInfo{
+ Name: "ede458df7cd0fdca520df19a33158086a8a68e81",
+ Short: "ede458df7cd0",
+ Version: "v1.2.3",
+ Time: time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC),
+ Tags: []string{"v1.2.3", "v1.2.4-annotated"},
+ },
+ },
+ {
+ repo: gitrepo1,
+ rev: "ede458df", // hash prefix in refs
+ info: &RevInfo{
+ Name: "ede458df7cd0fdca520df19a33158086a8a68e81",
+ Short: "ede458df7cd0",
+ Version: "ede458df7cd0fdca520df19a33158086a8a68e81",
+ Time: time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC),
+ Tags: []string{"v1.2.3", "v1.2.4-annotated"},
+ },
+ },
+ {
+ repo: gitrepo1,
+ rev: "97f6aa59", // hash prefix not in refs
+ info: &RevInfo{
+ Name: "97f6aa59c81c623494825b43d39e445566e429a4",
+ Short: "97f6aa59c81c",
+ Version: "97f6aa59c81c623494825b43d39e445566e429a4",
+ Time: time.Date(2018, 4, 17, 20, 0, 19, 0, time.UTC),
+ },
+ },
+ {
+ repo: gitrepo1,
+ rev: "v1.2.4-annotated", // annotated tag uses unwrapped commit hash
+ info: &RevInfo{
+ Name: "ede458df7cd0fdca520df19a33158086a8a68e81",
+ Short: "ede458df7cd0",
+ Version: "v1.2.4-annotated",
+ Time: time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC),
+ Tags: []string{"v1.2.3", "v1.2.4-annotated"},
+ },
+ },
+ {
+ repo: gitrepo1,
+ rev: "aaaaaaaaab",
+ err: "unknown revision",
+ },
+ } {
+ t.Run(path.Base(tt.repo)+"/"+tt.rev, runTest(tt))
+ if tt.repo == gitrepo1 {
+ for _, tt.repo = range altRepos() {
+ old := tt
+ var m map[string]string
+ if tt.repo == hgrepo1 {
+ m = hgmap
+ }
+ if tt.info != nil {
+ info := *tt.info
+ tt.info = &info
+ tt.info.Name = remap(tt.info.Name, m)
+ tt.info.Version = remap(tt.info.Version, m)
+ tt.info.Short = remap(tt.info.Short, m)
+ }
+ tt.rev = remap(tt.rev, m)
+ t.Run(path.Base(tt.repo)+"/"+tt.rev, runTest(tt))
+ tt = old
+ }
+ }
+ }
+}
+
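+ // remap returns m[name] if present. If name is a hex hash prefix of some key
+ // in m, remap returns the same-length prefix of that key's value; otherwise
+ // it returns name unchanged. For example, remap("ede458df", hgmap) yields
+ // "41964ddc".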
+func remap(name string, m map[string]string) string {
+ if m[name] != "" {
+ return m[name]
+ }
+ if AllHex(name) {
+ for k, v := range m {
+ if strings.HasPrefix(k, name) {
+ return v[:len(name)]
+ }
+ }
+ }
+ return name
+}
diff --git a/src/cmd/go/internal/modfetch/codehost/shell.go b/src/cmd/go/internal/modfetch/codehost/shell.go
new file mode 100644
index 0000000..eaa0195
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/codehost/shell.go
@@ -0,0 +1,141 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+// Interactive debugging shell for codehost.Repo implementations.
+
+package main
+
+import (
+ "archive/zip"
+ "bufio"
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "strings"
+ "time"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/modfetch/codehost"
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: go run shell.go vcs remote\n")
+ os.Exit(2)
+}
+
+func main() {
+ cfg.GOMODCACHE = "/tmp/vcswork"
+ log.SetFlags(0)
+ log.SetPrefix("shell: ")
+ flag.Usage = usage
+ flag.Parse()
+ if flag.NArg() != 2 {
+ usage()
+ }
+
+ ctx := context.Background()
+ repo, err := codehost.NewRepo(ctx, flag.Arg(0), flag.Arg(1))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ b := bufio.NewReader(os.Stdin)
+ for {
+ fmt.Fprintf(os.Stderr, ">>> ")
+ line, err := b.ReadString('\n')
+ if err != nil {
+ log.Fatal(err)
+ }
+ f := strings.Fields(line)
+ if len(f) == 0 {
+ continue
+ }
+ switch f[0] {
+ default:
+ fmt.Fprintf(os.Stderr, "?unknown command\n")
+ continue
+ case "tags":
+ prefix := ""
+ if len(f) == 2 {
+ prefix = f[1]
+ }
+ if len(f) > 2 {
+ fmt.Fprintf(os.Stderr, "?usage: tags [prefix]\n")
+ continue
+ }
+ tags, err := repo.Tags(ctx, prefix)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "?%s\n", err)
+ continue
+ }
+ for _, tag := range tags.List {
+ fmt.Printf("%s %s\n", tag.Name, tag.Hash)
+ }
+
+ case "stat":
+ if len(f) != 2 {
+ fmt.Fprintf(os.Stderr, "?usage: stat rev\n")
+ continue
+ }
+ info, err := repo.Stat(ctx, f[1])
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "?%s\n", err)
+ continue
+ }
+ fmt.Printf("name=%s short=%s version=%s time=%s\n", info.Name, info.Short, info.Version, info.Time.UTC().Format(time.RFC3339))
+
+ case "read":
+ if len(f) != 3 {
+ fmt.Fprintf(os.Stderr, "?usage: read rev file\n")
+ continue
+ }
+ data, err := repo.ReadFile(ctx, f[1], f[2], 10<<20)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "?%s\n", err)
+ continue
+ }
+ os.Stdout.Write(data)
+
+ case "zip":
+ if len(f) != 4 {
+ fmt.Fprintf(os.Stderr, "?usage: zip rev subdir output\n")
+ continue
+ }
+ subdir := f[2]
+ if subdir == "-" {
+ subdir = ""
+ }
+ rc, err := repo.ReadZip(ctx, f[1], subdir, 10<<20)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "?%s\n", err)
+ continue
+ }
+ data, err := io.ReadAll(rc)
+ rc.Close()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "?%s\n", err)
+ continue
+ }
+
+ if f[3] != "-" {
+ if err := os.WriteFile(f[3], data, 0666); err != nil {
+ fmt.Fprintf(os.Stderr, "?%s\n", err)
+ continue
+ }
+ }
+ z, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "?%s\n", err)
+ continue
+ }
+ for _, f := range z.File {
+ fmt.Printf("%s %d\n", f.Name, f.UncompressedSize64)
+ }
+ }
+ }
+}
diff --git a/src/cmd/go/internal/modfetch/codehost/svn.go b/src/cmd/go/internal/modfetch/codehost/svn.go
new file mode 100644
index 0000000..9c1c100
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/codehost/svn.go
@@ -0,0 +1,168 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package codehost
+
+import (
+ "archive/zip"
+ "context"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "time"
+
+ "cmd/go/internal/base"
+)
+
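+ // svnParseStat parses the XML output of 'svn log --xml' for rev into a RevInfo.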
+func svnParseStat(rev, out string) (*RevInfo, error) {
+ var log struct {
+ Logentry struct {
+ Revision int64 `xml:"revision,attr"`
+ Date string `xml:"date"`
+ } `xml:"logentry"`
+ }
+ if err := xml.Unmarshal([]byte(out), &log); err != nil {
+ return nil, vcsErrorf("unexpected response from svn log --xml: %v\n%s", err, out)
+ }
+
+ t, err := time.Parse(time.RFC3339, log.Logentry.Date)
+ if err != nil {
+ return nil, vcsErrorf("unexpected response from svn log --xml: %v\n%s", err, out)
+ }
+
+ info := &RevInfo{
+ Name: strconv.FormatInt(log.Logentry.Revision, 10),
+ Short: fmt.Sprintf("%012d", log.Logentry.Revision),
+ Time: t.UTC(),
+ Version: rev,
+ }
+ return info, nil
+}
+
+func svnReadZip(ctx context.Context, dst io.Writer, workDir, rev, subdir, remote string) (err error) {
+ // The subversion CLI doesn't provide a command to write the repository
+ // directly to an archive, so we need to export it to the local filesystem
+ // instead. Unfortunately, the local filesystem might apply arbitrary
+ // normalization to the filenames, so we need to obtain those directly.
+ //
+ // 'svn export' prints the filenames as they are written, but from reading the
+ // svn source code (as of revision 1868933), those filenames are encoded using
+ // the system locale rather than preserved byte-for-byte from the origin. For
+ // our purposes, that won't do, but we don't want to go mucking around with
+ // the user's locale settings either — that could impact error messages, and
+ // we don't know what locales the user has available or what LC_* variables
+ // their platform supports.
+ //
+ // Instead, we'll do a two-pass export: first we'll run 'svn list' to get the
+ // canonical filenames, then we'll 'svn export' and look for those filenames
+ // in the local filesystem. (If there is an encoding problem at that point, we
+ // would probably reject the resulting module anyway.)
+
+ remotePath := remote
+ if subdir != "" {
+ remotePath += "/" + subdir
+ }
+
+ release, err := base.AcquireNet()
+ if err != nil {
+ return err
+ }
+ out, err := Run(ctx, workDir, []string{
+ "svn", "list",
+ "--non-interactive",
+ "--xml",
+ "--incremental",
+ "--recursive",
+ "--revision", rev,
+ "--", remotePath,
+ })
+ release()
+ if err != nil {
+ return err
+ }
+
+ type listEntry struct {
+ Kind string `xml:"kind,attr"`
+ Name string `xml:"name"`
+ Size int64 `xml:"size"`
+ }
+ var list struct {
+ Entries []listEntry `xml:"entry"`
+ }
+ if err := xml.Unmarshal(out, &list); err != nil {
+ return vcsErrorf("unexpected response from svn list --xml: %v\n%s", err, out)
+ }
+
+ exportDir := filepath.Join(workDir, "export")
+ // Remove any existing contents from a previous (failed) run.
+ if err := os.RemoveAll(exportDir); err != nil {
+ return err
+ }
+ defer os.RemoveAll(exportDir) // best-effort
+
+ release, err = base.AcquireNet()
+ if err != nil {
+ return err
+ }
+ _, err = Run(ctx, workDir, []string{
+ "svn", "export",
+ "--non-interactive",
+ "--quiet",
+
+ // Suppress any platform- or host-dependent transformations.
+ "--native-eol", "LF",
+ "--ignore-externals",
+ "--ignore-keywords",
+
+ "--revision", rev,
+ "--", remotePath,
+ exportDir,
+ })
+ release()
+ if err != nil {
+ return err
+ }
+
+ // Scrape the exported files out of the filesystem and encode them in the zipfile.
+
+ // “All files in the zip file are expected to be
+ // nested in a single top-level directory, whose name is not specified.”
+ // We'll (arbitrarily) choose the base of the remote path.
+ basePath := path.Join(path.Base(remote), subdir)
+
+ zw := zip.NewWriter(dst)
+ for _, e := range list.Entries {
+ if e.Kind != "file" {
+ continue
+ }
+
+ zf, err := zw.Create(path.Join(basePath, e.Name))
+ if err != nil {
+ return err
+ }
+
+ f, err := os.Open(filepath.Join(exportDir, e.Name))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return vcsErrorf("file reported by 'svn list', but not written by 'svn export': %s", e.Name)
+ }
+ return fmt.Errorf("error opening file created by 'svn export': %v", err)
+ }
+
+ n, err := io.Copy(zf, f)
+ f.Close()
+ if err != nil {
+ return err
+ }
+ if n != e.Size {
+ return vcsErrorf("file size differs between 'svn list' and 'svn export': file %s listed as %v bytes, but exported as %v bytes", e.Name, e.Size, n)
+ }
+ }
+
+ return zw.Close()
+}
diff --git a/src/cmd/go/internal/modfetch/codehost/vcs.go b/src/cmd/go/internal/modfetch/codehost/vcs.go
new file mode 100644
index 0000000..5bd1005
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/codehost/vcs.go
@@ -0,0 +1,644 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package codehost
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "internal/lazyregexp"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/par"
+ "cmd/go/internal/str"
+)
+
+// A VCSError indicates an error using a version control system.
+// The implication of a VCSError is that we know definitively where
+// to get the code, but we can't access it due to the error.
+// The caller should report this error instead of continuing to probe
+// other possible module paths.
+//
+// TODO(golang.org/issue/31730): See if we can invert this. (Return a
+// distinguished error for “repo not found” and treat everything else
+// as terminal.)
+type VCSError struct {
+ Err error
+}
+
+func (e *VCSError) Error() string { return e.Err.Error() }
+
+func (e *VCSError) Unwrap() error { return e.Err }
+
+func vcsErrorf(format string, a ...any) error {
+ return &VCSError{Err: fmt.Errorf(format, a...)}
+}
+
+type vcsCacheKey struct {
+ vcs string
+ remote string
+}
+
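+ // NewRepo returns a Repo for the given version control system and remote.
+ // Results are cached by (vcs, remote), and any setup failure is wrapped in a VCSError.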
+func NewRepo(ctx context.Context, vcs, remote string) (Repo, error) {
+ return vcsRepoCache.Do(vcsCacheKey{vcs, remote}, func() (Repo, error) {
+ repo, err := newVCSRepo(ctx, vcs, remote)
+ if err != nil {
+ return nil, &VCSError{err}
+ }
+ return repo, nil
+ })
+}
+
+var vcsRepoCache par.ErrCache[vcsCacheKey, Repo]
+
+type vcsRepo struct {
+ mu lockedfile.Mutex // protects all commands, so we don't have to decide which are safe on a per-VCS basis
+
+ remote string
+ cmd *vcsCmd
+ dir string
+
+ tagsOnce sync.Once
+ tags map[string]bool
+
+ branchesOnce sync.Once
+ branches map[string]bool
+
+ fetchOnce sync.Once
+ fetchErr error
+}
+
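+ // newVCSRepo creates or reuses the cached working directory for remote and,
+ // for VCSes with an init command, clones the remote into it on first use.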
+func newVCSRepo(ctx context.Context, vcs, remote string) (Repo, error) {
+ if vcs == "git" {
+ return newGitRepo(ctx, remote, false)
+ }
+ cmd := vcsCmds[vcs]
+ if cmd == nil {
+ return nil, fmt.Errorf("unknown vcs: %s %s", vcs, remote)
+ }
+ if !strings.Contains(remote, "://") {
+ return nil, fmt.Errorf("invalid vcs remote: %s %s", vcs, remote)
+ }
+
+ r := &vcsRepo{remote: remote, cmd: cmd}
+ var err error
+ r.dir, r.mu.Path, err = WorkDir(ctx, vcsWorkDirType+vcs, r.remote)
+ if err != nil {
+ return nil, err
+ }
+
+ if cmd.init == nil {
+ return r, nil
+ }
+
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return nil, err
+ }
+ defer unlock()
+
+ if _, err := os.Stat(filepath.Join(r.dir, "."+vcs)); err != nil {
+ release, err := base.AcquireNet()
+ if err != nil {
+ return nil, err
+ }
+ _, err = Run(ctx, r.dir, cmd.init(r.remote))
+ release()
+
+ if err != nil {
+ os.RemoveAll(r.dir)
+ return nil, err
+ }
+ }
+ return r, nil
+}
+
+const vcsWorkDirType = "vcs1."
+
+type vcsCmd struct {
+ vcs string // vcs name "hg"
+ init func(remote string) []string // cmd to init repo to track remote
+ tags func(remote string) []string // cmd to list local tags
+ tagRE *lazyregexp.Regexp // regexp to extract tag names from output of tags cmd
+ branches func(remote string) []string // cmd to list local branches
+ branchRE *lazyregexp.Regexp // regexp to extract branch names from output of branches cmd
+ badLocalRevRE *lazyregexp.Regexp // regexp of names that must not be served out of local cache without doing fetch first
+ statLocal func(rev, remote string) []string // cmd to stat local rev
+ parseStat func(rev, out string) (*RevInfo, error) // cmd to parse output of statLocal
+ fetch []string // cmd to fetch everything from remote
+ latest string // name of latest commit on remote (tip, HEAD, etc)
+ readFile func(rev, file, remote string) []string // cmd to read rev's file
+ readZip func(rev, subdir, remote, target string) []string // cmd to read rev's subdir as zip file
+ doReadZip func(ctx context.Context, dst io.Writer, workDir, rev, subdir, remote string) error // arbitrary function to read rev's subdir as zip file
+}
+
+var re = lazyregexp.New
+
+var vcsCmds = map[string]*vcsCmd{
+ "hg": {
+ vcs: "hg",
+ init: func(remote string) []string {
+ return []string{"hg", "clone", "-U", "--", remote, "."}
+ },
+ tags: func(remote string) []string {
+ return []string{"hg", "tags", "-q"}
+ },
+ tagRE: re(`(?m)^[^\n]+$`),
+ branches: func(remote string) []string {
+ return []string{"hg", "branches", "-c", "-q"}
+ },
+ branchRE: re(`(?m)^[^\n]+$`),
+ badLocalRevRE: re(`(?m)^(tip)$`),
+ statLocal: func(rev, remote string) []string {
+ return []string{"hg", "log", "-l1", "-r", rev, "--template", "{node} {date|hgdate} {tags}"}
+ },
+ parseStat: hgParseStat,
+ fetch: []string{"hg", "pull", "-f"},
+ latest: "tip",
+ readFile: func(rev, file, remote string) []string {
+ return []string{"hg", "cat", "-r", rev, file}
+ },
+ readZip: func(rev, subdir, remote, target string) []string {
+ pattern := []string{}
+ if subdir != "" {
+ pattern = []string{"-I", subdir + "/**"}
+ }
+ return str.StringList("hg", "archive", "-t", "zip", "--no-decode", "-r", rev, "--prefix=prefix/", pattern, "--", target)
+ },
+ },
+
+ "svn": {
+ vcs: "svn",
+ init: nil, // no local checkout
+ tags: func(remote string) []string {
+ return []string{"svn", "list", "--", strings.TrimSuffix(remote, "/trunk") + "/tags"}
+ },
+ tagRE: re(`(?m)^(.*?)/?$`),
+ statLocal: func(rev, remote string) []string {
+ suffix := "@" + rev
+ if rev == "latest" {
+ suffix = ""
+ }
+ return []string{"svn", "log", "-l1", "--xml", "--", remote + suffix}
+ },
+ parseStat: svnParseStat,
+ latest: "latest",
+ readFile: func(rev, file, remote string) []string {
+ return []string{"svn", "cat", "--", remote + "/" + file + "@" + rev}
+ },
+ doReadZip: svnReadZip,
+ },
+
+ "bzr": {
+ vcs: "bzr",
+ init: func(remote string) []string {
+ return []string{"bzr", "branch", "--use-existing-dir", "--", remote, "."}
+ },
+ fetch: []string{
+ "bzr", "pull", "--overwrite-tags",
+ },
+ tags: func(remote string) []string {
+ return []string{"bzr", "tags"}
+ },
+ tagRE: re(`(?m)^\S+`),
+ badLocalRevRE: re(`^revno:-`),
+ statLocal: func(rev, remote string) []string {
+ return []string{"bzr", "log", "-l1", "--long", "--show-ids", "-r", rev}
+ },
+ parseStat: bzrParseStat,
+ latest: "revno:-1",
+ readFile: func(rev, file, remote string) []string {
+ return []string{"bzr", "cat", "-r", rev, file}
+ },
+ readZip: func(rev, subdir, remote, target string) []string {
+ extra := []string{}
+ if subdir != "" {
+ extra = []string{"./" + subdir}
+ }
+ return str.StringList("bzr", "export", "--format=zip", "-r", rev, "--root=prefix/", "--", target, extra)
+ },
+ },
+
+ "fossil": {
+ vcs: "fossil",
+ init: func(remote string) []string {
+ return []string{"fossil", "clone", "--", remote, ".fossil"}
+ },
+ fetch: []string{"fossil", "pull", "-R", ".fossil"},
+ tags: func(remote string) []string {
+ return []string{"fossil", "tag", "-R", ".fossil", "list"}
+ },
+ tagRE: re(`XXXTODO`),
+ statLocal: func(rev, remote string) []string {
+ return []string{"fossil", "info", "-R", ".fossil", rev}
+ },
+ parseStat: fossilParseStat,
+ latest: "trunk",
+ readFile: func(rev, file, remote string) []string {
+ return []string{"fossil", "cat", "-R", ".fossil", "-r", rev, file}
+ },
+ readZip: func(rev, subdir, remote, target string) []string {
+ extra := []string{}
+ if subdir != "" && !strings.ContainsAny(subdir, "*?[],") {
+ extra = []string{"--include", subdir}
+ }
+ // Note that vcsRepo.ReadZip below rewrites this command
+ // to run in a different directory, to work around a fossil bug.
+ return str.StringList("fossil", "zip", "-R", ".fossil", "--name", "prefix", extra, "--", rev, target)
+ },
+ },
+}
+
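+ // loadTags runs the VCS-specific tag-listing command and caches the tag names in r.tags.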
+func (r *vcsRepo) loadTags(ctx context.Context) {
+ out, err := Run(ctx, r.dir, r.cmd.tags(r.remote))
+ if err != nil {
+ return
+ }
+
+ // Extract the tag names from the command output.
+ r.tags = make(map[string]bool)
+ for _, tag := range r.cmd.tagRE.FindAllString(string(out), -1) {
+ if r.cmd.badLocalRevRE != nil && r.cmd.badLocalRevRE.MatchString(tag) {
+ continue
+ }
+ r.tags[tag] = true
+ }
+}
+
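+ // loadBranches runs the VCS-specific branch-listing command, if any, and caches the branch names in r.branches.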
+func (r *vcsRepo) loadBranches(ctx context.Context) {
+ if r.cmd.branches == nil {
+ return
+ }
+
+ out, err := Run(ctx, r.dir, r.cmd.branches(r.remote))
+ if err != nil {
+ return
+ }
+
+ r.branches = make(map[string]bool)
+ for _, branch := range r.cmd.branchRE.FindAllString(string(out), -1) {
+ if r.cmd.badLocalRevRE != nil && r.cmd.badLocalRevRE.MatchString(branch) {
+ continue
+ }
+ r.branches[branch] = true
+ }
+}
+
+func (r *vcsRepo) CheckReuse(ctx context.Context, old *Origin, subdir string) error {
+ return fmt.Errorf("vcs %s: CheckReuse: %w", r.cmd.vcs, errors.ErrUnsupported)
+}
+
+func (r *vcsRepo) Tags(ctx context.Context, prefix string) (*Tags, error) {
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return nil, err
+ }
+ defer unlock()
+
+ r.tagsOnce.Do(func() { r.loadTags(ctx) })
+ tags := &Tags{
+ // None of the other VCSs provides a reasonable way to compute TagSum
+ // without downloading the whole repo, so we only include VCS and URL
+ // in the Origin.
+ Origin: &Origin{
+ VCS: r.cmd.vcs,
+ URL: r.remote,
+ },
+ List: []Tag{},
+ }
+ for tag := range r.tags {
+ if strings.HasPrefix(tag, prefix) {
+ tags.List = append(tags.List, Tag{tag, ""})
+ }
+ }
+ sort.Slice(tags.List, func(i, j int) bool {
+ return tags.List[i].Name < tags.List[j].Name
+ })
+ return tags, nil
+}
+
+func (r *vcsRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) {
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return nil, err
+ }
+ defer unlock()
+
+ if rev == "latest" {
+ rev = r.cmd.latest
+ }
+ r.branchesOnce.Do(func() { r.loadBranches(ctx) })
+ revOK := (r.cmd.badLocalRevRE == nil || !r.cmd.badLocalRevRE.MatchString(rev)) && !r.branches[rev]
+ if revOK {
+ if info, err := r.statLocal(ctx, rev); err == nil {
+ return info, nil
+ }
+ }
+
+ r.fetchOnce.Do(func() { r.fetch(ctx) })
+ if r.fetchErr != nil {
+ return nil, r.fetchErr
+ }
+ info, err := r.statLocal(ctx, rev)
+ if err != nil {
+ return nil, err
+ }
+ if !revOK {
+ info.Version = info.Name
+ }
+ return info, nil
+}
+
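+ // fetch pulls new data from the remote, recording any failure in r.fetchErr.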
+func (r *vcsRepo) fetch(ctx context.Context) {
+ if len(r.cmd.fetch) > 0 {
+ release, err := base.AcquireNet()
+ if err != nil {
+ r.fetchErr = err
+ return
+ }
+ _, r.fetchErr = Run(ctx, r.dir, r.cmd.fetch)
+ release()
+ }
+}
+
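+ // statLocal stats rev using only the local clone, reporting an UnknownRevisionError
+ // if the command fails, and stamps the result's Origin with the VCS and remote URL.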
+func (r *vcsRepo) statLocal(ctx context.Context, rev string) (*RevInfo, error) {
+ out, err := Run(ctx, r.dir, r.cmd.statLocal(rev, r.remote))
+ if err != nil {
+ return nil, &UnknownRevisionError{Rev: rev}
+ }
+ info, err := r.cmd.parseStat(rev, string(out))
+ if err != nil {
+ return nil, err
+ }
+ if info.Origin == nil {
+ info.Origin = new(Origin)
+ }
+ info.Origin.VCS = r.cmd.vcs
+ info.Origin.URL = r.remote
+ return info, nil
+}
+
+func (r *vcsRepo) Latest(ctx context.Context) (*RevInfo, error) {
+ return r.Stat(ctx, "latest")
+}
+
+func (r *vcsRepo) ReadFile(ctx context.Context, rev, file string, maxSize int64) ([]byte, error) {
+ if rev == "latest" {
+ rev = r.cmd.latest
+ }
+ _, err := r.Stat(ctx, rev) // download rev into local repo
+ if err != nil {
+ return nil, err
+ }
+
+ // r.Stat acquires r.mu, so lock after that.
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return nil, err
+ }
+ defer unlock()
+
+ out, err := Run(ctx, r.dir, r.cmd.readFile(rev, file, r.remote))
+ if err != nil {
+ return nil, fs.ErrNotExist
+ }
+ return out, nil
+}
+
+func (r *vcsRepo) RecentTag(ctx context.Context, rev, prefix string, allowed func(string) bool) (tag string, err error) {
+ // We don't technically need to lock here since we're returning an error
+ // unconditionally, but doing so anyway will help to avoid baking in
+ // lock-inversion bugs.
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return "", err
+ }
+ defer unlock()
+
+ return "", vcsErrorf("vcs %s: RecentTag: %w", r.cmd.vcs, errors.ErrUnsupported)
+}
+
+func (r *vcsRepo) DescendsFrom(ctx context.Context, rev, tag string) (bool, error) {
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return false, err
+ }
+ defer unlock()
+
+ return false, vcsErrorf("vcs %s: DescendsFrom: %w", r.cmd.vcs, errors.ErrUnsupported)
+}
+
+func (r *vcsRepo) ReadZip(ctx context.Context, rev, subdir string, maxSize int64) (zip io.ReadCloser, err error) {
+ if r.cmd.readZip == nil && r.cmd.doReadZip == nil {
+ return nil, vcsErrorf("vcs %s: ReadZip: %w", r.cmd.vcs, errors.ErrUnsupported)
+ }
+
+ unlock, err := r.mu.Lock()
+ if err != nil {
+ return nil, err
+ }
+ defer unlock()
+
+ if rev == "latest" {
+ rev = r.cmd.latest
+ }
+ f, err := os.CreateTemp("", "go-readzip-*.zip")
+ if err != nil {
+ return nil, err
+ }
+ if r.cmd.doReadZip != nil {
+ lw := &limitedWriter{
+ W: f,
+ N: maxSize,
+ ErrLimitReached: errors.New("ReadZip: encoded file exceeds allowed size"),
+ }
+ err = r.cmd.doReadZip(ctx, lw, r.dir, rev, subdir, r.remote)
+ if err == nil {
+ _, err = f.Seek(0, io.SeekStart)
+ }
+ } else if r.cmd.vcs == "fossil" {
+ // If you run
+ // fossil zip -R .fossil --name prefix trunk /tmp/x.zip
+ // fossil fails with "unable to create directory /tmp" [sic].
+ // Change the command to run in /tmp instead,
+ // replacing the -R argument with an absolute path.
+ args := r.cmd.readZip(rev, subdir, r.remote, filepath.Base(f.Name()))
+ for i := range args {
+ if args[i] == ".fossil" {
+ args[i] = filepath.Join(r.dir, ".fossil")
+ }
+ }
+ _, err = Run(ctx, filepath.Dir(f.Name()), args)
+ } else {
+ _, err = Run(ctx, r.dir, r.cmd.readZip(rev, subdir, r.remote, f.Name()))
+ }
+ if err != nil {
+ f.Close()
+ os.Remove(f.Name())
+ return nil, err
+ }
+ return &deleteCloser{f}, nil
+}
+
+// deleteCloser is a file that gets deleted on Close.
+type deleteCloser struct {
+ *os.File
+}
+
+func (d *deleteCloser) Close() error {
+ defer os.Remove(d.File.Name())
+ return d.File.Close()
+}
+
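+ // hgParseStat parses the output of the 'hg log' template "{node} {date|hgdate} {tags}" into a RevInfo.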
+func hgParseStat(rev, out string) (*RevInfo, error) {
+ f := strings.Fields(out)
+ if len(f) < 3 {
+ return nil, vcsErrorf("unexpected response from hg log: %q", out)
+ }
+ hash := f[0]
+ version := rev
+ if strings.HasPrefix(hash, version) {
+ version = hash // extend to full hash
+ }
+ t, err := strconv.ParseInt(f[1], 10, 64)
+ if err != nil {
+ return nil, vcsErrorf("invalid time from hg log: %q", out)
+ }
+
+ var tags []string
+ for _, tag := range f[3:] {
+ if tag != "tip" {
+ tags = append(tags, tag)
+ }
+ }
+ sort.Strings(tags)
+
+ info := &RevInfo{
+ Origin: &Origin{
+ Hash: hash,
+ },
+ Name: hash,
+ Short: ShortenSHA1(hash),
+ Time: time.Unix(t, 0).UTC(),
+ Version: version,
+ Tags: tags,
+ }
+ return info, nil
+}
+
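+ // bzrParseStat parses the header of 'bzr log --long --show-ids' output,
+ // extracting the revno and commit timestamp for rev.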
+func bzrParseStat(rev, out string) (*RevInfo, error) {
+ var revno int64
+ var tm time.Time
+ for _, line := range strings.Split(out, "\n") {
+ if line == "" || line[0] == ' ' || line[0] == '\t' {
+ // End of header, start of commit message.
+ break
+ }
+ if line[0] == '-' {
+ continue
+ }
+ before, after, found := strings.Cut(line, ":")
+ if !found {
+ // End of header, start of commit message.
+ break
+ }
+ key, val := before, strings.TrimSpace(after)
+ switch key {
+ case "revno":
+ if j := strings.Index(val, " "); j >= 0 {
+ val = val[:j]
+ }
+ i, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return nil, vcsErrorf("unexpected revno from bzr log: %q", line)
+ }
+ revno = i
+ case "timestamp":
+ j := strings.Index(val, " ")
+ if j < 0 {
+ return nil, vcsErrorf("unexpected timestamp from bzr log: %q", line)
+ }
+ t, err := time.Parse("2006-01-02 15:04:05 -0700", val[j+1:])
+ if err != nil {
+ return nil, vcsErrorf("unexpected timestamp from bzr log: %q", line)
+ }
+ tm = t.UTC()
+ }
+ }
+ if revno == 0 || tm.IsZero() {
+ return nil, vcsErrorf("unexpected response from bzr log: %q", out)
+ }
+
+ info := &RevInfo{
+ Name: strconv.FormatInt(revno, 10),
+ Short: fmt.Sprintf("%012d", revno),
+ Time: tm,
+ Version: rev,
+ }
+ return info, nil
+}
+
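+ // fossilParseStat parses the output of 'fossil info', extracting the commit
+ // hash and UTC time for rev.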
+func fossilParseStat(rev, out string) (*RevInfo, error) {
+ for _, line := range strings.Split(out, "\n") {
+ if strings.HasPrefix(line, "uuid:") || strings.HasPrefix(line, "hash:") {
+ f := strings.Fields(line)
+ if len(f) != 5 || len(f[1]) != 40 || f[4] != "UTC" {
+ return nil, vcsErrorf("unexpected response from fossil info: %q", line)
+ }
+ t, err := time.Parse(time.DateTime, f[2]+" "+f[3])
+ if err != nil {
+ return nil, vcsErrorf("unexpected response from fossil info: %q", line)
+ }
+ hash := f[1]
+ version := rev
+ if strings.HasPrefix(hash, version) {
+ version = hash // extend to full hash
+ }
+ info := &RevInfo{
+ Origin: &Origin{
+ Hash: hash,
+ },
+ Name: hash,
+ Short: ShortenSHA1(hash),
+ Time: t,
+ Version: version,
+ }
+ return info, nil
+ }
+ }
+ return nil, vcsErrorf("unexpected response from fossil info: %q", out)
+}
+
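+ // A limitedWriter writes at most N bytes to W; a write that would exceed the
+ // remaining limit is truncated and reports ErrLimitReached.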
+type limitedWriter struct {
+ W io.Writer
+ N int64
+ ErrLimitReached error
+}
+
+func (l *limitedWriter) Write(p []byte) (n int, err error) {
+ if l.N > 0 {
+ max := len(p)
+ if l.N < int64(max) {
+ max = int(l.N)
+ }
+ n, err = l.W.Write(p[:max])
+ l.N -= int64(n)
+ if err != nil || n >= len(p) {
+ return n, err
+ }
+ }
+
+ return n, l.ErrLimitReached
+}
diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go
new file mode 100644
index 0000000..8fe432a
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/coderepo.go
@@ -0,0 +1,1189 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfetch
+
+import (
+ "archive/zip"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modfetch/codehost"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
+ modzip "golang.org/x/mod/zip"
+)
+
+// A codeRepo implements modfetch.Repo using an underlying codehost.Repo.
+type codeRepo struct {
+ modPath string
+
+ // code is the repository containing this module.
+ code codehost.Repo
+ // codeRoot is the import path at the root of code.
+ codeRoot string
+ // codeDir is the directory (relative to root) at which we expect to find the module.
+ // If pathMajor is non-empty and codeRoot is not the full modPath,
+ // then we look in both codeDir and codeDir/pathMajor[1:].
+ codeDir string
+
+ // pathMajor is the suffix of modPath that indicates its major version,
+ // or the empty string if modPath is at major version 0 or 1.
+ //
+ // pathMajor is typically of the form "/vN", but possibly ".vN", or
+ // ".vN-unstable" for modules resolved using gopkg.in.
+ pathMajor string
+ // pathPrefix is the prefix of modPath that excludes pathMajor.
+ // It is used only for logging.
+ pathPrefix string
+
+ // pseudoMajor is the major version prefix to require when generating
+ // pseudo-versions for this module, derived from the module path. pseudoMajor
+ // is empty if the module path does not include a version suffix (that is,
+ // accepts either v0 or v1).
+ pseudoMajor string
+}
+
+// newCodeRepo returns a Repo that reads the source code for the module with the
+// given path, from the repo stored in code, with the root of the repo
+// containing the path given by codeRoot.
+func newCodeRepo(code codehost.Repo, codeRoot, path string) (Repo, error) {
+ if !hasPathPrefix(path, codeRoot) {
+ return nil, fmt.Errorf("mismatched repo: found %s for %s", codeRoot, path)
+ }
+ pathPrefix, pathMajor, ok := module.SplitPathVersion(path)
+ if !ok {
+ return nil, fmt.Errorf("invalid module path %q", path)
+ }
+ if codeRoot == path {
+ pathPrefix = path
+ }
+ pseudoMajor := module.PathMajorPrefix(pathMajor)
+
+ // Compute codeDir = bar, the subdirectory within the repo
+ // corresponding to the module root.
+ //
+ // At this point we might have:
+ // path = github.com/rsc/foo/bar/v2
+ // codeRoot = github.com/rsc/foo
+ // pathPrefix = github.com/rsc/foo/bar
+ // pathMajor = /v2
+ // pseudoMajor = v2
+ //
+ // which gives
+ // codeDir = bar
+ //
+ // We know that pathPrefix is a prefix of path, and codeRoot is a prefix of
+ // path, but codeRoot may or may not be a prefix of pathPrefix, because
+ // codeRoot may be the entire path (in which case codeDir should be empty).
+ // That occurs in two situations.
+ //
+ // One is when a go-import meta tag resolves the complete module path,
+ // including the pathMajor suffix:
+ // path = nanomsg.org/go/mangos/v2
+ // codeRoot = nanomsg.org/go/mangos/v2
+ // pathPrefix = nanomsg.org/go/mangos
+ // pathMajor = /v2
+ // pseudoMajor = v2
+ //
+ // The other is similar: for gopkg.in only, the major version is encoded
+ // with a dot rather than a slash, and thus can't be in a subdirectory.
+ // path = gopkg.in/yaml.v2
+ // codeRoot = gopkg.in/yaml.v2
+ // pathPrefix = gopkg.in/yaml
+ // pathMajor = .v2
+ // pseudoMajor = v2
+ //
+ codeDir := ""
+ if codeRoot != path {
+ if !hasPathPrefix(pathPrefix, codeRoot) {
+ return nil, fmt.Errorf("repository rooted at %s cannot contain module %s", codeRoot, path)
+ }
+ codeDir = strings.Trim(pathPrefix[len(codeRoot):], "/")
+ }
+
+ r := &codeRepo{
+ modPath: path,
+ code: code,
+ codeRoot: codeRoot,
+ codeDir: codeDir,
+ pathPrefix: pathPrefix,
+ pathMajor: pathMajor,
+ pseudoMajor: pseudoMajor,
+ }
+
+ return r, nil
+}
+
+func (r *codeRepo) ModulePath() string {
+ return r.modPath
+}
+
+func (r *codeRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error {
+ return r.code.CheckReuse(ctx, old, r.codeDir)
+}
+
+func (r *codeRepo) Versions(ctx context.Context, prefix string) (*Versions, error) {
+ // Special case: gopkg.in/macaroon-bakery.v2-unstable
+ // does not use the v2 tags (those are for macaroon-bakery.v2).
+ // It has no possible tags at all.
+ if strings.HasPrefix(r.modPath, "gopkg.in/") && strings.HasSuffix(r.modPath, "-unstable") {
+ return &Versions{}, nil
+ }
+
+ p := prefix
+ if r.codeDir != "" {
+ p = r.codeDir + "/" + p
+ }
+ tags, err := r.code.Tags(ctx, p)
+ if err != nil {
+ return nil, &module.ModuleError{
+ Path: r.modPath,
+ Err: err,
+ }
+ }
+ if tags.Origin != nil {
+ tags.Origin.Subdir = r.codeDir
+ }
+
+ var list, incompatible []string
+ for _, tag := range tags.List {
+ if !strings.HasPrefix(tag.Name, p) {
+ continue
+ }
+ v := tag.Name
+ if r.codeDir != "" {
+ v = v[len(r.codeDir)+1:]
+ }
+ // Note: ./codehost/codehost.go's isOriginTag knows about these conditions too.
+ // If these are relaxed, isOriginTag will need to be relaxed as well.
+ if v == "" || v != semver.Canonical(v) {
+ // Ignore non-canonical tags: Stat rewrites those to canonical
+ // pseudo-versions. Note that we compare against semver.Canonical here
+ // instead of module.CanonicalVersion: revToRev strips "+incompatible"
+ // suffixes before looking up tags, so a tag like "v2.0.0+incompatible"
+ // would not resolve at all. (The Go version string "v2.0.0+incompatible"
+ // refers to the "v2.0.0" version tag, which we handle below.)
+ continue
+ }
+ if module.IsPseudoVersion(v) {
+ // Ignore tags that look like pseudo-versions: Stat rewrites those
+ // unambiguously to the underlying commit, and tagToVersion drops them.
+ continue
+ }
+
+ if err := module.CheckPathMajor(v, r.pathMajor); err != nil {
+ if r.codeDir == "" && r.pathMajor == "" && semver.Major(v) > "v1" {
+ incompatible = append(incompatible, v)
+ }
+ continue
+ }
+
+ list = append(list, v)
+ }
+ semver.Sort(list)
+ semver.Sort(incompatible)
+
+ return r.appendIncompatibleVersions(ctx, tags.Origin, list, incompatible)
+}
+
+// appendIncompatibleVersions appends "+incompatible" versions to list if
+// appropriate, returning the final list.
+//
+// The incompatible list contains candidate versions without the '+incompatible'
+// prefix.
+//
+// Both list and incompatible must be sorted in semantic order.
+func (r *codeRepo) appendIncompatibleVersions(ctx context.Context, origin *codehost.Origin, list, incompatible []string) (*Versions, error) {
+ versions := &Versions{
+ Origin: origin,
+ List: list,
+ }
+ if len(incompatible) == 0 || r.pathMajor != "" {
+ // No +incompatible versions are possible, so no need to check them.
+ return versions, nil
+ }
+
+ versionHasGoMod := func(v string) (bool, error) {
+ _, err := r.code.ReadFile(ctx, v, "go.mod", codehost.MaxGoMod)
+ if err == nil {
+ return true, nil
+ }
+ if !os.IsNotExist(err) {
+ return false, &module.ModuleError{
+ Path: r.modPath,
+ Err: err,
+ }
+ }
+ return false, nil
+ }
+
+ if len(list) > 0 {
+ ok, err := versionHasGoMod(list[len(list)-1])
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ // The latest compatible version has a go.mod file, so assume that all
+ // subsequent versions do as well, and do not include any +incompatible
+ // versions. Even if we are wrong, the author clearly intends module
+ // consumers to be on the v0/v1 line instead of a higher +incompatible
+ // version. (See https://golang.org/issue/34189.)
+ //
+ // We know of at least two examples where this behavior is desired
+ // (github.com/russross/blackfriday@v2.0.0 and
+ // github.com/libp2p/go-libp2p@v6.0.23), and (as of 2019-10-29) have no
+ // concrete examples for which it is undesired.
+ return versions, nil
+ }
+ }
+
+ var (
+ lastMajor string
+ lastMajorHasGoMod bool
+ )
+ for i, v := range incompatible {
+ major := semver.Major(v)
+
+ if major != lastMajor {
+ rem := incompatible[i:]
+ j := sort.Search(len(rem), func(j int) bool {
+ return semver.Major(rem[j]) != major
+ })
+ latestAtMajor := rem[j-1]
+
+ var err error
+ lastMajor = major
+ lastMajorHasGoMod, err = versionHasGoMod(latestAtMajor)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if lastMajorHasGoMod {
+ // The latest release of this major version has a go.mod file, so it is
+ // not allowed as +incompatible. It would be confusing to include some
+ // minor versions of this major version as +incompatible but require
+ // semantic import versioning for others, so drop all +incompatible
+ // versions for this major version.
+ //
+ // If we're wrong about a minor version in the middle, users will still be
+ // able to 'go get' specific tags for that version explicitly — they just
+ // won't appear in 'go list' or as the results for queries with inequality
+ // bounds.
+ continue
+ }
+ versions.List = append(versions.List, v+"+incompatible")
+ }
+
+ return versions, nil
+}
+
+func (r *codeRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) {
+ if rev == "latest" {
+ return r.Latest(ctx)
+ }
+ codeRev := r.revToRev(rev)
+ info, err := r.code.Stat(ctx, codeRev)
+ if err != nil {
+ // Note: info may be non-nil to supply Origin for caching error.
+ var revInfo *RevInfo
+ if info != nil {
+ revInfo = &RevInfo{
+ Origin: info.Origin,
+ Version: rev,
+ }
+ }
+ return revInfo, &module.ModuleError{
+ Path: r.modPath,
+ Err: &module.InvalidVersionError{
+ Version: rev,
+ Err: err,
+ },
+ }
+ }
+ return r.convert(ctx, info, rev)
+}
+
+func (r *codeRepo) Latest(ctx context.Context) (*RevInfo, error) {
+ info, err := r.code.Latest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return r.convert(ctx, info, "")
+}
+
+// convert converts a version as reported by the code host to a version as
+// interpreted by the module system.
+//
+// If statVers is a valid module version, it is used for the Version field.
+// Otherwise, the Version is derived from the passed-in info and recent tags.
+func (r *codeRepo) convert(ctx context.Context, info *codehost.RevInfo, statVers string) (*RevInfo, error) {
+ // If this is a plain tag (no dir/ prefix)
+ // and the module path is unversioned,
+ // and if the underlying file tree has no go.mod,
+ // then allow using the tag with a +incompatible suffix.
+ //
+ // (If the version is +incompatible, then the go.mod file must not exist:
+ // +incompatible is not an ongoing opt-out from semantic import versioning.)
+ incompatibleOk := map[string]bool{}
+ canUseIncompatible := func(v string) bool {
+ if r.codeDir != "" || r.pathMajor != "" {
+ // A non-empty codeDir indicates a module within a subdirectory,
+ // which necessarily has a go.mod file indicating the module boundary.
+ // A non-empty pathMajor indicates a module path with a major-version
+ // suffix, which must match.
+ return false
+ }
+
+ ok, seen := incompatibleOk[""]
+ if !seen {
+ _, errGoMod := r.code.ReadFile(ctx, info.Name, "go.mod", codehost.MaxGoMod)
+ ok = (errGoMod != nil)
+ incompatibleOk[""] = ok
+ }
+ if !ok {
+ // A go.mod file exists at the repo root.
+ return false
+ }
+
+ // Per https://go.dev/issue/51324, previous versions of the 'go' command
+ // didn't always check for go.mod files in subdirectories, so if the user
+ // requests a +incompatible version explicitly, we should continue to allow
+ // it. Otherwise, if vN/go.mod exists, expect that release tags for that
+ // major version are intended for the vN module.
+ if v != "" && !strings.HasSuffix(statVers, "+incompatible") {
+ major := semver.Major(v)
+ ok, seen = incompatibleOk[major]
+ if !seen {
+ _, errGoModSub := r.code.ReadFile(ctx, info.Name, path.Join(major, "go.mod"), codehost.MaxGoMod)
+ ok = (errGoModSub != nil)
+ incompatibleOk[major] = ok
+ }
+ if !ok {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ // checkCanonical verifies that the canonical version v is compatible with the
+ // module path represented by r, adding a "+incompatible" suffix if needed.
+ //
+ // If statVers is also canonical, checkCanonical also verifies that v is
+ // either statVers or statVers with the added "+incompatible" suffix.
+ checkCanonical := func(v string) (*RevInfo, error) {
+ // If r.codeDir is non-empty, then the go.mod file must exist: the module
+ // author — not the module consumer — gets to decide how to carve up the repo
+ // into modules.
+ //
+ // Conversely, if the go.mod file exists, the module author — not the module
+ // consumer — gets to determine the module's path.
+ //
+ // r.findDir verifies both of these conditions. Execute it now so that
+ // r.Stat will correctly return a notExistError if the go.mod location or
+ // declared module path doesn't match.
+ _, _, _, err := r.findDir(ctx, v)
+ if err != nil {
+ // TODO: It would be nice to return an error like "not a module".
+ // Right now we return "missing go.mod", which is a little confusing.
+ return nil, &module.ModuleError{
+ Path: r.modPath,
+ Err: &module.InvalidVersionError{
+ Version: v,
+ Err: notExistError{err: err},
+ },
+ }
+ }
+
+ invalidf := func(format string, args ...any) error {
+ return &module.ModuleError{
+ Path: r.modPath,
+ Err: &module.InvalidVersionError{
+ Version: v,
+ Err: fmt.Errorf(format, args...),
+ },
+ }
+ }
+
+ // Add the +incompatible suffix if needed or requested explicitly, and
+ // verify that its presence or absence is appropriate for this version
+ // (which depends on whether it has an explicit go.mod file).
+
+ if v == strings.TrimSuffix(statVers, "+incompatible") {
+ v = statVers
+ }
+ base := strings.TrimSuffix(v, "+incompatible")
+ var errIncompatible error
+ if !module.MatchPathMajor(base, r.pathMajor) {
+ if canUseIncompatible(base) {
+ v = base + "+incompatible"
+ } else {
+ if r.pathMajor != "" {
+ errIncompatible = invalidf("module path includes a major version suffix, so major version must match")
+ } else {
+ errIncompatible = invalidf("module contains a go.mod file, so module path must match major version (%q)", path.Join(r.pathPrefix, semver.Major(v)))
+ }
+ }
+ } else if strings.HasSuffix(v, "+incompatible") {
+ errIncompatible = invalidf("+incompatible suffix not allowed: major version %s is compatible", semver.Major(v))
+ }
+
+ if statVers != "" && statVers == module.CanonicalVersion(statVers) {
+ // Since the caller-requested version is canonical, it would be very
+ // confusing to resolve it to anything but itself, possibly with a
+ // "+incompatible" suffix. Error out explicitly.
+ if statBase := strings.TrimSuffix(statVers, "+incompatible"); statBase != base {
+ return nil, &module.ModuleError{
+ Path: r.modPath,
+ Err: &module.InvalidVersionError{
+ Version: statVers,
+ Err: fmt.Errorf("resolves to version %v (%s is not a tag)", v, statBase),
+ },
+ }
+ }
+ }
+
+ if errIncompatible != nil {
+ return nil, errIncompatible
+ }
+
+ origin := info.Origin
+ if origin != nil {
+ o := *origin
+ origin = &o
+ origin.Subdir = r.codeDir
+ if module.IsPseudoVersion(v) && (v != statVers || !strings.HasPrefix(v, "v0.0.0-")) {
+ // Add tags that are relevant to pseudo-version calculation to origin.
+ prefix := r.codeDir
+ if prefix != "" {
+ prefix += "/"
+ }
+ if r.pathMajor != "" { // "/v2" or "/.v2"
+ prefix += r.pathMajor[1:] + "." // += "v2."
+ }
+ tags, err := r.code.Tags(ctx, prefix)
+ if err != nil {
+ return nil, err
+ }
+ origin.TagPrefix = tags.Origin.TagPrefix
+ origin.TagSum = tags.Origin.TagSum
+ }
+ }
+
+ return &RevInfo{
+ Origin: origin,
+ Name: info.Name,
+ Short: info.Short,
+ Time: info.Time,
+ Version: v,
+ }, nil
+ }
+
+ // Determine version.
+
+ if module.IsPseudoVersion(statVers) {
+ if err := r.validatePseudoVersion(ctx, info, statVers); err != nil {
+ return nil, err
+ }
+ return checkCanonical(statVers)
+ }
+
+ // statVers is not a pseudo-version, so we need to either resolve it to a
+ // canonical version or verify that it is already a canonical tag
+ // (not a branch).
+
+ // Derive or verify a version from a code repo tag.
+ // Tag must have a prefix matching codeDir.
+ tagPrefix := ""
+ if r.codeDir != "" {
+ tagPrefix = r.codeDir + "/"
+ }
+
+ isRetracted, err := r.retractedVersions(ctx)
+ if err != nil {
+ isRetracted = func(string) bool { return false }
+ }
+
+ // tagToVersion returns the version obtained by trimming tagPrefix from tag.
+ // If the tag is invalid, retracted, or a pseudo-version, tagToVersion returns
+ // an empty version.
+ tagToVersion := func(tag string) (v string, tagIsCanonical bool) {
+ if !strings.HasPrefix(tag, tagPrefix) {
+ return "", false
+ }
+ trimmed := tag[len(tagPrefix):]
+ // Tags that look like pseudo-versions would be confusing. Ignore them.
+ if module.IsPseudoVersion(tag) {
+ return "", false
+ }
+
+ v = semver.Canonical(trimmed) // Not module.CanonicalVersion: we don't want to pick up an explicit "+incompatible" suffix from the tag.
+ if v == "" || !strings.HasPrefix(trimmed, v) {
+ return "", false // Invalid or incomplete version (just vX or vX.Y).
+ }
+ if v == trimmed {
+ tagIsCanonical = true
+ }
+ return v, tagIsCanonical
+ }
+
+ // If the VCS gave us a valid version, use that.
+ if v, tagIsCanonical := tagToVersion(info.Version); tagIsCanonical {
+ if info, err := checkCanonical(v); err == nil {
+ return info, err
+ }
+ }
+
+ // Look through the tags on the revision for either a usable canonical version
+ // or an appropriate base for a pseudo-version.
+ var (
+ highestCanonical string
+ pseudoBase string
+ )
+ for _, pathTag := range info.Tags {
+ v, tagIsCanonical := tagToVersion(pathTag)
+ if statVers != "" && semver.Compare(v, statVers) == 0 {
+ // The tag is equivalent to the version requested by the user.
+ if tagIsCanonical {
+ // This tag is the canonical form of the requested version,
+ // not some other form with extra build metadata.
+ // Use this tag so that the resolved version will match exactly.
+ // (If it isn't actually allowed, we'll error out in checkCanonical.)
+ return checkCanonical(v)
+ } else {
+ // The user explicitly requested something equivalent to this tag. We
+ // can't use the version from the tag directly: since the tag is not
+ // canonical, it could be ambiguous. For example, tags v0.0.1+a and
+ // v0.0.1+b might both exist and refer to different revisions.
+ //
+ // The tag is otherwise valid for the module, so we can at least use it as
+ // the base of an unambiguous pseudo-version.
+ //
+ // If multiple tags match, tagToVersion will canonicalize them to the same
+ // base version.
+ pseudoBase = v
+ }
+ }
+ // Save the highest non-retracted canonical tag for the revision.
+ // If we don't find a better match, we'll use it as the canonical version.
+ if tagIsCanonical && semver.Compare(highestCanonical, v) < 0 && !isRetracted(v) {
+ if module.MatchPathMajor(v, r.pathMajor) || canUseIncompatible(v) {
+ highestCanonical = v
+ }
+ }
+ }
+
+ // If we found a valid canonical tag for the revision, return it.
+ // Even if we found a good pseudo-version base, a canonical version is better.
+ if highestCanonical != "" {
+ return checkCanonical(highestCanonical)
+ }
+
+ // Find the highest tagged version in the revision's history, subject to
+ // major version and +incompatible constraints. Use that version as the
+ // pseudo-version base so that the pseudo-version sorts higher. Ignore
+ // retracted versions.
+ tagAllowed := func(tag string) bool {
+ v, _ := tagToVersion(tag)
+ if v == "" {
+ return false
+ }
+ if !module.MatchPathMajor(v, r.pathMajor) && !canUseIncompatible(v) {
+ return false
+ }
+ return !isRetracted(v)
+ }
+ if pseudoBase == "" {
+ tag, err := r.code.RecentTag(ctx, info.Name, tagPrefix, tagAllowed)
+ if err != nil && !errors.Is(err, errors.ErrUnsupported) {
+ return nil, err
+ }
+ if tag != "" {
+ pseudoBase, _ = tagToVersion(tag)
+ }
+ }
+
+ return checkCanonical(module.PseudoVersion(r.pseudoMajor, pseudoBase, info.Time, info.Short))
+}
+
+// validatePseudoVersion checks that version has a major version compatible with
+// r.modPath and encodes a base version and commit metadata that agrees with
+// info.
+//
+// Note that verifying a nontrivial base version in particular may be somewhat
+// expensive: in order to do so, r.code.DescendsFrom will need to fetch at least
+// enough of the commit history to find a path between version and its base.
+// Fortunately, many pseudo-versions — such as those for untagged repositories —
+// have trivial bases!
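+//
+// For example, v0.0.0-20180219231006-80d85c5d4d17 encodes no base version at
+// all, so only the commit hash and timestamp in its suffix need to agree with
+// info.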
+func (r *codeRepo) validatePseudoVersion(ctx context.Context, info *codehost.RevInfo, version string) (err error) {
+ defer func() {
+ if err != nil {
+ if _, ok := err.(*module.ModuleError); !ok {
+ if _, ok := err.(*module.InvalidVersionError); !ok {
+ err = &module.InvalidVersionError{Version: version, Pseudo: true, Err: err}
+ }
+ err = &module.ModuleError{Path: r.modPath, Err: err}
+ }
+ }
+ }()
+
+ rev, err := module.PseudoVersionRev(version)
+ if err != nil {
+ return err
+ }
+ if rev != info.Short {
+ switch {
+ case strings.HasPrefix(rev, info.Short):
+ return fmt.Errorf("revision is longer than canonical (expected %s)", info.Short)
+ case strings.HasPrefix(info.Short, rev):
+ return fmt.Errorf("revision is shorter than canonical (expected %s)", info.Short)
+ default:
+ return fmt.Errorf("does not match short name of revision (expected %s)", info.Short)
+ }
+ }
+
+ t, err := module.PseudoVersionTime(version)
+ if err != nil {
+ return err
+ }
+ if !t.Equal(info.Time.Truncate(time.Second)) {
+ return fmt.Errorf("does not match version-control timestamp (expected %s)", info.Time.UTC().Format(module.PseudoVersionTimestampFormat))
+ }
+
+ tagPrefix := ""
+ if r.codeDir != "" {
+ tagPrefix = r.codeDir + "/"
+ }
+
+ // A pseudo-version should have a precedence just above its parent revisions,
+ // and no higher. Otherwise, it would be possible for library authors to "pin"
+ // dependency versions (and bypass the usual minimum version selection) by
+ // naming an extremely high pseudo-version rather than an accurate one.
+ //
+ // Moreover, if we allow a pseudo-version to use any arbitrary pre-release
+ // tag, we end up with infinitely many possible names for each commit. Each
+ // name consumes resources in the module cache and proxies, so we want to
+ // restrict them to a finite set under control of the module author.
+ //
+ // We address both of these issues by requiring the tag upon which the
+ // pseudo-version is based to refer to some ancestor of the revision. We
+ // prefer the highest such tag when constructing a new pseudo-version, but do
+ // not enforce that property when resolving existing pseudo-versions: we don't
+ // know when the parent tags were added, and the highest-tagged parent may not
+ // have existed when the pseudo-version was first resolved.
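+ //
+ // For example, module.PseudoVersionBase("v1.2.4-0.20220101000000-abcdef123456")
+ // reports "v1.2.3", and a tag equivalent to v1.2.3 must exist on the revision
+ // named in the suffix or on one of its ancestors.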
+ base, err := module.PseudoVersionBase(strings.TrimSuffix(version, "+incompatible"))
+ if err != nil {
+ return err
+ }
+ if base == "" {
+ if r.pseudoMajor == "" && semver.Major(version) == "v1" {
+ return fmt.Errorf("major version without preceding tag must be v0, not v1")
+ }
+ return nil
+ } else {
+ for _, tag := range info.Tags {
+ versionOnly := strings.TrimPrefix(tag, tagPrefix)
+ if versionOnly == base {
+ // The base version is canonical, so if the version from the tag is
+ // literally equal (not just equivalent), then the tag is canonical too.
+ //
+ // We allow pseudo-versions to be derived from non-canonical tags on the
+ // same commit, so that tags like "v1.1.0+some-metadata" resolve as
+ // close as possible to the canonical version ("v1.1.0") while still
+ // enforcing a total ordering ("v1.1.1-0.[…]" with a unique suffix).
+ //
+ // However, canonical tags already have a total ordering, so there is no
+ // reason not to use the canonical tag directly, and we know that the
+ // canonical tag must already exist because the pseudo-version is
+ // derived from it. In that case, referring to the revision by a
+ // pseudo-version derived from its own canonical tag is just confusing.
+ return fmt.Errorf("tag (%s) found on revision %s is already canonical, so should not be replaced with a pseudo-version derived from that tag", tag, rev)
+ }
+ }
+ }
+
+ tags, err := r.code.Tags(ctx, tagPrefix+base)
+ if err != nil {
+ return err
+ }
+
+ var lastTag string // Prefer to log some real tag rather than a canonically-equivalent base.
+ ancestorFound := false
+ for _, tag := range tags.List {
+ versionOnly := strings.TrimPrefix(tag.Name, tagPrefix)
+ if semver.Compare(versionOnly, base) == 0 {
+ lastTag = tag.Name
+ ancestorFound, err = r.code.DescendsFrom(ctx, info.Name, tag.Name)
+ if ancestorFound {
+ break
+ }
+ }
+ }
+
+ if lastTag == "" {
+ return fmt.Errorf("preceding tag (%s) not found", base)
+ }
+
+ if !ancestorFound {
+ if err != nil {
+ return err
+ }
+ rev, err := module.PseudoVersionRev(version)
+ if err != nil {
+ return fmt.Errorf("not a descendent of preceding tag (%s)", lastTag)
+ }
+ return fmt.Errorf("revision %s is not a descendent of preceding tag (%s)", rev, lastTag)
+ }
+ return nil
+}
+
+func (r *codeRepo) revToRev(rev string) string {
+ if semver.IsValid(rev) {
+ if module.IsPseudoVersion(rev) {
+ r, _ := module.PseudoVersionRev(rev)
+ return r
+ }
+ if semver.Build(rev) == "+incompatible" {
+ rev = rev[:len(rev)-len("+incompatible")]
+ }
+ if r.codeDir == "" {
+ return rev
+ }
+ return r.codeDir + "/" + rev
+ }
+ return rev
+}
+
+func (r *codeRepo) versionToRev(version string) (rev string, err error) {
+ if !semver.IsValid(version) {
+ return "", &module.ModuleError{
+ Path: r.modPath,
+ Err: &module.InvalidVersionError{
+ Version: version,
+ Err: errors.New("syntax error"),
+ },
+ }
+ }
+ return r.revToRev(version), nil
+}
+
+// findDir locates the directory within the repo containing the module.
+//
+// If r.pathMajor is non-empty, this can be either r.codeDir or — if a go.mod
+// file exists — r.codeDir/r.pathMajor[1:].
+func (r *codeRepo) findDir(ctx context.Context, version string) (rev, dir string, gomod []byte, err error) {
+ rev, err = r.versionToRev(version)
+ if err != nil {
+ return "", "", nil, err
+ }
+
+ // Load info about go.mod but delay consideration
+ // (except I/O error) until we rule out v2/go.mod.
+ file1 := path.Join(r.codeDir, "go.mod")
+ gomod1, err1 := r.code.ReadFile(ctx, rev, file1, codehost.MaxGoMod)
+ if err1 != nil && !os.IsNotExist(err1) {
+ return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.codeRoot, file1, rev, err1)
+ }
+ mpath1 := modfile.ModulePath(gomod1)
+ found1 := err1 == nil && (isMajor(mpath1, r.pathMajor) || r.canReplaceMismatchedVersionDueToBug(mpath1))
+
+ var file2 string
+ if r.pathMajor != "" && r.codeRoot != r.modPath && !strings.HasPrefix(r.pathMajor, ".") {
+ // Suppose pathMajor is "/v2".
+ // Either go.mod should claim v2 and v2/go.mod should not exist,
+ // or v2/go.mod should exist and claim v2. Not both.
+ // Note that we don't check the full path, just the major suffix,
+ // because of replacement modules. This might be a fork of
+ // the real module, found at a different path, usable only in
+ // a replace directive.
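+ // For example, with codeDir "" and pathMajor "/v2", file1 is "go.mod" at the
+ // repository root and file2 is "v2/go.mod".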
+ dir2 := path.Join(r.codeDir, r.pathMajor[1:])
+ file2 = path.Join(dir2, "go.mod")
+ gomod2, err2 := r.code.ReadFile(ctx, rev, file2, codehost.MaxGoMod)
+ if err2 != nil && !os.IsNotExist(err2) {
+ return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.codeRoot, file2, rev, err2)
+ }
+ mpath2 := modfile.ModulePath(gomod2)
+ found2 := err2 == nil && isMajor(mpath2, r.pathMajor)
+
+ if found1 && found2 {
+ return "", "", nil, fmt.Errorf("%s/%s and ...%s/go.mod both have ...%s module paths at revision %s", r.pathPrefix, file1, r.pathMajor, r.pathMajor, rev)
+ }
+ if found2 {
+ return rev, dir2, gomod2, nil
+ }
+ if err2 == nil {
+ if mpath2 == "" {
+ return "", "", nil, fmt.Errorf("%s/%s is missing module path at revision %s", r.codeRoot, file2, rev)
+ }
+ return "", "", nil, fmt.Errorf("%s/%s has non-...%s module path %q at revision %s", r.codeRoot, file2, r.pathMajor, mpath2, rev)
+ }
+ }
+
+ // Not v2/go.mod, so it's either go.mod or nothing. Which is it?
+ if found1 {
+ // Explicit go.mod with matching major version ok.
+ return rev, r.codeDir, gomod1, nil
+ }
+ if err1 == nil {
+ // Explicit go.mod with non-matching major version disallowed.
+ suffix := ""
+ if file2 != "" {
+ suffix = fmt.Sprintf(" (and ...%s/go.mod does not exist)", r.pathMajor)
+ }
+ if mpath1 == "" {
+ return "", "", nil, fmt.Errorf("%s is missing module path%s at revision %s", file1, suffix, rev)
+ }
+ if r.pathMajor != "" { // ".v1", ".v2" for gopkg.in
+ return "", "", nil, fmt.Errorf("%s has non-...%s module path %q%s at revision %s", file1, r.pathMajor, mpath1, suffix, rev)
+ }
+ if _, _, ok := module.SplitPathVersion(mpath1); !ok {
+ return "", "", nil, fmt.Errorf("%s has malformed module path %q%s at revision %s", file1, mpath1, suffix, rev)
+ }
+ return "", "", nil, fmt.Errorf("%s has post-%s module path %q%s at revision %s", file1, semver.Major(version), mpath1, suffix, rev)
+ }
+
+ if r.codeDir == "" && (r.pathMajor == "" || strings.HasPrefix(r.pathMajor, ".")) {
+ // Implicit go.mod at root of repo OK for v0/v1 and for gopkg.in.
+ return rev, "", nil, nil
+ }
+
+ // Implicit go.mod below root of repo or at v2+ disallowed.
+ // Be clear about possibility of using either location for v2+.
+ if file2 != "" {
+ return "", "", nil, fmt.Errorf("missing %s/go.mod and ...%s/go.mod at revision %s", r.pathPrefix, r.pathMajor, rev)
+ }
+ return "", "", nil, fmt.Errorf("missing %s/go.mod at revision %s", r.pathPrefix, rev)
+}
+
+// isMajor reports whether the versions allowed for mpath are compatible with
+// the major version(s) implied by pathMajor, or false if mpath has an invalid
+// version suffix.
+func isMajor(mpath, pathMajor string) bool {
+ if mpath == "" {
+ // If we don't have a path, we don't know what version(s) it is compatible with.
+ return false
+ }
+ _, mpathMajor, ok := module.SplitPathVersion(mpath)
+ if !ok {
+ // An invalid module path is not compatible with any version.
+ return false
+ }
+ if pathMajor == "" {
+ // All of the valid versions for a gopkg.in module that requires major
+ // version v0 or v1 are compatible with the "v0 or v1" implied by an empty
+ // pathMajor.
+ switch module.PathMajorPrefix(mpathMajor) {
+ case "", "v0", "v1":
+ return true
+ default:
+ return false
+ }
+ }
+ if mpathMajor == "" {
+ // Even if pathMajor is ".v0" or ".v1", we can't be sure that a module
+ // without a suffix is tagged appropriately. Besides, we don't expect clones
+ // of non-gopkg.in modules to have gopkg.in paths, so a non-empty,
+ // non-gopkg.in mpath is probably the wrong module for any such pathMajor
+ // anyway.
+ return false
+ }
+ // If both pathMajor and mpathMajor are non-empty, then we only care that they
+ // have the same major-version validation rules. A clone fetched via a /v2
+ // path might replace a module with path gopkg.in/foo.v2-unstable, and that's
+ // ok.
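+ //
+ // For example, pathMajor "/v2" and mpathMajor ".v2" both reduce to "v2" and
+ // are therefore considered compatible.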
+ return pathMajor[1:] == mpathMajor[1:]
+}
+
+// canReplaceMismatchedVersionDueToBug reports whether versions of r
+// could replace versions of mpath with otherwise-mismatched major versions
+// due to a historical bug in the Go command (golang.org/issue/34254).
+func (r *codeRepo) canReplaceMismatchedVersionDueToBug(mpath string) bool {
+ // The bug caused us to erroneously accept unversioned paths as replacements
+ // for versioned gopkg.in paths.
+ unversioned := r.pathMajor == ""
+ replacingGopkgIn := strings.HasPrefix(mpath, "gopkg.in/")
+ return unversioned && replacingGopkgIn
+}
+
+func (r *codeRepo) GoMod(ctx context.Context, version string) (data []byte, err error) {
+ if version != module.CanonicalVersion(version) {
+ return nil, fmt.Errorf("version %s is not canonical", version)
+ }
+
+ if module.IsPseudoVersion(version) {
+ // findDir ignores the metadata encoded in a pseudo-version,
+ // only using the revision at the end.
+ // Invoke Stat to verify the metadata explicitly so we don't return
+ // a bogus file for an invalid version.
+ _, err := r.Stat(ctx, version)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ rev, dir, gomod, err := r.findDir(ctx, version)
+ if err != nil {
+ return nil, err
+ }
+ if gomod != nil {
+ return gomod, nil
+ }
+ data, err = r.code.ReadFile(ctx, rev, path.Join(dir, "go.mod"), codehost.MaxGoMod)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return LegacyGoMod(r.modPath), nil
+ }
+ return nil, err
+ }
+ return data, nil
+}
+
+// LegacyGoMod generates a fake go.mod file for a module that doesn't have one.
+// The go.mod file contains a module directive and nothing else: no go version,
+// no requirements.
+//
+// We used to try to build a go.mod reflecting pre-existing
+// package management metadata files, but the conversion
+// was inherently imperfect (because those files don't have
+// exactly the same semantics as go.mod) and, when done
+// for dependencies in the middle of a build, impossible to
+// correct. So we stopped.
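+//
+// For example, LegacyGoMod("example.com/m") returns "module example.com/m\n".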
+func LegacyGoMod(modPath string) []byte {
+ return fmt.Appendf(nil, "module %s\n", modfile.AutoQuote(modPath))
+}
+
+func (r *codeRepo) modPrefix(rev string) string {
+ return r.modPath + "@" + rev
+}
+
+func (r *codeRepo) retractedVersions(ctx context.Context) (func(string) bool, error) {
+ vs, err := r.Versions(ctx, "")
+ if err != nil {
+ return nil, err
+ }
+ versions := vs.List
+
+ for i, v := range versions {
+ if strings.HasSuffix(v, "+incompatible") {
+ // We're looking for the latest release tag that may list retractions in a
+ // go.mod file. +incompatible versions necessarily do not, and they start
+ // at major version 2 — which is higher than any version that could
+ // validly contain a go.mod file.
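+ //
+ // For example, given [v1.0.0 v1.1.0 v2.0.0+incompatible], only v1.0.0 and
+ // v1.1.0 remain as candidates for the go.mod that lists retractions.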
+ versions = versions[:i]
+ break
+ }
+ }
+ if len(versions) == 0 {
+ return func(string) bool { return false }, nil
+ }
+
+ var highest string
+ for i := len(versions) - 1; i >= 0; i-- {
+ v := versions[i]
+ if semver.Prerelease(v) == "" {
+ highest = v
+ break
+ }
+ }
+ if highest == "" {
+ highest = versions[len(versions)-1]
+ }
+
+ data, err := r.GoMod(ctx, highest)
+ if err != nil {
+ return nil, err
+ }
+ f, err := modfile.ParseLax("go.mod", data, nil)
+ if err != nil {
+ return nil, err
+ }
+ retractions := make([]modfile.VersionInterval, 0, len(f.Retract))
+ for _, r := range f.Retract {
+ retractions = append(retractions, r.VersionInterval)
+ }
+
+ return func(v string) bool {
+ for _, r := range retractions {
+ if semver.Compare(r.Low, v) <= 0 && semver.Compare(v, r.High) <= 0 {
+ return true
+ }
+ }
+ return false
+ }, nil
+}
+
+func (r *codeRepo) Zip(ctx context.Context, dst io.Writer, version string) error {
+ if version != module.CanonicalVersion(version) {
+ return fmt.Errorf("version %s is not canonical", version)
+ }
+
+ if module.IsPseudoVersion(version) {
+ // findDir ignores the metadata encoded in a pseudo-version,
+ // only using the revision at the end.
+ // Invoke Stat to verify the metadata explicitly so we don't return
+ // a bogus file for an invalid version.
+ _, err := r.Stat(ctx, version)
+ if err != nil {
+ return err
+ }
+ }
+
+ rev, subdir, _, err := r.findDir(ctx, version)
+ if err != nil {
+ return err
+ }
+
+ if gomod, err := r.code.ReadFile(ctx, rev, filepath.Join(subdir, "go.mod"), codehost.MaxGoMod); err == nil {
+ goVers := gover.GoModLookup(gomod, "go")
+ if gover.Compare(goVers, gover.Local()) > 0 {
+ return &gover.TooNewError{What: r.ModulePath() + "@" + version, GoVersion: goVers}
+ }
+ } else if !errors.Is(err, fs.ErrNotExist) {
+ return err
+ }
+
+ dl, err := r.code.ReadZip(ctx, rev, subdir, codehost.MaxZipFile)
+ if err != nil {
+ return err
+ }
+ defer dl.Close()
+ subdir = strings.Trim(subdir, "/")
+
+ // Spool to local file.
+ f, err := os.CreateTemp("", "go-codehost-")
+ if err != nil {
+ dl.Close()
+ return err
+ }
+ defer os.Remove(f.Name())
+ defer f.Close()
+ maxSize := int64(codehost.MaxZipFile)
+ lr := &io.LimitedReader{R: dl, N: maxSize + 1}
+ if _, err := io.Copy(f, lr); err != nil {
+ dl.Close()
+ return err
+ }
+ dl.Close()
+ if lr.N <= 0 {
+ return fmt.Errorf("downloaded zip file too large")
+ }
+ size := (maxSize + 1) - lr.N
+ if _, err := f.Seek(0, 0); err != nil {
+ return err
+ }
+
+ // Translate from zip file we have to zip file we want.
+ zr, err := zip.NewReader(f, size)
+ if err != nil {
+ return err
+ }
+
+ var files []modzip.File
+ if subdir != "" {
+ subdir += "/"
+ }
+ haveLICENSE := false
+ topPrefix := ""
+ for _, zf := range zr.File {
+ if topPrefix == "" {
+ i := strings.Index(zf.Name, "/")
+ if i < 0 {
+ return fmt.Errorf("missing top-level directory prefix")
+ }
+ topPrefix = zf.Name[:i+1]
+ }
+ var name string
+ var found bool
+ if name, found = strings.CutPrefix(zf.Name, topPrefix); !found {
+ return fmt.Errorf("zip file contains more than one top-level directory")
+ }
+
+ if name, found = strings.CutPrefix(name, subdir); !found {
+ continue
+ }
+
+ if name == "" || strings.HasSuffix(name, "/") {
+ continue
+ }
+ files = append(files, zipFile{name: name, f: zf})
+ if name == "LICENSE" {
+ haveLICENSE = true
+ }
+ }
+
+ if !haveLICENSE && subdir != "" {
+ data, err := r.code.ReadFile(ctx, rev, "LICENSE", codehost.MaxLICENSE)
+ if err == nil {
+ files = append(files, dataFile{name: "LICENSE", data: data})
+ }
+ }
+
+ return modzip.Create(dst, module.Version{Path: r.modPath, Version: version}, files)
+}
+
+type zipFile struct {
+ name string
+ f *zip.File
+}
+
+func (f zipFile) Path() string { return f.name }
+func (f zipFile) Lstat() (fs.FileInfo, error) { return f.f.FileInfo(), nil }
+func (f zipFile) Open() (io.ReadCloser, error) { return f.f.Open() }
+
+type dataFile struct {
+ name string
+ data []byte
+}
+
+func (f dataFile) Path() string { return f.name }
+func (f dataFile) Lstat() (fs.FileInfo, error) { return dataFileInfo{f}, nil }
+func (f dataFile) Open() (io.ReadCloser, error) {
+ return io.NopCloser(bytes.NewReader(f.data)), nil
+}
+
+type dataFileInfo struct {
+ f dataFile
+}
+
+func (fi dataFileInfo) Name() string { return path.Base(fi.f.name) }
+func (fi dataFileInfo) Size() int64 { return int64(len(fi.f.data)) }
+func (fi dataFileInfo) Mode() fs.FileMode { return 0644 }
+func (fi dataFileInfo) ModTime() time.Time { return time.Time{} }
+func (fi dataFileInfo) IsDir() bool { return false }
+func (fi dataFileInfo) Sys() any { return nil }
+
+func (fi dataFileInfo) String() string {
+ return fs.FormatFileInfo(fi)
+}
+
+// hasPathPrefix reports whether the path s begins with the
+// elements in prefix.
+func hasPathPrefix(s, prefix string) bool {
+ switch {
+ default:
+ return false
+ case len(s) == len(prefix):
+ return s == prefix
+ case len(s) > len(prefix):
+ if prefix != "" && prefix[len(prefix)-1] == '/' {
+ return strings.HasPrefix(s, prefix)
+ }
+ return s[len(prefix)] == '/' && s[:len(prefix)] == prefix
+ }
+}
diff --git a/src/cmd/go/internal/modfetch/coderepo_test.go b/src/cmd/go/internal/modfetch/coderepo_test.go
new file mode 100644
index 0000000..aad7872
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/coderepo_test.go
@@ -0,0 +1,965 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfetch
+
+import (
+ "archive/zip"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "flag"
+ "hash"
+ "internal/testenv"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/modfetch/codehost"
+ "cmd/go/internal/vcweb/vcstest"
+
+ "golang.org/x/mod/sumdb/dirhash"
+)
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if err := testMain(m); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func testMain(m *testing.M) (err error) {
+
+ cfg.GOPROXY = "direct"
+
+ // The sum database is populated using a released version of the go command,
+ // but this test may include fixes for additional modules that previously
+ // could not be fetched. Since this test isn't executing any of the resolved
+ // code, bypass the sum database.
+ cfg.GOSUMDB = "off"
+
+ dir, err := os.MkdirTemp("", "gitrepo-test-")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if rmErr := os.RemoveAll(dir); err == nil {
+ err = rmErr
+ }
+ }()
+
+ cfg.GOMODCACHE = filepath.Join(dir, "modcache")
+ if err := os.Mkdir(cfg.GOMODCACHE, 0755); err != nil {
+ return err
+ }
+
+ srv, err := vcstest.NewServer()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if closeErr := srv.Close(); err == nil {
+ err = closeErr
+ }
+ }()
+
+ m.Run()
+ return nil
+}
+
+const (
+ vgotest1git = "github.com/rsc/vgotest1"
+ vgotest1hg = "vcs-test.golang.org/hg/vgotest1.hg"
+)
+
+var altVgotests = map[string]string{
+ "hg": vgotest1hg,
+}
+
+type codeRepoTest struct {
+ vcs string
+ path string
+ mpath string
+ rev string
+ err string
+ version string
+ name string
+ short string
+ time time.Time
+ gomod string
+ gomodErr string
+ zip []string
+ zipErr string
+ zipSum string
+ zipFileHash string
+}
+
+var codeRepoTests = []codeRepoTest{
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ rev: "v0.0.0",
+ version: "v0.0.0",
+ name: "80d85c5d4d17598a0e9055e7c175a32b415d6128",
+ short: "80d85c5d4d17",
+ time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC),
+ zip: []string{
+ "LICENSE",
+ "README.md",
+ "pkg/p.go",
+ },
+ zipSum: "h1:zVEjciLdlk/TPWCOyZo7k24T+tOKRQC+u8MKq/xS80I=",
+ zipFileHash: "738a00ddbfe8c329dce6b48e1f23c8e22a92db50f3cfb2653caa0d62676bc09c",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ rev: "v0.0.0-20180219231006-80d85c5d4d17",
+ version: "v0.0.0-20180219231006-80d85c5d4d17",
+ name: "80d85c5d4d17598a0e9055e7c175a32b415d6128",
+ short: "80d85c5d4d17",
+ time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC),
+ zip: []string{
+ "LICENSE",
+ "README.md",
+ "pkg/p.go",
+ },
+ zipSum: "h1:nOznk2xKsLGkTnXe0q9t1Ewt9jxK+oadtafSUqHM3Ec=",
+ zipFileHash: "bacb08f391e29d2eaaef8281b5c129ee6d890e608ee65877e0003c0181a766c8",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ rev: "v0.0.1-0.20180219231006-80d85c5d4d17",
+ err: `github.com/rsc/vgotest1@v0.0.1-0.20180219231006-80d85c5d4d17: invalid pseudo-version: tag (v0.0.0) found on revision 80d85c5d4d17 is already canonical, so should not be replaced with a pseudo-version derived from that tag`,
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ rev: "v1.0.0",
+ version: "v1.0.0",
+ name: "80d85c5d4d17598a0e9055e7c175a32b415d6128",
+ short: "80d85c5d4d17",
+ time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC),
+ zip: []string{
+ "LICENSE",
+ "README.md",
+ "pkg/p.go",
+ },
+ zipSum: "h1:e040hOoWGeuJLawDjK9DW6med+cz9FxMFYDMOVG8ctQ=",
+ zipFileHash: "74caab65cfbea427c341fa815f3bb0378681d8f0e3cf62a7f207014263ec7be3",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ rev: "v2.0.0",
+ version: "v2.0.0",
+ name: "45f53230a74ad275c7127e117ac46914c8126160",
+ short: "45f53230a74a",
+ time: time.Date(2018, 7, 19, 1, 21, 27, 0, time.UTC),
+ err: "missing github.com/rsc/vgotest1/go.mod and .../v2/go.mod at revision v2.0.0",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ rev: "80d85c5",
+ version: "v1.0.0",
+ name: "80d85c5d4d17598a0e9055e7c175a32b415d6128",
+ short: "80d85c5d4d17",
+ time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC),
+ zip: []string{
+ "LICENSE",
+ "README.md",
+ "pkg/p.go",
+ },
+ zipSum: "h1:e040hOoWGeuJLawDjK9DW6med+cz9FxMFYDMOVG8ctQ=",
+ zipFileHash: "74caab65cfbea427c341fa815f3bb0378681d8f0e3cf62a7f207014263ec7be3",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ rev: "mytag",
+ version: "v1.0.0",
+ name: "80d85c5d4d17598a0e9055e7c175a32b415d6128",
+ short: "80d85c5d4d17",
+ time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC),
+ zip: []string{
+ "LICENSE",
+ "README.md",
+ "pkg/p.go",
+ },
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ rev: "45f53230a",
+ version: "v2.0.0",
+ name: "45f53230a74ad275c7127e117ac46914c8126160",
+ short: "45f53230a74a",
+ time: time.Date(2018, 7, 19, 1, 21, 27, 0, time.UTC),
+ err: "missing github.com/rsc/vgotest1/go.mod and .../v2/go.mod at revision v2.0.0",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v54321",
+ rev: "80d85c5",
+ version: "v54321.0.0-20180219231006-80d85c5d4d17",
+ name: "80d85c5d4d17598a0e9055e7c175a32b415d6128",
+ short: "80d85c5d4d17",
+ time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC),
+ err: "missing github.com/rsc/vgotest1/go.mod and .../v54321/go.mod at revision 80d85c5d4d17",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/submod",
+ rev: "v1.0.0",
+ err: "unknown revision submod/v1.0.0",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/submod",
+ rev: "v1.0.3",
+ err: "unknown revision submod/v1.0.3",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/submod",
+ rev: "v1.0.4",
+ version: "v1.0.4",
+ name: "8afe2b2efed96e0880ecd2a69b98a53b8c2738b6",
+ short: "8afe2b2efed9",
+ time: time.Date(2018, 2, 19, 23, 12, 7, 0, time.UTC),
+ gomod: "module \"github.com/vgotest1/submod\" // submod/go.mod\n",
+ zip: []string{
+ "go.mod",
+ "pkg/p.go",
+ "LICENSE",
+ },
+ zipSum: "h1:iMsJ/9uQsk6MnZNnJK311f11QiSlmN92Q2aSjCywuJY=",
+ zipFileHash: "95801bfa69c5197ae809af512946d22f22850068527cd78100ae3f176bc8043b",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ rev: "v1.1.0",
+ version: "v1.1.0",
+ name: "b769f2de407a4db81af9c5de0a06016d60d2ea09",
+ short: "b769f2de407a",
+ time: time.Date(2018, 2, 19, 23, 13, 36, 0, time.UTC),
+ gomod: "module \"github.com/rsc/vgotest1\" // root go.mod\nrequire \"github.com/rsc/vgotest1/submod\" v1.0.5\n",
+ zip: []string{
+ "LICENSE",
+ "README.md",
+ "go.mod",
+ "pkg/p.go",
+ },
+ zipSum: "h1:M69k7q+8bQ+QUpHov45Z/NoR8rj3DsQJUnXLWvf01+Q=",
+ zipFileHash: "58af45fb248d320ea471f568e006379e2b8d71d6d1663f9b19b2e00fd9ac9265",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ rev: "v2.0.1",
+ version: "v2.0.1",
+ name: "ea65f87c8f52c15ea68f3bdd9925ef17e20d91e9",
+ short: "ea65f87c8f52",
+ time: time.Date(2018, 2, 19, 23, 14, 23, 0, time.UTC),
+ gomod: "module \"github.com/rsc/vgotest1/v2\" // root go.mod\n",
+ zipSum: "h1:QmgYy/zt+uoWhDpcsgrSVzYFvKtBEjl5zT/FRz9GTzA=",
+ zipFileHash: "1aedf1546d322a0121879ddfd6d0e8bfbd916d2cafbeb538ddb440e04b04b9ef",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ rev: "v2.0.3",
+ version: "v2.0.3",
+ name: "f18795870fb14388a21ef3ebc1d75911c8694f31",
+ short: "f18795870fb1",
+ time: time.Date(2018, 2, 19, 23, 16, 4, 0, time.UTC),
+ err: "github.com/rsc/vgotest1/v2/go.mod has non-.../v2 module path \"github.com/rsc/vgotest\" at revision v2.0.3",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ rev: "v2.0.4",
+ version: "v2.0.4",
+ name: "1f863feb76bc7029b78b21c5375644838962f88d",
+ short: "1f863feb76bc",
+ time: time.Date(2018, 2, 20, 0, 3, 38, 0, time.UTC),
+ err: "github.com/rsc/vgotest1/go.mod and .../v2/go.mod both have .../v2 module paths at revision v2.0.4",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ rev: "v2.0.5",
+ version: "v2.0.5",
+ name: "2f615117ce481c8efef46e0cc0b4b4dccfac8fea",
+ short: "2f615117ce48",
+ time: time.Date(2018, 2, 20, 0, 3, 59, 0, time.UTC),
+ gomod: "module \"github.com/rsc/vgotest1/v2\" // v2/go.mod\n",
+ zipSum: "h1:RIEb9q1SUSEQOzMn0zfl/LQxGFWlhWEAdeEguf1MLGU=",
+ zipFileHash: "7d92c2c328c5e9b0694101353705d5843746ec1d93a1e986d0da54c8a14dfe6d",
+ },
+ {
+ // redirect to github
+ vcs: "git",
+ path: "rsc.io/quote",
+ rev: "v1.0.0",
+ version: "v1.0.0",
+ name: "f488df80bcdbd3e5bafdc24ad7d1e79e83edd7e6",
+ short: "f488df80bcdb",
+ time: time.Date(2018, 2, 14, 0, 45, 20, 0, time.UTC),
+ gomod: "module \"rsc.io/quote\"\n",
+ zipSum: "h1:haUSojyo3j2M9g7CEUFG8Na09dtn7QKxvPGaPVQdGwM=",
+ zipFileHash: "5c08ba2c09a364f93704aaa780e7504346102c6ef4fe1333a11f09904a732078",
+ },
+ {
+ // redirect to static hosting proxy
+ vcs: "mod",
+ path: "swtch.com/testmod",
+ rev: "v1.0.0",
+ version: "v1.0.0",
+ // NO name or short - we intentionally ignore those in the proxy protocol
+ time: time.Date(1972, 7, 18, 12, 34, 56, 0, time.UTC),
+ gomod: "module \"swtch.com/testmod\"\n",
+ },
+ {
+ // redirect to googlesource
+ vcs: "git",
+ path: "golang.org/x/text",
+ rev: "4e4a3210bb",
+ version: "v0.3.1-0.20180208041248-4e4a3210bb54",
+ name: "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1",
+ short: "4e4a3210bb54",
+ time: time.Date(2018, 2, 8, 4, 12, 48, 0, time.UTC),
+ zipSum: "h1:Yxu6pHX9X2RECiuw/Q5/4uvajuaowck8zOFKXgbfNBk=",
+ zipFileHash: "ac2c165a5c10aa5a7545dea60a08e019270b982fa6c8bdcb5943931de64922fe",
+ },
+ {
+ vcs: "git",
+ path: "github.com/pkg/errors",
+ rev: "v0.8.0",
+ version: "v0.8.0",
+ name: "645ef00459ed84a119197bfb8d8205042c6df63d",
+ short: "645ef00459ed",
+ time: time.Date(2016, 9, 29, 1, 48, 1, 0, time.UTC),
+ zipSum: "h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=",
+ zipFileHash: "e4fa69ba057356614edbc1da881a7d3ebb688505be49f65965686bcb859e2fae",
+ },
+ {
+ // package in subdirectory - custom domain
+ // In general we can't reject these definitively in Lookup,
+ // but gopkg.in is special.
+ vcs: "git",
+ path: "gopkg.in/yaml.v2/abc",
+ err: "invalid module path \"gopkg.in/yaml.v2/abc\"",
+ },
+ {
+ // package in subdirectory - github
+ // Because it's a package, Stat should fail entirely.
+ vcs: "git",
+ path: "github.com/rsc/quote/buggy",
+ rev: "c4d4236f",
+ err: "missing github.com/rsc/quote/buggy/go.mod at revision c4d4236f9242",
+ },
+ {
+ vcs: "git",
+ path: "gopkg.in/yaml.v2",
+ rev: "d670f940",
+ version: "v2.0.0",
+ name: "d670f9405373e636a5a2765eea47fac0c9bc91a4",
+ short: "d670f9405373",
+ time: time.Date(2018, 1, 9, 11, 43, 31, 0, time.UTC),
+ gomod: "module gopkg.in/yaml.v2\n",
+ zipSum: "h1:uUkhRGrsEyx/laRdeS6YIQKIys8pg+lRSRdVMTYjivs=",
+ zipFileHash: "7b0a141b1b0b49772ab4eecfd11dfd6609a94a5e868cab04a3abb1861ffaa877",
+ },
+ {
+ vcs: "git",
+ path: "gopkg.in/check.v1",
+ rev: "20d25e280405",
+ version: "v1.0.0-20161208181325-20d25e280405",
+ name: "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec",
+ short: "20d25e280405",
+ time: time.Date(2016, 12, 8, 18, 13, 25, 0, time.UTC),
+ gomod: "module gopkg.in/check.v1\n",
+ zipSum: "h1:829vOVxxusYHC+IqBtkX5mbKtsY9fheQiQn0MZRVLfQ=",
+ zipFileHash: "9e7cb3f4f1e66d722306442b0dbe1f6f43d74d1736d54c510537bdfb1d6f432f",
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/go/mod/gitrepo1",
+ rev: "master",
+ version: "v1.2.4-annotated",
+ name: "ede458df7cd0fdca520df19a33158086a8a68e81",
+ short: "ede458df7cd0",
+ time: time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC),
+ gomod: "module vcs-test.golang.org/go/mod/gitrepo1\n",
+ zipSum: "h1:YJYZRsM9BHFTlVr8YADjT0cJH8uFIDtoc5NLiVqZEx8=",
+ zipFileHash: "c15e49d58b7a4c37966cbe5bc01a0330cd5f2927e990e1839bda1d407766d9c5",
+ },
+ {
+ vcs: "git",
+ path: "gopkg.in/natefinch/lumberjack.v2",
+ // This repo has a v2.1 tag.
+ // We only allow semver references to tags that are fully qualified, as in v2.1.0.
+ // Because we can't record v2.1.0 (the actual tag is v2.1), we record a pseudo-version
+ // instead, same as if the tag were any other non-version-looking string.
+ // We use a v2 pseudo-version here because of the .v2 in the path, not because
+ // of the v2 in the rev.
+ rev: "v2.1", // non-canonical semantic version turns into pseudo-version
+ version: "v2.0.0-20170531160350-a96e63847dc3",
+ name: "a96e63847dc3c67d17befa69c303767e2f84e54f",
+ short: "a96e63847dc3",
+ time: time.Date(2017, 5, 31, 16, 3, 50, 0, time.UTC),
+ gomod: "module gopkg.in/natefinch/lumberjack.v2\n",
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/go/v2module/v2",
+ rev: "v2.0.0",
+ version: "v2.0.0",
+ name: "203b91c896acd173aa719e4cdcb7d463c4b090fa",
+ short: "203b91c896ac",
+ time: time.Date(2019, 4, 3, 15, 52, 15, 0, time.UTC),
+ gomod: "module vcs-test.golang.org/go/v2module/v2\n\ngo 1.12\n",
+ zipSum: "h1:JItBZ+gwA5WvtZEGEbuDL4lUttGtLrs53lmdurq3bOg=",
+ zipFileHash: "9ea9ae1673cffcc44b7fdd3cc89953d68c102449b46c982dbf085e4f2e394da5",
+ },
+ {
+ // Git branch with a semver name, +incompatible version, and no go.mod file.
+ vcs: "git",
+ path: "vcs-test.golang.org/go/mod/gitrepo1",
+ rev: "v2.3.4+incompatible",
+ err: `resolves to version v2.0.1+incompatible (v2.3.4 is not a tag)`,
+ },
+ {
+ // Git branch with a semver name, matching go.mod file, and compatible version.
+ vcs: "git",
+ path: "vcs-test.golang.org/git/semver-branch.git",
+ rev: "v1.0.0",
+ err: `resolves to version v0.1.1-0.20220202191944-09c4d8f6938c (v1.0.0 is not a tag)`,
+ },
+ {
+ // Git branch with a semver name, matching go.mod file, and disallowed +incompatible version.
+ // The version/tag mismatch takes precedence over the +incompatible mismatch.
+ vcs: "git",
+ path: "vcs-test.golang.org/git/semver-branch.git",
+ rev: "v2.0.0+incompatible",
+ err: `resolves to version v0.1.0 (v2.0.0 is not a tag)`,
+ },
+ {
+ // Git branch with a semver name, matching go.mod file, and mismatched version.
+ // The version/tag mismatch takes precedence over the +incompatible mismatch.
+ vcs: "git",
+ path: "vcs-test.golang.org/git/semver-branch.git",
+ rev: "v2.0.0",
+ err: `resolves to version v0.1.0 (v2.0.0 is not a tag)`,
+ },
+ {
+ // v3.0.0-devel is the same as tag v4.0.0-beta.1, but v4.0.0-beta.1 would
+ // not be allowed because it is incompatible and a go.mod file exists.
+ // The error message should refer to a valid pseudo-version, not the
+ // unusable semver tag.
+ vcs: "git",
+ path: "vcs-test.golang.org/git/semver-branch.git",
+ rev: "v3.0.0-devel",
+ err: `resolves to version v0.1.1-0.20220203155313-d59622f6e4d7 (v3.0.0-devel is not a tag)`,
+ },
+
+ // If v2/go.mod exists, then we should prefer to match the "v2"
+ // pseudo-versions to the nested module, and resolve the module in the parent
+ // directory to only compatible versions.
+ //
+ // However (https://go.dev/issue/51324), previous versions of the 'go' command
+ // didn't always do so, so if the user explicitly requests a +incompatible
+ // version (as would be present in an existing go.mod file), we should
+ // continue to allow it.
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/v2sub.git",
+ rev: "80beb17a1603",
+ version: "v0.0.0-20220222205507-80beb17a1603",
+ name: "80beb17a16036f17a5aedd1bb5bd6d407b3c6dc5",
+ short: "80beb17a1603",
+ time: time.Date(2022, 2, 22, 20, 55, 7, 0, time.UTC),
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/v2sub.git",
+ rev: "v2.0.0",
+ err: `module contains a go.mod file, so module path must match major version ("vcs-test.golang.org/git/v2sub.git/v2")`,
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/v2sub.git",
+ rev: "v2.0.1-0.20220222205507-80beb17a1603",
+ err: `module contains a go.mod file, so module path must match major version ("vcs-test.golang.org/git/v2sub.git/v2")`,
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/v2sub.git",
+ rev: "v2.0.0+incompatible",
+ version: "v2.0.0+incompatible",
+ name: "5fcd3eaeeb391d399f562fd45a50dac9fc34ae8b",
+ short: "5fcd3eaeeb39",
+ time: time.Date(2022, 2, 22, 20, 53, 33, 0, time.UTC),
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/v2sub.git",
+ rev: "v2.0.1-0.20220222205507-80beb17a1603+incompatible",
+ version: "v2.0.1-0.20220222205507-80beb17a1603+incompatible",
+ name: "80beb17a16036f17a5aedd1bb5bd6d407b3c6dc5",
+ short: "80beb17a1603",
+ time: time.Date(2022, 2, 22, 20, 55, 7, 0, time.UTC),
+ },
+
+ // A version tag with explicit build metadata is valid but not canonical.
+ // It should resolve to a pseudo-version based on the same tag.
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/odd-tags.git",
+ rev: "v0.1.0+build-metadata",
+ version: "v0.1.1-0.20220223184835-9d863d525bbf",
+ name: "9d863d525bbfcc8eda09364738c4032393711a56",
+ short: "9d863d525bbf",
+ time: time.Date(2022, 2, 23, 18, 48, 35, 0, time.UTC),
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/odd-tags.git",
+ rev: "9d863d525bbf",
+ version: "v0.1.1-0.20220223184835-9d863d525bbf",
+ name: "9d863d525bbfcc8eda09364738c4032393711a56",
+ short: "9d863d525bbf",
+ time: time.Date(2022, 2, 23, 18, 48, 35, 0, time.UTC),
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/odd-tags.git",
+ rev: "latest",
+ version: "v0.1.1-0.20220223184835-9d863d525bbf",
+ name: "9d863d525bbfcc8eda09364738c4032393711a56",
+ short: "9d863d525bbf",
+ time: time.Date(2022, 2, 23, 18, 48, 35, 0, time.UTC),
+ },
+
+ // A version tag with an erroneous "+incompatible" suffix should resolve using
+ // only the prefix before the "+incompatible" suffix, not the "+incompatible"
+ // tag itself. (Otherwise, we would potentially have two different commits
+ // both named "v2.0.0+incompatible".) However, the tag is still valid semver
+ // and can still be used as the base for an unambiguous pseudo-version.
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/odd-tags.git",
+ rev: "v2.0.0+incompatible",
+ err: `unknown revision v2.0.0`,
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/odd-tags.git",
+ rev: "12d19af20458",
+ version: "v2.0.1-0.20220223184802-12d19af20458+incompatible",
+ name: "12d19af204585b0db3d2a876ceddf5b9323f5a4a",
+ short: "12d19af20458",
+ time: time.Date(2022, 2, 23, 18, 48, 2, 0, time.UTC),
+ },
+
+ // Similarly, a pseudo-version must resolve to the named commit, even if a tag
+ // matching that pseudo-version is present on a *different* commit.
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/odd-tags.git",
+ rev: "v3.0.0-20220223184802-12d19af20458",
+ version: "v3.0.0-20220223184802-12d19af20458+incompatible",
+ name: "12d19af204585b0db3d2a876ceddf5b9323f5a4a",
+ short: "12d19af20458",
+ time: time.Date(2022, 2, 23, 18, 48, 2, 0, time.UTC),
+ },
+}
+
+func TestCodeRepo(t *testing.T) {
+ testenv.MustHaveExternalNetwork(t)
+ tmpdir := t.TempDir()
+
+ for _, tt := range codeRepoTests {
+ f := func(tt codeRepoTest) func(t *testing.T) {
+ return func(t *testing.T) {
+ if strings.Contains(tt.path, "gopkg.in") {
+ testenv.SkipFlaky(t, 54503)
+ }
+
+ t.Parallel()
+ if tt.vcs != "mod" {
+ testenv.MustHaveExecPath(t, tt.vcs)
+ }
+ ctx := context.Background()
+
+ repo := Lookup(ctx, "direct", tt.path)
+
+ if tt.mpath == "" {
+ tt.mpath = tt.path
+ }
+ if mpath := repo.ModulePath(); mpath != tt.mpath {
+ t.Errorf("repo.ModulePath() = %q, want %q", mpath, tt.mpath)
+ }
+
+ info, err := repo.Stat(ctx, tt.rev)
+ if err != nil {
+ if tt.err != "" {
+ if !strings.Contains(err.Error(), tt.err) {
+ t.Fatalf("repoStat(%q): %v, wanted %q", tt.rev, err, tt.err)
+ }
+ return
+ }
+ t.Fatalf("repo.Stat(%q): %v", tt.rev, err)
+ }
+ if tt.err != "" {
+ t.Errorf("repo.Stat(%q): success, wanted error", tt.rev)
+ }
+ if info.Version != tt.version {
+ t.Errorf("info.Version = %q, want %q", info.Version, tt.version)
+ }
+ if info.Name != tt.name {
+ t.Errorf("info.Name = %q, want %q", info.Name, tt.name)
+ }
+ if info.Short != tt.short {
+ t.Errorf("info.Short = %q, want %q", info.Short, tt.short)
+ }
+ if !info.Time.Equal(tt.time) {
+ t.Errorf("info.Time = %v, want %v", info.Time, tt.time)
+ }
+
+ if tt.gomod != "" || tt.gomodErr != "" {
+ data, err := repo.GoMod(ctx, tt.version)
+ if err != nil && tt.gomodErr == "" {
+ t.Errorf("repo.GoMod(%q): %v", tt.version, err)
+ } else if err != nil && tt.gomodErr != "" {
+ if err.Error() != tt.gomodErr {
+ t.Errorf("repo.GoMod(%q): %v, want %q", tt.version, err, tt.gomodErr)
+ }
+ } else if tt.gomodErr != "" {
+ t.Errorf("repo.GoMod(%q) = %q, want error %q", tt.version, data, tt.gomodErr)
+ } else if string(data) != tt.gomod {
+ t.Errorf("repo.GoMod(%q) = %q, want %q", tt.version, data, tt.gomod)
+ }
+ }
+
+ needHash := !testing.Short() && (tt.zipFileHash != "" || tt.zipSum != "")
+ if tt.zip != nil || tt.zipErr != "" || needHash {
+ f, err := os.CreateTemp(tmpdir, tt.version+".zip.")
+ if err != nil {
+ t.Fatalf("os.CreateTemp: %v", err)
+ }
+ zipfile := f.Name()
+ defer func() {
+ f.Close()
+ os.Remove(zipfile)
+ }()
+
+ var w io.Writer
+ var h hash.Hash
+ if needHash {
+ h = sha256.New()
+ w = io.MultiWriter(f, h)
+ } else {
+ w = f
+ }
+ err = repo.Zip(ctx, w, tt.version)
+ f.Close()
+ if err != nil {
+ if tt.zipErr != "" {
+ if err.Error() == tt.zipErr {
+ return
+ }
+ t.Fatalf("repo.Zip(%q): %v, want error %q", tt.version, err, tt.zipErr)
+ }
+ t.Fatalf("repo.Zip(%q): %v", tt.version, err)
+ }
+ if tt.zipErr != "" {
+ t.Errorf("repo.Zip(%q): success, want error %q", tt.version, tt.zipErr)
+ }
+
+ if tt.zip != nil {
+ prefix := tt.path + "@" + tt.version + "/"
+ z, err := zip.OpenReader(zipfile)
+ if err != nil {
+ t.Fatalf("open zip %s: %v", zipfile, err)
+ }
+ var names []string
+ for _, file := range z.File {
+ if !strings.HasPrefix(file.Name, prefix) {
+ t.Errorf("zip entry %v does not start with prefix %v", file.Name, prefix)
+ continue
+ }
+ names = append(names, file.Name[len(prefix):])
+ }
+ z.Close()
+ if !reflect.DeepEqual(names, tt.zip) {
+ t.Fatalf("zip = %v\nwant %v\n", names, tt.zip)
+ }
+ }
+
+ if needHash {
+ sum, err := dirhash.HashZip(zipfile, dirhash.Hash1)
+ if err != nil {
+ t.Errorf("repo.Zip(%q): %v", tt.version, err)
+ } else if sum != tt.zipSum {
+ t.Errorf("repo.Zip(%q): got file with sum %q, want %q", tt.version, sum, tt.zipSum)
+ } else if zipFileHash := hex.EncodeToString(h.Sum(nil)); zipFileHash != tt.zipFileHash {
+ t.Errorf("repo.Zip(%q): got file with hash %q, want %q (but content has correct sum)", tt.version, zipFileHash, tt.zipFileHash)
+ }
+ }
+ }
+ }
+ }
+ t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.rev, f(tt))
+ if strings.HasPrefix(tt.path, vgotest1git) {
+ for vcs, alt := range altVgotests {
+ altTest := tt
+ altTest.vcs = vcs
+ altTest.path = alt + strings.TrimPrefix(altTest.path, vgotest1git)
+ if strings.HasPrefix(altTest.mpath, vgotest1git) {
+ altTest.mpath = alt + strings.TrimPrefix(altTest.mpath, vgotest1git)
+ }
+ var m map[string]string
+ if alt == vgotest1hg {
+ m = hgmap
+ }
+ altTest.version = remap(altTest.version, m)
+ altTest.name = remap(altTest.name, m)
+ altTest.short = remap(altTest.short, m)
+ altTest.rev = remap(altTest.rev, m)
+ altTest.err = remap(altTest.err, m)
+ altTest.gomodErr = remap(altTest.gomodErr, m)
+ altTest.zipErr = remap(altTest.zipErr, m)
+ altTest.zipSum = ""
+ altTest.zipFileHash = ""
+ t.Run(strings.ReplaceAll(altTest.path, "/", "_")+"/"+altTest.rev, f(altTest))
+ }
+ }
+ }
+}
+
+var hgmap = map[string]string{
+ "github.com/rsc/vgotest1": "vcs-test.golang.org/hg/vgotest1.hg",
+ "f18795870fb14388a21ef3ebc1d75911c8694f31": "a9ad6d1d14eb544f459f446210c7eb3b009807c6",
+ "ea65f87c8f52c15ea68f3bdd9925ef17e20d91e9": "f1fc0f22021b638d073d31c752847e7bf385def7",
+ "b769f2de407a4db81af9c5de0a06016d60d2ea09": "92c7eb888b4fac17f1c6bd2e1060a1b881a3b832",
+ "8afe2b2efed96e0880ecd2a69b98a53b8c2738b6": "4e58084d459ae7e79c8c2264d0e8e9a92eb5cd44",
+ "2f615117ce481c8efef46e0cc0b4b4dccfac8fea": "879ea98f7743c8eff54f59a918f3a24123d1cf46",
+ "80d85c5d4d17598a0e9055e7c175a32b415d6128": "e125018e286a4b09061079a81e7b537070b7ff71",
+ "1f863feb76bc7029b78b21c5375644838962f88d": "bf63880162304a9337477f3858f5b7e255c75459",
+ "45f53230a74ad275c7127e117ac46914c8126160": "814fce58e83abd5bf2a13892e0b0e1198abefcd4",
+}
+
+func remap(name string, m map[string]string) string {
+ if m[name] != "" {
+ return m[name]
+ }
+ if codehost.AllHex(name) {
+ for k, v := range m {
+ if strings.HasPrefix(k, name) {
+ return v[:len(name)]
+ }
+ }
+ }
+ for k, v := range m {
+ name = strings.ReplaceAll(name, k, v)
+ if codehost.AllHex(k) {
+ name = strings.ReplaceAll(name, k[:12], v[:12])
+ }
+ }
+ return name
+}
+
+var codeRepoVersionsTests = []struct {
+ vcs string
+ path string
+ prefix string
+ versions []string
+}{
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ versions: []string{"v0.0.0", "v0.0.1", "v1.0.0", "v1.0.1", "v1.0.2", "v1.0.3", "v1.1.0"},
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ prefix: "v1.0",
+ versions: []string{"v1.0.0", "v1.0.1", "v1.0.2", "v1.0.3"},
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ versions: []string{"v2.0.0", "v2.0.1", "v2.0.2", "v2.0.3", "v2.0.4", "v2.0.5", "v2.0.6"},
+ },
+ {
+ vcs: "mod",
+ path: "swtch.com/testmod",
+ versions: []string{"v1.0.0", "v1.1.1"},
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/odd-tags.git",
+ versions: nil,
+ },
+}
+
+func TestCodeRepoVersions(t *testing.T) {
+ testenv.MustHaveExternalNetwork(t)
+
+ for _, tt := range codeRepoVersionsTests {
+ tt := tt
+ t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) {
+ if strings.Contains(tt.path, "gopkg.in") {
+ testenv.SkipFlaky(t, 54503)
+ }
+
+ t.Parallel()
+ if tt.vcs != "mod" {
+ testenv.MustHaveExecPath(t, tt.vcs)
+ }
+ ctx := context.Background()
+
+ repo := Lookup(ctx, "direct", tt.path)
+ list, err := repo.Versions(ctx, tt.prefix)
+ if err != nil {
+ t.Fatalf("Versions(%q): %v", tt.prefix, err)
+ }
+ if !reflect.DeepEqual(list.List, tt.versions) {
+ t.Fatalf("Versions(%q):\nhave %v\nwant %v", tt.prefix, list, tt.versions)
+ }
+ })
+ }
+}
+
+var latestTests = []struct {
+ vcs string
+ path string
+ version string
+ err string
+}{
+ {
+ vcs: "git",
+ path: "github.com/rsc/empty",
+ err: "no commits",
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ err: `github.com/rsc/vgotest1@v0.0.0-20180219223237-a08abb797a67: invalid version: go.mod has post-v0 module path "github.com/vgotest1/v2" at revision a08abb797a67`,
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ err: `github.com/rsc/vgotest1/v2@v2.0.0-20180219223237-a08abb797a67: invalid version: github.com/rsc/vgotest1/go.mod and .../v2/go.mod both have .../v2 module paths at revision a08abb797a67`,
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/subdir",
+ err: "github.com/rsc/vgotest1/subdir@v0.0.0-20180219223237-a08abb797a67: invalid version: missing github.com/rsc/vgotest1/subdir/go.mod at revision a08abb797a67",
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/commit-after-tag.git",
+ version: "v1.0.1-0.20190715211727-b325d8217783",
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/no-tags.git",
+ version: "v0.0.0-20190715212047-e706ba1d9f6d",
+ },
+ {
+ vcs: "mod",
+ path: "swtch.com/testmod",
+ version: "v1.1.1",
+ },
+}
+
+func TestLatest(t *testing.T) {
+ testenv.MustHaveExternalNetwork(t)
+
+ for _, tt := range latestTests {
+ name := strings.ReplaceAll(tt.path, "/", "_")
+ t.Run(name, func(t *testing.T) {
+ tt := tt
+ t.Parallel()
+ if tt.vcs != "mod" {
+ testenv.MustHaveExecPath(t, tt.vcs)
+ }
+ ctx := context.Background()
+
+ repo := Lookup(ctx, "direct", tt.path)
+ info, err := repo.Latest(ctx)
+ if err != nil {
+ if tt.err != "" {
+ if err.Error() == tt.err {
+ return
+ }
+ t.Fatalf("Latest(): %v, want %q", err, tt.err)
+ }
+ t.Fatalf("Latest(): %v", err)
+ }
+ if tt.err != "" {
+ t.Fatalf("Latest() = %v, want error %q", info.Version, tt.err)
+ }
+ if info.Version != tt.version {
+ t.Fatalf("Latest() = %v, want %v", info.Version, tt.version)
+ }
+ })
+ }
+}
+
+// fixedTagsRepo is a fake codehost.Repo that returns a fixed list of tags
+type fixedTagsRepo struct {
+ tags []string
+ codehost.Repo
+}
+
+func (ch *fixedTagsRepo) Tags(ctx context.Context, prefix string) (*codehost.Tags, error) {
+ tags := &codehost.Tags{}
+ for _, t := range ch.tags {
+ tags.List = append(tags.List, codehost.Tag{Name: t})
+ }
+ return tags, nil
+}
+
+func TestNonCanonicalSemver(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+
+ root := "golang.org/x/issue24476"
+ ch := &fixedTagsRepo{
+ tags: []string{
+ "", "huh?", "1.0.1",
+ // what about "version 1 dot dogcow"?
+ "v1.🐕.🐄",
+ "v1", "v0.1",
+ // and one normal one that should pass through
+ "v1.0.1",
+ },
+ }
+
+ cr, err := newCodeRepo(ch, root, root)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ v, err := cr.Versions(ctx, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(v.List) != 1 || v.List[0] != "v1.0.1" {
+ t.Fatal("unexpected versions returned:", v)
+ }
+}
diff --git a/src/cmd/go/internal/modfetch/fetch.go b/src/cmd/go/internal/modfetch/fetch.go
new file mode 100644
index 0000000..4279686
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/fetch.go
@@ -0,0 +1,998 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfetch
+
+import (
+ "archive/zip"
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/par"
+ "cmd/go/internal/robustio"
+ "cmd/go/internal/str"
+ "cmd/go/internal/trace"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/sumdb/dirhash"
+ modzip "golang.org/x/mod/zip"
+)
+
+var downloadCache par.ErrCache[module.Version, string] // version → directory
+
+var ErrToolchain = errors.New("internal error: invalid operation on toolchain module")
+
+// Download downloads the specific module version to the
+// local download cache and returns the name of the directory
+// corresponding to the root of the module's file tree.
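+//
+// For example, downloading rsc.io/quote at v1.5.2 typically yields a directory
+// such as $GOMODCACHE/rsc.io/quote@v1.5.2.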
+func Download(ctx context.Context, mod module.Version) (dir string, err error) {
+ if gover.IsToolchain(mod.Path) {
+ return "", ErrToolchain
+ }
+ if err := checkCacheDir(ctx); err != nil {
+ base.Fatal(err)
+ }
+
+ // The par.Cache here avoids duplicate work.
+ return downloadCache.Do(mod, func() (string, error) {
+ dir, err := download(ctx, mod)
+ if err != nil {
+ return "", err
+ }
+ checkMod(ctx, mod)
+
+ // If go.mod exists (not an old legacy module), check version is not too new.
+ if data, err := os.ReadFile(filepath.Join(dir, "go.mod")); err == nil {
+ goVersion := gover.GoModLookup(data, "go")
+ if gover.Compare(goVersion, gover.Local()) > 0 {
+ return "", &gover.TooNewError{What: mod.String(), GoVersion: goVersion}
+ }
+ } else if !errors.Is(err, fs.ErrNotExist) {
+ return "", err
+ }
+
+ return dir, nil
+ })
+}
+
+func download(ctx context.Context, mod module.Version) (dir string, err error) {
+ ctx, span := trace.StartSpan(ctx, "modfetch.download "+mod.String())
+ defer span.Done()
+
+ dir, err = DownloadDir(ctx, mod)
+ if err == nil {
+ // The directory has already been completely extracted (no .partial file exists).
+ return dir, nil
+ } else if dir == "" || !errors.Is(err, fs.ErrNotExist) {
+ return "", err
+ }
+
+ // To avoid cluttering the cache with extraneous files,
+ // DownloadZip uses the same lockfile as Download.
+ // Invoke DownloadZip before locking the file.
+ zipfile, err := DownloadZip(ctx, mod)
+ if err != nil {
+ return "", err
+ }
+
+ unlock, err := lockVersion(ctx, mod)
+ if err != nil {
+ return "", err
+ }
+ defer unlock()
+
+ ctx, span = trace.StartSpan(ctx, "unzip "+zipfile)
+ defer span.Done()
+
+ // Check whether the directory was populated while we were waiting on the lock.
+ _, dirErr := DownloadDir(ctx, mod)
+ if dirErr == nil {
+ return dir, nil
+ }
+ _, dirExists := dirErr.(*DownloadDirPartialError)
+
+ // Clean up any remaining temporary directories created by old versions
+ // (before 1.16), as well as partially extracted directories (indicated by
+ // DownloadDirPartialError, usually because of a .partial file). This is only
+ // safe to do because the lock file ensures that their writers are no longer
+ // active.
+ parentDir := filepath.Dir(dir)
+ tmpPrefix := filepath.Base(dir) + ".tmp-"
+ if old, err := filepath.Glob(filepath.Join(str.QuoteGlob(parentDir), str.QuoteGlob(tmpPrefix)+"*")); err == nil {
+ for _, path := range old {
+ RemoveAll(path) // best effort
+ }
+ }
+ if dirExists {
+ if err := RemoveAll(dir); err != nil {
+ return "", err
+ }
+ }
+
+ partialPath, err := CachePath(ctx, mod, "partial")
+ if err != nil {
+ return "", err
+ }
+
+ // Extract the module zip directory at its final location.
+ //
+ // To prevent other processes from reading the directory if we crash,
+ // create a .partial file before extracting the directory, and delete
+ // the .partial file afterward (all while holding the lock).
+ //
+ // Before Go 1.16, we extracted to a temporary directory with a random name
+ // then renamed it into place with os.Rename. On Windows, this failed with
+ // ERROR_ACCESS_DENIED when another process (usually an anti-virus scanner)
+ // opened files in the temporary directory.
+ //
+ // Go 1.14.2 and higher respect .partial files. Older versions may use
+ // partially extracted directories. 'go mod verify' can detect this,
+ // and 'go clean -modcache' can fix it.
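+ //
+ // For example, while example.com/m@v1.0.0 is being extracted, an empty
+ // v1.0.0.partial file sits alongside the downloaded zip in the cache; it is
+ // removed only once the module directory is fully populated.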
+ if err := os.MkdirAll(parentDir, 0777); err != nil {
+ return "", err
+ }
+ if err := os.WriteFile(partialPath, nil, 0666); err != nil {
+ return "", err
+ }
+ if err := modzip.Unzip(dir, mod, zipfile); err != nil {
+ fmt.Fprintf(os.Stderr, "-> %s\n", err)
+ if rmErr := RemoveAll(dir); rmErr == nil {
+ os.Remove(partialPath)
+ }
+ return "", err
+ }
+ if err := os.Remove(partialPath); err != nil {
+ return "", err
+ }
+
+ if !cfg.ModCacheRW {
+ makeDirsReadOnly(dir)
+ }
+ return dir, nil
+}
+
+var downloadZipCache par.ErrCache[module.Version, string]
+
+// DownloadZip downloads the specific module version to the
+// local zip cache and returns the name of the zip file.
+func DownloadZip(ctx context.Context, mod module.Version) (zipfile string, err error) {
+ // The par.Cache here avoids duplicate work.
+ return downloadZipCache.Do(mod, func() (string, error) {
+ zipfile, err := CachePath(ctx, mod, "zip")
+ if err != nil {
+ return "", err
+ }
+ ziphashfile := zipfile + "hash"
+
+ // Return without locking if the zip and ziphash files exist.
+ if _, err := os.Stat(zipfile); err == nil {
+ if _, err := os.Stat(ziphashfile); err == nil {
+ return zipfile, nil
+ }
+ }
+
+ // The zip or ziphash file does not exist. Acquire the lock and create them.
+ if cfg.CmdName != "mod download" {
+ vers := mod.Version
+ if mod.Path == "golang.org/toolchain" {
+ // Shorten v0.0.1-go1.13.1.darwin-amd64 to "go1.13.1 (darwin/amd64)".
+ _, vers, _ = strings.Cut(vers, "-")
+ if i := strings.LastIndex(vers, "."); i >= 0 {
+ goos, goarch, _ := strings.Cut(vers[i+1:], "-")
+ vers = vers[:i] + " (" + goos + "/" + goarch + ")"
+ }
+ fmt.Fprintf(os.Stderr, "go: downloading %s\n", vers)
+ } else {
+ fmt.Fprintf(os.Stderr, "go: downloading %s %s\n", mod.Path, vers)
+ }
+ }
+ unlock, err := lockVersion(ctx, mod)
+ if err != nil {
+ return "", err
+ }
+ defer unlock()
+
+ if err := downloadZip(ctx, mod, zipfile); err != nil {
+ return "", err
+ }
+ return zipfile, nil
+ })
+}
+
+func downloadZip(ctx context.Context, mod module.Version, zipfile string) (err error) {
+ ctx, span := trace.StartSpan(ctx, "modfetch.downloadZip "+zipfile)
+ defer span.Done()
+
+ // Double-check that the zipfile was not created while we were waiting for
+ // the lock in DownloadZip.
+ ziphashfile := zipfile + "hash"
+ var zipExists, ziphashExists bool
+ if _, err := os.Stat(zipfile); err == nil {
+ zipExists = true
+ }
+ if _, err := os.Stat(ziphashfile); err == nil {
+ ziphashExists = true
+ }
+ if zipExists && ziphashExists {
+ return nil
+ }
+
+ // Create parent directories.
+ if err := os.MkdirAll(filepath.Dir(zipfile), 0777); err != nil {
+ return err
+ }
+
+ // Clean up any remaining tempfiles from previous runs.
+ // This is only safe to do because the lock file ensures that their
+ // writers are no longer active.
+ tmpPattern := filepath.Base(zipfile) + "*.tmp"
+ if old, err := filepath.Glob(filepath.Join(str.QuoteGlob(filepath.Dir(zipfile)), tmpPattern)); err == nil {
+ for _, path := range old {
+ os.Remove(path) // best effort
+ }
+ }
+
+ // If the zip file exists, the ziphash file must have been deleted
+ // or lost after a file system crash. Re-hash the zip without downloading.
+ if zipExists {
+ return hashZip(mod, zipfile, ziphashfile)
+ }
+
+ // From here to the os.Rename call below is functionally almost equivalent to
+ // renameio.WriteToFile, with one key difference: we want to validate the
+ // contents of the file (by hashing it) before we commit it. Because the file
+ // is zip-compressed, we need an actual file — or at least an io.ReaderAt — to
+ // validate it: we can't just tee the stream as we write it.
+ f, err := tempFile(ctx, filepath.Dir(zipfile), filepath.Base(zipfile), 0666)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ f.Close()
+ os.Remove(f.Name())
+ }
+ }()
+
+ var unrecoverableErr error
+ err = TryProxies(func(proxy string) error {
+ if unrecoverableErr != nil {
+ return unrecoverableErr
+ }
+ repo := Lookup(ctx, proxy, mod.Path)
+ err := repo.Zip(ctx, f, mod.Version)
+ if err != nil {
+ // Zip may have partially written to f before failing.
+ // (Perhaps the server crashed while sending the file?)
+ // Since we allow fallback on error in some cases, we need to fix up the
+ // file to be empty again for the next attempt.
+ if _, err := f.Seek(0, io.SeekStart); err != nil {
+ unrecoverableErr = err
+ return err
+ }
+ if err := f.Truncate(0); err != nil {
+ unrecoverableErr = err
+ return err
+ }
+ }
+ return err
+ })
+ if err != nil {
+ return err
+ }
+
+ // Double-check that the paths within the zip file are well-formed.
+ //
+ // TODO(bcmills): There is a similar check within the Unzip function. Can we eliminate one?
+ fi, err := f.Stat()
+ if err != nil {
+ return err
+ }
+ z, err := zip.NewReader(f, fi.Size())
+ if err != nil {
+ return err
+ }
+ prefix := mod.Path + "@" + mod.Version + "/"
+ for _, f := range z.File {
+ if !strings.HasPrefix(f.Name, prefix) {
+ return fmt.Errorf("zip for %s has unexpected file %s", prefix[:len(prefix)-1], f.Name)
+ }
+ }
+
+ if err := f.Close(); err != nil {
+ return err
+ }
+
+ // Hash the zip file and check the sum before renaming to the final location.
+ if err := hashZip(mod, f.Name(), ziphashfile); err != nil {
+ return err
+ }
+ if err := os.Rename(f.Name(), zipfile); err != nil {
+ return err
+ }
+
+ // TODO(bcmills): Should we make the .zip and .ziphash files read-only to discourage tampering?
+
+ return nil
+}
+
+// hashZip reads the zip file at zipfile, then writes the hash to ziphashfile,
+// overwriting that file if it exists.
+//
+// If the hash does not match go.sum (or the sumdb if enabled), hashZip returns
+// an error and does not write ziphashfile.
+func hashZip(mod module.Version, zipfile, ziphashfile string) (err error) {
+ hash, err := dirhash.HashZip(zipfile, dirhash.DefaultHash)
+ if err != nil {
+ return err
+ }
+ if err := checkModSum(mod, hash); err != nil {
+ return err
+ }
+ hf, err := lockedfile.Create(ziphashfile)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if closeErr := hf.Close(); err == nil && closeErr != nil {
+ err = closeErr
+ }
+ }()
+ if err := hf.Truncate(int64(len(hash))); err != nil {
+ return err
+ }
+ if _, err := hf.WriteAt([]byte(hash), 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+// makeDirsReadOnly makes a best-effort attempt to remove write permissions for dir
+// and its transitive contents.
+func makeDirsReadOnly(dir string) {
+ type pathMode struct {
+ path string
+ mode fs.FileMode
+ }
+ var dirs []pathMode // in lexical order
+ filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+ if err == nil && d.IsDir() {
+ info, err := d.Info()
+ if err == nil && info.Mode()&0222 != 0 {
+ dirs = append(dirs, pathMode{path, info.Mode()})
+ }
+ }
+ return nil
+ })
+
+ // Run over list backward to chmod children before parents.
+ for i := len(dirs) - 1; i >= 0; i-- {
+ os.Chmod(dirs[i].path, dirs[i].mode&^0222)
+ }
+}
+
+// RemoveAll removes a directory written by Download or Unzip, first applying
+// any permission changes needed to do so.
+func RemoveAll(dir string) error {
+ // Module cache has 0555 directories; make them writable in order to remove content.
+ filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error {
+ if err != nil {
+ return nil // ignore errors walking in file system
+ }
+ if info.IsDir() {
+ os.Chmod(path, 0777)
+ }
+ return nil
+ })
+ return robustio.RemoveAll(dir)
+}
+
+var GoSumFile string // path to go.sum; set by package modload
+var WorkspaceGoSumFiles []string // path to module go.sums in workspace; set by package modload
+
+type modSum struct {
+ mod module.Version
+ sum string
+}
+
+var goSum struct {
+ mu sync.Mutex
+ m map[module.Version][]string // content of go.sum file
+ w map[string]map[module.Version][]string // sum file in workspace -> content of that sum file
+ status map[modSum]modSumStatus // state of sums in m
+ overwrite bool // if true, overwrite go.sum without incorporating its contents
+ enabled bool // whether to use go.sum at all
+}
+
+type modSumStatus struct {
+ used, dirty bool
+}
+
+// Reset resets globals in the modfetch package, so previous loads don't affect
+// contents of go.sum files.
+func Reset() {
+ GoSumFile = ""
+ WorkspaceGoSumFiles = nil
+
+ // Uses of lookupCache and downloadCache both can call checkModSum,
+ // which in turn sets the used bit on goSum.status for modules.
+ // Reset them so used can be computed properly.
+ lookupCache = par.Cache[lookupCacheKey, Repo]{}
+ downloadCache = par.ErrCache[module.Version, string]{}
+
+ // Clear all fields on goSum. It will be initialized later.
+ goSum.mu.Lock()
+ goSum.m = nil
+ goSum.w = nil
+ goSum.status = nil
+ goSum.overwrite = false
+ goSum.enabled = false
+ goSum.mu.Unlock()
+}
+
+// initGoSum initializes the go.sum data.
+// The boolean it returns reports whether the
+// use of go.sum is now enabled.
+// The goSum lock must be held.
+func initGoSum() (bool, error) {
+ if GoSumFile == "" {
+ return false, nil
+ }
+ if goSum.m != nil {
+ return true, nil
+ }
+
+ goSum.m = make(map[module.Version][]string)
+ goSum.status = make(map[modSum]modSumStatus)
+ goSum.w = make(map[string]map[module.Version][]string)
+
+ for _, f := range WorkspaceGoSumFiles {
+ goSum.w[f] = make(map[module.Version][]string)
+ _, err := readGoSumFile(goSum.w[f], f)
+ if err != nil {
+ return false, err
+ }
+ }
+
+ enabled, err := readGoSumFile(goSum.m, GoSumFile)
+ goSum.enabled = enabled
+ return enabled, err
+}
+
+func readGoSumFile(dst map[module.Version][]string, file string) (bool, error) {
+ var (
+ data []byte
+ err error
+ )
+ if actualSumFile, ok := fsys.OverlayPath(file); ok {
+ // Don't lock go.sum if it's part of the overlay.
+ // On Plan 9, locking requires chmod, and we don't want to modify any file
+ // in the overlay. See #44700.
+ data, err = os.ReadFile(actualSumFile)
+ } else {
+ data, err = lockedfile.Read(file)
+ }
+ if err != nil && !os.IsNotExist(err) {
+ return false, err
+ }
+ readGoSum(dst, file, data)
+
+ return true, nil
+}
+
+// emptyGoModHash is the hash of a 1-file tree containing a 0-length go.mod.
+// A bug caused us to write these into go.sum files for non-modules.
+// We detect and remove them.
+const emptyGoModHash = "h1:G7mAYYxgmS0lVkHyy2hEOLQCFB0DlQFTMLWggykrydY="
+
+// readGoSum parses data, which is the content of file,
+// and adds it to goSum.m. The goSum lock must be held.
+func readGoSum(dst map[module.Version][]string, file string, data []byte) error {
+ lineno := 0
+ for len(data) > 0 {
+ var line []byte
+ lineno++
+ i := bytes.IndexByte(data, '\n')
+ if i < 0 {
+ line, data = data, nil
+ } else {
+ line, data = data[:i], data[i+1:]
+ }
+ f := strings.Fields(string(line))
+ if len(f) == 0 {
+ // blank line; skip it
+ continue
+ }
+ if len(f) != 3 {
+ return fmt.Errorf("malformed go.sum:\n%s:%d: wrong number of fields %v", file, lineno, len(f))
+ }
+ if f[2] == emptyGoModHash {
+ // Old bug; drop it.
+ continue
+ }
+ mod := module.Version{Path: f[0], Version: f[1]}
+ dst[mod] = append(dst[mod], f[2])
+ }
+ return nil
+}
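+
+// For illustration, the lines handled by readGoSum have the form
+// "<module> <version> <hash>", with a "/go.mod" suffix on the version for
+// go.mod-only hashes. A sketch with a hypothetical module path and elided
+// hashes:
+//
+//	example.com/m v1.0.0 h1:...
+//	example.com/m v1.0.0/go.mod h1:...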
+
+// HaveSum returns true if the go.sum file contains an entry for mod.
+// The entry's hash must be generated with a known hash algorithm.
+// mod.Version may have a "/go.mod" suffix to distinguish sums for
+// .mod and .zip files.
+func HaveSum(mod module.Version) bool {
+ goSum.mu.Lock()
+ defer goSum.mu.Unlock()
+ inited, err := initGoSum()
+ if err != nil || !inited {
+ return false
+ }
+ for _, goSums := range goSum.w {
+ for _, h := range goSums[mod] {
+ if !strings.HasPrefix(h, "h1:") {
+ continue
+ }
+ if !goSum.status[modSum{mod, h}].dirty {
+ return true
+ }
+ }
+ }
+ for _, h := range goSum.m[mod] {
+ if !strings.HasPrefix(h, "h1:") {
+ continue
+ }
+ if !goSum.status[modSum{mod, h}].dirty {
+ return true
+ }
+ }
+ return false
+}
+
+// checkMod checks the given module's checksum and Go version.
+func checkMod(ctx context.Context, mod module.Version) {
+ // Do the file I/O before acquiring the go.sum lock.
+ ziphash, err := CachePath(ctx, mod, "ziphash")
+ if err != nil {
+ base.Fatalf("verifying %v", module.VersionError(mod, err))
+ }
+ data, err := lockedfile.Read(ziphash)
+ if err != nil {
+ base.Fatalf("verifying %v", module.VersionError(mod, err))
+ }
+ data = bytes.TrimSpace(data)
+ if !isValidSum(data) {
+ // Recreate ziphash file from zip file and use that to check the mod sum.
+ zip, err := CachePath(ctx, mod, "zip")
+ if err != nil {
+ base.Fatalf("verifying %v", module.VersionError(mod, err))
+ }
+ err = hashZip(mod, zip, ziphash)
+ if err != nil {
+ base.Fatalf("verifying %v", module.VersionError(mod, err))
+ }
+ return
+ }
+ h := string(data)
+ if !strings.HasPrefix(h, "h1:") {
+ base.Fatalf("verifying %v", module.VersionError(mod, fmt.Errorf("unexpected ziphash: %q", h)))
+ }
+
+ if err := checkModSum(mod, h); err != nil {
+ base.Fatalf("%s", err)
+ }
+}
+
+// goModSum returns the checksum for the go.mod contents.
+func goModSum(data []byte) (string, error) {
+ return dirhash.Hash1([]string{"go.mod"}, func(string) (io.ReadCloser, error) {
+ return io.NopCloser(bytes.NewReader(data)), nil
+ })
+}
+
+// checkGoMod checks the given module's go.mod checksum;
+// data is the go.mod content.
+func checkGoMod(path, version string, data []byte) error {
+ h, err := goModSum(data)
+ if err != nil {
+ return &module.ModuleError{Path: path, Version: version, Err: fmt.Errorf("verifying go.mod: %v", err)}
+ }
+
+ return checkModSum(module.Version{Path: path, Version: version + "/go.mod"}, h)
+}
+
+// checkModSum checks that the recorded checksum for mod is h.
+//
+// mod.Version may have the additional suffix "/go.mod" to request the checksum
+// for the module's go.mod file only.
+func checkModSum(mod module.Version, h string) error {
+ // We lock goSum when manipulating it,
+ // but we arrange to release the lock when calling checkSumDB,
+ // so that parallel calls to checkModSum can execute parallel calls
+ // to checkSumDB.
+
+ // Check whether mod+h is listed in go.sum already. If so, we're done.
+ goSum.mu.Lock()
+ inited, err := initGoSum()
+ if err != nil {
+ goSum.mu.Unlock()
+ return err
+ }
+ done := inited && haveModSumLocked(mod, h)
+ if inited {
+ st := goSum.status[modSum{mod, h}]
+ st.used = true
+ goSum.status[modSum{mod, h}] = st
+ }
+ goSum.mu.Unlock()
+
+ if done {
+ return nil
+ }
+
+ // Not listed, so we want to add them.
+ // Consult checksum database if appropriate.
+ if useSumDB(mod) {
+ // Calls base.Fatalf if mismatch detected.
+ if err := checkSumDB(mod, h); err != nil {
+ return err
+ }
+ }
+
+ // Add mod+h to go.sum, if it hasn't appeared already.
+ if inited {
+ goSum.mu.Lock()
+ addModSumLocked(mod, h)
+ st := goSum.status[modSum{mod, h}]
+ st.dirty = true
+ goSum.status[modSum{mod, h}] = st
+ goSum.mu.Unlock()
+ }
+ return nil
+}
+
+// haveModSumLocked reports whether the pair mod,h is already listed in go.sum.
+// If it finds a conflicting pair instead, it calls base.Fatalf.
+// goSum.mu must be locked.
+func haveModSumLocked(mod module.Version, h string) bool {
+ sumFileName := "go.sum"
+ if strings.HasSuffix(GoSumFile, "go.work.sum") {
+ sumFileName = "go.work.sum"
+ }
+ for _, vh := range goSum.m[mod] {
+ if h == vh {
+ return true
+ }
+ if strings.HasPrefix(vh, "h1:") {
+ base.Fatalf("verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+goSumMismatch, mod.Path, mod.Version, h, sumFileName, vh)
+ }
+ }
+ // Also check workspace sums.
+ foundMatch := false
+ // Check sums from all files in case there are conflicts between
+ // the files.
+ for goSumFile, goSums := range goSum.w {
+ for _, vh := range goSums[mod] {
+ if h == vh {
+ foundMatch = true
+ } else if strings.HasPrefix(vh, "h1:") {
+ base.Fatalf("verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+goSumMismatch, mod.Path, mod.Version, h, goSumFile, vh)
+ }
+ }
+ }
+ return foundMatch
+}
+
+// addModSumLocked adds the pair mod,h to go.sum.
+// goSum.mu must be locked.
+func addModSumLocked(mod module.Version, h string) {
+ if haveModSumLocked(mod, h) {
+ return
+ }
+ if len(goSum.m[mod]) > 0 {
+ fmt.Fprintf(os.Stderr, "warning: verifying %s@%s: unknown hashes in go.sum: %v; adding %v"+hashVersionMismatch, mod.Path, mod.Version, strings.Join(goSum.m[mod], ", "), h)
+ }
+ goSum.m[mod] = append(goSum.m[mod], h)
+}
+
+// checkSumDB checks the mod, h pair against the Go checksum database.
+// It calls base.Fatalf if the hash is to be rejected.
+func checkSumDB(mod module.Version, h string) error {
+ modWithoutSuffix := mod
+ noun := "module"
+ if before, found := strings.CutSuffix(mod.Version, "/go.mod"); found {
+ noun = "go.mod"
+ modWithoutSuffix.Version = before
+ }
+
+ db, lines, err := lookupSumDB(mod)
+ if err != nil {
+ return module.VersionError(modWithoutSuffix, fmt.Errorf("verifying %s: %v", noun, err))
+ }
+
+ have := mod.Path + " " + mod.Version + " " + h
+ prefix := mod.Path + " " + mod.Version + " h1:"
+ for _, line := range lines {
+ if line == have {
+ return nil
+ }
+ if strings.HasPrefix(line, prefix) {
+ return module.VersionError(modWithoutSuffix, fmt.Errorf("verifying %s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+sumdbMismatch, noun, h, db, line[len(prefix)-len("h1:"):]))
+ }
+ }
+ return nil
+}
+
+// Sum returns the checksum for the downloaded copy of the given module,
+// if present in the download cache.
+func Sum(ctx context.Context, mod module.Version) string {
+ if cfg.GOMODCACHE == "" {
+ // Do not use current directory.
+ return ""
+ }
+
+ ziphash, err := CachePath(ctx, mod, "ziphash")
+ if err != nil {
+ return ""
+ }
+ data, err := lockedfile.Read(ziphash)
+ if err != nil {
+ return ""
+ }
+ data = bytes.TrimSpace(data)
+ if !isValidSum(data) {
+ return ""
+ }
+ return string(data)
+}
+
+// isValidSum returns true if data is the valid contents of a zip hash file.
+// Certain critical files are written to disk by first truncating
+// then writing the actual bytes, so that if the write fails
+// the corrupt file should contain at least one of the null
+// bytes written by the truncate operation.
+func isValidSum(data []byte) bool {
+ if bytes.IndexByte(data, '\000') >= 0 {
+ return false
+ }
+
+ if len(data) != len("h1:")+base64.StdEncoding.EncodedLen(sha256.Size) {
+ return false
+ }
+
+ return true
+}
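+
+// Concretely, the length check in isValidSum expects len("h1:") == 3 plus
+// base64.StdEncoding.EncodedLen(sha256.Size) == 44, i.e. 47 bytes in total,
+// matching a ziphash of the shape (hash elided):
+//
+//	h1:<44 base64 characters>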
+
+var ErrGoSumDirty = errors.New("updates to go.sum needed, disabled by -mod=readonly")
+
+// WriteGoSum writes the go.sum file if it needs to be updated.
+//
+// keep is used to check whether a newly added sum should be saved in go.sum.
+// It should have entries for both module content sums and go.mod sums
+// (version ends with "/go.mod"). Existing sums will be preserved unless they
+// have been marked for deletion with TrimGoSum.
+func WriteGoSum(ctx context.Context, keep map[module.Version]bool, readonly bool) error {
+ goSum.mu.Lock()
+ defer goSum.mu.Unlock()
+
+ // If we haven't read the go.sum file yet, don't bother writing it.
+ if !goSum.enabled {
+ return nil
+ }
+
+ // Check whether we need to add sums for which keep[m] is true or remove
+ // unused sums marked with TrimGoSum. If there are no changes to make,
+ // just return without opening go.sum.
+ dirty := false
+Outer:
+ for m, hs := range goSum.m {
+ for _, h := range hs {
+ st := goSum.status[modSum{m, h}]
+ if st.dirty && (!st.used || keep[m]) {
+ dirty = true
+ break Outer
+ }
+ }
+ }
+ if !dirty {
+ return nil
+ }
+ if readonly {
+ return ErrGoSumDirty
+ }
+ if _, ok := fsys.OverlayPath(GoSumFile); ok {
+ base.Fatalf("go: updates to go.sum needed, but go.sum is part of the overlay specified with -overlay")
+ }
+
+ // Make a best-effort attempt to acquire the side lock, only to exclude
+ // previous versions of the 'go' command from making simultaneous edits.
+ if unlock, err := SideLock(ctx); err == nil {
+ defer unlock()
+ }
+
+ err := lockedfile.Transform(GoSumFile, func(data []byte) ([]byte, error) {
+ if !goSum.overwrite {
+ // Incorporate any sums added by other processes in the meantime.
+ // Add only the sums that we actually checked: the user may have edited or
+ // truncated the file to remove erroneous hashes, and we shouldn't restore
+ // them without good reason.
+ goSum.m = make(map[module.Version][]string, len(goSum.m))
+ readGoSum(goSum.m, GoSumFile, data)
+ for ms, st := range goSum.status {
+ if st.used && !sumInWorkspaceModulesLocked(ms.mod) {
+ addModSumLocked(ms.mod, ms.sum)
+ }
+ }
+ }
+
+ var mods []module.Version
+ for m := range goSum.m {
+ mods = append(mods, m)
+ }
+ module.Sort(mods)
+
+ var buf bytes.Buffer
+ for _, m := range mods {
+ list := goSum.m[m]
+ sort.Strings(list)
+ str.Uniq(&list)
+ for _, h := range list {
+ st := goSum.status[modSum{m, h}]
+ if (!st.dirty || (st.used && keep[m])) && !sumInWorkspaceModulesLocked(m) {
+ fmt.Fprintf(&buf, "%s %s %s\n", m.Path, m.Version, h)
+ }
+ }
+ }
+ return buf.Bytes(), nil
+ })
+
+ if err != nil {
+ return fmt.Errorf("updating go.sum: %w", err)
+ }
+
+ goSum.status = make(map[modSum]modSumStatus)
+ goSum.overwrite = false
+ return nil
+}
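+
+// A minimal sketch of the keep map a caller might pass, assuming a
+// hypothetical module path, with one entry for the module content sum and
+// one for its go.mod sum:
+//
+//	keep := map[module.Version]bool{
+//		{Path: "example.com/m", Version: "v1.0.0"}:        true,
+//		{Path: "example.com/m", Version: "v1.0.0/go.mod"}: true,
+//	}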
+
+func sumInWorkspaceModulesLocked(m module.Version) bool {
+ for _, goSums := range goSum.w {
+ if _, ok := goSums[m]; ok {
+ return true
+ }
+ }
+ return false
+}
+
+// TrimGoSum trims go.sum to contain only the modules needed for reproducible
+// builds.
+//
+// keep is used to check whether a sum should be retained in go.sum. It should
+// have entries for both module content sums and go.mod sums (version ends
+// with "/go.mod").
+func TrimGoSum(keep map[module.Version]bool) {
+ goSum.mu.Lock()
+ defer goSum.mu.Unlock()
+ inited, err := initGoSum()
+ if err != nil {
+ base.Fatalf("%s", err)
+ }
+ if !inited {
+ return
+ }
+
+ for m, hs := range goSum.m {
+ if !keep[m] {
+ for _, h := range hs {
+ goSum.status[modSum{m, h}] = modSumStatus{used: false, dirty: true}
+ }
+ goSum.overwrite = true
+ }
+ }
+}
+
+const goSumMismatch = `
+
+SECURITY ERROR
+This download does NOT match an earlier download recorded in go.sum.
+The bits may have been replaced on the origin server, or an attacker may
+have intercepted the download attempt.
+
+For more information, see 'go help module-auth'.
+`
+
+const sumdbMismatch = `
+
+SECURITY ERROR
+This download does NOT match the one reported by the checksum server.
+The bits may have been replaced on the origin server, or an attacker may
+have intercepted the download attempt.
+
+For more information, see 'go help module-auth'.
+`
+
+const hashVersionMismatch = `
+
+SECURITY WARNING
+This download is listed in go.sum, but using an unknown hash algorithm.
+The download cannot be verified.
+
+For more information, see 'go help module-auth'.
+
+`
+
+var HelpModuleAuth = &base.Command{
+ UsageLine: "module-auth",
+ Short: "module authentication using go.sum",
+ Long: `
+When the go command downloads a module zip file or go.mod file into the
+module cache, it computes a cryptographic hash and compares it with a known
+value to verify the file hasn't changed since it was first downloaded. Known
+hashes are stored in a file in the module root directory named go.sum. Hashes
+may also be downloaded from the checksum database depending on the values of
+GOSUMDB, GOPRIVATE, and GONOSUMDB.
+
+For details, see https://golang.org/ref/mod#authenticating.
+`,
+}
+
+var HelpPrivate = &base.Command{
+ UsageLine: "private",
+ Short: "configuration for downloading non-public code",
+ Long: `
+The go command defaults to downloading modules from the public Go module
+mirror at proxy.golang.org. It also defaults to validating downloaded modules,
+regardless of source, against the public Go checksum database at sum.golang.org.
+These defaults work well for publicly available source code.
+
+The GOPRIVATE environment variable controls which modules the go command
+considers to be private (not available publicly) and should therefore not use
+the proxy or checksum database. The variable is a comma-separated list of
+glob patterns (in the syntax of Go's path.Match) of module path prefixes.
+For example,
+
+ GOPRIVATE=*.corp.example.com,rsc.io/private
+
+causes the go command to treat as private any module with a path prefix
+matching either pattern, including git.corp.example.com/xyzzy, rsc.io/private,
+and rsc.io/private/quux.
+
+For fine-grained control over module download and validation, the GONOPROXY
+and GONOSUMDB environment variables accept the same kind of glob list
+and override GOPRIVATE for the specific decision of whether to use the proxy
+and checksum database, respectively.
+
+For example, if a company ran a module proxy serving private modules,
+users would configure go using:
+
+ GOPRIVATE=*.corp.example.com
+ GOPROXY=proxy.example.com
+ GONOPROXY=none
+
+The GOPRIVATE variable is also used to define the "public" and "private"
+patterns for the GOVCS variable; see 'go help vcs'. For that usage,
+GOPRIVATE applies even in GOPATH mode. In that case, it matches import paths
+instead of module paths.
+
+The 'go env -w' command (see 'go help env') can be used to set these variables
+for future go command invocations.
+
+For more details, see https://golang.org/ref/mod#private-modules.
+`,
+}
diff --git a/src/cmd/go/internal/modfetch/key.go b/src/cmd/go/internal/modfetch/key.go
new file mode 100644
index 0000000..06f9989
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/key.go
@@ -0,0 +1,9 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfetch
+
+var knownGOSUMDB = map[string]string{
+ "sum.golang.org": "sum.golang.org+033de0ae+Ac4zctda0e5eza+HJyk9SxEdh+s3Ux18htTTAD8OuAn8",
+}
diff --git a/src/cmd/go/internal/modfetch/proxy.go b/src/cmd/go/internal/modfetch/proxy.go
new file mode 100644
index 0000000..dd37ba9
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/proxy.go
@@ -0,0 +1,449 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfetch
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net/url"
+ "path"
+ pathpkg "path"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/modfetch/codehost"
+ "cmd/go/internal/web"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
+)
+
+var HelpGoproxy = &base.Command{
+ UsageLine: "goproxy",
+ Short: "module proxy protocol",
+ Long: `
+A Go module proxy is any web server that can respond to GET requests for
+URLs of a specified form. The requests have no query parameters, so even
+a site serving from a fixed file system (including a file:/// URL)
+can be a module proxy.
+
+For details on the GOPROXY protocol, see
+https://golang.org/ref/mod#goproxy-protocol.
+`,
+}
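+
+// As a sketch of the request paths issued by proxyRepo below (module path
+// and version escaped with module.EscapePath and module.EscapeVersion):
+//
+//	GET $GOPROXY/<module>/@v/list
+//	GET $GOPROXY/<module>/@v/<version>.info
+//	GET $GOPROXY/<module>/@v/<version>.mod
+//	GET $GOPROXY/<module>/@v/<version>.zip
+//	GET $GOPROXY/<module>/@latest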
+
+var proxyOnce struct {
+ sync.Once
+ list []proxySpec
+ err error
+}
+
+type proxySpec struct {
+ // url is the proxy URL or one of "off", "direct", "noproxy".
+ url string
+
+ // fallBackOnError is true if a request should be attempted on the next proxy
+ // in the list after any error from this proxy. If fallBackOnError is false,
+ // the request will only be attempted on the next proxy if the error is
+ // equivalent to os.ErrNotFound, which is true for 404 and 410 responses.
+ fallBackOnError bool
+}
+
+func proxyList() ([]proxySpec, error) {
+ proxyOnce.Do(func() {
+ if cfg.GONOPROXY != "" && cfg.GOPROXY != "direct" {
+ proxyOnce.list = append(proxyOnce.list, proxySpec{url: "noproxy"})
+ }
+
+ goproxy := cfg.GOPROXY
+ for goproxy != "" {
+ var url string
+ fallBackOnError := false
+ if i := strings.IndexAny(goproxy, ",|"); i >= 0 {
+ url = goproxy[:i]
+ fallBackOnError = goproxy[i] == '|'
+ goproxy = goproxy[i+1:]
+ } else {
+ url = goproxy
+ goproxy = ""
+ }
+
+ url = strings.TrimSpace(url)
+ if url == "" {
+ continue
+ }
+ if url == "off" {
+ // "off" always fails hard, so can stop walking list.
+ proxyOnce.list = append(proxyOnce.list, proxySpec{url: "off"})
+ break
+ }
+ if url == "direct" {
+ proxyOnce.list = append(proxyOnce.list, proxySpec{url: "direct"})
+ // For now, "direct" is the end of the line. We may decide to add some
+ // sort of fallback behavior for them in the future, so ignore
+ // subsequent entries for forward-compatibility.
+ break
+ }
+
+ // Single-word tokens are reserved for built-in behaviors, and anything
+ // containing the string ":/" or matching an absolute file path must be a
+ // complete URL. For all other paths, implicitly add "https://".
+ if strings.ContainsAny(url, ".:/") && !strings.Contains(url, ":/") && !filepath.IsAbs(url) && !path.IsAbs(url) {
+ url = "https://" + url
+ }
+
+ // Check that newProxyRepo accepts the URL.
+ // It won't do anything with the path.
+ if _, err := newProxyRepo(url, "golang.org/x/text"); err != nil {
+ proxyOnce.err = err
+ return
+ }
+
+ proxyOnce.list = append(proxyOnce.list, proxySpec{
+ url: url,
+ fallBackOnError: fallBackOnError,
+ })
+ }
+
+ if len(proxyOnce.list) == 0 ||
+ len(proxyOnce.list) == 1 && proxyOnce.list[0].url == "noproxy" {
+ // There were no proxies, other than the implicit "noproxy" added when
+ // GONOPROXY is set. This can happen if GOPROXY is a non-empty string
+ // like "," or " ".
+ proxyOnce.err = fmt.Errorf("GOPROXY list is not the empty string, but contains no entries")
+ }
+ })
+
+ return proxyOnce.list, proxyOnce.err
+}
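+
+// For example, with the parsing above a hypothetical setting such as
+//
+//	GOPROXY=https://proxy.example.com|https://backup.example.com,direct
+//
+// yields a proxySpec for proxy.example.com with fallBackOnError set (it is
+// followed by '|'), one for backup.example.com without it (followed by ','),
+// and a final "direct" entry that terminates the list.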
+
+// TryProxies iterates f over each configured proxy (including "noproxy" and
+// "direct" if applicable) until f returns no error or until f returns an
+// error that is not equivalent to fs.ErrNotExist on a proxy configured
+// not to fall back on errors.
+//
+// TryProxies then returns that final error.
+//
+// If GOPROXY is set to "off", TryProxies invokes f once with the argument
+// "off".
+func TryProxies(f func(proxy string) error) error {
+ proxies, err := proxyList()
+ if err != nil {
+ return err
+ }
+ if len(proxies) == 0 {
+ panic("GOPROXY list is empty")
+ }
+
+ // We try to report the most helpful error to the user. "direct" and "noproxy"
+ // errors are best, followed by proxy errors other than ErrNotExist, followed
+ // by ErrNotExist.
+ //
+ // Note that errProxyOff, errNoproxy, and errUseProxy are equivalent to
+ // ErrNotExist. errUseProxy should only be returned if "noproxy" is the only
+ // proxy. errNoproxy should never be returned, since there should always be a
+ // more useful error from "noproxy" first.
+ const (
+ notExistRank = iota
+ proxyRank
+ directRank
+ )
+ var bestErr error
+ bestErrRank := notExistRank
+ for _, proxy := range proxies {
+ err := f(proxy.url)
+ if err == nil {
+ return nil
+ }
+ isNotExistErr := errors.Is(err, fs.ErrNotExist)
+
+ if proxy.url == "direct" || (proxy.url == "noproxy" && err != errUseProxy) {
+ bestErr = err
+ bestErrRank = directRank
+ } else if bestErrRank <= proxyRank && !isNotExistErr {
+ bestErr = err
+ bestErrRank = proxyRank
+ } else if bestErrRank == notExistRank {
+ bestErr = err
+ }
+
+ if !proxy.fallBackOnError && !isNotExistErr {
+ break
+ }
+ }
+ return bestErr
+}
+
+type proxyRepo struct {
+ url *url.URL
+ path string
+ redactedURL string
+
+ listLatestOnce sync.Once
+ listLatest *RevInfo
+ listLatestErr error
+}
+
+func newProxyRepo(baseURL, path string) (Repo, error) {
+ base, err := url.Parse(baseURL)
+ if err != nil {
+ return nil, err
+ }
+ switch base.Scheme {
+ case "http", "https":
+ // ok
+ case "file":
+ if *base != (url.URL{Scheme: base.Scheme, Path: base.Path, RawPath: base.RawPath}) {
+ return nil, fmt.Errorf("invalid file:// proxy URL with non-path elements: %s", base.Redacted())
+ }
+ case "":
+ return nil, fmt.Errorf("invalid proxy URL missing scheme: %s", base.Redacted())
+ default:
+ return nil, fmt.Errorf("invalid proxy URL scheme (must be https, http, file): %s", base.Redacted())
+ }
+
+ enc, err := module.EscapePath(path)
+ if err != nil {
+ return nil, err
+ }
+ redactedURL := base.Redacted()
+ base.Path = strings.TrimSuffix(base.Path, "/") + "/" + enc
+ base.RawPath = strings.TrimSuffix(base.RawPath, "/") + "/" + pathEscape(enc)
+ return &proxyRepo{base, path, redactedURL, sync.Once{}, nil, nil}, nil
+}
+
+func (p *proxyRepo) ModulePath() string {
+ return p.path
+}
+
+var errProxyReuse = fmt.Errorf("proxy does not support CheckReuse")
+
+func (p *proxyRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error {
+ return errProxyReuse
+}
+
+// versionError returns err wrapped in a ModuleError for p.path.
+func (p *proxyRepo) versionError(version string, err error) error {
+ if version != "" && version != module.CanonicalVersion(version) {
+ return &module.ModuleError{
+ Path: p.path,
+ Err: &module.InvalidVersionError{
+ Version: version,
+ Pseudo: module.IsPseudoVersion(version),
+ Err: err,
+ },
+ }
+ }
+
+ return &module.ModuleError{
+ Path: p.path,
+ Version: version,
+ Err: err,
+ }
+}
+
+func (p *proxyRepo) getBytes(ctx context.Context, path string) ([]byte, error) {
+ body, err := p.getBody(ctx, path)
+ if err != nil {
+ return nil, err
+ }
+ defer body.Close()
+
+ b, err := io.ReadAll(body)
+ if err != nil {
+ // net/http doesn't add context to Body errors, so add it here.
+ // (See https://go.dev/issue/52727.)
+ return b, &url.Error{Op: "read", URL: strings.TrimSuffix(p.redactedURL, "/") + "/" + path, Err: err}
+ }
+ return b, nil
+}
+
+func (p *proxyRepo) getBody(ctx context.Context, path string) (r io.ReadCloser, err error) {
+ fullPath := pathpkg.Join(p.url.Path, path)
+
+ target := *p.url
+ target.Path = fullPath
+ target.RawPath = pathpkg.Join(target.RawPath, pathEscape(path))
+
+ resp, err := web.Get(web.DefaultSecurity, &target)
+ if err != nil {
+ return nil, err
+ }
+ if err := resp.Err(); err != nil {
+ resp.Body.Close()
+ return nil, err
+ }
+ return resp.Body, nil
+}
+
+func (p *proxyRepo) Versions(ctx context.Context, prefix string) (*Versions, error) {
+ data, err := p.getBytes(ctx, "@v/list")
+ if err != nil {
+ p.listLatestOnce.Do(func() {
+ p.listLatest, p.listLatestErr = nil, p.versionError("", err)
+ })
+ return nil, p.versionError("", err)
+ }
+ var list []string
+ allLine := strings.Split(string(data), "\n")
+ for _, line := range allLine {
+ f := strings.Fields(line)
+ if len(f) >= 1 && semver.IsValid(f[0]) && strings.HasPrefix(f[0], prefix) && !module.IsPseudoVersion(f[0]) {
+ list = append(list, f[0])
+ }
+ }
+ p.listLatestOnce.Do(func() {
+ p.listLatest, p.listLatestErr = p.latestFromList(ctx, allLine)
+ })
+ semver.Sort(list)
+ return &Versions{List: list}, nil
+}
+
+func (p *proxyRepo) latest(ctx context.Context) (*RevInfo, error) {
+ p.listLatestOnce.Do(func() {
+ data, err := p.getBytes(ctx, "@v/list")
+ if err != nil {
+ p.listLatestErr = p.versionError("", err)
+ return
+ }
+ list := strings.Split(string(data), "\n")
+ p.listLatest, p.listLatestErr = p.latestFromList(ctx, list)
+ })
+ return p.listLatest, p.listLatestErr
+}
+
+func (p *proxyRepo) latestFromList(ctx context.Context, allLine []string) (*RevInfo, error) {
+ var (
+ bestTime time.Time
+ bestVersion string
+ )
+ for _, line := range allLine {
+ f := strings.Fields(line)
+ if len(f) >= 1 && semver.IsValid(f[0]) {
+ // If the proxy includes timestamps, prefer the timestamp it reports.
+ // Otherwise, derive the timestamp from the pseudo-version.
+ var (
+ ft time.Time
+ )
+ if len(f) >= 2 {
+ ft, _ = time.Parse(time.RFC3339, f[1])
+ } else if module.IsPseudoVersion(f[0]) {
+ ft, _ = module.PseudoVersionTime(f[0])
+ } else {
+ // Repo.Latest promises that this method is only called where there are
+ // no tagged versions. Ignore any tagged versions that were added in the
+ // meantime.
+ continue
+ }
+ if bestTime.Before(ft) {
+ bestTime = ft
+ bestVersion = f[0]
+ }
+ }
+ }
+ if bestVersion == "" {
+ return nil, p.versionError("", codehost.ErrNoCommits)
+ }
+
+ // Call Stat to get all the other fields, including Origin information.
+ return p.Stat(ctx, bestVersion)
+}
+
+func (p *proxyRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) {
+ encRev, err := module.EscapeVersion(rev)
+ if err != nil {
+ return nil, p.versionError(rev, err)
+ }
+ data, err := p.getBytes(ctx, "@v/"+encRev+".info")
+ if err != nil {
+ return nil, p.versionError(rev, err)
+ }
+ info := new(RevInfo)
+ if err := json.Unmarshal(data, info); err != nil {
+ return nil, p.versionError(rev, fmt.Errorf("invalid response from proxy %q: %w", p.redactedURL, err))
+ }
+ if info.Version != rev && rev == module.CanonicalVersion(rev) && module.Check(p.path, rev) == nil {
+ // If we request a correct, appropriate version for the module path, the
+ // proxy must return either exactly that version or an error — not some
+ // arbitrary other version.
+ return nil, p.versionError(rev, fmt.Errorf("proxy returned info for version %s instead of requested version", info.Version))
+ }
+ return info, nil
+}
+
+func (p *proxyRepo) Latest(ctx context.Context) (*RevInfo, error) {
+ data, err := p.getBytes(ctx, "@latest")
+ if err != nil {
+ if !errors.Is(err, fs.ErrNotExist) {
+ return nil, p.versionError("", err)
+ }
+ return p.latest(ctx)
+ }
+ info := new(RevInfo)
+ if err := json.Unmarshal(data, info); err != nil {
+ return nil, p.versionError("", fmt.Errorf("invalid response from proxy %q: %w", p.redactedURL, err))
+ }
+ return info, nil
+}
+
+func (p *proxyRepo) GoMod(ctx context.Context, version string) ([]byte, error) {
+ if version != module.CanonicalVersion(version) {
+ return nil, p.versionError(version, fmt.Errorf("internal error: version passed to GoMod is not canonical"))
+ }
+
+ encVer, err := module.EscapeVersion(version)
+ if err != nil {
+ return nil, p.versionError(version, err)
+ }
+ data, err := p.getBytes(ctx, "@v/"+encVer+".mod")
+ if err != nil {
+ return nil, p.versionError(version, err)
+ }
+ return data, nil
+}
+
+func (p *proxyRepo) Zip(ctx context.Context, dst io.Writer, version string) error {
+ if version != module.CanonicalVersion(version) {
+ return p.versionError(version, fmt.Errorf("internal error: version passed to Zip is not canonical"))
+ }
+
+ encVer, err := module.EscapeVersion(version)
+ if err != nil {
+ return p.versionError(version, err)
+ }
+ path := "@v/" + encVer + ".zip"
+ body, err := p.getBody(ctx, path)
+ if err != nil {
+ return p.versionError(version, err)
+ }
+ defer body.Close()
+
+ lr := &io.LimitedReader{R: body, N: codehost.MaxZipFile + 1}
+ if _, err := io.Copy(dst, lr); err != nil {
+ // net/http doesn't add context to Body errors, so add it here.
+ // (See https://go.dev/issue/52727.)
+ err = &url.Error{Op: "read", URL: pathpkg.Join(p.redactedURL, path), Err: err}
+ return p.versionError(version, err)
+ }
+ if lr.N <= 0 {
+ return p.versionError(version, fmt.Errorf("downloaded zip file too large"))
+ }
+ return nil
+}
+
+// pathEscape escapes s so it can be used in a path.
+// That is, it escapes things like ? and # (which really shouldn't appear anyway).
+// It does not escape / to %2F: our REST API is designed so that / can be left as is.
+func pathEscape(s string) string {
+ return strings.ReplaceAll(url.PathEscape(s), "%2F", "/")
+}
diff --git a/src/cmd/go/internal/modfetch/repo.go b/src/cmd/go/internal/modfetch/repo.go
new file mode 100644
index 0000000..25fb02d
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/repo.go
@@ -0,0 +1,411 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfetch
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "strconv"
+ "time"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/modfetch/codehost"
+ "cmd/go/internal/par"
+ "cmd/go/internal/vcs"
+ web "cmd/go/internal/web"
+
+ "golang.org/x/mod/module"
+)
+
+const traceRepo = false // trace all repo actions, for debugging
+
+// A Repo represents a repository storing all versions of a single module.
+// It must be safe for simultaneous use by multiple goroutines.
+type Repo interface {
+ // ModulePath returns the module path.
+ ModulePath() string
+
+ // CheckReuse checks whether the validation criteria in the origin
+ // are still satisfied on the server corresponding to this module.
+ // If so, the caller can reuse any cached Versions or RevInfo containing
+ // this origin rather than redownloading those from the server.
+ CheckReuse(ctx context.Context, old *codehost.Origin) error
+
+ // Versions lists all known versions with the given prefix.
+ // Pseudo-versions are not included.
+ //
+ // Versions should be returned sorted in semver order
+ // (implementations can use semver.Sort).
+ //
+ // Versions returns a non-nil error only if there was a problem
+ // fetching the list of versions: it may return an empty list
+ // along with a nil error if the list of matching versions
+ // is known to be empty.
+ //
+ // If the underlying repository does not exist,
+// Versions returns an error matching errors.Is(_, fs.ErrNotExist).
+ Versions(ctx context.Context, prefix string) (*Versions, error)
+
+ // Stat returns information about the revision rev.
+ // A revision can be any identifier known to the underlying service:
+ // commit hash, branch, tag, and so on.
+ Stat(ctx context.Context, rev string) (*RevInfo, error)
+
+ // Latest returns the latest revision on the default branch,
+ // whatever that means in the underlying source code repository.
+ // It is only used when there are no tagged versions.
+ Latest(ctx context.Context) (*RevInfo, error)
+
+ // GoMod returns the go.mod file for the given version.
+ GoMod(ctx context.Context, version string) (data []byte, err error)
+
+ // Zip writes a zip file for the given version to dst.
+ Zip(ctx context.Context, dst io.Writer, version string) error
+}
+
+// A Versions describes the available versions in a module repository.
+type Versions struct {
+ Origin *codehost.Origin `json:",omitempty"` // origin information for reuse
+
+ List []string // semver versions
+}
+
+// A RevInfo describes a single revision in a module repository.
+type RevInfo struct {
+ Version string // suggested version string for this revision
+ Time time.Time // commit time
+
+ // These fields are used for Stat of arbitrary rev,
+ // but they are not recorded when talking about module versions.
+ Name string `json:"-"` // complete ID in underlying repository
+ Short string `json:"-"` // shortened ID, for use in pseudo-version
+
+ Origin *codehost.Origin `json:",omitempty"` // provenance for reuse
+}
+
+// Re: module paths, import paths, repository roots, and lookups
+//
+// A module is a collection of Go packages stored in a file tree
+// with a go.mod file at the root of the tree.
+// The go.mod defines the module path, which is the import path
+// corresponding to the root of the file tree.
+// The import path of a directory within that file tree is the module path
+// joined with the name of the subdirectory relative to the root.
+//
+// For example, the module with path rsc.io/qr corresponds to the
+// file tree in the repository https://github.com/rsc/qr.
+// That file tree has a go.mod that says "module rsc.io/qr".
+// The package in the root directory has import path "rsc.io/qr".
+// The package in the gf256 subdirectory has import path "rsc.io/qr/gf256".
+// In this example, "rsc.io/qr" is both a module path and an import path.
+// But "rsc.io/qr/gf256" is only an import path, not a module path:
+// it names an importable package, but not a module.
+//
+// As a special case to incorporate code written before modules were
+// introduced, if a path p resolves using the pre-module "go get" lookup
+// to the root of a source code repository without a go.mod file,
+// that repository is treated as if it had a go.mod in its root directory
+// declaring module path p. (The go.mod is further considered to
+// contain requirements corresponding to any legacy version
+// tracking format such as Gopkg.lock, vendor/vendor.conf, and so on.)
+//
+// The presentation so far ignores the fact that a source code repository
+// has many different versions of a file tree, and those versions may
+// differ in whether a particular go.mod exists and what it contains.
+// In fact there is a well-defined mapping only from a module path, version
+// pair - often written path@version - to a particular file tree.
+// For example rsc.io/qr@v0.1.0 depends on the "implicit go.mod at root of
+// repository" rule, while rsc.io/qr@v0.2.0 has an explicit go.mod.
+// Because the "go get" import paths rsc.io/qr and github.com/rsc/qr
+// both redirect to the Git repository https://github.com/rsc/qr,
+// github.com/rsc/qr@v0.1.0 is the same file tree as rsc.io/qr@v0.1.0
+// but a different module (a different name). In contrast, since v0.2.0
+// of that repository has an explicit go.mod that declares path rsc.io/qr,
+// github.com/rsc/qr@v0.2.0 is an invalid module path, version pair.
+// Before modules, import comments would have had the same effect.
+//
+// The set of import paths associated with a given module path is
+// clearly not fixed: at the least, new directories with new import paths
+// can always be added. But another potential operation is to split a
+// subtree out of a module into its own module. If done carefully,
+// this operation can be done while preserving compatibility for clients.
+// For example, suppose that we want to split rsc.io/qr/gf256 into its
+// own module, so that there would be two modules rsc.io/qr and rsc.io/qr/gf256.
+// Then we can simultaneously issue rsc.io/qr v0.3.0 (dropping the gf256 subdirectory)
+// and rsc.io/qr/gf256 v0.1.0, including in their respective go.mod
+// cyclic requirements pointing at each other: rsc.io/qr v0.3.0 requires
+// rsc.io/qr/gf256 v0.1.0 and vice versa. Then a build can be
+// using an older rsc.io/qr module that includes the gf256 package, but if
+// it adds a requirement on either the newer rsc.io/qr or the newer
+// rsc.io/qr/gf256 module, it will automatically add the requirement
+// on the complementary half, ensuring both that rsc.io/qr/gf256 is
+// available for importing by the build and also that it is only defined
+// by a single module. The gf256 package could move back into the
+// original by another simultaneous release of rsc.io/qr v0.4.0 including
+// the gf256 subdirectory and an rsc.io/qr/gf256 v0.2.0 with no code
+// in its root directory, along with a new requirement cycle.
+// The ability to shift module boundaries in this way is expected to be
+// important in large-scale program refactorings, similar to the ones
+// described in https://talks.golang.org/2016/refactor.article.
+//
+// The possibility of shifting module boundaries reemphasizes
+// that you must know both the module path and its version
+// to determine the set of packages provided directly by that module.
+//
+// On top of all this, it is possible for a single code repository
+// to contain multiple modules, either in branches or subdirectories,
+// as a limited kind of monorepo. For example rsc.io/qr/v2,
+// the v2.x.x continuation of rsc.io/qr, is expected to be found
+// in v2-tagged commits in https://github.com/rsc/qr, either
+// in the root or in a v2 subdirectory, disambiguated by go.mod.
+// Again the precise file tree corresponding to a module
+// depends on which version we are considering.
+//
+// It is also possible for the underlying repository to change over time,
+// without changing the module path. If I copy the github repo over
+// to https://bitbucket.org/rsc/qr and update https://rsc.io/qr?go-get=1,
+// then clients of all versions should start fetching from bitbucket
+// instead of github. That is, in contrast to the exact file tree,
+// the location of the source code repository associated with a module path
+// does not depend on the module version. (This is by design, as the whole
+// point of these redirects is to allow package authors to establish a stable
+// name that can be updated as code moves from one service to another.)
+//
+// All of this is important background for the lookup APIs defined in this
+// file.
+//
+// The Lookup function takes a module path and returns a Repo representing
+// that module path. Lookup can do only a little with the path alone.
+// It can check that the path is well-formed (see module.CheckPath)
+// and it can check that the path can be resolved to a target repository.
+// To avoid version control access except when absolutely necessary,
+// Lookup does not attempt to connect to the repository itself.
+
+var lookupCache par.Cache[lookupCacheKey, Repo]
+
+type lookupCacheKey struct {
+ proxy, path string
+}
+
+// Lookup returns the module with the given module path,
+// fetched through the given proxy.
+//
+// The distinguished proxy "direct" indicates that the path should be fetched
+// from its origin, and "noproxy" indicates that the path should be fetched
+// directly only if GONOPROXY matches the given path.
+//
+// For the distinguished proxy "off", Lookup always returns a Repo that returns
+// a non-nil error for every method call.
+//
+// A successful return does not guarantee that the module
+// has any defined versions.
+func Lookup(ctx context.Context, proxy, path string) Repo {
+ if traceRepo {
+ defer logCall("Lookup(%q, %q)", proxy, path)()
+ }
+
+ return lookupCache.Do(lookupCacheKey{proxy, path}, func() Repo {
+ return newCachingRepo(ctx, path, func(ctx context.Context) (Repo, error) {
+ r, err := lookup(ctx, proxy, path)
+ if err == nil && traceRepo {
+ r = newLoggingRepo(r)
+ }
+ return r, err
+ })
+ })
+}
+
+// lookup returns the module with the given module path.
+func lookup(ctx context.Context, proxy, path string) (r Repo, err error) {
+ if cfg.BuildMod == "vendor" {
+ return nil, errLookupDisabled
+ }
+
+ switch path {
+ case "go", "toolchain":
+ return &toolchainRepo{path, Lookup(ctx, proxy, "golang.org/toolchain")}, nil
+ }
+
+ if module.MatchPrefixPatterns(cfg.GONOPROXY, path) {
+ switch proxy {
+ case "noproxy", "direct":
+ return lookupDirect(ctx, path)
+ default:
+ return nil, errNoproxy
+ }
+ }
+
+ switch proxy {
+ case "off":
+ return errRepo{path, errProxyOff}, nil
+ case "direct":
+ return lookupDirect(ctx, path)
+ case "noproxy":
+ return nil, errUseProxy
+ default:
+ return newProxyRepo(proxy, path)
+ }
+}
+
+type lookupDisabledError struct{}
+
+func (lookupDisabledError) Error() string {
+ if cfg.BuildModReason == "" {
+ return fmt.Sprintf("module lookup disabled by -mod=%s", cfg.BuildMod)
+ }
+ return fmt.Sprintf("module lookup disabled by -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason)
+}
+
+var errLookupDisabled error = lookupDisabledError{}
+
+var (
+ errProxyOff = notExistErrorf("module lookup disabled by GOPROXY=off")
+ errNoproxy error = notExistErrorf("disabled by GOPRIVATE/GONOPROXY")
+ errUseProxy error = notExistErrorf("path does not match GOPRIVATE/GONOPROXY")
+)
+
+func lookupDirect(ctx context.Context, path string) (Repo, error) {
+ security := web.SecureOnly
+
+ if module.MatchPrefixPatterns(cfg.GOINSECURE, path) {
+ security = web.Insecure
+ }
+ rr, err := vcs.RepoRootForImportPath(path, vcs.PreferMod, security)
+ if err != nil {
+ // We don't know where to find code for a module with this path.
+ return nil, notExistError{err: err}
+ }
+
+ if rr.VCS.Name == "mod" {
+ // Fetch module from proxy with base URL rr.Repo.
+ return newProxyRepo(rr.Repo, path)
+ }
+
+ code, err := lookupCodeRepo(ctx, rr)
+ if err != nil {
+ return nil, err
+ }
+ return newCodeRepo(code, rr.Root, path)
+}
+
+func lookupCodeRepo(ctx context.Context, rr *vcs.RepoRoot) (codehost.Repo, error) {
+ code, err := codehost.NewRepo(ctx, rr.VCS.Cmd, rr.Repo)
+ if err != nil {
+ if _, ok := err.(*codehost.VCSError); ok {
+ return nil, err
+ }
+ return nil, fmt.Errorf("lookup %s: %v", rr.Root, err)
+ }
+ return code, nil
+}
+
+// A loggingRepo is a wrapper around an underlying Repo
+// that prints a log message at the start and end of each call.
+// It can be inserted when debugging.
+type loggingRepo struct {
+ r Repo
+}
+
+func newLoggingRepo(r Repo) *loggingRepo {
+ return &loggingRepo{r}
+}
+
+// logCall prints a log message using format and args and then
+// also returns a function that will print the same message again,
+// along with the elapsed time.
+// Typical usage is:
+//
+// defer logCall("hello %s", arg)()
+//
+// Note the final ().
+func logCall(format string, args ...any) func() {
+ start := time.Now()
+ fmt.Fprintf(os.Stderr, "+++ %s\n", fmt.Sprintf(format, args...))
+ return func() {
+ fmt.Fprintf(os.Stderr, "%.3fs %s\n", time.Since(start).Seconds(), fmt.Sprintf(format, args...))
+ }
+}
+
+func (l *loggingRepo) ModulePath() string {
+ return l.r.ModulePath()
+}
+
+func (l *loggingRepo) CheckReuse(ctx context.Context, old *codehost.Origin) (err error) {
+ defer func() {
+ logCall("CheckReuse[%s]: %v", l.r.ModulePath(), err)
+ }()
+ return l.r.CheckReuse(ctx, old)
+}
+
+func (l *loggingRepo) Versions(ctx context.Context, prefix string) (*Versions, error) {
+ defer logCall("Repo[%s]: Versions(%q)", l.r.ModulePath(), prefix)()
+ return l.r.Versions(ctx, prefix)
+}
+
+func (l *loggingRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) {
+ defer logCall("Repo[%s]: Stat(%q)", l.r.ModulePath(), rev)()
+ return l.r.Stat(ctx, rev)
+}
+
+func (l *loggingRepo) Latest(ctx context.Context) (*RevInfo, error) {
+ defer logCall("Repo[%s]: Latest()", l.r.ModulePath())()
+ return l.r.Latest(ctx)
+}
+
+func (l *loggingRepo) GoMod(ctx context.Context, version string) ([]byte, error) {
+ defer logCall("Repo[%s]: GoMod(%q)", l.r.ModulePath(), version)()
+ return l.r.GoMod(ctx, version)
+}
+
+func (l *loggingRepo) Zip(ctx context.Context, dst io.Writer, version string) error {
+ dstName := "_"
+ if dst, ok := dst.(interface{ Name() string }); ok {
+ dstName = strconv.Quote(dst.Name())
+ }
+ defer logCall("Repo[%s]: Zip(%s, %q)", l.r.ModulePath(), dstName, version)()
+ return l.r.Zip(ctx, dst, version)
+}
+
+// errRepo is a Repo that returns the same error for all operations.
+//
+// It is useful in conjunction with caching, since cache hits will not attempt
+// the prohibited operations.
+type errRepo struct {
+ modulePath string
+ err error
+}
+
+func (r errRepo) ModulePath() string { return r.modulePath }
+
+func (r errRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error { return r.err }
+func (r errRepo) Versions(ctx context.Context, prefix string) (*Versions, error) { return nil, r.err }
+func (r errRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) { return nil, r.err }
+func (r errRepo) Latest(ctx context.Context) (*RevInfo, error) { return nil, r.err }
+func (r errRepo) GoMod(ctx context.Context, version string) ([]byte, error) { return nil, r.err }
+func (r errRepo) Zip(ctx context.Context, dst io.Writer, version string) error { return r.err }
+
+// A notExistError is like fs.ErrNotExist, but with a custom message.
+type notExistError struct {
+ err error
+}
+
+func notExistErrorf(format string, args ...any) error {
+ return notExistError{fmt.Errorf(format, args...)}
+}
+
+func (e notExistError) Error() string {
+ return e.err.Error()
+}
+
+func (notExistError) Is(target error) bool {
+ return target == fs.ErrNotExist
+}
+
+func (e notExistError) Unwrap() error {
+ return e.err
+}
diff --git a/src/cmd/go/internal/modfetch/sumdb.go b/src/cmd/go/internal/modfetch/sumdb.go
new file mode 100644
index 0000000..ea7d561
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/sumdb.go
@@ -0,0 +1,315 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Go checksum database lookup
+
+//go:build !cmd_go_bootstrap
+
+package modfetch
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/web"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/sumdb"
+ "golang.org/x/mod/sumdb/note"
+)
+
+// useSumDB reports whether to use the Go checksum database for the given module.
+func useSumDB(mod module.Version) bool {
+ if mod.Path == "golang.org/toolchain" {
+ must := true
+ // Downloaded toolchains cannot be listed in go.sum,
+ // so we require checksum database lookups even if
+ // GOSUMDB=off or GONOSUMDB matches the pattern.
+ // If GOSUMDB=off, then the eventual lookup will fail
+ // with a good error message.
+
+ // Exception #1: using GOPROXY=file:// to test a distpack.
+ if strings.HasPrefix(cfg.GOPROXY, "file://") && !strings.ContainsAny(cfg.GOPROXY, ",|") {
+ must = false
+ }
+ // Exception #2: the Go proxy+checksum database cannot check itself
+ // while doing the initial download.
+ if strings.Contains(os.Getenv("GIT_HTTP_USER_AGENT"), "proxy.golang.org") {
+ must = false
+ }
+
+ // Another potential exception would be GOPROXY=direct,
+ // but that would make toolchain downloads only as secure
+ // as HTTPS, and in particular they'd be susceptible to MITM
+ // attacks on systems with less-than-trustworthy root certificates.
+ // The checksum database provides a stronger guarantee,
+ // so we don't make that exception.
+
+ // Otherwise, require the checksum database.
+ if must {
+ return true
+ }
+ }
+ return cfg.GOSUMDB != "off" && !module.MatchPrefixPatterns(cfg.GONOSUMDB, mod.Path)
+}
+
+// lookupSumDB returns the Go checksum database's go.sum lines for the given module,
+// along with the name of the database.
+func lookupSumDB(mod module.Version) (dbname string, lines []string, err error) {
+ dbOnce.Do(func() {
+ dbName, db, dbErr = dbDial()
+ })
+ if dbErr != nil {
+ return "", nil, dbErr
+ }
+ lines, err = db.Lookup(mod.Path, mod.Version)
+ return dbName, lines, err
+}
+
+var (
+ dbOnce sync.Once
+ dbName string
+ db *sumdb.Client
+ dbErr error
+)
+
+func dbDial() (dbName string, db *sumdb.Client, err error) {
+ // $GOSUMDB can be "key" or "key url",
+ // and the key can be a full verifier key
+ // or a host on our list of known keys.
+
+ // Special case: sum.golang.google.cn
+ // is an alias, reachable inside mainland China,
+ // for sum.golang.org. If there are more
+ // of these we should add a map like knownGOSUMDB.
+ gosumdb := cfg.GOSUMDB
+ if gosumdb == "sum.golang.google.cn" {
+ gosumdb = "sum.golang.org https://sum.golang.google.cn"
+ }
+
+ if gosumdb == "off" {
+ return "", nil, fmt.Errorf("checksum database disabled by GOSUMDB=off")
+ }
+
+ key := strings.Fields(gosumdb)
+ if len(key) >= 1 {
+ if k := knownGOSUMDB[key[0]]; k != "" {
+ key[0] = k
+ }
+ }
+ if len(key) == 0 {
+ return "", nil, fmt.Errorf("missing GOSUMDB")
+ }
+ if len(key) > 2 {
+ return "", nil, fmt.Errorf("invalid GOSUMDB: too many fields")
+ }
+ vkey, err := note.NewVerifier(key[0])
+ if err != nil {
+ return "", nil, fmt.Errorf("invalid GOSUMDB: %v", err)
+ }
+ name := vkey.Name()
+
+ // No funny business in the database name.
+ direct, err := url.Parse("https://" + name)
+ if err != nil || strings.HasSuffix(name, "/") || *direct != (url.URL{Scheme: "https", Host: direct.Host, Path: direct.Path, RawPath: direct.RawPath}) || direct.RawPath != "" || direct.Host == "" {
+ return "", nil, fmt.Errorf("invalid sumdb name (must be host[/path]): %s %+v", name, *direct)
+ }
+
+ // Determine how to get to database.
+ var base *url.URL
+ if len(key) >= 2 {
+ // Use explicit alternate URL listed in $GOSUMDB,
+ // bypassing both the default URL derivation and any proxies.
+ u, err := url.Parse(key[1])
+ if err != nil {
+ return "", nil, fmt.Errorf("invalid GOSUMDB URL: %v", err)
+ }
+ base = u
+ }
+
+ return name, sumdb.NewClient(&dbClient{key: key[0], name: name, direct: direct, base: base}), nil
+}
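+
+// For instance, GOSUMDB may name a known database or spell out a verifier
+// key, optionally followed by an explicit URL (the second URL here is
+// hypothetical):
+//
+//	GOSUMDB=sum.golang.org
+//	GOSUMDB="sum.golang.org https://sumdb-mirror.example.com"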
+
+type dbClient struct {
+ key string
+ name string
+ direct *url.URL
+
+ once sync.Once
+ base *url.URL
+ baseErr error
+}
+
+func (c *dbClient) ReadRemote(path string) ([]byte, error) {
+ c.once.Do(c.initBase)
+ if c.baseErr != nil {
+ return nil, c.baseErr
+ }
+
+ var data []byte
+ start := time.Now()
+ targ := web.Join(c.base, path)
+ data, err := web.GetBytes(targ)
+ if false {
+ fmt.Fprintf(os.Stderr, "%.3fs %s\n", time.Since(start).Seconds(), targ.Redacted())
+ }
+ return data, err
+}
+
+// initBase determines the base URL for connecting to the database.
+// Determining the URL requires sending network traffic to proxies,
+// so this work is delayed until we need to download something from
+// the database. If everything we need is in the local cache and
+// c.ReadRemote is never called, we will never do this work.
+func (c *dbClient) initBase() {
+ if c.base != nil {
+ return
+ }
+
+ // Try proxies in turn until we find out how to connect to this database.
+ //
+ // Before accessing any checksum database URL using a proxy, the proxy
+ // client should first fetch <proxyURL>/sumdb/<sumdb-name>/supported.
+ //
+ // If that request returns a successful (HTTP 200) response, then the proxy
+ // supports proxying checksum database requests. In that case, the client
+ // should use the proxied access method only, never falling back to a direct
+ // connection to the database.
+ //
+ // If the /sumdb/<sumdb-name>/supported check fails with a “not found” (HTTP
+ // 404) or “gone” (HTTP 410) response, or if the proxy is configured to fall
+ // back on errors, the client will try the next proxy. If there are no
+ // proxies left or if the proxy is "direct" or "off", the client should
+ // connect directly to that database.
+ //
+ // Any other response is treated as the database being unavailable.
+ //
+ // See https://golang.org/design/25530-sumdb#proxying-a-checksum-database.
+ err := TryProxies(func(proxy string) error {
+ switch proxy {
+ case "noproxy":
+ return errUseProxy
+ case "direct", "off":
+ return errProxyOff
+ default:
+ proxyURL, err := url.Parse(proxy)
+ if err != nil {
+ return err
+ }
+ if _, err := web.GetBytes(web.Join(proxyURL, "sumdb/"+c.name+"/supported")); err != nil {
+ return err
+ }
+ // Success! This proxy will help us.
+ c.base = web.Join(proxyURL, "sumdb/"+c.name)
+ return nil
+ }
+ })
+ if errors.Is(err, fs.ErrNotExist) {
+ // No proxies, or all proxies failed with 404 or 410 (or were allowed
+ // to fall back), or we reached an explicit "direct" or "off".
+ c.base = c.direct
+ } else if err != nil {
+ c.baseErr = err
+ }
+}
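+
+// Concretely (an illustrative sketch): with GOPROXY=https://proxy.golang.org
+// and a database named sum.golang.org, initBase first fetches
+//
+//	https://proxy.golang.org/sumdb/sum.golang.org/supported
+//
+// and on an HTTP 200 response uses
+//
+//	https://proxy.golang.org/sumdb/sum.golang.org
+//
+// as the base. On 404/410 (or GOPROXY=direct/off) it falls back to the
+// direct URL https://sum.golang.org; any other error is recorded in baseErr.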
+
+// ReadConfig reads the key from c.key
+// and otherwise reads the config (a latest tree head) from GOPATH/pkg/sumdb/<file>.
+func (c *dbClient) ReadConfig(file string) (data []byte, err error) {
+ if file == "key" {
+ return []byte(c.key), nil
+ }
+
+ if cfg.SumdbDir == "" {
+ return nil, fmt.Errorf("could not locate sumdb file: missing $GOPATH: %s",
+ cfg.GoPathError)
+ }
+ targ := filepath.Join(cfg.SumdbDir, file)
+ data, err = lockedfile.Read(targ)
+ if errors.Is(err, fs.ErrNotExist) {
+ // Treat non-existent as empty, to bootstrap the "latest" file
+ // the first time we connect to a given database.
+ return []byte{}, nil
+ }
+ return data, err
+}
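+
+// For example (assuming the file names used by the sumdb client), the latest
+// tree head for sum.golang.org would be read from
+//
+//	$GOPATH/pkg/sumdb/sum.golang.org/latest
+//
+// while the special name "key" is answered from c.key without touching disk.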
+
+// WriteConfig rewrites the latest tree head.
+func (*dbClient) WriteConfig(file string, old, new []byte) error {
+ if file == "key" {
+ // Should not happen.
+ return fmt.Errorf("cannot write key")
+ }
+ if cfg.SumdbDir == "" {
+ return fmt.Errorf("could not locate sumdb file: missing $GOPATH: %s",
+ cfg.GoPathError)
+ }
+ targ := filepath.Join(cfg.SumdbDir, file)
+ os.MkdirAll(filepath.Dir(targ), 0777)
+ f, err := lockedfile.Edit(targ)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ data, err := io.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ if len(data) > 0 && !bytes.Equal(data, old) {
+ return sumdb.ErrWriteConflict
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return err
+ }
+ if err := f.Truncate(0); err != nil {
+ return err
+ }
+ if _, err := f.Write(new); err != nil {
+ return err
+ }
+ return f.Close()
+}
+
+// ReadCache reads cached lookups or tiles from
+// GOPATH/pkg/mod/cache/download/sumdb,
+// which will be deleted by "go clean -modcache".
+func (*dbClient) ReadCache(file string) ([]byte, error) {
+ targ := filepath.Join(cfg.GOMODCACHE, "cache/download/sumdb", file)
+ data, err := lockedfile.Read(targ)
+ // lockedfile.Write does not atomically create the file with contents.
+ // There is a moment between file creation and locking the file for writing,
+ // during which the empty file can be locked for reading.
+ // Treat observing an empty file as file not found.
+ if err == nil && len(data) == 0 {
+ err = &fs.PathError{Op: "read", Path: targ, Err: fs.ErrNotExist}
+ }
+ return data, err
+}
+
+// WriteCache updates cached lookups or tiles.
+func (*dbClient) WriteCache(file string, data []byte) {
+ targ := filepath.Join(cfg.GOMODCACHE, "cache/download/sumdb", file)
+ os.MkdirAll(filepath.Dir(targ), 0777)
+ lockedfile.Write(targ, bytes.NewReader(data), 0666)
+}
+
+func (*dbClient) Log(msg string) {
+ // nothing for now
+}
+
+func (*dbClient) SecurityError(msg string) {
+ base.Fatalf("%s", msg)
+}
diff --git a/src/cmd/go/internal/modfetch/toolchain.go b/src/cmd/go/internal/modfetch/toolchain.go
new file mode 100644
index 0000000..0d7cfcf
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/toolchain.go
@@ -0,0 +1,181 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfetch
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modfetch/codehost"
+)
+
+// A toolchainRepo is a synthesized repository reporting Go toolchain versions.
+// It has path "go" or "toolchain". The "go" repo reports versions like "1.2".
+// The "toolchain" repo reports versions like "go1.2".
+//
+// Note that the repo ONLY reports versions. It does not support
+// downloading the toolchains themselves. Instead, that is done using
+// the regular repo code with "golang.org/toolchain".
+// The naming conflict is unfortunate: "golang.org/toolchain"
+// should perhaps have been "go.dev/dl", but it's too late.
+//
+// For clarity, this file refers to golang.org/toolchain as the "DL" repo,
+// the one you can actually download.
+type toolchainRepo struct {
+ path string // either "go" or "toolchain"
+ repo Repo // underlying DL repo
+}
+
+func (r *toolchainRepo) ModulePath() string {
+ return r.path
+}
+
+func (r *toolchainRepo) Versions(ctx context.Context, prefix string) (*Versions, error) {
+ // Read DL repo list and convert to "go" or "toolchain" version list.
+ versions, err := r.repo.Versions(ctx, "")
+ if err != nil {
+ return nil, err
+ }
+ versions.Origin = nil
+ var list []string
+ have := make(map[string]bool)
+ goPrefix := ""
+ if r.path == "toolchain" {
+ goPrefix = "go"
+ }
+ for _, v := range versions.List {
+ v, ok := dlToGo(v)
+ if !ok {
+ continue
+ }
+ if !have[v] {
+ have[v] = true
+ list = append(list, goPrefix+v)
+ }
+ }
+
+ // Always include our own version.
+ // This means that the development branch of Go 1.21 (say) will allow 'go get go@1.21'
+ // even though there are no Go 1.21 releases yet.
+ // Once there is a release, 1.21 will be treated as a query matching the latest available release.
+ // Before then, 1.21 will be treated as a query that resolves to this entry we are adding (1.21).
+ if v := gover.Local(); !have[v] {
+ list = append(list, goPrefix+v)
+ }
+
+ if r.path == "go" {
+ sort.Slice(list, func(i, j int) bool {
+ return gover.Compare(list[i], list[j]) < 0
+ })
+ } else {
+ sort.Slice(list, func(i, j int) bool {
+ return gover.Compare(gover.FromToolchain(list[i]), gover.FromToolchain(list[j])) < 0
+ })
+ }
+ versions.List = list
+ return versions, nil
+}
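+
+// As an illustration: a DL version such as "v0.0.1-go1.21.0.linux-amd64"
+// contributes "1.21.0" to the list when r.path is "go" and "go1.21.0" when
+// r.path is "toolchain", and the local toolchain's own version is always
+// appended before sorting.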
+
+func (r *toolchainRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) {
+ // Convert rev to DL version and stat that to make sure it exists.
+ // In theory the go@ versions should be like 1.21.0
+ // and the toolchain@ versions should be like go1.21.0,
+ // but people will type the wrong one, and so we accept
+ // both and silently correct it to the standard form.
+ prefix := ""
+ v := rev
+ v = strings.TrimPrefix(v, "go")
+ if r.path == "toolchain" {
+ prefix = "go"
+ }
+
+ if !gover.IsValid(v) {
+ return nil, fmt.Errorf("invalid %s version %s", r.path, rev)
+ }
+
+ // If we're asking about "go" (not "toolchain"), pretend to have
+ // all earlier Go versions available without network access:
+ // we will provide those ourselves, at least in GOTOOLCHAIN=auto mode.
+ if r.path == "go" && gover.Compare(v, gover.Local()) <= 0 {
+ return &RevInfo{Version: prefix + v}, nil
+ }
+
+ // Similarly, if we're asking about *exactly* the current toolchain,
+ // we don't need to access the network to know that it exists.
+ if r.path == "toolchain" && v == gover.Local() {
+ return &RevInfo{Version: prefix + v}, nil
+ }
+
+ if gover.IsLang(v) {
+ // We can only use a language (development) version if the current toolchain
+ // implements that version, and the two checks above have ruled that out.
+ return nil, fmt.Errorf("go language version %s is not a toolchain version", rev)
+ }
+
+ // Check that the underlying toolchain exists.
+ // We always ask about linux-amd64 because that one
+ // has always existed and is likely to always exist in the future.
+ // This avoids different behavior validating go versions on different
+ // architectures. The eventual download uses the right GOOS-GOARCH.
+ info, err := r.repo.Stat(ctx, goToDL(v, "linux", "amd64"))
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the info using the canonicalized rev
+ // (toolchain 1.2 => toolchain go1.2).
+ return &RevInfo{Version: prefix + v, Time: info.Time}, nil
+}
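+
+// For example, Stat("go1.21.0") on the "go" repo and Stat("1.21.0") on the
+// "toolchain" repo both succeed (assuming the release exists); the returned
+// RevInfo.Version is canonicalized to "1.21.0" and "go1.21.0" respectively.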
+
+func (r *toolchainRepo) Latest(ctx context.Context) (*RevInfo, error) {
+ versions, err := r.Versions(ctx, "")
+ if err != nil {
+ return nil, err
+ }
+ var max string
+ for _, v := range versions.List {
+ if max == "" || gover.ModCompare(r.path, v, max) > 0 {
+ max = v
+ }
+ }
+ return r.Stat(ctx, max)
+}
+
+func (r *toolchainRepo) GoMod(ctx context.Context, version string) (data []byte, err error) {
+ return []byte("module " + r.path + "\n"), nil
+}
+
+func (r *toolchainRepo) Zip(ctx context.Context, dst io.Writer, version string) error {
+ return fmt.Errorf("invalid use of toolchainRepo: Zip")
+}
+
+func (r *toolchainRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error {
+ return fmt.Errorf("invalid use of toolchainRepo: CheckReuse")
+}
+
+// goToDL converts a Go version like "1.2" to a DL module version like "v0.0.1-go1.2.linux-amd64".
+func goToDL(v, goos, goarch string) string {
+ return "v0.0.1-go" + v + ".linux-amd64"
+}
+
+// dlToGo converts a DL module version like "v0.0.1-go1.2.linux-amd64" to a Go version like "1.2".
+func dlToGo(v string) (string, bool) {
+ // v0.0.1-go1.19.7.windows-amd64
+ // cut v0.0.1-
+ _, v, ok := strings.Cut(v, "-")
+ if !ok {
+ return "", false
+ }
+ // cut .windows-amd64
+ i := strings.LastIndex(v, ".")
+ if i < 0 || !strings.Contains(v[i+1:], "-") {
+ return "", false
+ }
+ return strings.TrimPrefix(v[:i], "go"), true
+}
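+
+// Round-trip examples (illustrative): goToDL("1.21.0", "linux", "amd64")
+// returns "v0.0.1-go1.21.0.linux-amd64" (the goos and goarch arguments are
+// currently ignored; linux-amd64 is hard-coded), and
+// dlToGo("v0.0.1-go1.19.7.windows-amd64") returns ("1.19.7", true).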
diff --git a/src/cmd/go/internal/modfetch/zip_sum_test/testdata/zip_sums.csv b/src/cmd/go/internal/modfetch/zip_sum_test/testdata/zip_sums.csv
new file mode 100644
index 0000000..0906975
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/zip_sum_test/testdata/zip_sums.csv
@@ -0,0 +1,2119 @@
+9fans.net/go,v0.0.2,h1:RYM6lWITV8oADrwLfdzxmt8ucfW6UtP9v1jg4qAbqts=,2c42aad9ed60e24046fbf5720f438884942897197cb790ce58cccdacedd9532d
+aahframe.work,v0.12.3,h1:hc3chv+f49yLYVT/aSEhgpoqd8bS0rDKEew1un8AkSo=,0c7e3fab03920a79ace8e0a9ddf4517225f595ce39f2124ec3d9353508da5dbd
+aahframework.org/essentials.v0,v0.8.0,h1:R/lcfOuhvZptG4IWX/CzAtpiVJFUjbCxLao6DfmeWBA=,d640fe6b83a31ffe09d12eea37de000be7ec8d7330c0a1d7413d6e31a675628d
+bazil.org/fuse,v0.0.0-20180421153158-65cc252bf669,h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=,fce7ed008451861ba30974e95468d716f5ff4fde14e9605dbc2db5fac935c71d
+bitbucket.org/abex/go-optional,v0.0.0-20150902044611-5304370459de,h1:iGmurCCO42qFsQ46DzROSsZJFf8/7AKFH/VpRGd2PBw=,02e1b23db09fb6945ba4ca57c0af8125b608fceae125fe625f6536b9e466e7e0
+bitbucket.org/abex/pathfinder,v0.0.0-20170507112819-bdce8b2efbc9,h1:M1jjfmrcOcmWy2/aABpm3k9h/M6NccmjgLtE5gVl+y8=,8469c0a656a895863d4714a658ee4a9634e78547142fa7239331e15d0143c679
+bitbucket.org/abex/sliceconv,v0.0.0-20151017061159-594a23261816,h1:7XPf5/Oar0LfWbnUY29doBDzSr6ToseiJRqkZtb0YOo=,e2433a32246bd5e2fb5d52bf6dc36188a75c4b59b98f76eb7607035b8525dd37
+bitbucket.org/abex/yumeko,v0.0.0-20190825151534-d98ca20ac08c,h1:ES4kIm83Q1RYr9uhhpQhqh/tqjt8H+Xz4xuSAv5Crcw=,1d352a11b3ed5850a425fde048cafa65b2c079c4e9647c52a339b28276065ba9
+bitbucket.org/liamstask/goose,v0.0.0-20150115234039-8488cc47d90c,h1:bkb2NMGo3/Du52wvYj9Whth5KZfMV6d3O0Vbr3nz/UE=,3d64cac7774bf87a9d050222b87387c112bcb6ef0ea0e2b3324a95330573a0c5
+bitbucket.org/ww/goautoneg,v0.0.0-20120707110453-75cd24fc2f2c,h1:t+Ra932MCC0eeyD/vigXqMbZTzgZjd4JOfBJWC6VSMI=,8ad2afdee1dc46b2c78e986bc2cce89cd0b8815b278a01879ef08d56585c247f
+bou.ke/monkey,v1.0.1,h1:zEMLInw9xvNakzUUPjfS4Ds6jYPqCFx3m7bRmG5NH2U=,20cb7da509322267189d32a125d7e0f782264508bc8e17306c80424514e797ce
+cloud.google.com/go,v0.47.0,h1:1JUtpcY9E7+eTospEwWS2QXP3DEn7poB3E2j0jN74mM=,7739fd24e36a536488115ef0ec9d739e608ee68448f8a469e84855c008b00ecd
+cloud.google.com/go/bigquery,v1.0.1,h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU=,738d1f726ce24f618ee7563f6c9419e6307f8814548f45ad8a227cffbb1448c0
+cloud.google.com/go/datastore,v1.0.0,h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=,41e93ec9526ae580da90300d7e421a6d39d79cb6118d62ad1d3c06422d8a71bf
+cloud.google.com/go/pubsub,v1.0.1,h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8=,8bca46a7c5f0dcd576d23fa9a5f107955316d6f0d8f306ee1d6faa7de99c3d29
+cloud.google.com/go/spanner,v1.0.0,h1:jLKThep5kbWLeBhLgtEfm/OPT08n1z7itVTR82WUBQg=,90579f16545e352c662ae9f62dd02dddf834fe10b33d1dbcfbf0a8aadfcd21f8
+cloud.google.com/go/storage,v1.0.0,h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4=,baec4756c573ede58f19eb7ae4acaebd7ac3f0c56413ecbbd216ad46a589a5da
+code.cloudfoundry.org/clock,v0.0.0-20180518195852-02e53af36e6c,h1:5eeuG0BHx1+DHeT3AP+ISKZ2ht1UjGhm581ljqYpVeQ=,61785787db7dadaf695506636dcb98c26bbdd0c847f589aa1fb4bbe9ef0e4455
+code.cloudfoundry.org/gofileutils,v0.0.0-20170111115228-4d0c80011a0f,h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk=,ec71ca818158525773e53568f71db38f63423822a426e1a18f7d34318e97eb3e
+code.cloudfoundry.org/lager,v2.0.0+incompatible,h1:WZwDKDB2PLd/oL+USK4b4aEjUymIej9My2nUQ9oWEwQ=,ce1da175885c2587ca091532a937108ed646e3bd6bd902640891f75ae70adb8b
+code.gitea.io/gitea,v1.9.5,h1:Q3PROlfPth1NlLGaeYcr6YVqyfAy7txnFpDKe1BXo7Q=,c7b63394004fb8f355d859f11a007ff17126eac092f90507a80392335351a6df
+code.gitea.io/sdk,v0.0.0-20191030144301-2a5a0e75e5cf,h1:uXUz7lXbs33QAYIu1rF0o8tNsa3DlDDSMYek/3CldIo=,6472a2b30b8108cae9b6a4914ce986d61e9ed37baade5ad35cb337270602b70a
+code.gitea.io/sdk/gitea,v0.0.0-20191030144301-2a5a0e75e5cf,h1:aAwV+RyellgKMACMu21Vyv/XgSHipLvbJsXDoXP1Yv0=,62570a621e1bf13724fb1f45d7ea95c48de02abb00468cf1da4b35820203d3b4
+contrib.go.opencensus.io/exporter/aws,v0.0.0-20181029163544-2befc13012d0,h1:YsbWYxDZkC7x2OxlsDEYvvEXZ3cBI3qBgUK5BqkZvRw=,3e351a39c3caf9ce263155f2d6e5a4e0cd84177661e1bf40f0d8fd06854831e9
+contrib.go.opencensus.io/exporter/ocagent,v0.6.0,h1:Z1n6UAyr0QwM284yUuh5Zd8JlvxUGAhFZcgMJkMPrGM=,e526ae16b06c682c3661738938f912a2e301a5e2d0ba875c7a0ec40fde825491
+contrib.go.opencensus.io/exporter/stackdriver,v0.12.8,h1:iXI5hr7pUwMx0IwMphpKz5Q3If/G5JiWFVZ5MPPxP9E=,db745d331f8a0455abbbcfeb4bb33dbc5cbb73a119b4e86f833cd497cfc72559
+contrib.go.opencensus.io/integrations/ocsql,v0.1.4,h1:kfg5Yyy1nYUrqzyfW5XX+dzMASky8IJXhtHe0KTYNS4=,0a4be97a579c5212bd83d21a177b279bc5b0c04350a63c56e8f8e611ffcba09c
+contrib.go.opencensus.io/resource,v0.1.1,h1:4r2CANuYhKGmYWP02+5E94rLRcS/YeD+KlxSrOsMxk0=,07ad3d36f96cb86ecba376353d02730855e117db3ffac5c2ab2c7cdf4eca25dc
+cuelang.org/go,v0.0.11,h1:t7s006dOWh6tgnwPifvO3l704eg8oPuIH7AR1hfTFYk=,69bdc6b3f1000308d399f166dd0d46576019f68cbf37765bd30821584b1296de
+dmitri.shuralyov.com/app/changes,v0.0.0-20180602232624-0a106ad413e3,h1:hJiie5Bf3QucGRa4ymsAUOxyhYwGEz1xrsVk0P8erlw=,a4d9079d5550094191f608c628ff2eb6999e0d0b6aea894ba59d063107777dfa
+dmitri.shuralyov.com/gpu/mtl,v0.0.0-20190408044501-666a987793e9,h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=,ca5330901fcda83d09553ac362576d196c531157bc9c502e76b237cca262b400
+dmitri.shuralyov.com/html/belt,v0.0.0-20180602232347-f7d459c86be0,h1:SPOUaucgtVls75mg+X7CXigS71EnsfVUK/2CgVrwqgw=,bd6b059cceaea8ab23e65b8118fab5d22f82149417fcc5fcf930ef9a52d582f1
+dmitri.shuralyov.com/service/change,v0.0.0-20181023043359-a85b471d5412,h1:GvWw74lx5noHocd+f6HBMXK6DuggBB1dhVkuGZbv7qM=,8a1ba9c7ba7eea08389c15315a23485d19fc7166d30b5b47a35fab949c4bf886
+dmitri.shuralyov.com/state,v0.0.0-20190403024436-2cf192113e66,h1:/74W9PTF+vJhgRsWpPWlZT77+phX7vXPcelX7JXFu5s=,eda200c06f669f06c56e1d53a1879b88dd7ee99eea1f56d329028fa773cfc2dd
+docker.io/go-docker,v1.0.0,h1:VdXS/aNYQxyA9wdLD5z8Q8Ro688/hG8HzKxYVEVbE6s=,b162036b1af6e1e5434e2e5a35faa7191014529259fbf2f4f1b3e7de6b816516
+fyne.io/fyne,v1.1.2,h1:a9YLFXxqN7lKNqTrk+ocw3/3ROrn6aFiofix8ATVOBc=,dc2d7fd4a4ee9852328fc79c52459a53d94d26f6ac3282ebcacc0c6cd6688d23
+git.apache.org/thrift.git,v0.13.0,h1:/3bz5WZ+sqYArk7MBBBbDufMxKKOA56/6JO6psDpUDY=,10412b7bc503ef2a7cc3bf58fe69e5a2d2594354ae3cc5ab2baa2b3ecc8c4f1d
+git.fd.io/govpp.git,v0.1.0,h1:fV5H9ghURFfmNAjk7Scb/aG3OGwevLayHfSdS8GsYjE=,0a023d4b5b36131a4fde2c3d19047bbd4f5c3a7cc07c1ccf40bfb75a501f51b3
+git.torproject.org/pluggable-transports/goptlib.git,v1.1.0,h1:LMQAA8pAho+QtYrrVNimJQiINNEwcwuuD99vezD/PAo=,f6769c4813dedf933071289bfd9381aa5eb3a012b3a32d1da02aa9bebd3a3b5b
+gitee.com/nggs/util,v0.0.0-20190830024003-3e49d2efc84b,h1:6KQpPEs326uPrICQy9x/PxmR8U0v/XsFzpt0k1nFKcY=,a062c99c2b560a36168fe51eab8f17f4fadf5d534238881628e83d8d61e51c2a
+github.com/1and1/oneandone-cloudserver-sdk-go,v1.0.1,h1:RMTyvS5bjvSWiUcfqfr/E2pxHEMrALvU+E12n6biymg=,7f068808fc0857d7de8c8f829cc380dce1c6611a3fc819daf4421e9bcb75a07c
+github.com/99designs/gqlgen,v0.10.1,h1:1BgB6XKGTHq7uH4G1/PYyKe2Kz7/vw3AlvMZlD3TEEY=,04b9e7d8a3df6543cd870325b1140ce9ac3f4bbfd8c90ebecec4f908dd420d08
+github.com/AndreasBriese/bbloom,v0.0.0-20190306092124-e2d15f34fcf9,h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=,6d7c1af06f8597fde1e86166f26416057392f1b0bdb84f2af555aa461282dd18
+github.com/AsynkronIT/goconsole,v0.0.0-20160504192649-bfa12eebf716,h1:Pk/Kzi5O0T4QxfqvbaUsh8UklbJ9BklZ/ClZBptX5WU=,5a2507b89bb4436881718d785a0ef383652aa99782508b7444cf20255082dab9
+github.com/Azure/azure-amqp-common-go,v1.1.4,h1:DmPXxmLZwi/71CgRTZIKR6yiKEW3eC42S4gSBhfG7y0=,4b800793ff4fefa86a427c445e3a4671b8d1dcd87a44075f6309cace6b0e01e2
+github.com/Azure/azure-amqp-common-go/v2,v2.1.0,h1:+QbFgmWCnPzdaRMfsI0Yb6GrRdBj5jVL8N3EXuEUcBQ=,9a91c6ac9656faea0ddfb0bb497c109451faaba09b85ce3237309f5982b095a3
+github.com/Azure/azure-pipeline-go,v0.2.2,h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=,3e4f90f6ec86d4875e8758f01947adece11c1b4317b448fe0197188765c83efc
+github.com/Azure/azure-sdk-for-go,v36.0.0+incompatible,h1:XIaBmA4pgKqQ7jInQPaNJQ4pOHrdJjw9gYXhbyiChaU=,71db17c798b784b96a45efdbabd18ad86d03e5f490701081a2f7bf19efa67c13
+github.com/Azure/azure-service-bus-go,v0.9.1,h1:G1qBLQvHCFDv9pcpgwgFkspzvnGknJRR0PYJ9ytY/JA=,81e42ed51354d71b53daf93b5b9f0f2c20fb7d2923f45ab88eea22419bfbc63a
+github.com/Azure/azure-storage-blob-go,v0.8.0,h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=,3b02b720c25bbb6cdaf77f45a29a21e374e087081dedfeac2700aed6147b4b35
+github.com/Azure/go-ansiterm,v0.0.0-20170929234023-d6e3b3328b78,h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=,b7a14d18891abef8b8a2e622f62a3cebeac32f9f1223dc9d62a6f8769861aaf2
+github.com/Azure/go-autorest,v13.3.0+incompatible,h1:8Ix0VdeOllBx9jEcZ2Wb1uqWUpE1awmJiaHztwaJCPk=,44fdf420bd96bb97df7910806efb25f2fae701078c39f5592f5c4131ffce41e6
+github.com/Azure/go-autorest/autorest,v0.9.2,h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4=,26df5fc6c03e8c66021dd272b04242f6c2ce2a5975f87799dfcf1b9597800dba
+github.com/Azure/go-autorest/autorest/adal,v0.8.0,h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I=,af59c00ec7e19cda9b960babaee7bfe27cf3d5f7415ac3afdb4cddc73d4b5743
+github.com/Azure/go-autorest/autorest/azure/auth,v0.4.0,h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530=,2fe394de946f42c2ea8ad07f1b282eac6bb56e372f5c2a35e49dfef0cf015ccb
+github.com/Azure/go-autorest/autorest/azure/cli,v0.3.0,h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY=,729d09b69a1912faa7c2395389bbf67ec22a420d42c15414823d43a380a2f09a
+github.com/Azure/go-autorest/autorest/date,v0.2.0,h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=,9ec7b48c865a185b72d3822ac2dff7e0163315a23911c87a479a3db616af9853
+github.com/Azure/go-autorest/autorest/mocks,v0.3.0,h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=,d5daf74cf531c37b27b39d3bf65b6930aee4b226b5fb4ea91a87be93aaf37f10
+github.com/Azure/go-autorest/autorest/to,v0.3.0,h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=,955ee6bde8af1314d22b51f265799147f42f7c705714b1cc1c51144441d5fa9c
+github.com/Azure/go-autorest/autorest/validation,v0.2.0,h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4=,10f40b0d943d4d1a0a1cbcb9fdb058b8a3a59a55ae26583566dfaa82883f86ea
+github.com/Azure/go-autorest/logger,v0.1.0,h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=,5e0804944db0707502c9d29defb54961c281a19311c9eb321a246ba054ac5256
+github.com/Azure/go-autorest/tracing,v0.5.0,h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=,4951b0f4a88a44b7ed4e4834654e4e01922ade35d97899b8596998184abbc652
+github.com/Azure/go-ntlmssp,v0.0.0-20180810175552-4a21cbd618b4,h1:pSm8mp0T2OH2CPmPDPtwHPr3VAQaOwVF/JbllOPP4xA=,64cd585589154ce18d7557ccfd8d26e2c2f5c4ecf13b17bdbfb913e17863d280
+github.com/BurntSushi/locker,v0.0.0-20171006230638-a6e239ea1c69,h1:+tu3HOoMXB7RXEINRVIpxJCT+KdYiI7LAEAUrOw3dIU=,836038343df9e9126b59d54201951191898bd875ec32d93c2018d759f358fcfb
+github.com/BurntSushi/toml,v0.3.1,h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=,815c6e594745f2d8842ff9a4b0569c6695e6cdfd5e07e5b3d98d06b72ca41e3c
+github.com/BurntSushi/xgb,v0.0.0-20160522181843-27f122750802,h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=,f52962c7fbeca81ea8a777d1f8b1f1d25803dc437fbb490f253344232884328e
+github.com/BurntSushi/xgbutil,v0.0.0-20190907113008-ad855c713046,h1:O/r2Sj+8QcMF7V5IcmiE2sMFV2q3J47BEirxbXJAdzA=,492ce6b11d7faaec4e15d1279d81e28d2e0e9844ad117f9de9411286a5b0e305
+github.com/ChrisTrenkamp/goxpath,v0.0.0-20170922090931-c385f95c6022,h1:y8Gs8CzNfDF5AZvjr+5UyGQvQEBL7pwo+v+wX6q9JI8=,8d79cd78a309a1b0f22790d354b9c4c929c64d03c7e572627ba430908fbb9d78
+github.com/CodisLabs/codis,v0.0.0-20181104082235-de1ad026e329,h1:KyRmPlfd2xewxb54vIBPNILFyCh2R3zNDwLZURDxT0E=,f61ae85688d10dddf0d62c30aaaa2701373fc11851dae4435de0212513c578c1
+github.com/DATA-DOG/go-sqlmock,v1.3.3,h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=,5dc430c2836af3bfc85f590366a6e284a251978e9397d0d54fa97db913263461
+github.com/DataDog/datadog-go,v3.2.0+incompatible,h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=,ede4a024d3c106b2f57ca04d7bfc7610e0c83f4d8a3bace2cf87b42fd5cf66cd
+github.com/DataDog/zstd,v1.4.0,h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo=,601f6fe1f4138d676946f4b27f7a714bbedea8c1785d10c1b74a03c68ad13070
+github.com/FiloSottile/b2,v0.0.0-20170207175032-b197f7a2c317,h1:1GuMjC4tjfwnWBdoTS7YqtQ3JIsEft6NRcdmXdzvYYc=,6ff3cfed3f510fc69b47f263936642950afc7892f557ed716dd8c5584f187411
+github.com/GeertJohan/go-sourcepath,v0.0.0-20150925135350-83e8b8723a9b,h1:D4H5C4VvURnduTQydyEhA6OWnNcZTLUlNX4YBw5yelY=,8bdcf0b6cc58f5ec1cef031b4052e6d699683bf1daf4a1a20f92f67d5be06b82
+github.com/GeertJohan/go.incremental,v1.0.0,h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg=,ce46b3b717f8d2927046bcfb99c6f490b1b547a681e6b23240ac2c2292a891e8
+github.com/GeertJohan/go.rice,v1.0.0,h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ=,2fc48b9422bf356c18ed3fe32ec52f6a8b87ac168f83d2eed249afaebcc3eeb8
+github.com/GoogleCloudPlatform/cloudsql-proxy,v0.0.0-20191017031552-46c5533ff5ba,h1:ZNYxMf89tMi+NydPAq7yGAxMfMNaMHgG+7WL1CEabjc=,25a1fe9f189e6a4d6e108f22abaec7dd36edc819ab5af1a3e448450b73026271
+github.com/GoogleCloudPlatform/docker-credential-gcr,v1.5.0,h1:wykTgKwhVr2t2qs+xI020s6W5dt614QqCHV+7W9dg64=,4acfcaddfe2aa53e1e643ea13ff3534a2fca1e043d008ab5bba5a0910db1f7c2
+github.com/IBM/go-sdk-core,v1.0.1,h1:vF9Lsoih6fxrAxzJp2fWqnO6Mg8x8O8fzwQAdFoUdok=,e063c8f79f94936a165355f61bdc6f2c404975472ad6be5e47bfdb87fb393c72
+github.com/Jeffail/gabs,v1.4.0,h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo=,cb193b1477109c19b0d2521fc61735619202e58ac4699605f72313d70884ca9e
+github.com/Joker/hpp,v0.0.0-20180418125244-6893e659854a,h1:PiDAizhfJbwZMISZ1Itx1ZTFeOFCml89Ofmz3V8rhoU=,4e99372a7576c587c107fb16d1ae0e8662111e2ca5e5127f7cd93bb01cd02076
+github.com/Joker/jade,v1.0.0,h1:lOCEPvTAtWfLpSZYMOv/g44MGQFAolbKh2khHHGu0Kc=,c4a7f39e7483446ff7b0d7e213a4cd813c783108d6d2e7c6e9a8e968789b18bc
+github.com/Knetic/govaluate,v3.0.1-0.20171022003610-9aa49832a739+incompatible,h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw=,d1d4ac5b4f5759726368f68b0d47f3c17c6d8689243ec66272311359d28a865b
+github.com/Kodeworks/golang-image-ico,v0.0.0-20141118225523-73f0f4cfade9,h1:1ltqoej5GtaWF8jaiA49HwsZD459jqm9YFz9ZtMFpQA=,1d677069e35c4a3e4f290e68c6e2391f6237aee9ce3f39448ed09a2ddab274b0
+github.com/Kubuxu/go-os-helper,v0.0.1,h1:EJiD2VUQyh5A9hWJLmc6iWg6yIcJ7jpBcwC8GMGXfDk=,90a16f95a8a238910ab0dc9004cb6e56242a10810bf1e296a263d2e385f002e0
+github.com/KyleBanks/depth,v1.2.1,h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=,8f3e9af2e038f561d9c34b631fddc7db39e39992a121fd087f0bf980026464d9
+github.com/MakeNowJust/heredoc,v1.0.0,h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=,062afe6e11aa3c3ac0035d08907b80d5e5b7563905603391ee774bda440abf16
+github.com/Masterminds/goutils,v1.1.0,h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=,b9520e8d2775ac1ff3fbf18c93dbc4b921133f957ae274f5b047965e9359d27d
+github.com/Masterminds/semver,v1.5.0,h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=,15f6b54a695c15ffb205d5719e5ed50fab9ba9a739e1b4bdf3a0a319f51a7202
+github.com/Masterminds/semver/v3,v3.0.1,h1:2kKm5lb7dKVrt5TYUiAavE6oFc1cFT0057UVGT+JqLk=,f1eef1a1b6489d895eb32326f3369bd1615812a4c5fbfe60b2b6cc774c6340f0
+github.com/Masterminds/sprig,v2.22.0+incompatible,h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=,1b4d772334cc94e5703291b5f0fe4ac4965ac265424b1060baf18ef5ff9d845c
+github.com/Masterminds/squirrel,v1.1.0,h1:baP1qLdoQCeTw3ifCdOq2dkYc6vGcmRdaociKLbEJXs=,cede1b0a054e000a5e6a8000cb02de7ab64ddca9e0f4153732274627adeed0ae
+github.com/Microsoft/go-winio,v0.4.14,h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=,7a86644691d3c86c77ae0b639fa27029706552f00cd51b445389a61694576f6b
+github.com/Microsoft/hcsshim,v0.8.6,h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=,900feaaec1c41d4e111a66bbde330b41fc78902c70c0af37d611505bf42e0632
+github.com/NYTimes/gziphandler,v1.1.1,h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=,2948d9f70e4388f13f4ed9400df41dca60841059f7dcc30cf909c82796cc705a
+github.com/NaverCloudPlatform/ncloud-sdk-go,v0.0.0-20180110055012-c2e73f942591,h1:/P9HCl71+Eh6vDbKNyRu+rpIIR70UCZWNOGexVV3e6k=,2e9eacfe3e6785beef75391bcebc14a6a082687c0f0582bc441c3d0106b8bf5c
+github.com/NebulousLabs/entropy-mnemonics,v0.0.0-20181203154559-bc7e13c5ccd8,h1:wPFCU8DwC4k5C2LfJc/rVp4cmTqzF3vyydxRR3b3HhQ=,6a65ca779cd216db7bf326ebbb5a26a87d85ff6a6ba832eec281c5c09a8294e3
+github.com/Netflix/go-expect,v0.0.0-20180615182759-c93bf25de8e8,h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw=,fbe7b2f58ecb0e1067a6670bbcf0718d54ec407aab81790cc9e58db9a6774775
+github.com/NickBall/go-aes-key-wrap,v0.0.0-20170929221519-1c3aa3e4dfc5,h1:5BIUS5hwyLM298mOf8e8TEgD3cCYqc86uaJdQCYZo/o=,fd78212ec77052b032b9fc308c028e8fc166de3d6ae4494f5eb3254930728a0b
+github.com/Nvveen/Gotty,v0.0.0-20120604004816-cd527374f1e5,h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=,362ac7b59d74231419471b65b60079d167785b97fd4aa0de71575088cd192b1e
+github.com/OneOfOne/xxhash,v1.2.5,h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=,7ab3c6a0e7c16c987a589e50a9a353e8877cfffea02bf9e04e370fd26a0c85e1
+github.com/OpenBazaar/jsonpb,v0.0.0-20171123000858-37d32ddf4eef,h1:+aqKrHtCJTRp8ziyrjfHbTF5puPQZfgRt65+iM7FD2w=,5f6ea1466b9d27f016c1bf2650669c788db623142cdc8a1794bc1784fc80fc4e
+github.com/OpenBazaar/wallet-interface,v0.0.0-20190807004547-aa8e214acd9b,h1:KjQH45msWRtDhb5JAbBW+eU4M/9xIm11rsOSgAaqDOs=,f7ac40d665241766533b1a49a726068d9dfea5e02c7fd426df81f9e390a7003e
+github.com/OpenDNS/vegadns2client,v0.0.0-20180418235048-a3fa4a771d87,h1:xPMsUicZ3iosVPSIP7bW5EcGUzjiiMl1OYTe14y/R24=,b73d6b37d519c7bf181e502b92962f1bf961bb0ca3a9ef7057c3d9a8a3c2f3cd
+github.com/OwnLocal/goes,v1.0.0,h1:81QQ3z6dvLhgXlkNpLkaYhk8jiKS7saFG01xy039KaU=,ebb6c7e2c12577c590d2d5546b7a4b4e6fa75c9a408ae5244b5ba2cf09dec1d6
+github.com/PuerkitoBio/goquery,v1.5.0,h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=,f0064ad35f21c2b9d1377b94f09ead56ec1862da3807e78c26b99c4b3a04f5e6
+github.com/PuerkitoBio/purell,v1.1.1,h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=,59e636760d7f2ab41c2f80c1784b1c73d381d44888d1999228dedd634ddcf5ed
+github.com/PuerkitoBio/urlesc,v0.0.0-20170810143723-de5bf2ad4578,h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=,1793124273dd94e7089e95716d40529bcf70b9e87162d60218f68dde4d6aeb9d
+github.com/Quasilyte/inltest,v0.7.0,h1:yHvFAaoXn+6iK2uKtb8mXB9KURz6SDPyszoyBAC0Xk4=,8fb4273cea3514742aec06ed58f20cea1214cc542799c70c331a80865aaf3988
+github.com/RangelReale/osin,v1.0.1,h1:JcqBe8ljQq9WQJPtioXGxBWyIcfuVMw0BX6yJ9E4HKw=,edbcc6208879bffa533369bbf417db41c1322193ca05d0deecf13075972c9d57
+github.com/RangelReale/osincli,v0.0.0-20160924135400-fababb0555f2,h1:x8Brv0YNEe6jY3V/hQglIG2nd8g5E2Zj5ubGKkPQctQ=,82fc65bad3da9fc26cc77b485e10ee117459e830547ce89592c41d92871e1129
+github.com/Rican7/retry,v0.1.0,h1:FqK94z34ly8Baa6K+G8Mmza9rYWTKOJk+yckIBB5qVk=,c0e956967f2f632ffc889eeae5b82e437f30e9be409870cdd1e7998def458843
+github.com/RoaringBitmap/roaring,v0.4.7,h1:eGUudvFzvF7Kxh7JjYvXfI1f7l22/2duFby7r5+d4oc=,515892d9b8e4350e5ac5b7a487da94d5d9ab9641071e002b778dd864b7a31c2a
+github.com/SAP/go-hdb,v0.14.1,h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE=,273de28a254c39e9f24293b864c1d664488e4a5d44d535755a5e5b68ae7eed8d
+github.com/Sereal/Sereal,v0.0.0-20190529075751-4d99287c2c28,h1:kmfzzWpCZIrVhxx4V/2oSGhGnhtX+/JijVIlPuKYfHg=,eebfe79e62b5a07f98a367d8a84bcf33ed69818c031e70c3ebc6e9fc34361466
+github.com/SermoDigital/jose,v0.0.0-20180104203859-803625baeddc,h1:LkkwnbY+S8WmwkWq1SVyRWMH9nYWO1P5XN3OD1tts/w=,1711f20ec5b1498c98e46b96e578f39b723557ab50183d644702d40f44a1a345
+github.com/Shopify/go-lua,v0.0.0-20181106184032-48449c60c0a9,h1:+2M9NEk3+xSg0+bWzt1kxsL6EtoEg7sgtT11CZjGwq8=,3e399584ff4a876314243c01be3cba5b98b46bba483d6996dd2d0e7f161b7ad8
+github.com/Shopify/goreferrer,v0.0.0-20181106222321-ec9c9a553398,h1:WDC6ySpJzbxGWFh4aMxFFC28wwGp5pEuoTtvA4q/qQ4=,e47cdf750e6aa39707b90e62f4f87e97abb8d64b2525a16c021c82efb24f9969
+github.com/Shopify/sarama,v1.24.1,h1:svn9vfN3R1Hz21WR2Gj0VW9ehaDGkiOS+VqlIcZOkMI=,c5e06f9c835846eeb5cbbbc540ab949f9775ff37c08cab503dd820b858b1f2e7
+github.com/Shopify/toxiproxy,v2.1.4+incompatible,h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=,9427e70698ee6a906904dfa0652624f640619acef40652a1e5490e13b31e7f61
+github.com/Sirupsen/logrus,v1.0.6,h1:HCAGQRk48dRVPA5Y+Yh0qdCSTzPOyU1tBJ7Q9YzotII=,dc69c77019152ace477a7f5c0cd97fd25d6ab866e01e1dd06f391722f4f9fba9
+github.com/StackExchange/wmi,v0.0.0-20190523213315-cbe66965904d,h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=,68f499ad4c3f45fc6c286fd2a5966e8e15c0f3abc1f96fbf4a979245df936e16
+github.com/Stebalien/go-bitfield,v0.0.1,h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo=,9b17a2749922c810f3598606b87b5f2ba0f3c6abc70966911a8c32f0533ee827
+github.com/Telmate/proxmox-api-go,v0.0.0-20190815172943-ef9222844e60,h1:iEmbIRk4brAP3wevhCr5MGAqxHUbbIDHvE+6D1/7pRA=,55dd16e2cd8e6c1464c6456007cdc5d8676b8b096e90230312daa8c84b57b34d
+github.com/TheThingsNetwork/api,v0.0.0-20190522113053-d844e8c040fc,h1:hDk+SAT2tV584ye1hqMN5+NHL6RHJDIbe97cNot6/WQ=,7931f7c4699cd1019c950a68361e3b3fec8bbb8e9204c65c08b50bd588ac506a
+github.com/TheThingsNetwork/go-account-lib,v2.0.3+incompatible,h1:pnDIalIqac/VlXenPr+L1XEEf3gIq1eIoZ78S5AP1/s=,e62dcb784cbd28bcec55cf332f7dc06779c75c7df66299f4ce542cd6852358b4
+github.com/TheThingsNetwork/go-cayenne-lib,v1.0.0,h1:be7h6E/69+qaYs1iwQ2xjGjSFPXzvU3q6AWBCWayG2Y=,17091b77ac39b8e73ca6ac3f39f34909bc6a3770098ff2dd534b59a10f6e66ad
+github.com/TheThingsNetwork/go-utils,v0.0.0-20190813113035-8715cf82e887,h1:DF/1gkOPk3jtwWa9dFd5tUtwb6z3bLw9tZ/UALbS5Ck=,be4c7c2955630b63300f21773efcdc991d5ff201a53b01d92b2f20fede77065c
+github.com/TheThingsNetwork/ttn,v2.10.1+incompatible,h1:LQw+g+kinajii5DHJ6I2o82ObaU/Ws+YYgdLkF5eF54=,4a803fe23636f9c99926e6b5a2b956fb42e84ca38c98458a12bd7fe1c22f7439
+github.com/TheThingsNetwork/ttn/api,v0.0.0-20190516081709-034d40b328bd,h1:vCjDYImJDdW+39EXwij00yzDi1pd3TmP6XtCteDJBd0=,9cda2f899f15f57e8f649bbffb955a8153d6c25a13a4e969df8898bb61559a44
+github.com/TheThingsNetwork/ttn/core/types,v0.0.0-20191015060859-00a6f7874bb9,h1:tlWwCxI3/Zu4vJ4dLWb2wMOYSkeMBvLAxQGwJDCFXi8=,7b3895805d6ac341e5df44c2c8154b374b4bed4c8f54e248ea967abfc37186e7
+github.com/TheThingsNetwork/ttn/utils/errors,v0.0.0-20190516081709-034d40b328bd,h1:ITXOJpmUR4Jhp3Xb/xNUIJH4WR0h2/NsxZkSDzFIFiU=,d64decf456c10fdbbb887212ea63749b495264c40bb5ac047b9f0e5ccd7e540b
+github.com/TheThingsNetwork/ttn/utils/random,v0.0.0-20190516081709-034d40b328bd,h1:zKTRK1r3K55XxHuUGxnqYg9aiPDduYeilHUEHua+F+Y=,c504030254919a902b3957267b7ce1870f909cbdd65f0f927819f60710e41d9b
+github.com/TheThingsNetwork/ttn/utils/security,v0.0.0-20190516081709-034d40b328bd,h1:og10Wq5S/QC+f4ziON4vrxlYKv9gfEKxG8v/MDs00xw=,e6f013adee3a7a212a6f892db59c8efaf715fe49413e6dbca22229fa04f0d006
+github.com/Unknwon/cae,v0.0.0-20160715032808-c6aac99ea2ca,h1:xU8R31tsvj6TesCBog973+UgI3TXjh/LqN5clki6hcc=,15a1394a603423c5bcd4659275be09d7696774990d5f127500f4156c1a78eb85
+github.com/Unknwon/com,v0.0.0-20190321035513-0fed4efef755,h1:1B7wb36fHLSwZfHg6ngZhhtIEHQjiC5H4p7qQGBEffg=,2cfba36da8f59c6dd8c7a20af59e5ccf9558f42bde7e0918a64a9b68dafcf271
+github.com/Unknwon/i18n,v0.0.0-20171114194641-b64d33658966,h1:Mp8GNJ/tdTZIEdLdZfykEJaL3mTyEYrSzYNcdoQKpJk=,a5ce1436582e797d60e967d853fd22458fc7edeb31bd390d6ace979133bedb78
+github.com/Unknwon/paginater,v0.0.0-20170405233947-45e5d631308e,h1:HnbTtNLKnRmwn85vBmyl7nNJCXUw4rh6X3UeIX5nvko=,60e3af4ba9b482892127f829ec7cc837977ca9e9e634be855d599cc08230e606
+github.com/VividCortex/ewma,v1.1.1,h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=,eebee7c0f20e96abbda1611ed2a3d26b4c2c10393caa6a2dfd1605763a5c1a12
+github.com/VividCortex/gohistogram,v1.0.0,h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=,16ebeceeb7e4066f90edbfb90282cd90d4dad0f71339199551de3fbdc7e8c545
+github.com/Workiva/go-datastructures,v1.0.50,h1:slDmfW6KCHcC7U+LP3DDBbm4fqTwZGn1beOFPfGaLvo=,1ac8c9334b63ee2b089b7ecc3b6c8d45793cc4ef4c460f6ebbfd6ecea3ee83bc
+github.com/a8m/mark,v0.1.1-0.20170507133748-44f2db618845,h1:hIjQrEARcc9LcH8igte3JBpWBZ7+SpinU70dOjU/afo=,048bfeb7427ff5622874d874a52d7215a2cea99f9741c031e9963348785103c2
+github.com/abbot/go-http-auth,v0.4.0,h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=,8204bca24734f55f179dd1c0b820ae5be83151268693a147086f33cd2d4d473c
+github.com/abdullin/seq,v0.0.0-20160510034733-d5467c17e7af,h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=,bcbe9a2c1e3ac0b981ee436cd1bbb2da8220527511b3cea6517a28a881636814
+github.com/abronan/valkeyrie,v0.0.0-20191010124425-1ae9442de16e,h1:4SrbWyef51DHDc957/8Ms/fDM4D+3bkbXqg6OTnIEAo=,553dce6f5ff57f7ccc5ed6a94e6bf29b38b8773236f3b85bb4025dc0d10d2a92
+github.com/aead/siphash,v1.0.1,h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=,25da04ff418e0b2871b1193a3478977b4aa66c20737b9ca70a5040b876b6d3d9
+github.com/aerogo/http,v1.0.12,h1:1o5QW6TQLNuutQLuPCX0Tn7g/sSH3JMHv79UGIBpvkw=,a58d344ff2010737d2418050f4188339087cfb369c903bd31e20ccba388304a1
+github.com/afex/hystrix-go,v0.0.0-20180502004556-fa1af6a1f4f5,h1:rFw4nCn9iMW+Vajsk51NtYIcwSTkXr+JGrMd36kTDJw=,c0e0ea63b57e95784eeeb18ab8988ac2c3d3a17dc729d557c963f391f372301c
+github.com/agext/levenshtein,v1.2.2,h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE=,07caaae8fcdb7c83195a0afffc03c9df76275b1e9a7b69dabfe0d2f47729bc7c
+github.com/agl/ed25519,v0.0.0-20170116200512-5312a6153412,h1:w1UutsfOrms1J05zt7ISrnJIXKzwaspym5BTKGx93EI=,98c1510ac20b7d61bf4e2c76e7184fcbd0a8b78b0fc667c2b772777912963d3f
+github.com/agnivade/levenshtein,v1.0.1,h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ=,cb0e7f070ba2b6a10e1c600d71f06508404801ff45046853001b83be6ebedac3
+github.com/ajg/form,v1.5.1,h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=,b063b07639670ce9b6a0065b4dc35ef9e4cebc0c601be27f5494a3e6a87eb78b
+github.com/ajstarks/svgo,v0.0.0-20190826172357-de52242f3d65,h1:kZegOsPGxfV9mM8WzfllNZOx3MvM5zItmhQlvITKVvA=,1459a44f9162f463b59eacf58e4bb8873e612c5b3df45fc6e34074310d2269ae
+github.com/akamai/AkamaiOPEN-edgegrid-golang,v0.9.0,h1:rXPPPxDA4GCPN0YWwyVHMzcxVpVg8gai2uGhJ3VqOSs=,91c3a4743d959b3bb2bb7359790df4688021830e482d393ea6d4f3a27aebd63d
+github.com/akavel/rsrc,v0.8.0,h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw=,13954a09edc3a680d633c5ea7b4be902df3a70ca1720b349faadca44dc0c7ecc
+github.com/akyoto/assert,v0.2.3,h1:ftENRGDEK5AKuKmZb9LtbDIHeE8p8cIYI4M92CbA9nE=,f0a31d5859109c37568b8702fcf92cd3a49ec4892dace74d113df5fd49491975
+github.com/akyoto/color,v1.8.11,h1:uCQi+uRyngo1cJhJSv28PQmduGFiOAGNF6F9MFoRDek=,0fa16c51743ca03e108fde20eabb070d17d25111cb287e15f8567268d439098a
+github.com/akyoto/colorable,v0.1.7,h1:ge91E25hiOiT/Zu47ij/rTO3cks7wMlTrcQspua1hFM=,07c2dd4d994d9ff1dad97ad2e2650ff60d90f50ecd52380f357e562efda99613
+github.com/akyoto/stringutils,v0.2.6,h1:IP+7jtH8uofpan8MYlV/WMNaLDGBRbzgiTKYnxcAwkw=,802a3b54f91b930c1e8f2376bebf783b16894da626fc7c8064268b07ab567f7c
+github.com/akyoto/tty,v0.1.3,h1:AdnLETzgooimWLvoBQLn5bT1j+i0yiB4E596BfFKnmA=,749381ec9dce8bc96bec66c5dfb0874db917a008f9685d9c65c15da43ede964c
+github.com/alcortesm/tgz,v0.0.0-20161220082320-9c5fe88206d7,h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=,ccedffb2c46724216b787fb1a79ae33fb0dfdd672c669db000c4ed5a68b08014
+github.com/alecthomas/assert,v0.0.0-20170929043011-405dbfeb8e38,h1:smF2tmSOzy2Mm+0dGI2AIUHY+w0BUc+4tn40djz7+6U=,873d257170b1363142cbf5e16b49c6a21cccb3e4aaceb9d370c3b78b051a5663
+github.com/alecthomas/chroma,v0.6.8,h1:TW4JJaIdbAbMyUtGEd6BukFlOKYvVQz3vVhLBEUNwMU=,ebc5202e6a0ededc5a2c7396b01b76c050331bead9d047f31fe648cb63e68aa3
+github.com/alecthomas/colour,v0.0.0-20160524082231-60882d9e2721,h1:JHZL0hZKJ1VENNfmXvHbgYlbUOvpzYzvy2aZU5gXVeo=,334101c562d2e74338f6baab1de04f3bbff89021d24f4206c551ef47b96a2bfe
+github.com/alecthomas/kingpin,v2.2.6+incompatible,h1:5svnBTFgJjZvGKyYBtMB0+m5wvrbUHiqye8wRJMlnYI=,a88daee47262ffeca1f6e348399c16c9be160f3c5e972c0b6c9dc275d85bcdc6
+github.com/alecthomas/kong,v0.2.1-0.20190708041108-0548c6b1afae,h1:C4Q9m+oXOxcSWwYk9XzzafY2xAVAaeubZbUHJkw3PlY=,4292d9b6903d67f060d3bd57ffca0a4ebca359824ce2d32a512ac1b963fa3dc0
+github.com/alecthomas/kong-hcl,v0.1.8-0.20190615233001-b21fea9723c8,h1:atLL+K8Hg0e8863K2X+k7qu+xz3M2a/mWFIACAPf55M=,21a34d6ee62e3419601d0e083b8829001a9833899dd3c2d27a82c794426fd0ee
+github.com/alecthomas/log4go,v0.0.0-20180109082532-d146e6b86faa,h1:0zdYOLyuQ3TWIgWNgEH+LnmZNMmkO1ze3wriQt093Mk=,04bdaa7d57a681072316927175c21ca7c9e7a19bd7fee2102b5f40e5b01a7559
+github.com/alecthomas/repr,v0.0.0-20181024024818-d37bc2a10ba1,h1:GDQdwm/gAcJcLAKQQZGOJ4knlw+7rfEQQcmwTbt4p5E=,c01a833ec56f68113f6cd7ed82b7da9bfaec641a10e929e0e3e5e5dadb1a85ad
+github.com/alecthomas/template,v0.0.0-20190718012654-fb15b899a751,h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=,25e3be7192932d130d0af31ce5bcddae887647ba4afcfb32009c3b9b79dbbdb3
+github.com/alecthomas/units,v0.0.0-20190924025748-f65c72e2690d,h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=,e6b0ccb38bfba85d90092d1c57671d5f7996757bd71f6f1970c6ae2f9dae3f6e
+github.com/alicebob/gopher-json,v0.0.0-20180125190556-5a6b3ba71ee6,h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U=,2374b534198621157afb9466a52d361b6eed33dcf9bb0674019515e64b16129e
+github.com/alicebob/miniredis,v0.0.0-20180911162847-3657542c8629,h1:gLoh8jzwIxdisBnHiWRIuReqtH9cpslSE2564UWXun0=,14b5e988ec6d8357a25ba19a7adbdb34920f5f91401b2b26eb25f04fed9893b0
+github.com/aliyun/alibaba-cloud-sdk-go,v0.0.0-20191031111935-12810c79403d,h1:CmGtZPPsr0C31ZBrzdP+D2oczTbyEBbO3bYg6z5EIDY=,4f0f25f45d954ab970b24783e31b716b619b128081acc9ed7b00727cd7c2d536
+github.com/aliyun/aliyun-oss-go-sdk,v2.0.3+incompatible,h1:724q2AmQ3m1mrdD9kYqK5+1+Zr77vS21jdQ9iF9t4b8=,47ede6a440ad4bb1a1c33d71bd12f76f44aa2487f676b8770152130be3021657
+github.com/aliyun/aliyun-tablestore-go-sdk,v4.1.2+incompatible,h1:ABQ7FF+IxSFHDMOTtjCfmMDMHiCq6EsAoCV/9sFinaM=,82c8ced9cd377462c6ea5070258f97c77ffddd66621e8960b08184eb58416846
+github.com/allegro/bigcache,v1.2.1,h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=,9250edab8c7851cfa0c6c173e721cf70831e90742a7485c2eba1d6e2cc8c71eb
+github.com/anacrolix/envpprof,v1.1.0,h1:hz8QWMN1fA01YNQsUtVvl9hBXQWWMxSnHHoOK9IdrNY=,97f2340bcb169956bad97c59fdc17bbd2eb7c0acefe4e2ae327c7d6bd5a5f6cf
+github.com/anacrolix/log,v0.3.0,h1:Btxh7GkT4JYWvWJ1uKOwgobf+7q/1eFQaDdCUXCtssw=,e8bc14381d8746426c7e272228780047e0594d695d02e188269f1e86ef1644d4
+github.com/anacrolix/missinggo,v1.2.1,h1:0IE3TqX5y5D0IxeMwTyIgqdDew4QrzcXaaEnJQyjHvw=,2fb8cba1f6eaf69989ca5c522c2d4afd6c1071ad9459f940b6058dbfc2f3b285
+github.com/anacrolix/missinggo/perf,v1.0.0,h1:7ZOGYziGEBytW49+KmYGTaNfnwUqP1HBsy6BqESAJVw=,f4271e6359cf3dd5cba81bcf1436e8abc5d0c96c11820b881544708caa131713
+github.com/anacrolix/sync,v0.0.0-20180808010631-44578de4e778,h1:XpCDEixzXOB8yaTW/4YBzKrJdMcFI0DzpPTYNv75wzk=,bef95f54e1b17e4e7666cbf552e541e670f29fc3fd354aba0ebeee73f744ea24
+github.com/anacrolix/tagflag,v1.0.1,h1:Yd3d5DaKbRA70k7CoFuBsbmfSWIsvtZ9t80xW/x4vQY=,8fc0a5b5607cde223bacd9e4fa3b26f6166c09a09bfabe2c2c803e45e17971fa
+github.com/anacrolix/utp,v0.0.0-20180219060659-9e0e1d1d0572,h1:kpt6TQTVi6gognY+svubHfxxpq0DLU9AfTQyZVc3UOc=,35c47428844d10f077225195f9a6c7587c671b7fc70bbaf59ef74cd6d8834e32
+github.com/andreyvit/diff,v0.0.0-20170406064948-c7f18ee00883,h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=,d39614ff930006640ec15865bca0bb6bf8e1ed145bccf30bab08b88c1d90f670
+github.com/andybalholm/cascadia,v1.0.0,h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=,7fd82e560ca1a453974a64c9bf6514b17322d1b7392bad730a5006d929996906
+github.com/andygrunwald/go-jira,v1.5.0,h1:/1CyYLNdwus7TvB/DHyD3udb52K12aYL9m7WaGAO9m4=,3ee973941f400bf95005cada54e09e319cb4943cd6c8d66480243d3b40895821
+github.com/anmitsu/go-shlex,v0.0.0-20161002113705-648efa622239,h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=,3b8376ff631f30d47e0348a8f847050b97c3db89483f45d1cd8f11d23c7c56a2
+github.com/antchfx/htmlquery,v1.0.0,h1:O5IXz8fZF3B3MW+B33MZWbTHBlYmcfw0BAxgErHuaMA=,81c86507bf2a226d5a3d20db547503d490f1e3b77035f267056e80cd73e240e2
+github.com/antchfx/xmlquery,v1.0.0,h1:YuEPqexGG2opZKNc9JU3Zw6zFXwC47wNcy6/F8oKsrM=,969fc21438fe076aee032574578158ac7e030979153dcf7b5ff5c133cbfa4d86
+github.com/antchfx/xpath,v0.0.0-20190129040759-c8489ed3251e,h1:ptBAamGVd6CfRsUtyHD+goy2JGhv1QC32v3gqM8mYAM=,22cb767dc0cafecba39e1b0322cc8aebbc6fd912e4b0fcda8c2c1dde2d80c4d2
+github.com/antchfx/xquery,v0.0.0-20180515051857-ad5b8c7a47b0,h1:JaCC8jz0zdMLk2m+qCCVLLLM/PL93p84w4pK3aJWj60=,9ddc9d830f2d6c7a22604035f0c621228ffa4ed6ff1f1d34655ee477c203c899
+github.com/antihax/optional,v0.0.0-20180407024304-ca021399b1a6,h1:uZuxRZCz65cG1o6K/xUqImNcYKtmk9ylqaH0itMSvzA=,7b0a2bf3eb029d9abe761db1874a501b60f267e675d72ae8c4b8c6f406ddcfd0
+github.com/apache/arrow/go/arrow,v0.0.0-20191024131854-af6fa24be0db,h1:nxAtV4VajJDhKysp2kdcJZsq8Ss1xSA0vZTkVHHJd0E=,4bd8443c24bc06843c0270df4f08f98b3eee6116604ff16d14dce34b242783cf
+github.com/apache/thrift,v0.13.0,h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=,d75265e363da943c24e7ed69104bf018429024a50968421e48a6ab3e624733c2
+github.com/apex/log,v1.1.1,h1:BwhRZ0qbjYtTob0I+2M+smavV0kOC8XgcnGZcyL9liA=,5bb0f19e5c68b104ed32a311ea9c6f6e2a5e8fa597b342695e069468e2248d83
+github.com/aphistic/golf,v0.0.0-20180712155816-02c07f170c5a,h1:2KLQMJ8msqoPHIPDufkxVcoTtcmE5+1sL9950m4R9Pk=,a0ca77a50520037607c3a2a798b66aee1d5df63f4800b4236f51be2f1e3c1d70
+github.com/aphistic/gomol,v0.0.0-20190314031446-1546845ba714,h1:ml3df+ybkktxzxTLInLXEDqfoFQUMC8kQtdfv8iwI+M=,c2fd1a9db2fb7a5ca7ba9132fbddb5d8efd64babcff7c0f66d41d3cf97b8caab
+github.com/aphistic/gomol-console,v0.0.0-20180111152223-9fa1742697a8,h1:tzgowv45TOFALtZLJ9y3k+krzOh2J8IkCvJ8T//6VAU=,26a1b99db9a92a7f5d088e529c43db6de957a3a1650c27d7a872495f73a52880
+github.com/aphistic/gomol-gelf,v0.0.0-20170516042314-573e82a82082,h1:PgPqI/JnStmzwTof+PtT53Pz53dlrz2BmF7cn5CAwQM=,e44d4de8d62391c1e0e70c3b27f4c341bb0398083f33b99be46e29144fad3c50
+github.com/aphistic/gomol-json,v1.1.0,h1:XJWwW8PxYOHf0f0FquuBWcgvZBvQ89nPxZsqQ9pfpro=,0e1ab66a46afe81c4662f8a49ca38042f0c6bc8645895336399adef1eedaff59
+github.com/aphistic/sweet,v0.2.0,h1:I4z+fAUqvKfvZV/CHi5dV0QuwbmIvYYFDjG0Ss5QpAs=,02bebcef905b02cf7195137d9b20920367bb5f8c635a6e5a112b787596414f51
+github.com/aphistic/sweet-junit,v0.0.0-20190314030539-8d7e248096c2,h1:qDCG/a4+mCcRqj+QHTc1RNncar6rpg0oGz9ynH4IRME=,6a3ab195b97bd1981f2ae87a172bc24ecfb44ffbd8d28428f97bfa46e66f559b
+github.com/apparentlymart/go-cidr,v1.0.1,h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U=,5af128e1ecdf5f2203fda104a653f13fb2e46acc3f68b2d7634a760a8f556ea0
+github.com/apparentlymart/go-dump,v0.0.0-20190214190832-042adf3cf4a0,h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I=,3506757fd2dcbcf8e77aa962c923d9ceaf918538bf9b117f98aa562bc83c77ef
+github.com/apparentlymart/go-textseg,v1.0.0,h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=,2572a77af285125f1980e9b751e5a7c3ae59b73c4fc97e7c2407681609991142
+github.com/appc/spec,v0.8.11,h1:BFwMCTHSDwanDlAA3ONbsLllTw4pCW85kVm290dNrV4=,4a17d699b3e2c3cc8b301de260a45c8fc31054fbb5c689e567f24e3e63bf8f79
+github.com/apple/foundationdb/bindings/go,v0.0.0-20190411004307-cd5c9d91fad2,h1:VoHKYIXEQU5LWoambPBOvYxyLqZYHuj+rj5DVnMUc3k=,a2dc6bd23d9066d3acf174c9b33378c08ae4a95cfd017abc70a16388e74ea2c3
+github.com/approvals/go-approval-tests,v0.0.0-20160714161514-ad96e53bea43,h1:ePCAQPf5tUc5IMcUvu6euhSGna7jzs7eiXtJXHig6Zc=,e3b51ab88c4f3b1c4aea2fadd0b3d3e2ec178d37232066b9fe3b0177e1c6e9aa
+github.com/aquasecurity/fanal,v0.0.0-20191031102512-c1c079886da6,h1:B84l/SNXzzcqwgIORAmEv7gs4K4l+DJkdliI6ib/zNw=,7247188e1746360364e7ff77aa0c531df69074c49b23e7f67d65134ca577b0e0
+github.com/aquasecurity/go-dep-parser,v0.0.0-20190819075924-ea223f0ef24b,h1:55Ulc/gvfWm4ylhVaR7MxOwujRjA6et7KhmUbSgUFf4=,73ce01b48b9aa56349d928a27bdd4b77c149541385e645951b2e25f1d6ab5d26
+github.com/araddon/dateparse,v0.0.0-20190622164848-0fb0a474d195,h1:c4mLfegoDw6OhSJXTd2jUEQgZUQuJWtocudb97Qn9EM=,3b88bff198316e2795d11340862ef873387cd7dba97eeb17f106f41deb00d602
+github.com/araddon/gou,v0.0.0-20190110011759-c797efecbb61,h1:Xz25cuW4REGC5W5UtpMU3QItMIImag615HiQcRbxqKQ=,936e20f4c9eaa45f54586ab86bce911f0b1f935d0410dd683dc647797ed7225d
+github.com/aristanetworks/fsnotify,v1.4.2,h1:it2ydpY6k0aXB7qjb4vGhOYOL6YDC/sr8vhqwokFQwQ=,9c0dd5427e82f044a9e5808a3436b43472ff032f23ac853829e5c166171044a3
+github.com/aristanetworks/glog,v0.0.0-20180419172825-c15b03b3054f,h1:Gj+4e4j6g8zOhckHfGbZnpa0k8yDrc0XRmiyQj2jzlU=,496dd08756b324a7925b670a907328433f1477763a229b76a4eef8ed254c9683
+github.com/aristanetworks/goarista,v0.0.0-20191023202215-f096da5361bb,h1:gXDS2cX8AS8KbnP32J6XMSjzC1FhHEdHfUUCy018VrA=,2c348fcdf827ac0d1238fb556f66ad1f13f05d8c5a6d2b3efe5f94be40af5021
+github.com/aristanetworks/splunk-hec-go,v0.3.3,h1:O7zlcm4ve7JvqTyEK3vSBh1LngLezraqcxv8Ya6tQFY=,545adec43ebdf1c9cdc65cd3d738d131f1b02706d25876de1fda65c4989195af
+github.com/armon/circbuf,v0.0.0-20190214190532-5111143e8da2,h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs=,c8b7ba977844b5378a2413c123c3e55d0885fb67f64ad6cf06575a791a36b827
+github.com/armon/consul-api,v0.0.0-20180202201655-eb2c6b5be1b6,h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=,091b79667f16ae245785956c490fe05ee26970a89f8ecdbe858ae3510d725088
+github.com/armon/go-metrics,v0.0.0-20190430140413-ec5e00d3c878,h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=,3d48bc38dda0cff4dbf0b56b9b6e2e8fc3e6be2282f2a612a96a6702cc8a9fc5
+github.com/armon/go-proxyproto,v0.0.0-20190211145416-68259f75880e,h1:h0gP0hBU6DsA5IQduhLWGOEfIUKzJS5hhXQBSgHuF/g=,1004212be9a343c99e1849425845af1ec5e3e35cc4917483721cb03620982d58
+github.com/armon/go-radix,v1.0.0,h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=,df93c816505baf12c3efe61328dc6f8fa42438f68f80b0b3725cae957d021c90
+github.com/armon/go-socks5,v0.0.0-20160902184237-e75332964ef5,h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=,f473e6dce826a0552639833cf72cfaa8bc7141daa7b537622d7f78eacfd9dfb3
+github.com/asaskevich/govalidator,v0.0.0-20190424111038-f61b66f89f4a,h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=,b5dfb936e0256459bc633c8acf433f4a01a468868db9bd6e390a67f4678185f5
+github.com/asdine/storm,v2.1.2+incompatible,h1:dczuIkyqwY2LrtXPz8ixMrU/OFgZp71kbKTHGrXYt/Q=,ffea8b759006a871732554e1e0a42753fb9a5dd9884eb150e1b42806d51cd5fd
+github.com/assetsadapterstore/tivalue-adapter,v1.0.3,h1:zcFcT1x1rWDYQEaA3wI7Hr7F25Cspy+O1cr+vUMjrks=,c42adddd544495ef0ebe1d8730bad20c4251c7646e1782542782bc946c839eca
+github.com/astaxie/beego,v1.12.0,h1:MRhVoeeye5N+Flul5PoVfD9CslfdoH+xqC/xvSQ5u2Y=,1f14eb5d216170c027754bea1129bbcdafc06a035650e635375c61a17be6f316
+github.com/asticode/go-astilog,v1.0.0,h1:l9tek0K7KoQCmhZ7cvBTtVu0NsKpS9hB6jBLtQyxWYk=,49fe2b286073848e780a9326f7d37771372e61827ff07b80db89667e6ac4d1d4
+github.com/aws/amazon-ssm-agent,v0.0.0-20191011205301-04bb0617297b,h1:xv695CeRjoBS0baQSS5UfQkeo63GiMjmDwiAeY09bSw=,08ede8d7aa20210a4738e0ea033f1bf8fd1ce13bba6c375431c8c1e7a8565c37
+github.com/aws/aws-lambda-go,v1.13.2,h1:8lYuRVn6rESoUNZXdbCmtGB4bBk4vcVYojiHjE4mMrM=,05b1633366a8df9e313df4409d003a277ff7ae46f1079b3ad7f6b48c0dabfb75
+github.com/aws/aws-sdk-go,v1.25.25,h1:j3HLOqcDWjNox1DyvJRs+kVQF42Ghtv6oL6cVBfXS3U=,c34d718d97487766a9a8ac818d37dd135d75d747a8d191a616b75425c32456f2
+github.com/aybabtme/rgbterm,v0.0.0-20170906152045-cc83f3b3ce59,h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo=,a4456a42277e0c987de99e9c4ba141db064107ce737ad1dd2e050aeb1149b67e
+github.com/aymerick/raymond,v2.0.2+incompatible,h1:VEp3GpgdAnv9B2GFyTvqgcKvY+mfKMjPOA3SbKLtnU0=,df6e22632cb314b76ab10dd6a1c2c66a79da44200bfec9f5e4f321100d90dc64
+github.com/baiyubin/aliyun-sts-go-sdk,v0.0.0-20180326062324-cfa1a18b161f,h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=,0965da027355d9b385358331ec359cf729ec4571ec4ca86339da925364c13559
+github.com/bartekn/go-bip39,v0.0.0-20171116152956-a05967ea095d,h1:1aAija9gr0Hyv4KfQcRcwlmFIrhkDmIj2dz5bkg/s/8=,6a278508499838d4c57c1dbdafcfc9f9f909e7358c518a8699728053b695d0c5
+github.com/bazelbuild/buildtools,v0.0.0-20191024175656-9f3978593d3e,h1:QdfIPgk+fJY8AcfjVk2/tdc2dNtl6d+7x8dhVBP72Ik=,f768dd2a38a1dedc924740f9b7a3194ca68d8a24db8fb840c547aee3911162d3
+github.com/bbangert/toml,v0.0.0-20130821181452-a2063ce2e5cf,h1:SGoM2ypzNnI+hMs01svW6wRddndk7eWRs1Bx1zOGRTI=,63690dcb3fcf13b55193cfe263b4a4fdbbe2ee9d7f93440375815dac28d34cb9
+github.com/bcext/cashutil,v0.0.0-20190126062106-1194a0af0582,h1:+sgikGWB0jvS9rzLlPww+SSFoieOLB8yieXyX9DRCF4=,4d5b42e5d472015edeef1b6bf54e253a85bab6df1ac16aabea7fd0dea4aa85e3
+github.com/bcext/gcash,v0.0.0-20190404152342-2e38815af4f2,h1:XVuqYNixmuo81vR/PnBRDDiTH7596mAwQlQ8BucvGnM=,6b24e00369a493c32e730a4d78d8c4fd122ffe0ce319c5d72f3c7d2f12ede4b7
+github.com/beego/goyaml2,v0.0.0-20130207012346-5545475820dd,h1:jZtX5jh5IOMu0fpOTC3ayh6QGSPJ/KWOv1lgPvbRw1M=,aaa4165412caaacbb2df4427207a206e09215c3f7a19f8309e9222ca9ff80691
+github.com/beego/x2j,v0.0.0-20131220205130-a0352aadc542,h1:nYXb+3jF6Oq/j8R/y90XrKpreCxIalBWfeyeKymgOPk=,f9a32026b2107f3cc3610ac6b75c4c64818646a316c35e648c8811d4276a9993
+github.com/beevik/etree,v1.1.0,h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=,614a33736f8b9262a809f101df5bf71f47777879b1191165b6247d6b67c7468c
+github.com/beevik/guid,v0.0.0-20170504223318-d0ea8faecee0,h1:oLd/YLOTOgA4D4aAUhIE8vhl/LAP1ZJrj0mDQpl7GB8=,5add94fcade6c7afa236112c8da300d47ec499ad1789a5e805c8198062dd0749
+github.com/beevik/ntp,v0.2.0,h1:sGsd+kAXzT0bfVfzJfce04g+dSRfrs+tbQW8lweuYgw=,42e14f30c23ba2f5ddaff76101016d87f0f0a0f1d96d3d20e42fd02842091c76
+github.com/beorn7/perks,v1.0.1,h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=,25bd9e2d94aca770e6dbc1f53725f84f6af4432f631d35dd2c46f96ef0512f1a
+github.com/bep/debounce,v1.2.0,h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo=,ddc0a77e4819b6b826d69fdf1a5a153f3f867a31e030cfe28296355b670adf21
+github.com/bep/gitmap,v1.1.1,h1:Nf8ySnC3I7/xPjuWeCwzukUFv185iTUQ6nOvLy9gCJA=,364163e67741ae331d164fd881964160f19fdbdfe094e0e762314cc37aac646a
+github.com/bep/go-tocss,v0.6.0,h1:lJf+nIjsQDpifUr+NgHi9QMBnrr9cFvMvEBT+uV9Q9E=,40e7175da9564796e184e4383bfce703f63244b850999b5a54fd5792bfc5baf5
+github.com/bep/tmc,v0.5.0,h1:AP43LlBcCeJuXqwuQkVbTUOG6gQCo04Et4dHqOOx4hA=,f8e0be71fb845a4ca22825f5b9c51c1a66c29e9ccff723e063781ee64c664c66
+github.com/bgentry/go-netrc,v0.0.0-20140422174119-9fd32a8b3d3d,h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=,59fbb1e8e307ccd7052f77186990d744284b186e8b1c5ebdfb12405ae8d7f935
+github.com/bgentry/speakeasy,v0.1.0,h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=,d4bfd48b9bf68c87f92c94478ac910bcdab272e15eb909d58f1fb939233f75f0
+github.com/bifurcation/mint,v0.0.0-20180715133206-93c51c6ce115,h1:fUjoj2bT6dG8LoEe+uNsKk8J+sLkDbQkJnB6Z1F02Bc=,40a4bd02b9e3477271638bc17ae8537e2675ace0a9b85d753820e979dbf97f36
+github.com/binance-chain/go-sdk,v1.0.8,h1:mC1Tai9diqIWuKTJmrFLal90OCsgtDvyLEItMvglaHA=,3d0f86f959b38f11174d8ee574e77e5d80d2c672d0720dee519f3708e873b0ca
+github.com/binance-chain/ledger-cosmos-go,v0.9.9-binance.1,h1:8mAtw1Tp/BhhTrsXmXM60H1fihcvcKLfo2ZSxShaXKw=,f6dc2bfb4d29db01cad72815615301e089d727110d1d5a0de43e829953e45041
+github.com/biogo/hts,v0.0.0-20160420073057-50da7d4131a3,h1:3b+p838vN4sc37brz9W2HDphtSwZFcXZwFLyzm5Vk28=,93be93b79da8920fb5f02bb2e50a364e2b33dc831229d163e7be70c1010cdb9e
+github.com/bitcoinsv/bsvd,v0.0.0-20190609155523-4c29707f7173,h1:2yTIV9u7H0BhRDGXH5xrAwAz7XibWJtX2dNezMeNsUo=,8e1e554ddc232e763fac27ddc0661cfe543163802b0d6bb9a2904bf24756ddc3
+github.com/bitcoinsv/bsvlog,v0.0.0-20181216181007-cb81b076bf2e,h1:6f+gRvaPE/4h0g39dqTNPr9/P4mikw0aB+dhiExaWN8=,89f0c34e6936d82a1629d5d255923ff27c0adeb99709269cf62071e48cb5fbd8
+github.com/bitcoinsv/bsvutil,v0.0.0-20181216182056-1d77cf353ea9,h1:hFI8rT84FCA0FFy3cFrkW5Nz4FyNKlIdCvEvvTNySKg=,4d4923e8743012e1f8ed1a1ef721786fc2d5249cc5dafd96fdd350c485378cfe
+github.com/bitly/go-hostpool,v0.0.0-20171023180738-a3a6125de932,h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=,9a55584d7fa2c1639d0ea11cd5b437786c2eadc2401d825e699ad6445fc8e476
+github.com/bitly/go-simplejson,v0.5.0,h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=,53930281dc7fba8947c1b1f07c82952a38dcaefae23bd3c8e71d70a6daa6cb40
+github.com/blackducksoftware/horizon,v0.0.0-20190625151958-16cafa9109a3,h1:noI1RY2cUFZfdZMIz1+1LzT8ZeuWK703gwmH/ZC2YnQ=,ece353e9e973ce03d131b29c6c00aea53f1b2e507960b389cdfeb2cc317897ef
+github.com/blacktear23/go-proxyprotocol,v0.0.0-20180807104634-af7a81e8dd0d,h1:rQlvB2AYWme2bIB18r/SipGiMEVJYE9U0z+MGoU/LtQ=,123c82a455309b3a3118504c0a70771352292abced294dca39a570b89e48adba
+github.com/blakesmith/ar,v0.0.0-20190502131153-809d4375e1fb,h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4=,015878daba57ba5ce7228f772b843fffa847d99c7afeb308089bef77f433c510
+github.com/blang/semver,v3.5.1+incompatible,h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=,8d032399cf835b93f7cf641b5477a31a002059eed7888a775f97bd3e9677ad3c
+github.com/blevesearch/bleve,v0.8.1,h1:20zBREtGe8dvBxCC+717SaxKcUVQOWk3/Fm75vabKpU=,58a5b5ade8123d54b7510e463c25e1e59e6cd3d98acdcb4d582c42db67c03519
+github.com/blevesearch/blevex,v0.0.0-20180227211930-4b158bb555a3,h1:U6vnxZrTfItfiUiYx0lf/LgHjRSfaKK5QHSom3lEbnA=,defa5966f802eab571cc8d9315323104b776751dd13caae9d8fc0476576d57ca
+github.com/blevesearch/go-porterstemmer,v0.0.0-20141230013033-23a2c8e5cf1f,h1:J9ZVHbB2X6JNxbKw/f3Y4E9Xq+Ro+zPiivzgmi3RTvg=,e13cc37d08c58870cdbad544b726934cd62ca6aa2ae35f02598f72e30d7c0f59
+github.com/blevesearch/segment,v0.0.0-20160105220820-db70c57796cc,h1:7OfDAkuAGx71ruzOIFqCkHqGIsVZU0C7PMw5u1bIrwU=,21278826e6ba0f63024a953c480467bf41d6717ae4a87c3021a9f74d2f2ae618
+github.com/blocktree/arkecosystem-adapter,v1.0.4,h1:TkZWCzAgi20CjAMlOpwTDppt6XO7X8Fn5EjSUsuB6kI=,22346af6957b0b8fae47d982605f565e5e16e86bc52a9fdd234b023067896cf2
+github.com/blocktree/bitshares-adapter,v1.0.5,h1:mzYlpip0crtYaDaXbKqtGLAxad83p19HLTVa9LLW3fc=,03ce80398ab59af79feb92b054e5da02a290ef27aef5facb93cdd86de2e0df91
+github.com/blocktree/ddmchain-adapter,v1.0.5,h1:Lx8zD0lOHb9TJ7EcGJQhyvpDkYko6OoV8uwudKRKlJA=,81e65ed5692152fcaa1dbf997f71dc43192abc72dad6fb78b721d742c05c1a7a
+github.com/blocktree/eosio-adapter,v1.0.0,h1:cncKE4QbQxDsr8B+HlhU7tywbCtZRsWMln2ek8I5lbc=,cc658d6f9fa5470c5affb4caad58e17637a6d46f8e5d1b3730ed06e570e61959
+github.com/blocktree/ethereum-adapter,v1.1.10,h1:PkmQeRT5ljyCOQZPT0diJo+4G9OqOcJsnRcXeF5fitU=,cf8465db958e214c8196e6311fd5db24f8d28265a37914c2cf9d2dac54a5fd1a
+github.com/blocktree/futurepia-adapter,v1.0.12,h1:mL1rDvcM55hKwLhHOkg1v2GwnCEsDniUrqrMG3PK/+4=,ae29bbdb9a4a1ec345f7d220d6a736ed827a307454d0466bb065dacf3d94200d
+github.com/blocktree/go-owcdrivers,v1.1.18,h1:KCNm+HczpDfxyUf+Wrvbj/iWwQDJ+ca/FBjm3H06rIY=,65bcca1918d8b9e1048bac14b1393dec246402320b6a5dde20ee6afe84585736
+github.com/blocktree/go-owcrypt,v1.0.3,h1:qfAwJsWYp7WaI26hAwPuFUrMXhD9bWwuGXYWBOLsVes=,f365daad6adfcc5aee14faa1455f772b5e39b1c9ff3598afb1c3645587cb6b2e
+github.com/blocktree/moacchain-adapter,v1.0.3,h1:k9drMeekvBsXORortW/zJXaO6CokXVv2EL0/YK3c1/A=,66c87656369c5246bad1527f7385d1cb98bbeda431383f154a23e9bf821a05a2
+github.com/blocktree/nulsio-adapter,v1.1.7,h1:d0xuovBqodBAv8BE/CPZjfe5CNma6FFSP6W3ynJRD0U=,c814c05686483abc345bf4fb997fc61595e738b99cc8a8d36742414f93e948b0
+github.com/blocktree/ontology-adapter,v1.0.8,h1:Lej35ZPPgjS6nP5CEumIUskRNASMZswgrByYSxrWPe0=,fcdeb4c6d8f37a22f52e2938ebc51b4ff1f4cf4116eebce8ecc7591995236853
+github.com/blocktree/openwallet,v1.5.3,h1:6hNj61wLfzEGqbbY0ZOeqGAjSj9snoRSBikgSlWPqZI=,1f169b69cd3ec4a4f82836c2b2178eb162464d6413c09f8170f73a838d28650b
+github.com/blocktree/ripple-adapter,v1.0.13,h1:zgJt7onq5+V6pvQ7Kl3xiiSkk3uxuCF07OpwCtJTM8w=,bb7f515a6573eb185da0bebb28bf57364d175ee7f937e18af5b2eac98741464f
+github.com/blocktree/virtualeconomy-adapter,v1.1.5,h1:YJ2JKUifSsCjCneM0NUky3WbG0LEm7IKUBmf9EAmAXc=,7a5085b8b0b114e2491032ec6f95e300c28fa309ec5883044d8b954d7d4db06e
+github.com/blocktree/waykichain-adapter,v1.0.3,h1:qY/Txh+n4iIJA49rDMj41qpIUj3McjBir8Ls+sX8c3w=,62ea2ff873c84a32d3482c8ec1687221a1e46055b9be18fcd25be584cf2cac5d
+github.com/bluele/gcache,v0.0.0-20190518031135-bc40bd653833,h1:yCfXxYaelOyqnia8F/Yng47qhmfC9nKTRIbYRrRueq4=,334accb65479b1b18fb569b08d14eebceb6478ea16abe9fbad2f1c6b6586deb6
+github.com/bluele/slack,v0.0.0-20180528010058-b4b4d354a079,h1:dm7wU6Dyf+rVGryOAB8/J/I+pYT/9AdG8dstD3kdMWU=,2b0055c292b7baa49f56eb9fc710f35f005747ddbef16427d5c985617c3b697d
+github.com/bmatcuk/doublestar,v1.1.5,h1:2bNwBOmhyFEFcoB3tGvTD5xanq+4kyOZlB8wFYbMjkk=,81f592b11277591e943b91522497c323fcf0c6b4f3099f495de10f83e8c3e697
+github.com/bmizerany/assert,v0.0.0-20160611221934-b7ed37b82869,h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=,2532a167df77ade7e8012f07c0e3db4d4c15abdb7ffa7b05e1d961408da9a539
+github.com/bmizerany/pat,v0.0.0-20170815010413-6226ea591a40,h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo=,ed04bed4d193e25371ebc6524984da4af9ece5c107fcc82d5aa4914b726706d2
+github.com/bndr/gotabulate,v1.1.2,h1:yC9izuZEphojb9r+KYL4W9IJKO/ceIO8HDwxMA24U4c=,2c1ecc544368e40010082f800c1ee24eaf1b8e0f96fa76a56e4f61dda4cd0d60
+github.com/boltdb/bolt,v1.3.1,h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=,ecaf17b0dbe7c85a017704c72667b2526b492b1a753ce7302a27dd2fb2e6ee79
+github.com/boombuler/barcode,v1.0.0,h1:s1TvRnXwL2xJRaccrdcBQMZxq6X7DvsMogtmJeHDdrc=,ef3832c4d22a09377323980bacd9f5f2ab43d0d20da115e1cfb139e093d7bb9b
+github.com/bradfitz/go-smtpd,v0.0.0-20170404230938-deb6d6237625,h1:ckJgFhFWywOx+YLEMIJsTb+NV6NexWICk5+AMSuz3ss=,0a06dd547fed38e2744800b5f4ebae5ac00ee08717ded281510a8d319b8db8f3
+github.com/bradfitz/gomemcache,v0.0.0-20190913173617-a41fca850d0b,h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0=,eb71acfac0c4ce5f0b6537d8029de98902d83fd38fdcbfd757f06697c6323f78
+github.com/bradfitz/iter,v0.0.0-20190303215204-33e6a9893b0c,h1:FUUopH4brHNO2kJoNN3pV+OBEYmgraLT/KHZrMM69r0=,6883ce0960849ca9c024a4a4e7508ff521da2a3bb66d1974ea2f970a5265ea39
+github.com/bradfitz/latlong,v0.0.0-20140711231157-b74550508561,h1:mz4equOOUOnI4q5E7dyHlRx1x63YEaYwhlVluCDila4=,d1c124508f1825697a2bdb9fac48d2b8805b41f8e546d262fc487d8450962cec
+github.com/bradhe/stopwatch,v0.0.0-20180424000511-fd55e776a960,h1:YJWTgxlTgeHlvhe7tZJm0yBcg2GhjDQs8zig5O5vup8=,c2926a4febee7eea0f523b3d4fcaa414c27effc2abc053137a3dbf0b3a4fa324
+github.com/briankassouf/jose,v0.9.2-0.20180619214549-d2569464773f,h1:ZMEzE7R0WNqgbHplzSBaYJhJi5AZWTCK9baU0ebzG6g=,c0b50157ec3c39fbd6ded9d5e6bc763890e6d909db38b337a72876124c2baeeb
+github.com/brocaar/lorawan,v0.0.0-20190925120821-154a30dbdce2,h1:51WcQ+VAc/6jZ/8GBJiQ3B7FrT2aXI+YsUx2iG9tJlw=,0082cebaf26ed36c901f9b44b6d785eccc2a0c123088642eac7c9b5711b7d0ca
+github.com/bsm/go-vlq,v0.0.0-20150828105119-ec6e8d4f5f4e,h1:D64GF/Xr5zSUnM3q1Jylzo4sK7szhP/ON+nb2DB5XJA=,61fc03674cd72d5a4c55413e8b58fc8eafc58fbb71fb89c719225650754b3469
+github.com/bsm/sarama-cluster,v2.1.15+incompatible,h1:RkV6WiNRnqEEbp81druK8zYhmnIgdOjqSVi0+9Cnl2A=,a8a4867f09704222362b75fa00c9894106a928dc7cf905f1b80ca7bbd1a3b8e5
+github.com/btcsuite/btclog,v0.0.0-20170628155309-84c8d2346e9f,h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=,74ad4defbabf48c98bbb547be1c40c11fa2c286f599412c774d1c5604dc1808d
+github.com/btcsuite/btcutil,v0.0.0-20190425235716-9e5f4b9a998d,h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=,de1ee450ff2cfec2df220fec0d3e265cc812f214892bfad601e142632e2cf3f9
+github.com/btcsuite/go-socks,v0.0.0-20170105172521-4720035b7bfd,h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=,cc27776f56f7c58c2808af55781e9b3f7d0eb0dc08e4c19c38c6bdf2465ce0e7
+github.com/btcsuite/goleveldb,v1.0.0,h1:Tvd0BfvqX9o823q1j2UZ/epQo09eJh6dTcRp79ilIN4=,13e37462cb2fe5976221f57d357051c1c3cc63a9b0e67e6ed97f98af795d0815
+github.com/btcsuite/snappy-go,v1.0.0,h1:ZxaA6lo2EpxGddsA8JwWOcxlzRybb444sgmeJQMJGQE=,d136165bdbf91780ded5d3ebaba9026f900595e56c19aa0ef29896015eae9627
+github.com/btcsuite/websocket,v0.0.0-20150119174127-31079b680792,h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=,d45ac16f59082ac369e61c7bbe23153e289cad03619ab8041963d54cd700d6f0
+github.com/btcsuite/winsvc,v1.0.0,h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk=,6893f7a62faec17d7b0856c7464754cab14c4d913e27af5276f6a98b25f3c779
+github.com/buger/jsonparser,v0.0.0-20191004114745-ee4c978eae7e,h1:oJCXMss/3rg5F6Poy9wG3JQusc58Mzk5B9Z6wSnssNE=,7e2dda4c1b4217408903f3b4a1f2cdd93d71bc7682387ba860cfa0cc9fcf88be
+github.com/bugsnag/bugsnag-go,v1.5.3,h1:yeRUT3mUE13jL1tGwvoQsKdVbAsQx9AJ+fqahKveP04=,8aaf02df2c1a4e8a5725eea1d91af69c4f9e157c2559a3452388f64a977534c0
+github.com/bugsnag/panicwrap,v1.2.0,h1:OzrKrRvXis8qEvOkfcxNcYbOd2O7xXS2nnKMEMABFQA=,75357d3a5cd89dc04f1f101e02686fc1ef33b4a4f67edb82b3fa63fded3f47e9
+github.com/bwmarrin/discordgo,v0.20.1,h1:Ihh3/mVoRwy3otmaoPDUioILBJq4fdWkpsi83oj2Lmk=,616d49cc107ccd85872b6008f028c4aca021f66381828bb921f15f9e8149988a
+github.com/bwmarrin/snowflake,v0.0.0-20180412010544-68117e6bbede,h1:lTJlWdyhwqq7h29GtuIDHW/xi+sMN+JOLMgYAwQ5O74=,2e13ad82f7ae64821f9851a66b4800f1589e413b27b469f28d21970957a3c6da
+github.com/c-bata/go-prompt,v0.2.2,h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0=,ffe765d86d90afdf8519def13cb027c94a1fbafea7a18e9625210786663436c4
+github.com/c2h5oh/datasize,v0.0.0-20171227191756-4eba002a5eae,h1:2Zmk+8cNvAGuY8AyvZuWpUdpQUAXwfom4ReVMe/CTIo=,b5543f3e104a84e35ac51780968282b455dd30c88730d0da166d8d6512301da6
+github.com/caarlos0/ctrlc,v1.0.0,h1:2DtF8GSIcajgffDFJzyG15vO+1PuBWOMUdFut7NnXhw=,e4b5e9dd37cee2d47ff1c5eeba9a4b6e2b778c349a3615ca9653531f035a3ca6
+github.com/cactus/go-statsd-client/statsd,v0.0.0-20191030180650-a68a2246f89c,h1:rrLWPlpOKwnBpVUXitbgM3+Nie1eBaFfBZqfiPpxVj8=,cbb94149ec688419a91406b374955946c3679b1dde0752d7c0ffdc87432cd0b3
+github.com/caddyserver/caddy,v1.0.3,h1:i9gRhBgvc5ifchwWtSe7pDpsdS9+Q0Rw9oYQmYUTw1w=,029f14052f1ec9937c4028f3231899bf5391d5eeb7f58795d5d470a6f4c338a7
+github.com/campoy/unique,v0.0.0-20180121183637-88950e537e7e,h1:V9a67dfYqPLAvzk5hMQOXYJlZ4SLIXgyKIE+ZiHzgGQ=,4bc20f70e0b170ecdabd740a5de012d05f4c9149e2882fbdb303dc1b1793a77e
+github.com/casbin/casbin,v1.9.1,h1:ucjbS5zTrmSLtH4XogqOG920Poe6QatdXtz1FEbApeM=,e2ef71d15eb595374d27961d255941b50691f9eaa91b5590f081fe3a4ab195c2
+github.com/cavaliercoder/go-cpio,v0.0.0-20180626203310-925f9528c45e,h1:hHg27A0RSSp2Om9lubZpiMgVbvn39bsUmW9U5h0twqc=,08b68e1d424b545418828c05c46bce5d795bbb8b534871667650ec6b3e7b33a6
+github.com/cenk/backoff,v2.2.1+incompatible,h1:djdFT7f4gF2ttuzRKPbMOWgZajgesItGLwG5FTQKmmE=,e3d1c641f85f548370aedc6bae3d4b975b09e3b2d1d9060f0e72bd5e2710d4c9
+github.com/cenkalti/backoff,v2.2.1+incompatible,h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=,f8196815a1b4d25e5b8158029d5264801fc8aa5ff128ccf30752fd169693d43b
+github.com/cenkalti/backoff/v3,v3.0.0,h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=,c69bf77e7b43cb3935d763c24af3810d9869a664bbcd26ffad9d3dc1bf602006
+github.com/census-instrumentation/opencensus-proto,v0.2.1,h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=,b3c09f3e635d47b4138695a547d1f2c7138f382cbe5a8b5865b66a8e08233461
+github.com/centrify/cloud-golang-sdk,v0.0.0-20190214225812-119110094d0f,h1:gJzxrodnNd/CtPXjO3WYiakyNzHg3rtAi7rO74ejHYU=,dc3de1393d7ae63ce35393630417ff8c5421a2a03cbf1a20680c7d57a74cd311
+github.com/certifi/gocertifi,v0.0.0-20180118203423-deb3ae2ef261,h1:6/yVvBsKeAw05IUj4AzvrxaCnDjN4nUqKjW9+w5wixg=,054d6c3a6f8d78fba2f08fbc2f23ec839d5a4aead4a184270d87d095c80eb6dc
+github.com/cespare/cp,v1.1.1,h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=,25f2ed5bac9ac3c1891ff364b213f6b7b0ee2e7aed13510738ced93ea71860e3
+github.com/cespare/xxhash,v1.1.0,h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=,fe98c56670b21631f7fd3305a29a3b17e86a6cce3876a2119460717a18538e2e
+github.com/cespare/xxhash/v2,v2.1.0,h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=,655feb22a395d9f56315280770d386eb99cdca79a97970812dbd3b30a7940638
+github.com/chaseadamsio/goorgeous,v0.0.0-20170901132237-098da33fde5f,h1:REH9VH5ubNR0skLaOxK7TRJeRbE2dDfvaouQo8FsRcA=,f81f4ef8ac52852b232ea971d009ec88007f1258c29e10e49918a31a99c6c4cc
+github.com/checkpoint-restore/go-criu,v0.0.0-20190109184317-bdb7599cd87b,h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM=,1d1f5c6e529c87259305d8ed6bf4d381dabbf85458de187981204339e251a5be
+github.com/cheekybits/genny,v1.0.0,h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=,770f3e01425b9b0a87a5e0b29fc6ac2cfa67a3f1265aafb16c96a47bafc304e4
+github.com/cheekybits/is,v0.0.0-20150225183255-68e9c0620927,h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764=,f7bf9ac5b1fc574ef5a373382909af550ef1a7f01182469eaa12e18c7c5fc7cb
+github.com/cheggaaa/pb,v2.0.7+incompatible,h1:gLKifR1UkZ/kLkda5gC0K6c8g+jU2sINPtBeOiNlMhU=,383b717f271a2471e57ac52f64dbb77304ec1c0b53c5efeb7a1392668f59d0b4
+github.com/cheggaaa/pb/v3,v3.0.1,h1:m0BngUk2LuSRYdx4fujDKNRXNDpbNCfptPfVT2m6OJY=,781be3118614dfaeb2df44d31d8af36c703c2aaed18e9ca49fa4ef9ba1539236
+github.com/chewxy/hm,v1.0.0,h1:zy/TSv3LV2nD3dwUEQL2VhXeoXbb9QkpmdRAVUFiA6k=,68ab03d9f8cb3d92d6c8234cfd879004be2fd69457d2c9fa6834d1c6ddb22b43
+github.com/chewxy/math32,v1.0.4,h1:dfqy3+BbCmet2zCkaDaIQv9fpMxnmYYlAEV2Iqe3DZo=,7885f637bb90729d04f125e030542b9a6999f9e5dffd3294baffbcdd548bbc3e
+github.com/chrismalek/oktasdk-go,v0.0.0-20181212195951-3430665dfaa0,h1:CWU8piLyqoi9qXEUwzOh5KFKGgmSU5ZhktJyYcq6ryQ=,094a132bc1e950677f75e570b17a52f103edd6acd3ec1c0943cf9cda3cd6355a
+github.com/chromedp/cdproto,v0.0.0-20191009033829-c22f49c9ff0a,h1:AuIGvB6IuWpMEdfKQ+t77D6dzLpNftzxAsktehYyWn8=,bf85eeebdc65b1e90d851b42f56a3dbf5bcff4923aa426692a1c0d0a1727a522
+github.com/chromedp/chromedp,v0.5.1,h1:PAqhoCWCHzRphYnmmxLSiYk7EEwDplCm4woTCCaV2cQ=,59cd1ab42eeb90e32cc60e77a8fbb19ca629603200d5bd40d611f780e646062b
+github.com/chzyer/logex,v1.1.10,h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=,2c94771c1e335a2c58a96444b3768b8e00297747d6ce7e7c14bab2e8b39d91bd
+github.com/chzyer/readline,v0.0.0-20180603132655-2972be24d48e,h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=,3dc842677887278fb33d25078d375ae6a7a94bb77a8d205ee2230b581b6947a6
+github.com/chzyer/test,v0.0.0-20180213035817-a1ea475d72b1,h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=,ad8550bed3c4a94bbef57b9fc5bb15806eaceda00925716404320580d60e2f7d
+github.com/cihub/seelog,v0.0.0-20170130134532-f561c5e57575,h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs=,fc279208e6094fb22c8ea651c6e9794844069693c9b916c225276c54f7e76bfe
+github.com/circonus-labs/circonus-gometrics,v2.3.1+incompatible,h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=,d8081141497e3cd34844df66af016c7900d58b324fb689e17e57bc053d91c9ba
+github.com/circonus-labs/circonusllhist,v0.1.3,h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=,4dc805d9735dd9ca9b8875c0ad23126abb5bc969c5a40c61b5bc891808dbdcb6
+github.com/clbanning/mxj,v1.8.4,h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I=,8947cf617bdd9efc62817c8ddb17bafe497f35abdf10a3c60f295e387f633f70
+github.com/client9/misspell,v0.3.4,h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=,a3af206372e131dd10a68ac470c66a1b18eaf51c6afacb55b2e2a06e39b90728
+github.com/cloudflare/backoff,v0.0.0-20161212185259-647f3cdfc87a,h1:8d1CEOF1xldesKds5tRG3tExBsMOgWYownMHNCsev54=,2aea6d1528c42cf5f111e035bba564fd0481cb4ddb3b50f783f2481d855947cb
+github.com/cloudflare/cfssl,v1.4.0,h1:TdyQbj/bDUMUHf2IkcHU2EHUmzCmRLuJ3fFd8EYMg1E=,845fc5f4a7f4c2356d676916fdd7b4b2217b76c8f9b7a960290ab8884d6f8e0e
+github.com/cloudflare/cloudflare-go,v0.10.4,h1:7C1D9mtcNFZLCqmhkHK2BlwKKm9fi4cBqY6qpYtQv5E=,e8f6ee817c9b807c98559ff87d4ed7a284738d9dc253b6db7520911d93bd81e3
+github.com/cloudflare/go-metrics,v0.0.0-20151117154305-6a9aea36fb41,h1:/8sZyuGTAU2+fYv0Sz9lBcipqX0b7i4eUl8pSStk/4g=,9176a680ad7a72cf717e3e01ee1ca6b292cb576b543e12ff1770cc58957bc222
+github.com/cloudflare/golz4,v0.0.0-20150217214814-ef862a3cdc58,h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=,75832d1c2989b2a0d7eb8d2cec300f6d457254d42927a23f522b164833e791d4
+github.com/cloudflare/redoctober,v0.0.0-20171127175943-746a508df14c,h1:p0Q1GvgWtVf46XpMMibupKiE7aQxPYUIb+/jLTTK2kM=,e69334393aec994f9ba55bbdfa8a65c0cfa46080230068c44ca16a85c0a74079
+github.com/cloudfoundry-community/go-cfclient,v0.0.0-20190201205600-f136f9222381,h1:rdRS5BT13Iae9ssvcslol66gfOOXjaLYwqerEn/cl9s=,f01d41c3c911b59bf717674690799c978f3a841ef695c7ee09f4afe5f7c96e64
+github.com/cloudfoundry-incubator/candiedyaml,v0.0.0-20170901234223-a41693b7b7af,h1:6Cpkahw28+gcBdnXQL7LcMTX488+6jl6hfoTMRT6Hm4=,325af9d6827b8d120a72992c38ba776187fbd947a39c9f1928a43a1a2b262453
+github.com/cloudfoundry/bosh-agent,v2.271.0+incompatible,h1:277mM9hsUzyrd5Qd/5e1LFwiobIYorE7vTBRZohRV8s=,42e253b855d03655ec2cf59ab01a14aa0037f25029517be595dda26ff9a2a552
+github.com/cloudfoundry/bosh-utils,v0.0.0-20191026100324-0b6803ec5382,h1:Rrpgz+K2Zso//XUmqbGlnYi9rw6EtYJ4uLlTNSnSBIw=,c08bbf97e510b2de271fd64f5b2acedfa011b4fd3f30092804992084c67b68b7
+github.com/cloudfoundry/gosigar,v1.1.0,h1:V/dVCzhKOdIU3WRB5inQU20s4yIgL9Dxx/Mhi0SF8eM=,53acb43e5111c6af6af138e1144907bb5f9bf8abc28e71a703502f92c13ba274
+github.com/cloudfoundry/sonde-go,v0.0.0-20171206171820-b33733203bb4,h1:cWfya7mo/zbnwYVio6eWGsFJHqYw4/k/uhwIJ1eqRPI=,6124fdcac54e1baf09703ed2b938a4e2bb55d9cd20f78451f25c16638a95f62d
+github.com/cockroachdb/apd,v1.1.0,h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=,fef7ec2fae220f84bfacb17fbfc1b04a666ab7f6fc04f3ff6d2b1e05c380777d
+github.com/cockroachdb/apd/v2,v2.0.1,h1:y1Rh3tEU89D+7Tgbw+lp52T6p/GJLpDmNvr10UWqLTE=,9f1c35b8118f70f08150bf5e9da225fa1201f5d0f8c22f326468ea22ab6b791d
+github.com/cockroachdb/cockroach-go,v0.0.0-20190916165215-ad57a61cc915,h1:QX2Zc22B15gdWwDCwS7BXmbeD/SWdcRK12gOfZ5BsIs=,e3faa1cdf2a15357d1e2eb200b3bdb81dae3fb084cb04534e0caf27a68487a88
+github.com/cockroachdb/datadriven,v0.0.0-20190809214429-80d97fb3cbaa,h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=,170480bf3daa133144f2578e3f051f0fd98313666642cab64cef3359753a5c32
+github.com/codahale/hdrhistogram,v0.0.0-20161010025455-3a0bb77429bd,h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=,e7e117da64da2f921b1f9dc57c524430a7f74a78c4b0bad718d85b08e8374e78
+github.com/codegangsta/inject,v0.0.0-20150114235600-33e0aa1cb7c0,h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=,0a324d56992bffd288fa70a6d10eb9b8a9467665b0b1eb749ac6ae80e8977ee2
+github.com/codegangsta/negroni,v1.0.0,h1:+aYywywx4bnKXWvoWtRfJ91vC59NbEhEY03sZjQhbVY=,2e6301aa682a7c38305f2ee72b276181cd0990f224f9fe115a433a5beb138488
+github.com/codeskyblue/go-sh,v0.0.0-20190412065543-76bd3d59ff27,h1:HHUr4P/aKh4quafGxDT9LDasjGdlGkzLbfmmrlng3kA=,77348ab27860460a015d0e65d08f18ed2194c13981f5fd722143a6e0c2dbb589
+github.com/confluentinc/confluent-kafka-go,v1.1.0,h1:HIW7Nkm8IeKRotC34mGY06DwQMf9Mp9PZMyqDxid2wI=,bc9aee1c8052340809bc43bf015a183985ec3426d404c34acfa3970e3b245340
+github.com/container-storage-interface/spec,v1.2.0,h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s=,86ecb02d57af97c9a4de8f2f3cacbceb5c7f2f96ee007133e0cfb9525ce45177
+github.com/containerd/cgroups,v0.0.0-20191011165608-5fbad35c2a7e,h1:3bt+8T1I/CuYx+a5ww32+UT4fc9x8iRiXrhfduFTlBU=,4646f14f27a365ff08abb1266b7ca4dffc1acd5e8e74b57211acbba22b496d46
+github.com/containerd/console,v0.0.0-20181022165439-0650fd9eeb50,h1:WMpHmC6AxwWb9hMqhudkqG7A/p14KiMnl6d3r1iUMjU=,62a7f1da11b3be4c0ef4f9f03b99dcf59dc988f062749f35e4e6bb585fb4e4fe
+github.com/containerd/containerd,v1.3.0,h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=,e3f529147f2c909c85ac461126ad092a3c5d5a2abcc4f3c22600685af6dc2f08
+github.com/containerd/continuity,v0.0.0-20190827140505-75bee3e2ccb6,h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw=,ef1a3a4c2c1508d293eb2730e47e9601cba19d939393b1018d8e476b30dfd90b
+github.com/containerd/fifo,v0.0.0-20190816180239-bda0ff6ed73c,h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk=,0c1b858ee9dd28bd915a3f7bd108b98b1d689be3c14535e7e8aee4a60c4a72c0
+github.com/containerd/go-runc,v0.0.0-20190923131748-a2952bc25f51,h1:vmF3zULCGpZ4QJCCLsGUXX7tNXW+0x3r9owerRAmRaU=,76ce6296dc07f1f5957867e9a5925cf9e16c69ad2b635f74a4ec471e6672ee51
+github.com/containerd/ttrpc,v0.0.0-20191028202541-4f1b8fe65a5c,h1:+RqLdWzn0xFunb+sxXaEzHOg8NuEG/eaI+9C1xXX8Mw=,f43884f8f37259c4b50a4413092064f35abd03b9db3bbe2ca3264b5a4b591b04
+github.com/containerd/typeurl,v0.0.0-20190911142611-5eb25027c9fd,h1:bRLyitWw3PT/2YuVaCKTPg0cA5dOFKFwKtkfcP2dLsA=,aa4e0823acf7b686a9521617134a171c5b5813de302e3fba742cd3b7f43ba944
+github.com/containernetworking/cni,v0.7.1,h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE=,b83f1b8e9bba747e41512737383da57e517cf425beb1bd58882904dae9348b1d
+github.com/containers/image,v3.0.2+incompatible,h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE=,dadc25bfff923d4f2c8b570471be3b0fd1449f42251fb6c318b68e04f6d47b3a
+github.com/containers/storage,v1.12.13,h1:GtaLCY8p1Drlk1Oew581jGvB137UaO+kpz0HII67T0A=,08f5ee958be629b73ff02296eb11f4b0698dbd90e585ce019c5428a8e1d371d4
+github.com/containous/flaeg,v1.4.1,h1:VTouP7EF2JeowNvknpP3fJAJLUDsQ1lDHq/QQTQc1xc=,d097191570bb92f920cd15500a93205e6e93b5ee4723a51c9b8e3bfbcfaae505
+github.com/corbym/gocrest,v1.0.3,h1:gwEdq6RkTmq+09CTuM29DfKOCtZ7G7bcyxs3IZ6EVdU=,f13221d177442318b04f468fa57ea92bd9892d86e7cf7bb7299e0c58cea9df48
+github.com/coredns/coredns,v1.1.2,h1:bAFHrSsBeTeRG5W3Nf2su3lUGw7Npw2UKeCJm/3A638=,cbf720a9af4fdc5be08b0eea67fe219bb08c75292e22dca90095bf45cbd4a926
+github.com/coreos/bbolt,v1.3.3,h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=,63ea574f28bd03b6d2a82304e0f7c96dcb30fa048311a4c8c3ad512dbacc4630
+github.com/coreos/clair,v0.0.0-20180919182544-44ae4bc9590a,h1:glxUtT0RlaVJU86kg78ygzfhwW6D+uj5H+aOK01QDgI=,3bc8c4b06a61c5673fcc69d5278b3a5313633fca1166e94a7140c363399c3dc6
+github.com/coreos/etcd,v3.3.17+incompatible,h1:f/Z3EoDSx1yjaIjLQGo1diYUlQYSBrrAQ5vP8NjwXwo=,d7ca8db509166ce05482c9b3e80cfb8d1086691901e80202f571d152da912153
+github.com/coreos/go-etcd,v2.0.0+incompatible,h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo=,4b226732835b9298af65db5d075024a5971aa11ef4b456899a3830bccd435b07
+github.com/coreos/go-iptables,v0.4.3,h1:jJg1aFuhCqWbgBl1VTqgTHG5faPM60A5JDMjQ2HYv+A=,4626df8f719f93e5d66bd995d586ae3540c24b2203c0d2aab7c6d5e60f89a3dc
+github.com/coreos/go-oidc,v2.1.0+incompatible,h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=,e2e123270614dd7d47d95ae1fce80a9102df019f9e820d4f5cf5c92c64e1ad91
+github.com/coreos/go-semver,v0.3.0,h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=,b2fc075395ffc34cff4b964681d0ae3cd22096cfcadd2970eeaa877596ceb210
+github.com/coreos/go-systemd,v0.0.0-20190719114852-fd7a80b32e1f,h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=,22237f0aed3ab6018a1025c65f4f45b4c05f9aa0c0bb9ec880294273b9a15bf2
+github.com/coreos/pkg,v0.0.0-20180928190104-399ea9e2e55f,h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=,7fe161d49439a9b4136c932233cb4b803b9e3ac7ee46f39ce247defc4f4ea8d7
+github.com/coreos/rkt,v1.30.0,h1:Kkt6sYeEGKxA3Y7SCrY+nHoXkWed6Jr2BBY42GqMymM=,436e294b735bada49407ad3c066ae251ef105ce59076ef8f0f732c586a72970e
+github.com/cosiner/argv,v0.0.0-20170225145430-13bacc38a0a5,h1:rIXlvz2IWiupMFlC45cZCXZFvKX/ExBcSLrDy2G0Lp8=,deb11c1c7a2fa44b3497731d497b3d7be5a51cf696ed43280e01822e2eed9b96
+github.com/cosmos/cosmos-sdk,v0.35.0,h1:EPeie1aKHwnXtTzKggvabG7aAPN+DDmju2xquvjFwao=,ccc975b48e3b40f4eb054e28e9243ecb48c0d8ecdf52b9512da26a8200cc7c43
+github.com/cosmos/go-bip39,v0.0.0-20180819234021-555e2067c45d,h1:49RLWk1j44Xu4fjHb6JFYmeUnDORVwHNkDxaQ0ctCVU=,e41d7ea781b15421a4690bedf78543f2eaad00c36c439dd4973131dec1985177
+github.com/cosmos/ledger-cosmos-go,v0.10.3,h1:Qhi5yTR5Pg1CaTpd00pxlGwNl4sFRdtK1J96OTjeFFc=,f1089701d8868e4ff3fd9e9a4104476963f725a713ee2a476b4ef8094a0bca20
+github.com/cosmos/ledger-go,v0.9.2,h1:Nnao/dLwaVTk1Q5U9THldpUMMXU94BOTWPddSmVB6pI=,a77b2063a64133d8dda638d5d602071429d7e2500576bfff5c1763f8572a8517
+github.com/couchbase/go-couchbase,v0.0.0-20191031153726-96c2e23d589a,h1:eKnoG+AQQQIxHEcBIbudmwLJv3S9UQU6oGHzvqhttqE=,5dd3e610f24adb44b31e7ecc6a80a8974b769bd622d569c69fb98bd02610bbef
+github.com/couchbase/gomemcached,v0.0.0-20191004160342-7b5da2ec40b2,h1:vZryARwW4PSFXd9arwegEywvMTvPuXL3/oa+4L5NTe8=,5b9a280cd2d546cd0d70fbd6828e73fa0b07fb9d3c0b6bff88d8e23d8e4256f4
+github.com/couchbase/goutils,v0.0.0-20190315194238-f9d42b11473b,h1:bZ9rKU2/V8sY+NulSfxDOnXTWcs1rySqdF1sVepihvo=,a2820e0f01d8c944b70c70515b9924f41b450f3688d19ad4d506b2b9b367c433
+github.com/couchbase/vellum,v0.0.0-20190111184608-e91b68ff3efe,h1:2o6Y7KMjJNsuMTF8f2H2eTKRhqH7+bQbjr+D+LnhE5M=,06e3ca28a98c95bcdfd909168e1dcf45a6667ef59ad59112a01e6bbdcf591e84
+github.com/couchbaselabs/go-couchbase,v0.0.0-20190708161019-23e7ca2ce2b7,h1:1XjEY/gnjQ+AfXef2U6dxCquhiRzkEpxZuWqs+QxTL8=,3429eb55dd38b07bab5e9a57a3e2451449b49bdbc6f16585f8b7557067572499
+github.com/cpu/goacmedns,v0.0.1,h1:GeIU5chKys9zmHgOAgP+bstRaLqcGQ6HJh/hLw9hrus=,12acca48bb444f3832a87b8d238e573bbfa60e5c25dfcf6787a003dfacaf055d
+github.com/cpuguy83/go-md2man,v1.0.10,h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=,b9b153bb97e2a702ec5c41f6815985d4295524cdf4f2a9e5633f98e9739f4d6e
+github.com/cpuguy83/go-md2man/v2,v2.0.0,h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=,f2fdd06287a80f1bea5552f572d7f2314ec829285a3040b63469e0635f66fb6d
+github.com/creack/goselect,v0.1.0,h1:4QiXIhcpSQF50XGaBsFzesjwX/1qOY5bOveQPmN9CXY=,24d8028970032b1a45091ad8ff9b9c280693def1433cb5948ed92c0c975226ea
+github.com/creack/pty,v1.1.7,h1:6pwm8kMQKCmgUg0ZHTm5+/YvRK0s3THD/28+T6/kk4A=,e7ea3403784d186aefbe84caed958f8cba2e72a04f30cdb291ece19bec39c8f3
+github.com/cskr/pubsub,v1.0.2,h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=,39e40a42c10058c188f331ed0bb660a0504d7c2ddd9e835a9970786fdc35feb0
+github.com/cupcake/rdb,v0.0.0-20161107195141-43ba34106c76,h1:Lgdd/Qp96Qj8jqLpq2cI1I1X7BJnu06efS+XkhRoLUQ=,019a246ac0d7f6fcf3758587a031767730cfb824003c311686a4eb552a1dcc57
+github.com/cweill/gotests,v1.5.3,h1:k3t4wW/x/YNixWZJhUIn+mivmK5iV1tJVOwVYkx0UcU=,7ced96d4223a0afcd41922c4d3ae064493dd5bedbc72f6541716fce1cab24b7d
+github.com/cxr29/aliyun-openapi-go-sdk,v0.0.0-20151123082822-0b043e4d1e0c,h1:WEWetvNRZlk7JW3M4fycSA3f/2xZGxRdrwmpgRkGoQc=,6c80128745e3acdd01f59bc6c6e3a1f24193e89eb627ad6dcc615e763878b6e4
+github.com/cyphar/filepath-securejoin,v0.2.2,h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=,d022873dbb9e8d3b7a43c9dedbea54dfc9a6c15f9632ba522a1257e8b948c100
+github.com/cznic/b,v0.0.0-20181122101859-a26611c4d92d,h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8=,1c34b27ce98f70cb0e97c2bbe0bdae216cc1ea6b2617b0e984e2ce30adc06338
+github.com/cznic/fileutil,v0.0.0-20181122101858-4d67cfea8c87,h1:94XgeeTZ+3Xi9zsdgBjP1Byx/wywCImjF8FzQ7OaKdU=,109b4c91722a0f9a4f941d77eff34270684e53ca36e7d14ab2cd4a4e80841d73
+github.com/cznic/golex,v0.0.0-20181122101858-9c343928389c,h1:G8zTsaqyVfIHpgMFcGgdbhHSFhlNc77rAKkhVbQ9kQg=,d2b11a6e0e1de5125a2d550650b4cbb7bf44280ebf1cda74ef4a63e3cfa11012
+github.com/cznic/internal,v0.0.0-20181122101858-3279554c546e,h1:58AcyflCe84EONph4gkyo3eDOEQcW5HIPfQBrD76W68=,bc177d001529bca3f46aa84855db4e783a041c188d3ba237f68fa4522bdca74b
+github.com/cznic/kv,v0.0.0-20181122101858-e9cdcade440e,h1:8ji4rZgRKWMQUJlPNEzfzCkX7yFAZFR829Mrh7PXxLA=,4f992bdaf6d17487c7b16669b6d55afa76b321e63f8e4b6a6d1126b44b18b0d9
+github.com/cznic/lldb,v1.1.0,h1:AIA+ham6TSJ+XkMe8imQ/g8KPzMUVWAwqUQQdtuMsHs=,ddec7228568547a5fbfbc6a91208cbcafeed4338a38c41d483448957e4bec186
+github.com/cznic/mathutil,v0.0.0-20181122101859-297441e03548,h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=,8f69a36f60d885e011b0a90b91246a7e88223cb2883dc6e71eab3f42d653231b
+github.com/cznic/parser,v0.0.0-20181122101858-d773202d5b1f,h1:DUtr2TvhM9rmiHKVJWoLqDY2+MdxljW9hlaS/oYoi1c=,18b746a4090720bd9dfe219d0f7bb7fb28565df70417208d7e99dfd79f1ea264
+github.com/cznic/ql,v1.2.0,h1:lcKp95ZtdF0XkWhGnVIXGF8dVD2X+ClS08tglKtf+ak=,05164e379d43eaada0efdd763a50a9ef8f4b7f73a5de7ab866093bb25a4fb747
+github.com/cznic/sortutil,v0.0.0-20181122101858-f5f958428db8,h1:LpMLYGyy67BoAFGda1NeOBQwqlv7nUXpm+rIVHGxZZ4=,67783879c1ae4472fdabb377b1772e4e4c5ced181528c2fc4569b565cb47a57b
+github.com/cznic/strutil,v0.0.0-20181122101858-275e90344537,h1:MZRmHqDBd0vxNwenEbKSQqRVT24d3C05ft8kduSwlqM=,867902276444cbffca84d9d5f63754e8b22092d93a94480d8dfebd234ac8ffbd
+github.com/cznic/y,v0.0.0-20181122101901-b05e8c2e8d7b,h1:gvFsf4zJcnW6GRN+HPGTxwuw+7sTwzmoeoBQQCZDEnk=,8c84f5e4f9dc5f0809d8ad22d057e404c3e8644dc28e8fc52abbb1d2350f8d3e
+github.com/cznic/zappy,v0.0.0-20181122101859-ca47d358d4b1,h1:ytLS5Cgkxq6jObotJ+a13nsejdqzLFPliDf8CQ8OkAA=,505c19b52924ee21b65611bc45640d3ff4671e50ee04f7c17c38342190645595
+github.com/d2g/dhcp4,v0.0.0-20170904100407-a1d1b6c41b1c,h1:Xo2rK1pzOm0jO6abTPIQwbAmqBIOj132otexc1mmzFc=,15df9468cf548a626e1319e92d550432512c4319cf555bf278ea9215de3504e3
+github.com/daaku/go.zipexe,v1.0.0,h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY=,74d7a0242c03c3c03220e56a59da5f97d3478743250740df538e05e6b609f553
+github.com/danwakefield/fnmatch,v0.0.0-20160403171240-cbb64ac3d964,h1:y5HC9v93H5EPKqaS1UYVg1uYah5Xf51mBfIoWehClUQ=,f601e8d25a43ed32e00851e1686a93b0175dadea8f4e32c8af2f1533f20736bc
+github.com/dave/jennifer,v1.2.0,h1:S15ZkFMRoJ36mGAQgWL1tnr0NQJh9rZ8qatseX/VbBc=,85b37a1b99b7d67664389b8c11b7174f521a396bb59d4e0e766df16336a7f112
+github.com/dave/services,v0.1.0,h1:7isGzpZHJWmOYTV+Pn3f6gpQUmrveJqsQpAkH0HXFbU=,e52a7ffba3aa07cca4888e08248771211abd139928b5cde9b228a61da88eddcc
+github.com/davecgh/go-spew,v1.1.1,h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=,6b44a843951f371b7010c754ecc3cabefe815d5ced1c5b9409fb2d697e8a890d
+github.com/davecgh/go-xdr,v0.0.0-20161123171359-e6a2ba005892,h1:qg9VbHo1TlL0KDM0vYvBG9EY0X0Yku5WYIPoFWt8f6o=,11cb87912b5288e13534cb396935694c257eb9164ffc20ce21e3bc9955edd82a
+github.com/daviddengcn/go-colortext,v0.0.0-20180409174941-186a3d44e920,h1:d/cVoZOrJPJHKH1NdeUjyVAWKp4OpOT+Q+6T1sH7jeU=,159d727adf4f0763ec3dc6156fd46531a2afbffdc17feeb6b5ffe2eb54b35d41
+github.com/davyxu/cellnet,v4.1.0+incompatible,h1:zDRqhkFRhBTD7ajra2888aoRLN1qlv8LV8+qHg/emO4=,f085f088b68b2e379a6dc37501ef2c9809836cfac147a30ed3025571c2d57df7
+github.com/davyxu/golog,v0.1.0,h1:SsV3m2x37sCzFaQzq5OHc5S+PE2VMiL7XUx34JCa7mo=,a3c240bc4b958fa4b4e73caa59c28fc658afbabdb1f28b237874803ca96dcb1f
+github.com/dchest/blake256,v1.0.0,h1:6gUgI5MHdz9g0TdrgKqXsoDX+Zjxmm1Sc6OsoGru50I=,9a9ed00a3024f2f7480b59c7b2ee1013cae3026d7dc2f065ce225dcce8cf357e
+github.com/dchest/siphash,v1.2.1,h1:4cLinnzVJDKxTCl9B01807Yiy+W7ZzVHj/KIroQRvT4=,877a468e533e28c777c59b3dfea175b38a1f0bc1f8551e3a9e1739b1821c7e3e
+github.com/dchest/uniuri,v0.0.0-20160212164326-8902c56451e9,h1:74lLNRzvsdIlkTgfDSMuaPjBr4cf6k7pwQQANm/yLKU=,41db9fb52a841d11d8592a1d4f56e8a440e3991b699ae0f95ab5f5a7b2aeb24c
+github.com/deckarep/golang-set,v1.7.1,h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=,86606609df42529fda55a15475b495f993f0c1cc4be6e1e50a9165a514d1ed71
+github.com/decker502/dnspod-go,v0.2.0,h1:6dwhUFCYbC5bgpebLKn7PrI43e/5mn9tpUL9YcYCdTU=,381fb0bb29ac973f318db3d464f76e5d3016d4963c78ccd7df7dbc4231a68455
+github.com/decred/base58,v1.0.0,h1:BVi1FQCThIjZ0ehG+I99NJ51o0xcc9A/fDKhmJxY6+w=,75b1a2c78759ee2e8755156806ce770c9199464c2d58541388d5ec7c000c99e1
+github.com/decred/dcrd/chaincfg,v1.5.1,h1:u1Xbq0VTnAXIHW5ECqrWe0VYSgf5vWHqpSiwoLBzxAQ=,7344cd4dc90a82342c90811c8180b1fef6c79e9c49caa38135f271cf0ecb056f
+github.com/decred/dcrd/chaincfg/chainhash,v1.0.2,h1:rt5Vlq/jM3ZawwiacWjPa+smINyLRN07EO0cNBV6DGU=,a8b24e2c4e64015430b8a6502f9e8c3eeea246021638884dc510508eccda31a0
+github.com/decred/dcrd/chaincfg/v2,v2.0.2,h1:VeGY52lHuYT01tIGbvYj+OO0GaGxGaJmnh+4vGca1+U=,906dec975cf574c55f2eb588dc91a4ddd6be273eaddfbeb45288ea6aebcc6306
+github.com/decred/dcrd/crypto/blake256,v1.0.0,h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=,cd8bbdae14641f0ba44430fc66990dd37bbfcf1e21a965a9fd1871d16cac127d
+github.com/decred/dcrd/dcrec,v1.0.0,h1:W+z6Es+Rai3MXYVoPAxYr5U1DGis0Co33scJ6uH2J6o=,a1e16c5ef3633f2dfa23c052778552cf9300821197f5b2dc547e20dd9d45756b
+github.com/decred/dcrd/dcrec/edwards,v1.0.0,h1:UDcPNzclKiJlWqV3x1Fl8xMCJrolo4PB4X9t8LwKDWU=,7ed52f3316f5a47c5925e23bebf5016ecfd75e7ac340714b4b94b0e25bdf0611
+github.com/decred/dcrd/dcrec/secp256k1,v1.0.2,h1:awk7sYJ4pGWmtkiGHFfctztJjHMKGLV8jctGQhAbKe0=,5fa2c17fd611665a39e6435283445ec3b46a5b52d14661e04bd1f7ef295ba9d3
+github.com/decred/dcrd/dcrutil,v1.4.0,h1:xD5aUqysGQnsnP1c9J0kGeW8lDIwFGC3ja/gE3HnpCs=,6de50428375fca174f4861f8aa45549360e7733bca0184a882448f0b9f94be2e
+github.com/decred/dcrd/dcrutil/v2,v2.0.0,h1:HTqn2tZ8eqBF4y3hJwjyKBmJt16y7/HjzpE82E/crhY=,fa91eb7c5062e0f3f6e7d1b9d8e1a89698f6ee6e7f8f4941929f6d89a293ec76
+github.com/decred/dcrd/wire,v1.3.0,h1:X76I2/a8esUmxXmFpJpAvXEi014IA4twgwcOBeIS8lE=,e17b78d19d0056503627826a0e599ed14a7a4fc8aa2c31c47b12ffc1864aedb1
+github.com/decred/slog,v1.0.0,h1:Dl+W8O6/JH6n2xIFN2p3DNjCmjYwvrXsjlSJTQQ4MhE=,1c27399a3f38fb7b581f4dbe11a0b3e3d5d8afcc8109880771c0e44135388bb0
+github.com/denisenkom/go-mssqldb,v0.0.0-20191001013358-cfbb681360f0,h1:epsH3lb7KVbXHYk7LYGN5EiE0MxcevHU85CKITJ0wUY=,ff2349c73cee9e54cd61e85af75d7d0537fb5f070da5a737b5abede1f7d579ac
+github.com/denkhaus/bitshares,v0.6.1-0.20190502142618-5ae8c00cb394,h1:PpFS6pvAoRwH13WlqnX/mrxesu6LNFtiVwoWgfNLCeY=,af76695d3e546cad6a8b56d9d5e431bfeb12bfce643a395fb45d8827409dd9ff
+github.com/denkhaus/gojson,v1.0.0,h1:p1hAlN/yAvRvzbdO1HNDQvmBslfyk64IMt3O3DtftPU=,5c0d8d98a53be88e2801d90124e28ba781d2c6a09aaf9a57272df92c5c0e0fe2
+github.com/denkhaus/logging,v0.0.0-20180714213349-14bfb935047c,h1:imM7UU8JD1sNuk2tVEk3QvrY2RZ5f/DOB+UA7c5ThGs=,5a1bb81f35dc7847b0cb8efe3f1e3bac3a34c9f11950a7c7643115c952fa3166
+github.com/denverdino/aliyungo,v0.0.0-20170926055100-d3308649c661,h1:lrWnAyy/F72MbxIxFUzKmcMCdt9Oi8RzpAxzTNQHD7o=,e6ca432bab5a7b1d233c9c1495d32668d31b18803d65f3af27f1d8240b6547d4
+github.com/detached/gorocket,v0.0.0-20170629192631-d44bbd3f26d2,h1:zwp9mAr+YvsgLCFIVJ3/m61Z+NRX35jbD0HBa62ryHY=,f54c9dc20ba925f0b2a726cc1a22466c6e05d7e0080f6e4b5f26e60c15938712
+github.com/detailyang/go-fallocate,v0.0.0-20180908115635-432fa640bd2e,h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU=,dcc45102d034d78825d1aa9d2f61720b4b0d9f76314a7a53b32cf032713a0bde
+github.com/devfeel/dotweb,v1.7.3,h1:tt7YtCIp9JPmAS2yksVIsw6CiUkUSz3kVLSiCzRaWDw=,7cdb6d4872bb4c82fc333722fb2be3e39fe391b121550421d240d3008c8e00a0
+github.com/devigned/tab,v0.1.1,h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA=,528e21b578f28a998453551c51abfdeed154c981486d49a8ad7c149743ea450f
+github.com/dghubble/oauth1,v0.6.0,h1:m1yC01Ohc/eF38jwZ8JUjL1a+XHHXtGQgK+MxQbmSx0=,6d4be6cfc2771fab15e47d2aa9c40d347dab7166f2cae3c248aeb51b10c88b4a
+github.com/dghubble/sling,v1.3.0,h1:pZHjCJq4zJvc6qVQ5wN1jo5oNZlNE0+8T/h0XeXBUKU=,880e7f44ee68eae979a34afb2f95ab1c7555712153c45be01d15cbc5991a5fe6
+github.com/dgraph-io/badger,v1.6.0,h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo=,8329ae390aebec6ae360356e77a2743357ad4e0d0bd4c3ae03b7d17e01ad70aa
+github.com/dgraph-io/dgo,v1.0.0,h1:DRuI66G+j0XWDOXly4v5PSk2dGkbIopAZIirRjq7lzI=,dae0ee7690b0c58d72be328263d55394f88a4924a8274017021736d702be9cee
+github.com/dgrijalva/jwt-go,v3.2.0+incompatible,h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=,26b028eb2d9ee3aef26a96d6790e101f4088ef901008ebab17096966bf6522ad
+github.com/dgryski/go-farm,v0.0.0-20190423205320-6a90982ecee2,h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=,d1fb60f1ce562acb07569d53b43353b73f439911c27eecef716305cd2d730258
+github.com/dgryski/go-jump,v0.0.0-20170409065014-e1f439676b57,h1:qZNIK8jjHgLFHAW2wzCWPEv0ZIgcBhU7X3oDt/p3Sv0=,92666f8caf4843c5a9b6bdb0f48f261922595683351958b0909884adf064cfb2
+github.com/dgryski/go-metro,v0.0.0-20180109044635-280f6062b5bc,h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8=,3f97b3cdeaee7b4fbf4fa06b7c52e3ee6bca461a100077892e861c6c8fc03722
+github.com/dgryski/go-sip13,v0.0.0-20190329191031-25c5027a8c7b,h1:Yqiad0+sloMPdd/0Fg22actpFx0dekpzt1xJmVNVkU0=,81d318bf94b85b240278c35d7ef6015510751e31ffa89eb6287d6d236493551e
+github.com/digitalocean/go-libvirt,v0.0.0-20190626172931-4d226dd6c437,h1:phR13shVFOIpa1pnLBmewI9p16NEladLPvVylLPeexo=,7748e819d19524170969d2a470c212bb3936778ff630f833adc286e8c21e37cc
+github.com/digitalocean/go-qemu,v0.0.0-20181112162955-dd7bb9c771b8,h1:N7nH2py78LcMqYY3rZjjrsX6N7uCN7sjvaosgpXN9Ow=,7530507881e53214ed3c0fb770fb3faed36a57ca6eb376bd2cec91a0e5d575a6
+github.com/digitalocean/godo,v1.11.1,h1:OsTh37YFKk+g6DnAOrkXJ9oDArTkRx5UTkBJ2EWAO38=,5d1ad5b25ad252fb1a02366087fe6e94845ec2dce64dc6e875ed3253a7e0f8ff
+github.com/dimchansky/utfbom,v1.1.0,h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=,27fed73a62fcf06d4ceb28846e5d40786b7e81213aa0d1f4d840e89d25f285f7
+github.com/dimfeld/httppath,v0.0.0-20170720192232-ee938bf73598,h1:MGKhKyiYrvMDZsmLR/+RGffQSXwEkXgfLSA08qDn9AI=,ff59ff07643eccf8a166cc9693fbd18c42869e0bfcc0a9c979435847a7ae4fb1
+github.com/dimfeld/httptreemux,v5.0.1+incompatible,h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA=,031da29a128234db595fdce84301cfe5ff13b4be03c1e344cfe7daadb68559e9
+github.com/disintegration/gift,v1.2.1,h1:Y005a1X4Z7Uc+0gLpSAsKhWi4qLtsdEcMIbbdvdZ6pc=,d9a688a552dc8f5b2319325541e2bbc5c0af66b6e78273058893b259fcca5a0f
+github.com/disintegration/imaging,v1.6.1,h1:JnBbK6ECIZb1NsWIikP9pd8gIlTIRx7fuDNpU9fsxOE=,209474c4c0348672c6747a7a73ff887a6d9458b67df78ff342ee3fd628156412
+github.com/djherbis/atime,v1.0.0,h1:ySLvBAM0EvOGaX7TI4dAM5lWj+RdJUCKtGSEHN8SGBg=,fe677e5c1a8bb168904c0856010bed33a770d49eda9edc6dc1b567940bf20afc
+github.com/dlclark/regexp2,v1.2.0,h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk=,61054c243455e034d7a81e2f6a888cab5a81056a0cc43463cb3536b42cfe7cc1
+github.com/dmotylev/goproperties,v0.0.0-20140630191356-7cbffbaada47,h1:sP2APvSdZpfBiousrppBZNOvu+TE79Myq4kkmmrtSuI=,8afdf7b2989dff361cc80e560c1bd17e5c4ad37826b5caf4b65af8e152cdc6cb
+github.com/dnaeon/go-vcr,v1.0.1,h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=,8f586f95ce5567ef2ae702cf98e56a09ea0cc6171f5cd959e6fcf7502e00dabc
+github.com/dnsimple/dnsimple-go,v0.30.0,h1:IBIrn9jMKRMwporIRwdFyKdnHXVmwy6obnguB+ZMDIY=,5821d521b402f93dc19f6eb332d5f4159800336f53626c6dedd99ce4c351a55a
+github.com/dnstap/golang-dnstap,v0.1.0,h1:hKtRrSTEHuTmG0vCLgKU8WJkXCARoAJMDrlXHTTPBK8=,fe23fd626917c7f45ead63cef4a4bd1bb366bb30ba5873d9ee5432e79b971349
+github.com/docker/cli,v0.0.0-20191031185610-968ce1ae4d45,h1:KJ4FsevlLR30Q2H1aCACmL3CEoUTAZf16PMAJj+ofXI=,145fef54aa162edc123d514ed7a20bc14564581ad95bb6aae7294c3c08df55fd
+github.com/docker/distribution,v2.7.1+incompatible,h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=,be78bc43d74873b67afe05a6b244490088680dab75bdfaf26d0fd4d054595bc7
+github.com/docker/docker,v1.13.1,h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=,1decea9f21d4165bc134de72c51055612ff6992409cd56f3c35b7f78f3b542bd
+github.com/docker/docker-ce,v0.0.0-20180924210327-f53bd8bb8e43,h1:gZ4lWixV821UVbYtr+oz1ZPCHkbtE+ivfmHyZRgyl2Y=,d670d1c5faec51ee82dbc5d479a7fca60916c1b30547994c206622ab338a735a
+github.com/docker/docker-credential-helpers,v0.6.3,h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=,4dd2971b28524442b7a01e118a8040c3ab90eca50d55a7a232af514d18187324
+github.com/docker/engine-api,v0.4.0,h1:D0Osr6+45yAlQqLyoczv5qJtAu+P0HB0rLCddck03wY=,0db5d01c8401192b4eee6d2f9c34aa297d1a892f25230b470efd73f8f7ab59a4
+github.com/docker/go,v1.5.1-1,h1:hr4w35acWBPhGBXlzPoHpmZ/ygPjnmFVxGxxGnMyP7k=,fd626ee84b1eaea11c2a374fda5ed5ca8ad820bb4746ee31519efeb5038077b5
+github.com/docker/go-connections,v0.4.0,h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=,570ebcee7e6fd844e00c89eeab2b1922081d6969df76078dfe4ffacd3db56ada
+github.com/docker/go-events,v0.0.0-20190806004212-e31b211e4f1c,h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=,0f654eb0e7e07c237a229935ea3488728ddb5b082af2918b64452a1129dccae3
+github.com/docker/go-metrics,v0.0.1,h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=,4efab3706215f5b2d29ba823d3991fd6e2f81c02ce45ef0c73c019ebc90e020b
+github.com/docker/go-units,v0.4.0,h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=,0f2be7dce7b1a0ba6a4a786eb144a3398e9a61afc0eec5799a1520d9906fc58c
+github.com/docker/libkv,v0.2.1,h1:PNXYaftMVCFS5CmnDtDWTg3wbBO61Q/cEo3KX1oKxto=,7a0c81782d38b550acc2c0ef0ce397adfc13716f483be6a47d0b97fbc6eea0d5
+github.com/docker/libnetwork,v0.5.6,h1:hnGiypBsZR6PW1I8lqaBHh06U6LCJbI3IhOvfsZiymY=,7aea42c405304c495bf159e5004674eb503eb0120eb4c5d1275fdba65d88cc53
+github.com/docker/libtrust,v0.0.0-20160708172513-aabc10ec26b7,h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=,bf1c1230a3b5c0dadb2c9366aabc99181e708369d735dc83c3eb89f597f42adb
+github.com/docker/machine,v0.16.2,h1:jyF9k3Zg+oIGxxSdYKPScyj3HqFZ6FjgA/3sblcASiU=,1c13210831cafddba1abbf9ef034135233252c62927df396fee6fa0a45efcb43
+github.com/docker/notary,v0.6.1,h1:6BO5SNujR+CIuj2jwT2/yD6LdD+N9f5VbzR+nfzB5ZA=,439fd6664fb75323d78c5a362483f3375a6ac61a3dd08438a503df470a34f300
+github.com/docker/spdystream,v0.0.0-20160310174837-449fdfce4d96,h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=,70964f9eef29843634539b8d6e09c8b51ed6aa96b5deda28b7a44613327a22f2
+github.com/docker/swarmkit,v1.12.0,h1:vcbNXevt9xOod0miQxkp9WZ70IsOCe8geXkmFnXP2e0=,b9d09ff080beb0db2d4d4ebca93438dd080769266eb7aab6d5182e1ad7ba2c3a
+github.com/docopt/docopt-go,v0.0.0-20180111231733-ee0de3bc6815,h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=,00aad861d150c62598ca4fb01cfbe15c2eefb5186df7e5d4a59286dcf09556c8
+github.com/documize/community,v3.2.0+incompatible,h1:ilePrhqxjc+BWpDRsXPyLyMEE1BrGlqCPMg3T577mzQ=,e9e06bdbef4500c0d2cc609164fe23bc05f8234c2c8483c8c9bc3ffffe22bbf7
+github.com/dogmatiq/dogma,v0.6.0,h1:HdJ0cTcORIxZRTB5Z7RdsBXEr18gB3so7FMIHYiAhEQ=,db91004377004aa3c5f0c462205beea995e93a0be13d7d99d3232dc03209f65c
+github.com/donovanhide/eventsource,v0.0.0-20171031113327-3ed64d21fb0b,h1:eR1P/A4QMYF2/LpHRhYAts9wyYEtF7qNk/tVNiYCWc8=,2b911efc5101522ce50399cd7831ef931896541893955441168783666811a1d1
+github.com/dop251/goja,v0.0.0-20190912223329-aa89e6a4c733,h1:cyNc40Dx5YNEO94idePU8rhVd3dn+sd04Arh0kDBAaw=,485156ad52ca9651f728a6039af63f9f11c5bf49846e513635d5fa35d8d39097
+github.com/dotcloud/docker,v1.13.1,h1:jjwxeyQYDwROaGy/YEodF+srQW5hJAnNnaTcfcKoU+0=,83884e41d26b32eae2387080b245792ac8fc0200f645aef02656cb5e4b3d0595
+github.com/drone/go-scm,v1.6.0,h1:PZZWLeSHHwdc6zbSQpg9n0CNoRB+8DAINzX9X/wJifY=,e26d2bc63c53a66252ab24a1b45ced06825bb4101cbd746c581683cf39e520b6
+github.com/dsnet/compress,v0.0.0-20171208185109-cc9eb1d7ad76,h1:eX+pdPPlD279OWgdx7f6KqIRSONuK7egk+jDx7OM3Ac=,25f6bcccb4c1cf6d97ad69253a394bd0a52a633caa623d75b30729aed495a73d
+github.com/dsnet/golib/unitconv,v0.0.0-20190531212259-571cdbcff553,h1:mE6azeVhLnKfk6DH3Zcg56L87yJ/uv9HZ5YJOQcPC4s=,603b60f7278fe7299f59d716da2bd287441f1321b5a663828d894e67bc274bed
+github.com/duosecurity/duo_api_golang,v0.0.0-20190308151101-6c680f768e74,h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=,75c90bdd92362e2cc36297193a543fe0cd75c07f82182940ad6158a1d470cc8b
+github.com/dustin/go-humanize,v1.0.0,h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=,e01916e082a6646ea12d7800d77af43045c27284ff2a0a77e3484509989cc107
+github.com/dylanmei/iso8601,v0.1.0,h1:812NGQDBcqquTfH5Yeo7lwR0nzx/cKdsmf3qMjPURUI=,1e682968bfcac2115e1fd706ec6bd09a0b676d7d224514d8f8dff9cadbf87e79
+github.com/dylanmei/winrmtest,v0.0.0-20190225150635-99b7fe2fddf1,h1:r1oACdS2XYiAWcfF8BJXkoU8l1J71KehGR+d99yWEDA=,5607cb987ec0a699003eeec5952f0280792fd5db7099ca277bdfae26e93b0ef3
+github.com/eapache/go-resiliency,v1.1.0,h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=,a64ebe539335e126b30f79f0f00f39ffe083e794995500a67e0a2156b334788e
+github.com/eapache/go-xerial-snappy,v0.0.0-20180814174437-776d5712da21,h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=,785264afffdcfe50573a1cb0df85ff4186e9e7e4e3a04513752f52d3da1054af
+github.com/eapache/queue,v1.1.0,h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=,1dc1b4972e8505c4763c65424b19604c65c944911d16c18c5cbd35aae45626fb
+github.com/eclipse/paho.mqtt.golang,v1.2.0,h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=,d36337c4b5a2752b91bcd437bd74e0907bf6c9e6c611dab88407bcca8462e918
+github.com/edgexfoundry/go-mod-core-contracts,v0.1.33,h1:lQbLbRhymV0/QDDDGU26idZ9Kv+Q0IETn81hLpHxi68=,a7a8792a8692d64daea343577a49934be6ba64acbe114b3c24262537b5a9157f
+github.com/edsrzf/mmap-go,v1.0.0,h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=,851a1d4d6e30f97ab23b7e4a6a7da9d1842f126d738f7386010c6ee7bf82518e
+github.com/edwingeng/doublejump,v0.0.0-20190102103700-461a0155c7be,h1:FnUE/uuuegwvhGE9z61q9krL5km5Mnwlusq3BT06yy8=,a9cb92422f0bbdd56c80d9873a8f7af6fd2d8d8154a7a11d7cb9232d9146f07c
+github.com/efarrer/iothrottler,v0.0.0-20141121142253-60e7e547c7fe,h1:WAx1vRufH0I2pTWldQkXPzpc+jndCOi2FH334LFQ1PI=,04291e6136b933fd2cdcc29f3af78090a9d678534a94823590eb63f1f318db1d
+github.com/efritz/backoff,v1.0.0,h1:r1DfNhA1J7p8kZ185J/hLPz2Bl5ezTicUr9KamEAOYw=,064d92e7f3e46079d158cac717e1c9bf96a230a5f31bf28940bd4a99bb91657e
+github.com/efritz/glock,v0.0.0-20181228234553-f184d69dff2c,h1:Q3HKbZogL9GGZVdO3PiVCOxZmRCsQAgV1xfelXJF/dY=,716200eb117905f4df509b7260869bb97bf8833c160d2ff1d328d01aa3874bc9
+github.com/eknkc/amber,v0.0.0-20171010120322-cdade1c07385,h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=,b1dde9f3713742ad0961825a2d962bd99d9390daf8596e7680dfb5f395e54e22
+github.com/elastic/go-sysinfo,v1.0.1,h1:lzGPX2sIXaETeMXitXL2XZU8K4B7k7JBhIKWxdOdUt8=,fe0cd64aa3ac73edbb4240dcbcb660c4ec004f07c36371be6d78543c3b215d92
+github.com/elastic/go-windows,v1.0.0,h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY=,e487e6f1e269766b5815c36e93614b87a185ddc33f7a6f4bf23e5ee6d0d0e3c1
+github.com/elastic/gosigar,v0.10.5,h1:GzPQ+78RaAb4J63unidA/JavQRKrB6s8IOzN6Ib59jo=,a139252942b5ca82ddc3d9ced1daa262de0149a413149d3f0234b43dc3635acf
+github.com/elazarl/go-bindata-assetfs,v1.0.0,h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=,3aa225ae5ae4a8059a671fa656d8567f09861f88b88dbef9e06a291efd90013a
+github.com/elazarl/goproxy,v0.0.0-20191011121108-aa519ddbe484,h1:pEtiCjIXx3RvGjlUJuCNxNOw0MNblyR9Wi+vJGBFh+8=,6c224ac5720959a46f6d88e0b15dda732c7eb180b3103a826cf6d5459a5e112f
+github.com/elazarl/goproxy/ext,v0.0.0-20190711103511-473e67f1d7d2,h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM=,7244c1fe7490460503559e24e0e478540bc10481d1d8f3afd0a1f6b1a470b52f
+github.com/emicklei/go-restful,v2.11.1+incompatible,h1:CjKsv3uWcCMvySPQYKxO8XX3f9zD4FeZRsW4G0B4ffE=,9befcac63629841301235124e728206a96170afd83c78b632d271acafc9acccf
+github.com/emicklei/go-restful-swagger12,v0.0.0-20170926063155-7524189396c6,h1:V94anc0ZG3Pa/cAMwP2m1aQW3+/FF8Qmw/GsFyTJAp4=,07fd41dbe765b7d340df21d6353db8bef782f9b6742a93696b6f4133ef1d8955
+github.com/emicklei/proto,v1.6.15,h1:XbpwxmuOPrdES97FrSfpyy67SSCV/wBIKXqgJzh6hNw=,162ad34010e5f81ebed962a33c91ee6356e19631c7a7030bc9b173e85ca34678
+github.com/emirpasic/gods,v1.12.0,h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=,729ea0bda86bf801b61ff66eb019e5b9adc559cd217944abf10bb103fca573ee
+github.com/endophage/gotuf,v0.0.0-20151124190824-3b700e20e376,h1:rPyHFhsuPZMEJAe1Oj2vpRC8277wpDJJ+aabkmlHF1A=,2cd5e6d0e748e0625e8c4a08a3b9f74e311e6654a1c5411fa3a9720f5f67cf40
+github.com/envoyproxy/go-control-plane,v0.9.0,h1:67WMNTvGrl7V1dWdKCeTwxDr7nio9clKoTlLhwIPnT4=,07b3a43081c9e1cdccb95c657cba7f483d5099f9ce07b5e3f3e28ce557687521
+github.com/envoyproxy/protoc-gen-validate,v0.1.0,h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=,ec5261f3bbc426d71e2be4c76063ba12460c5d27845d630763e9e911ec4768af
+github.com/eoscanada/eos-go,v0.8.10,h1:QUwHRBHEFag/qyW4PR2S9++0se0V4LjPLk1/KsNtXlo=,f1c48e793d1c7864288871a944af4b4ee3363ad6ae5298e9c2f9f42202e6d77c
+github.com/erikstmartin/go-testdb,v0.0.0-20160219214506-8d10e4a1bae5,h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=,471feb426b2a7ec1df29cc21c66aef34c9e7aabea751328644d1362593983d21
+github.com/ernesto-jimenez/gogen,v0.0.0-20180125220232-d7d4131e6607,h1:cTavhURetDkezJCvxFggiyLeP40Mrk/TtVg2+ycw1Es=,1f3030cfc89653ba791ae312b19e420dc8eaf1bef51f59dca6aa390f3cd1f3d0
+github.com/etcd-io/bbolt,v1.3.3,h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=,6630d7aad4b10f76aea88ee6d9086a1edffe371651cc2432edfd0de6beb99120
+github.com/ethantkoenig/rupture,v0.0.0-20180203182544-0a76f03a811a,h1:M1bRpaZAn4GSsqu3hdK2R8H0AH9O6vqCTCbm2oAFGfE=,8559344c496621c06b612453de587e8e4c45c0fbc348a955f8eda7ea2b3d09c8
+github.com/ethereum/go-ethereum,v1.9.6,h1:EacwxMGKZezZi+m3in0Tlyk0veDQgnfZ9BjQqHAaQLM=,778c9bf77dd96bfaf5c3ea84498611490999782fb37edf8257680e27dd8976e8
+github.com/euank/go-kmsg-parser,v2.0.0+incompatible,h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY=,43cadfa5ab226f89ca7a715add32ba23c554a5dfafd3a55449856a6b7012f946
+github.com/evanphx/json-patch,v4.5.0+incompatible,h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=,5508e810685a5081a3e880aeb24e501bd87920241baa317bfb5f3946b4fa417c
+github.com/exoscale/egoscale,v0.18.1,h1:1FNZVk8jHUx0AvWhOZxLEDNlacTU0chMXUUNkm9EZaI=,8cb4f10504b54d31c71bc4a670171a074f7abbab67d939fd404b62ad36cb6aed
+github.com/facebookgo/atomicfile,v0.0.0-20151019160806-2de1f203e7d5,h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A=,3c9bdee73452cc12c2936b4050d638d36302a958091ceb49c45ffbaff8954218
+github.com/facebookgo/clock,v0.0.0-20150410010913-600d898af40a,h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=,5d6b671bd5afef8459fb7561d19bcf7c7f378da9943722d36676735b3c6272fa
+github.com/facebookgo/ensure,v0.0.0-20160127193407-b4ab57deab51,h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=,a96c69c2b5902e0383139ee7089877a5ae2ddcd4eba42a595d13b570907d3fdc
+github.com/facebookgo/freeport,v0.0.0-20150612182905-d4adf43b75b9,h1:wWke/RUCl7VRjQhwPlR/v0glZXNYzBHdNUzf/Am2Nmg=,0f717d7eb52e276aec2138a971b091cd04da95826c8f451a20e8e78c4bb8f915
+github.com/facebookgo/grace,v0.0.0-20160926231715-5729e484473f,h1:0mlfEUWnUDVZnqWEVHGerL5bKYDKMEmT/Qk/W/3nGuo=,79f9f73ef925d457d2b70d37b12c3cec97a2e84e73a932397d2f569ec8702ee7
+github.com/facebookgo/httpdown,v0.0.0-20160323221027-a3b1354551a2,h1:3Zvf9wRhl1cOhckN1oRGWPOkIhOketmEcrQ4TeFAoR4=,dbbccf963238c5f80c54edb19aeb016f486f42dcd922fc0be5b832af9449ca4b
+github.com/facebookgo/inject,v0.0.0-20161006174721-cc1aa653e50f,h1:jK9r9Ofgc/Yzdlod77G23LfYtwqAmkQCZ9MaP6779OI=,6292702ff520e1fb14231f29bb2639d8f39edc08de479d76757ad97dafbb9174
+github.com/facebookgo/stack,v0.0.0-20160209184415-751773369052,h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=,0afd18a8394caa29e94bd58a42e0d2be07939f9daf190a9ba2a947f9cbd4ba1a
+github.com/facebookgo/stats,v0.0.0-20151006221625-1b76add642e4,h1:0YtRCqIZs2+Tz49QuH6cJVw/IFqzo39gEqZ0iYLxD2M=,d87443825721dc1dd5c358cd9e55b917ee1c3b6b10ab9557375f59d563b628cb
+github.com/facebookgo/structtag,v0.0.0-20150214074306-217e25fb9691,h1:KnnwHN59Jxec0htA2pe/i0/WI9vxXLQifdhBrP3lqcQ=,3a9c84e9dc2b9960f1de3cc7a61d91fe2978e64e4e4859a9383259092ec91c5e
+github.com/facebookgo/subset,v0.0.0-20150612182917-8dac2c3c4870,h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y=,bb18c678177e1aaaae209a2de9c28b5b7acc34e58fe00517b847a9460bd42df2
+github.com/farsightsec/golang-framestream,v0.0.0-20190425193708-fa4b164d59b8,h1:/iPdQppoAsTfML+yqFSq2EBChiEMnRkh5WvhFgtWwcU=,084f0ac3684b180e3d87db3e7b36a412c750397fbf009579e126c304528c1738
+github.com/fatih/camelcase,v1.0.0,h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=,54664f64f1f24097b80c64b9f606cbe8d8bc410a755ce6cda4f45e46f1141984
+github.com/fatih/color,v1.7.0,h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=,6036f0b31167280b696b5efb43603e71bce31420fb3428afdf74a68bb3a3ebef
+github.com/fatih/structs,v1.1.0,h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=,a361ecc95ad12000c66ee143d26b2aa0a4e5de3b045fd5d18a52564622a59148
+github.com/fatih/structtag,v1.0.0,h1:pTHj65+u3RKWYPSGaU290FpI/dXxTaHdVwVwbcPKmEc=,347fce3911900f5947735c12ccb4c6fbe0199c6df040bcaa4d74a8587af896d0
+github.com/fd/go-nat,v1.0.0,h1:DPyQ97sxA9ThrWYRPcWUz/z9TnpTIGRYODIQc/dy64M=,bdf011af97da57ef3c58a091ae760eb885a6322faa3539d3c37bf76d4fff536a
+github.com/fernet/fernet-go,v0.0.0-20180830025343-9eac43b88a5e,h1:P10tZmVD2XclAaT9l7OduMH1OLFzTa1wUuUqHZnEdI0=,a484a3172222095507a7f1901a91ab741c28278ea6b878c21c1151c0fd40f46d
+github.com/flosch/pongo2,v0.0.0-20190707114632-bbf5a6c351f4,h1:GY1+t5Dr9OKADM64SYnQjw/w99HMYvQ0A8/JoUkxVmc=,814b52f668d2e2528fe9af917506cda4894d22c927283cfb8aaf6857503dfc5a
+github.com/flynn/go-shlex,v0.0.0-20150515145356-3f9db97f8568,h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=,ea68a1d391e59ebc04ce986b88e000327bb141e5e8e80ef93af950bca42bb4cc
+github.com/fogleman/gg,v1.3.0,h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8=,792f7a3ea9eea31b7947dabaf9d5a307389245069078e4bf435d76cb0505439c
+github.com/forestgiant/sliceutil,v0.0.0-20160425183142-94783f95db6c,h1:pBgVXWDXju1m8W4lnEeIqTHPOzhTUO81a7yknM/xQR4=,bedd47c23670847642576777cc8b53b9dd8a5a8e7b0a6f2299ebc6fa3b7b6f00
+github.com/fortytw2/leaktest,v1.3.0,h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=,867e6d131510751ba6055c51e7746b0056a6b3dcb1a1b2dfdc694251cd7eb8b3
+github.com/francoispqt/gojay,v1.2.13,h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=,f41e3e4f3086400448dbce1c06c59f5848a6c5983e5466689965e3a2cabcba7c
+github.com/frankban/quicktest,v1.5.0,h1:Tb4jWdSpdjKzTUicPnY61PZxKbDoGa7ABbrReT3gQVY=,515b5b2b9320b2982193ad6bd118907aaab9ff62189870e00be459cc4097073c
+github.com/fsnotify/fsnotify,v1.4.7,h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=,1d09ad8f3dc41cb6e0288023b47272c1c9393ca411e48f4b5009bca6662dc3ad
+github.com/fsouza/fake-gcs-server,v1.2.0,h1:FZUL/EJlyAlHxpUWZs23ae4zNwBwmHM1p5TykkoP85A=,83b547a0780693f154c30137b1eeaf0c0e9628798ae4b7e1d74ebfb8efaf61fc
+github.com/fsouza/go-dockerclient,v1.5.0,h1:7OtayOe5HnoG+KWMHgyyPymwaodnB2IDYuVfseKyxbA=,c7025b816e0ba28041a88b1063003f4e31097346d06cf69811f9d55505d3d46c
+github.com/fullsailor/pkcs7,v0.0.0-20190404230743-d7302db945fa,h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=,ba36a8fc855d6eecef329d26f8e82132e38d45d06f79f88d3b0bde6d718c8fb2
+github.com/fuyufjh/splunk-hec-go,v0.3.3,h1:7PLVIODblK9FXfuAy8iPZg0lcw1YNzSQHfC+0NYgUxU=,9517f63386f64e0dceca9352f45eb7f160452682a07fa04d3c1ff90eb19ac83d
+github.com/gabriel-samfira/sys,v0.0.0-20150608132119-9ddc60d56b51,h1:rUp9t/FbeJM3R3BSYkJfViN3CNQcmk44H20SqkJ/y+k=,1be262d101bd9079bb859639ad6d5eaee80646b6db0fcbeb7146d9381949d2a8
+github.com/gammazero/deque,v0.0.0-20190130191400-2afb3858e9c7,h1:D2LrfOPgGHQprIxmsTpxtzhpmF66HoM6rXSmcqaX7h8=,a1fe4ec3258f68685ee45b68e1d9188d79726af46a1b93281cf11ddc6045a864
+github.com/gammazero/workerpool,v0.0.0-20190406235159-88d534f22b56,h1:VzbudKn/nvxYKOdzgkEBS6SSreRjAgoJ+ZeS4wPFkgc=,cbb92fdf8d457e27923dc6515af4458a55af932ccf468415c8b36bf49845fc00
+github.com/garyburd/go-oauth,v0.0.0-20180319155456-bca2e7f09a17,h1:GOfMz6cRgTJ9jWV0qAezv642OhPnKEG7gtUjJSdStHE=,be051ba0d52eaced1c1985ebdf2dece3f7127ad392645b42fd06c2af9c9caea2
+github.com/garyburd/redigo,v1.6.0,h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=,68f0d2b454f7a9a000c3335fc0f409123637e4711c6461a4c75e2f128f68f283
+github.com/gavv/monotime,v0.0.0-20161010190848-47d58efa6955,h1:gmtGRvSexPU4B1T/yYo0sLOKzER1YT+b4kPxPpm0Ty4=,c97324768edc8170e05b8925b0551778909c8e15817d4327ac405a4e0b6071f4
+github.com/gcash/bchd,v0.14.7,h1:n3gMXCT4VhU/emiCq61kmKBPADLxBzpX5IlXPnGuR2c=,871644f504d6c3f19dcfc8a7a6e6aa623e6642275a48dfffe770ec61368c2032
+github.com/gcash/bchlog,v0.0.0-20180913005452-b4f036f92fa6,h1:3pZvWJ8MSfWstGrb8Hfh4ZpLyZNcXypcGx2Ju4ZibVM=,d400c8e944edf2a67f46e75335f55c14170c523691804ea71e1a348ad45bc7e7
+github.com/gcash/bchutil,v0.0.0-20191012211144-98e73ec336ba,h1:KVa96lSrJGMYZ414NtYuAlbtCgrmW9kDnjvYXcLrr5A=,7b829a35d22ead0ee82d8a98b1e06da5e63fd07b2798fce8ba87c8da670ef04a
+github.com/gcla/gowid,v1.0.0,h1:78Xf5G9+lb4/g3KCB3hX8UJ8VorymMH5PXu9Npvwf8s=,eaa7e0b7bb0912c6b24c98dee0073a2de754c24e1347ce7c5bfc63397ccf0fa6
+github.com/gdamore/encoding,v1.0.0,h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=,638a9832e2f62d118d7c511d86bdae1622a51f331de48a01d929fd24ebe6a2a6
+github.com/gdamore/tcell,v1.3.0,h1:r35w0JBADPZCVQijYebl6YMWWtHRqVEGt7kL2eBADRM=,97c1e828ff9de0cef3a5bbdb3f3def8a351ad6ca65a780d4dd4141b0ee23c88e
+github.com/genuinetools/pkg,v0.0.0-20180910213200-1c141f661797,h1:SGpZXDd/CFeDIY4Rq5cFO8K/uqDblHUxjlzOmjFpvRg=,c15cbe95e0a7e38cc0a790b0098170c103ba84d56e7cbaf744a6df10c00efa45
+github.com/genuinetools/reg,v0.16.0,h1:ZhLZPT+aUGHLfy45Ub5FLWik+3Dij1iwaj8A/GyAZBw=,a505ff5357d6095540c89ee27d207a3a4dc7c73840fb6bc9a2f0f3a81e498341
+github.com/gernest/wow,v0.1.0,h1:g9xdwCwP0+xgVYlA2sopI0gZHqXe7HjI/7/LykG4fks=,b49d5efc34e19469e7319df09b35438de307ba7cd8c9333ecba190f457ca8e22
+github.com/getgauge/common,v0.0.0-20190514095629-619e107433ce,h1:/ofMj8gIhPYdb/JEXKj8iYe5Yxl3mrK8YA7yl/06t6Y=,04ab4fb7e8dcf693c3b79028693130cd51fe54f5a16f12622975a7c3eb7705f7
+github.com/getlantern/context,v0.0.0-20190109183933-c447772a6520,h1:NRUJuo3v3WGC/g5YiyF790gut6oQr5f3FBI88Wv0dx4=,27515ae761018c4cfc83043194904170bef0cac037c48ff96fc497502b9bab14
+github.com/getlantern/errors,v0.0.0-20190325191628-abdb3e3e36f7,h1:6uJ+sZ/e03gkbqZ0kUG6mfKoqDb4XMAzMIwlajq19So=,a48d7684463e8c496fea4a2595ca71012c3b222bc77de7c2ddfbe78bc4595ac5
+github.com/getlantern/fdcount,v0.0.0-20170105153814-6a6cb5839bc5,h1:8Q9iN/V24EG01IgXEKVScth/rTXpplBxCYio/yIKtUw=,b24c26d5ede197fd6b7f981cf5db300124e22f48667942c948a9750f7a908c94
+github.com/getlantern/golog,v0.0.0-20190830074920-4ef2e798c2d7,h1:guBYzEaLz0Vfc/jv0czrr2z7qyzTOGC9hiQ0VC+hKjk=,1eeabfbc56105f3d751e1947405f5296db5ded7e25900209fe7327f1b5d785e6
+github.com/getlantern/hex,v0.0.0-20190417191902-c6586a6fe0b7,h1:micT5vkcr9tOVk1FiH8SWKID8ultN44Z+yzd2y/Vyb0=,ea5a13f98a82c1919c59b655de531cbb35ac7dfff3c99072b43b8bfd1c29b774
+github.com/getlantern/hidden,v0.0.0-20190325191715-f02dbb02be55,h1:XYzSdCbkzOC0FDNrgJqGRo8PCMFOBFL9py72DRs7bmc=,c901f2e702114d6268446a381a27737c6123e50191197fd84f17b339238191b4
+github.com/getlantern/idletiming,v0.0.0-20190529182719-d2fbc83372a5,h1:laM1s/bxUH8xbbC9TBGWsOc7A0KCAPZMa4pdwO5e6Vw=,35de51b383e926042d3f8f4859e2d961582cf9964d3b7bb513ac4733cc43162f
+github.com/getlantern/mockconn,v0.0.0-20190403061815-a8ffa60494a6,h1:+aO65ByJw74kV8vXqvkj49P5RtIqyUObyeRTIxMz218=,a4a1ccdc9ec68dea571d9603d4a36150b6ccaea447ca88965e088ff0b9eeaa0d
+github.com/getlantern/mtime,v0.0.0-20170117193331-ba114e4a82b0,h1:1VNkP55LM/W2IwWN+qi+5X3gZcEQHfj8X9E+FNxVgM4=,5af0b20838a808b86a2a9c87c254d47185d38d5935780dade3bc7a54dc2880f4
+github.com/getlantern/netx,v0.0.0-20190110220209-9912de6f94fd,h1:mn98vs69Kqw56iKhR82mjk16Q1q5aDFFW0E89/QbXkQ=,cb386d0527fb6f549fa0266c770a68d7d83a88bab2194d25b55355f59198fdf0
+github.com/getlantern/ops,v0.0.0-20190325191751-d70cb0d6f85f,h1:wrYrQttPS8FHIRSlsrcuKazukx/xqO/PpLZzZXsF+EA=,321694d3d2f31415653a7b9d97a4a701f36f10ccfbbdb94449f1211137d6f215
+github.com/getsentry/raven-go,v0.2.0,h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=,eaffe69939612cd05f95e1846b8ddb4043655571be34cdb6412a66b41b6826eb
+github.com/gf-third/mysql,v1.4.2,h1:f1M5CNFUG3WkE07UOomtu4o0n/KJKeuUUf5Nc9ZFXs4=,14a08134ce02bd0d07667da91a89c9098d18bad8c790414e37aba906895a5a3e
+github.com/gf-third/yaml,v1.0.1,h1:pqD4ix+65DqGphU1MDnToPZfGYk0tuuwRzuTSl3g0d0=,6354a95d7faa222d2e653485bc9dd555aad61a75eb5a5f970de531391ed77a2f
+github.com/ghetzel/go-defaults,v1.2.0,h1:U1T64bxhBc6nVZ68QXch1hoHq43h6isqgbvG7kxY9Uc=,f339e441d08af3af184a21f518227db7c705851be82f3fcea611e762ebb633a1
+github.com/ghetzel/go-stockutil,v1.8.6,h1:VgqpePUGGXMHjgArUH5mSAYFC35aiFgkU/TdTU/ts80=,aa0cce06af82b7d1f98a20deaafd6997fa7c3d36fba9a204a34e5d91a2096fa0
+github.com/ghetzel/testify,v1.4.1,h1:wpJirdM+znAnxWruGDBdIys5aU+wGJHNUTkgEo4PYwk=,90206efc10ad71a33bf314ef768d16c6186d23ccb5aa8172663437d497dbfdd7
+github.com/ghetzel/uuid,v0.0.0-20171129191014-dec09d789f3d,h1:YVJe7KwVYazt90hCc/q2dYJVS3062AY6QdT6iHd+Kh8=,924f39fe83589fa269e652c8ca4f7b0dbc59023baada8a55c24692fe5223b67a
+github.com/ghodss/yaml,v1.0.1-0.20190212211648-25d852aebe32,h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=,9771720da98bbdd80dacdefb47b9a0e36faa75caa4745149d150325ba5390e4b
+github.com/gin-contrib/gzip,v0.0.1,h1:ezvKOL6jH+jlzdHNE4h9h8q8uMpDQjyl0NN0Jd7jozc=,e994ecc5881938978d6d031e3d0c1bc5968bfe5de2a307aed7c63aecba459ecd
+github.com/gin-contrib/sse,v0.1.0,h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=,512c8672f26405172077e764c4817ed8f66edc632d1bed205b5e1b8d282816ab
+github.com/gin-gonic/gin,v1.4.0,h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ=,b9bc661bf658179d53fee9e7c587eba4df8326d0c26ad29f785739a78313fc4b
+github.com/glacjay/goini,v0.0.0-20161120062552-fd3024d87ee2,h1:+SEORW3KptcFnlhTbn7N0drG3AFnrcmBDWDyQ3Bt06o=,061319068788a9eeef67d4e5cf84a87c4649005aaa4f37c983a868c357e3df3c
+github.com/gliderlabs/ssh,v0.2.2,h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=,f9f12d766ceeab9e2134504520de75819d1eeb6733b8b619b7bcd4aac4cca983
+github.com/globalsign/mgo,v0.0.0-20181015135952-eeefdecb41b8,h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is=,c07f09e0c93e6410076edfd621d2decbd361361c536c3e33ba097fa51708f360
+github.com/glycerine/go-unsnap-stream,v0.0.0-20180323001048-9f0cb55181dd,h1:r04MMPyLHj/QwZuMJ5+7tJcBr1AQjpiAK/rZWRrQT7o=,9a66d6f9bb1a268f4b824d6fe7adcd55dc17ed504683bdf2dbf67b32028d9b88
+github.com/glycerine/goconvey,v0.0.0-20190315024820-982ee783a72e,h1:SiEs4J3BKVIeaWrH3tKaz3QLZhJ68iJ/A4xrzIoE5+Y=,344fb699344a5ab09464c0283a65402ae0fe6bd6fac7d40e9c4d403cf4a7714f
+github.com/gmallard/stompngo,v1.0.12,h1:uj1Bl9o+dqn0qSR33xHmaKw21W5LzhWo4Q4hS1MCpQU=,88498e4da4e0f7f3923d758a464d53f550921617b1047643def2a973c86dfd03
+github.com/go-aah/forge,v0.8.0,h1:sk4Z523B9ay3JQF4At97U7kecB5yTIm0J2UM/qRVXbQ=,e883adcfb380d6187de84c59a0f8bb3b34931487151873d7a326a1b4df556e48
+github.com/go-acme/lego,v2.7.2+incompatible,h1:ThhpPBgf6oa9X/vRd0kEmWOsX7+vmYdckmGZSb+FEp0=,1a597873ff61c0fbdab6b4f1027141d2e8dbe739bd2018473559bec954f3e651
+github.com/go-acme/lego/v3,v3.1.0,h1:yanYFoYW8azFkCvJfIk7edWWfjkYkhDxe45ZsxoW4Xk=,fbb3cfe2619281c3ccd456b213b5f8c7bf695f82ecac6c97f747dc4159dfe4b2
+github.com/go-ble/ble,v0.0.0-20190521171521-147700f13610,h1:eWay3GzFqTJUEYN1BrbqdDTFeFUGmYLps8SQkn1D7Yo=,a5fb6440935dd7ef8bb3569bc7260bd1ad44e01d41bbb684dbb96cc677fb2234
+github.com/go-chassis/foundation,v0.0.0-20190621030543-c3b63f787f4c,h1:p+Y6yq7RwHmYjEr/vwdVYGacBqFCc2lPQfNRIC3vRIs=,db38c108455e57b3f8f062c22872554d5af9dfa03a723c9fea263a009f3002e6
+github.com/go-chassis/go-archaius,v0.24.0,h1:ubNgs3Rv067PI7t37ZJoIMaPPHIBWV+ni/e7XAdW1hU=,37b0c60692eaed91abd3d2c6a0fc9366a54882f3a6b5ef81f3cc20d14882a13d
+github.com/go-chassis/go-chassis,v1.7.3,h1:7fcfaE9Ij+oBbf2lHoHHIvxT9objtt1EHpwRPBUkDhw=,38f8393558528b0212674268f6dc507d5db716fc5745eff09bccf1cd98b86eb7
+github.com/go-chassis/go-chassis-config,v0.14.0,h1:OnM9sx2GalDC7vEIhPecRpQlVa8hz10NOB41+9tii5A=,afc7506eec8591a5ccbb08f073ba19312bc03d87ec15c1532f5daba02f090e00
+github.com/go-chassis/go-restful-swagger20,v1.0.1,h1:HdGto0xroWGK504XN0Um7JBc0OPMHDlWwedkd2mTGII=,2c41388f71dc766088fc3e47e91a2f8c2d7936e40f6a64afff53a12ef73e0d05
+github.com/go-chassis/paas-lager,v1.0.2-0.20190328010332-cf506050ddb2,h1:iORWPbIQ81tJPKWs9TNvcjCQnqvyTlL41F9ILgiTcyM=,a74c06554cf6835e98c4fa548a4aa3dcc317ca93567af893b89a4dba88b783af
+github.com/go-chat-bot/bot,v0.0.0-20191022130543-3da6cae45477,h1:JfUELmxvEz/MXI3/iSn2UcB/5CCAvMsxKi88j783ssk=,a059cd1d050747bd0adcd3d4ba91e12b0ace2c038187484726c3d551169d9fa4
+github.com/go-chat-bot/plugins,v0.0.0-20181006134258-491b3f9878d6,h1:qNYjVQnDwznjLk+OnNdczA5SXwEa/RwjPTZSQCKofF4=,b19527108aef487fa1f4856e354f4777644a574248cc7e891bacf1bfb38bd12d
+github.com/go-chat-bot/plugins-br,v0.0.0-20170316122923-eb41b30907dc,h1:v/poG4Y4O/z1cUm2cWxiIkFFgRsT3Fe1u1A33evx89g=,6b613e62d3f389f3d6f8f262903bc31c4f1eb4b3ca8d192606f78199b1af0d43
+github.com/go-check/check,v0.0.0-20190902080502-41f04d3bba15,h1:xJdCV5uP69sUzCIIzmhAw6EKKdVk3Tu48oLzM86+XPI=,93bbc1f982dd553e279fb4c7fbc060032096e2b5d0537385ae80247492a6433e
+github.com/go-chi/chi,v4.0.2+incompatible,h1:maB6vn6FqCxrpz4FqWdh4+lwpyZIQS7YEAUcHlgXVRs=,25c94ccd43f18002c2dd07e87da1dc393ff87d615441e559bda425ea0979715b
+github.com/go-cmd/cmd,v1.0.5,h1:IK23uTRWxq6UJnNWp8nKO7mVCwnPfbaxA2lhzEKfNj0=,2623aa43dbf68c24362bcfb7a216b83c2e7473d4a3e49e7955c3fa5f28b4974c
+github.com/go-delve/delve,v1.3.2,h1:K8VjV+Q2YnBYlPq0ctjrvc9h7h03wXszlszzfGW5Tog=,b8a250f2b3ef87da34fbfc655bb23a051b43672bea7a8abc4e083a2b214faf09
+github.com/go-errors/errors,v1.0.1,h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=,bdbee3143e1798eadff4df919479c28ec2d3299a97d445917bc64d6eb6a3b95a
+github.com/go-gl/gl,v0.0.0-20181026044259-55b76b7df9d2,h1:78Hza2KHn2PX1jdydQnffaU2A/xM0g3Nx1xmMdep9Gk=,499822d1b3bcc34b82df0fcc13ac9a0ea273c5d68b3e183e18fa76dab9793954
+github.com/go-gl/glfw,v0.0.0-20190409004039-e6da0acd62b1,h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=,96c694c42e7b866ea8e26dc48b612c4daa8582ce61fdeefbe92c1a4c46163169
+github.com/go-gl/mathgl,v0.0.0-20190713194549-592312d8590a,h1:yoAEv7yeWqfL/l9A/J5QOndXIJCldv+uuQB1DSNQbS0=,39948d90a5672c7866b5b1c01e9e8ce6c80c099306ed80e9e138350840f82110
+github.com/go-ini/ini,v1.49.0,h1:ymWFBUkwN3JFPjvjcJJ5TSTwh84M66QrH+8vOytLgRY=,4820559fd3640c6b5361a7077e8b5c1a4318a06a59df7a095cbf96514d46d432
+github.com/go-kit/kit,v0.9.0,h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=,f3da9b35b100dd32e7b10c37a0630af60d54afa37c61291e7df94bc0ac31ed03
+github.com/go-ldap/ldap,v3.0.3+incompatible,h1:HTeSZO8hWMS1Rgb2Ziku6b8a7qRIZZMHjsvuZyatzwk=,4197e5fbebc7a1805be236cf75dea301f0b8e15a857e2373653b76157c649f93
+github.com/go-log/log,v0.1.0,h1:wudGTNsiGzrD5ZjgIkVZ517ugi2XRe9Q/xRCzwEO4/U=,ec5845d33a6d7ede81970833cfc3179d53b99019da1ebffef5e71005ff94be43
+github.com/go-logfmt/logfmt,v0.4.0,h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=,d678198dc0eeaed28736e0d71b919a0bd98501b7275c69a7917122f6de9e0d1c
+github.com/go-logr/logr,v0.1.0,h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=,4c14b7c05eaa48b7f8dbf2ca38c3603dce446f4184a4c0af2f569b046d66201e
+github.com/go-logr/zapr,v0.1.0,h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54=,7b60c74f722b8f215711503dd63576845987eff81ef5f9dc052fc9158d1c57e2
+github.com/go-macaron/binding,v1.0.0,h1:ILEIP1e9GaXz//fZIl1zXgHVbM9j1SN89aTGOq8340Y=,3887f50d442cd8f9eeeb0e7710c7cba41c185d8e5a82404ff33e7cbd4e16d0c7
+github.com/go-macaron/cache,v0.0.0-20151013081102-561735312776,h1:UYIHS1r0WotqB5cIa0PAiV0m6GzD9rDBcn4alp5JgCw=,a854b7844fff9ec69025db12a2b03834a2eac570a366962c4eb83984813a9fdb
+github.com/go-macaron/captcha,v0.0.0-20190710000913-8dc5911259df,h1:MdgvtI3Y1u/DHNj7xUGOqAv+KGoTikjy8xQtCm12L78=,fb1c643c72ba9ef2c5d613e324e47dbb17ce45a28cbee8cb540ea48a0b3d6a23
+github.com/go-macaron/cors,v0.0.0-20190418220122-6fd6a9bfe14e,h1:auESkcVctNZnNl4EH0TuoCSJMJ7Q7ShU8FS6lDEsAC4=,0f3043631d54efca5615fe7ed819523bbe0c18726ce9e4b0cdc0ef2879aa6044
+github.com/go-macaron/csrf,v0.0.0-20180426211211-503617c6b372,h1:acrx8CnDmlKl+BPoOOLEK9Ko+SrWFB5pxRuGkKj4iqo=,90b5cbd86ff3708d41be70ad3cde77fdedd5ef485b960cc3a9ffea6f0a14902c
+github.com/go-macaron/gzip,v0.0.0-20191101043656-b5609500c6fc,h1:z3gfrCJUPhdRHtd8kftnNBzI5ayZ1zQhWARPeL83JNQ=,dfcc1200b66bcb581c6984da9fa4aefc92facc3a07d182c7c37f0978b41b868f
+github.com/go-macaron/i18n,v0.0.0-20160612092837-ef57533c3b0f,h1:wDKrZFc9pYJlqFOf7EzGbFMrSFFtyHt3plr2uTdo8Rg=,6c1d5fe7ed23e05ca1af7462e6deac2d993ddacd099ad794faad5c685337742d
+github.com/go-macaron/inject,v0.0.0-20160627170012-d8a0b8677191,h1:NjHlg70DuOkcAMqgt0+XA+NHwtu66MkTVVgR4fFWbcI=,666bb04a5df1271326b4fcdbbdc3276400ae7e54f4ed6233792cd6e519676491
+github.com/go-macaron/session,v0.0.0-20191101041208-c5d57a35f512,h1:7ndsXTX42iYHryQz98zUsBJfStJ0kXFKgDrPmRvR400=,3581a7eb19a2a60d41aba7e85afa576c35a97659e162b83292ff67396f899845
+github.com/go-macaron/toolbox,v0.0.0-20180818072302-a77f45a7ce90,h1:3wYKrRg9IjUMfaf3H0Hh7M5Li9ge79Y7aw2yujHa2jQ=,43f2a06502408404c3b1231c3642693632cf20bc4f2cb45881bd2292b1eed714
+github.com/go-martini/martini,v0.0.0-20170121215854-22fa46961aab,h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk=,0561a4dadd68dbc1b38c09ed95bbfc5073b0a7708b9a787d38533ebd48040ec2
+github.com/go-mesh/openlogging,v1.0.1,h1:6raaXo8SK+wuQX1VoNi6QJCSf1fTOFWh7f5f6b2ZEmY=,3606bad571f959cc24382381f7d50fb321819958df37911f6ad6aa5ac3e02181
+github.com/go-ole/go-ole,v1.2.4,h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=,c8b3ef1187d2d7dbfddc4badefcc992c029cd377ae07bff2fa05ec8972836612
+github.com/go-openapi/analysis,v0.19.5,h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI=,22e5ff3f88802059aa86835d8f7c25386afed1159d4e951ef0f87ef62ab4a253
+github.com/go-openapi/errors,v0.19.2,h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY=,e02e448e5a2c1ff2a011f74d41d505a2f32b369551064940630d6660c600bf3d
+github.com/go-openapi/inflect,v0.19.0,h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=,fbcca36e347a2f560f50ac1c9c63f7d6cd97c8dff9800f08f370b5ce09b77c57
+github.com/go-openapi/jsonpointer,v0.19.3,h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=,1fe6122c9c9d10837439398976a2ff55e8ed905fa7e4a66f3fb0e857c6e06582
+github.com/go-openapi/jsonreference,v0.19.2,h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w=,00b2457c2d091a9817f91f55655a334bed8f75b2d6499ba9192f12564dd51dd9
+github.com/go-openapi/loads,v0.19.4,h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY=,adffcd0e2900bf0cca893e6bf014db55ebf161476367ac4dd365f8481c12616f
+github.com/go-openapi/runtime,v0.19.7,h1:b2zcE9GCjDVtguugU7+S95vkHjwQEjz/lB+8LOuA9Nw=,4017d9c69d9d2789d0a3b50c6af509831c0f24bfc545f1b43224df2fc5194dbd
+github.com/go-openapi/spec,v0.19.4,h1:ixzUSnHTd6hCemgtAJgluaTSGYpLNpJY4mA2DIkdOAo=,7c12cf07de1b65175474fdde12110716ab237fa862694e4e5051eb15541a964e
+github.com/go-openapi/strfmt,v0.19.3,h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA=,07b9c9b2da9dffc0a830e6536b705282fd17023fe8d04aa909fe1e4e3b6306f5
+github.com/go-openapi/swag,v0.19.5,h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=,54aec6bdc63d1d6609c32b140fe74d099f8b9628d362689556537506724eaeda
+github.com/go-openapi/validate,v0.19.4,h1:LGjO87VyXY3bIKjlYpXSFuLRG2mTeuYlZyeNwFFWpyM=,2b1b2612db93ed3fb411cc798150821af5c031b120097bbe6578dc4ce2d6d1df
+github.com/go-playground/locales,v0.13.0,h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=,9c4b65273e135b1bdb9bafc7c0b5180a6c5936f54edecbc8807c57a9d107c6b9
+github.com/go-playground/overalls,v0.0.0-20180201144345-22ec1a223b7c,h1:3bjbKXoj7jBYdHpQFbKL2546c4dtltTHzjo+5i4CHBU=,7972d7c49470ee2e187868b30d3157ca58201f50a934caa75ce4d5b134a2a644
+github.com/go-playground/universal-translator,v0.16.0,h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM=,316fba5fa26a586e39fc11698c16e67edabd122efe26f7fff71091a00a59883a
+github.com/go-redis/redis,v6.15.6+incompatible,h1:H9evprGPLI8+ci7fxQx6WNZHJSb7be8FqJQRhdQZ5Sg=,e277bbc2acb8462aca5e20ef7569a733501bc765f65303a6e5153a86e6e3090c
+github.com/go-sourcemap/sourcemap,v2.1.2+incompatible,h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug=,1bdaec84a31896eee149acb563f8af0b3ce7899d916383e0b597d6b480b6a622
+github.com/go-sql-driver/mysql,v1.4.1,h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=,f128045df19d340743a155ef282116130d27e27cbc62de160b6072c751b435ba
+github.com/go-stack/stack,v1.8.0,h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=,78c2667c710f811307038634ffa43af442619acfeaf1efb593aa4e0ded9df48f
+github.com/go-swagger/go-swagger,v0.20.1,h1:37XFujv7lYHLOKawfzLDg4STwwgB5zhPjodN33asJto=,79cc2c57c4e9d03a9399577b942eface46073ee6fa289b86651f1c5d0c513484
+github.com/go-swagger/scan-repo-boundary,v0.0.0-20180623220736-973b3573c013,h1:l9rI6sNaZgNC0LnF3MiE+qTmyBA/tZAg1rtyrGbUMK0=,51aed4b67bce9d988d64ca6be9de2169f709a29d5ea83e78ffb1c2432b346ec6
+github.com/go-telegram-bot-api/telegram-bot-api,v4.6.4+incompatible,h1:2cauKuaELYAEARXRkq2LrJ0yDDv1rW7+wrTEdVL3uaU=,a0d2549e07c67e066337cc6eadd8be2a961d13b493d4325603010d4e35e519df
+github.com/go-test/deep,v1.0.3,h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=,d199ce762552766bd3baf37ae4b0255bb6a6fecf144e8ae5fa3a94f1ce30a180
+github.com/go-xorm/builder,v0.3.4,h1:FxkeGB4Cggdw3tPwutLCpfjng2jugfkg6LDMrd/KsoY=,81028f69e261c29566c24f4717458d04dbe92aebc4eb93a41c1cfeef13b7c5dd
+github.com/go-xorm/core,v0.6.0,h1:tp6hX+ku4OD9khFZS8VGBDRY3kfVCtelPfmkgCyHxL0=,8a8c43c039422f38e1775a835bda46e62f4a055b4b38d57967c0e7a6c9b21d23
+github.com/go-xorm/sqlfiddle,v0.0.0-20180821085327-62ce714f951a,h1:9wScpmSP5A3Bk8V3XHWUcJmYTh+ZnlHVyc+A4oZYS3Y=,e539a37b8fb0d23c21e9eb1fe34db0ffcf19e5e4ae3d3b7049bb23c722c4b382
+github.com/go-xorm/xorm,v0.7.9,h1:LZze6n1UvRmM5gpL9/U9Gucwqo6aWlFVlfcHKH10qA0=,8836904c60cf227804fc843c707cd3e99122b95a97801d09dd2bddce4ed5a29f
+github.com/go-yaml/yaml,v2.1.0+incompatible,h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o=,842989ea2e54ba8e4ef49cca914a5cd37176c44ccd3bb3e8c44fcbc10cb7832e
+github.com/gobuffalo/attrs,v0.1.0,h1:LY6/rbhPD/hfa+AfviaMXBmZBGb0fGHF12yg7f3dPQA=,06c6c210a26c85ae291efe9d54cab9cab26fd1453f4f48962e04c89760e775d0
+github.com/gobuffalo/buffalo,v0.15.0,h1:VsxIcfJaDm4u2UirLHGgMfQpfHVwJP3JoDmGyeeNnc0=,f4553c8809a6764cefac8eefce9a868a42ee7538bdef4eadfcc06075b865a087
+github.com/gobuffalo/buffalo-docker,v1.0.7,h1:kj+AfChcev54v4N8N6PzNFWyiVSenzu6djrgxTBvbTk=,d84d8bea93f017e3ff07eddab57e0fd7007cf2516250d6fea86c8811c36cf786
+github.com/gobuffalo/buffalo-plugins,v1.14.1,h1:ZL22sNZif+k/0I9X7LB8cpVMWh7zcVjfpiqxFlH4xSY=,556641c2c1b3a9d679a3fc46727d41da225f33c63cfbf1ff721203b24e0a9b82
+github.com/gobuffalo/buffalo-pop,v1.23.1,h1:AnxJQZu/ZN7HCm3L8YBJoNWc2UiwSe6UHv5S4DfXUDA=,00dea8b0e63d3f4110b8bd9d32c086163229f56845a9f8b221e0093876065a05
+github.com/gobuffalo/clara,v0.9.1,h1:LYjwmKG0VwwW/nOG2f5jNamvAcfdm2Ysokc/eoVhtZ8=,319f607092c02686dfed2eb047d500c332ddd962341012bdcd91202bb46d37a9
+github.com/gobuffalo/depgen,v0.2.0,h1:CYuqsR8sq+L9G9+A6uUcTEuaK8AGenAjtYOm238fN3M=,efb3db0d05f712580bc8d3dce2967bd09d6c90140ac7bca1fbd5c5c4a28e1836
+github.com/gobuffalo/envy,v1.7.1,h1:OQl5ys5MBea7OGCdvPbBJWRgnhC/fGona6QKfvFeau8=,14ac6a5cd617dc05abfcb136586800f05f861d4a03d8fa66819a18c0d9eddeec
+github.com/gobuffalo/events,v1.4.0,h1:Vje/vgTWs+dyhIS0U03oLpvx1SUdAqutv/hDWIz2ErM=,f6d99c722115631805f04fcf22e8edb7a4116bc65d698ac05c58b6a7f768efdc
+github.com/gobuffalo/fizz,v1.9.5,h1:Qh0GkP7MYtJs9RZwBkPJ0CzEXynVowdNfrjg8b+TOxA=,2f645d789550f8f97039e1c4ce3e3f09dfeec28d85c8977c2b20caa06cd75b0c
+github.com/gobuffalo/flect,v0.1.6,h1:D7KWNRFiCknJKA495/e1BO7oxqf8tbieaLv/ehoZ/+g=,a7011c8d3f59bac18512c76de610bf1a1f022a01ac6695e0c5af7498d33be613
+github.com/gobuffalo/genny,v0.4.1,h1:ylgRyFoVGtfq92Ziq0kyi0Sdwh//pqWEwg+vD3eK1ZA=,4ecf29587a8cbe069fc6b298d9a3cb674a8008ca4e08233904a8cba91d1ba21b
+github.com/gobuffalo/gitgen,v0.0.0-20190315122116-cc086187d211,h1:mSVZ4vj4khv+oThUfS+SQU3UuFIZ5Zo6UNcvK8E8Mz8=,c79975f91dd2fd691d70e29678034eb2dc94b5da2f01b0790a919de9d2a632ac
+github.com/gobuffalo/github_flavored_markdown,v1.1.0,h1:8Zzj4fTRl/OP2R7sGerzSf6g2nEJnaBEJe7UAOiEvbQ=,2d73a2baad09dc0d0f0c01549c35e83ab0c18c97f859191e54a632c2fb0eaad2
+github.com/gobuffalo/gogen,v0.2.0,h1:Xx7NCe+/y++eII2aWAFZ09/81MhDCsZwvMzIFJoQRnU=,f60900e595a3779b95b299ca9e74c517523860994a0477b360ac447d3318ccbd
+github.com/gobuffalo/helpers,v0.4.0,h1:DR/iYihrVCXv1cYeIGSK3EZz2CljO+DqDLQPWZAod9c=,17ae2b069c0ca73b11b4ace6793617e0620f8d8ef171b0010b91e243c4a3bbe3
+github.com/gobuffalo/here,v0.2.3,h1:1xamq7i4CKjGgICCXY0qpxPeXGdB8oVNSevkpqwd5X4=,3808d0fbc11c58cfb0e7b430b9fc30024ba3781febe8e2601a8e2b8f76e48c00
+github.com/gobuffalo/httptest,v1.4.0,h1:DaoTl/2iFRTk9Uau6b0Lh644tcbRtBNMHcWg6WhieS8=,9d1b48f3e525ab4661d02b3fac86f89fe27f648b1ff8e607f39a353c60c0f315
+github.com/gobuffalo/licenser,v1.4.0,h1:S8WY0nLT9zkBTjFYcbJ0E9MEK7SgE86aMfjsnuThQjY=,3e126adeb06dcaee29376804b463ed33af2b821579162039e8a16e45d0334cdc
+github.com/gobuffalo/logger,v1.0.1,h1:ZEgyRGgAm4ZAhAO45YXMs5Fp+bzGLESFewzAVBMKuTg=,43510255e52f7472ec17a76847ca42cebab6efe0b573a5dcfd8261e00d86d3b7
+github.com/gobuffalo/makr,v1.2.0,h1:TA6ThoZEcq0F9FCrc/7xS1ycdCIL0K6Ux+5wmwYV7BY=,113259ce8e945acf3dd184534ab6135240fde6b57d5c6ee3787e7c124e313502
+github.com/gobuffalo/mapi,v1.1.0,h1:VEhxtd2aoPXFqVmliLXGSmqPh541OprxYYZFwgNcjn4=,162640cc01d04543030d55ed51841d673cb8257fd78b069a79010e52ec996b73
+github.com/gobuffalo/meta,v0.2.0,h1:QSDlR2nbGewl0OVL9kqtU8SeKq6zSonrKWB6G3EgADs=,6a44e2a02126c65d2e2f09de5f732327001ac05d542abcabb8dc286422469e9a
+github.com/gobuffalo/mw-basicauth,v1.0.7,h1:9zTxCpu0ozzwpwvw5MO31w8nEoySNRNfZwM1YAWfGZs=,da5e2767a9d91e14efb25209c9b9dcf5ad07b551d6d54670c43c6225c8e94084
+github.com/gobuffalo/mw-contenttype,v0.0.0-20190129203934-2554e742333b,h1:6LKJWRvshByPo/dvV4B1E2wvsqXp1uoynVndvuuOZZc=,f9e2f7cce4e88ff8d6f86bc61076179b4f23a85eb5fd0a5f28793ef1e7889fab
+github.com/gobuffalo/mw-csrf,v0.0.0-20190129204204-25460a055517,h1:pOOXwl1xPLLP8oZw3e3t2wwrc/KSzmlRBcaQwGpG9oo=,b47a0879eadba5c6774ad37c66afea4998767d9df1295b7b17f3469282cc92f2
+github.com/gobuffalo/mw-forcessl,v0.0.0-20180802152810-73921ae7a130,h1:v94+IGhlBro0Lz1gOR3lrdAVSZ0mJF2NxsdppKd7FnI=,533187beeb18b977c8436d0a5596c1bd420b30cce55589cb11af592df063470c
+github.com/gobuffalo/mw-i18n,v0.0.0-20190129204410-552713a3ebb4,h1:c1fFPCxA7SozZPqMhpfZoOVa3wUpCl11gyCEZ4nYqUE=,96a1754eff9c9a75c6b48fc3bc9ab102bbf5d23c103b37a82cc88c666c0dbf9b
+github.com/gobuffalo/mw-paramlogger,v0.0.0-20190129202837-395da1998525,h1:2QoD5giw2UrYJu65UKDEo9HFcz9yun387twL2zzn+/Q=,d2e3b1baa234032585cc0e7dc1950681dbc05d960ee958578e470df9fa3b8f18
+github.com/gobuffalo/mw-tokenauth,v0.0.0-20190129201951-95847f29c5c8,h1:dqwRMSzfhe3rL0vMDaRvc2ozLqxapWFBEDH6/f0nQT0=,eb6f82200a81da34baa366475479069f08ed797d5edd4976c9f2af1027d37f1c
+github.com/gobuffalo/nulls,v0.1.0,h1:pR3SDzXyFcQrzyPreZj+OzNHSxI4DphSOFaQuidxrfw=,a77a09fd75234e7e5589640fae5d261c03ede9ab5ec626406f24c89dfeba2b38
+github.com/gobuffalo/packd,v0.3.0,h1:eMwymTkA1uXsqxS0Tpoop3Lc0u3kTfiMBE6nKtQU4g4=,c7a9263fd464b9f5629bf161521f420b2c40f7780ed6a9ce88184dc4136787a5
+github.com/gobuffalo/packr,v1.30.1,h1:hu1fuVR3fXEZR7rXNW3h8rqSML8EVAf6KNm0NKO/wKg=,20aeea726f6db2ffc8b6dd90b1dce8991f0fd66152a270efdd21c0905b12d5f5
+github.com/gobuffalo/packr/v2,v2.7.1,h1:n3CIW5T17T8v4GGK5sWXLVWJhCz7b5aNLSxW6gYim4o=,60cd83772938a617b37c26a4924ee1f95008d53481724f801eee647e68ce22b1
+github.com/gobuffalo/plush,v3.8.3+incompatible,h1:kzvUTnFPhwyfPEsx7U7LI05/IIslZVGnAlMA1heWub8=,312e219c9827bb7d2dfc954f03fcaa275a3d9eb70687a62ecebad84ede4c51a7
+github.com/gobuffalo/plushgen,v0.1.2,h1:s4yAgNdfNMyMQ7o+Is4f1VlH2L1tKosT+m7BF28C8H4=,0efa90fac0c464409201fa74cace63c4307ac3700a23b3df7c9a9c1c976f0875
+github.com/gobuffalo/pop,v4.12.2+incompatible,h1:WFHMzzHbVLulZnEium1VlYRnWkzHz39FzVLov6rZdDI=,de2837b63e54b15d99234202839e0394183c4ff7c45b9d99162a407c95574003
+github.com/gobuffalo/release,v1.14.0,h1:+Jy7eLN5md6Fg+AMuFRUiK4sTNq4+zXxRho7/wJe1HU=,a0f34f0d3f02ea43434436936766f185b97204a073a605e720190c433c30aaa5
+github.com/gobuffalo/shoulders,v1.2.0,h1:XcPmWbzN7944VXS/I//R7o2eupUHEp3mLFWbUlk1Sco=,4c129ae195bd14520a38c608ba3a27aca674745c1f79fbcce03dacf829802ac6
+github.com/gobuffalo/syncx,v0.0.0-20190224160051-33c29581e754,h1:tpom+2CJmpzAWj5/VEHync2rJGi+epHNIeRSWjzGA+4=,ad9a571b43d72ecce24b8bed85636091710f22d8b06051e1e19ef2051f3e00da
+github.com/gobuffalo/tags,v2.1.6+incompatible,h1:xaWOM48Xz8lBh+C8l5R7vSmLAZJK4KeWcLo+0pJ516g=,99bd74d4144bcdfba45fa501cd8d6dec78dc5b0404bbbfebf5bced5b976bb911
+github.com/gobuffalo/uuid,v2.0.5+incompatible,h1:c5uWRuEnYggYCrT9AJm0U2v1QTG7OVDAvxhj8tIV5Gc=,6ab82616cbb02ddd78b9b7db14f580e2e212ceeadcfccff387a973b04be8db37
+github.com/gobuffalo/validate,v2.0.3+incompatible,h1:6f4JCEz11Zi6iIlexMv7Jz10RBPvgI795AOaubtCwTE=,53d876ba454e5e0604ab8078bfb1fca54dcd3ddd859c850cafce757c5f40153d
+github.com/gobuffalo/x,v0.0.0-20190224155809-6bb134105960,h1:DoUD23uwnzKJ3t5HH2SeTIszWmc13AV9TAdMhtXQts8=,2435ac54f3ea5c024aea1d4db42a87011bb877f18f0f273f7b3e19b7093c3cfd
+github.com/gobwas/glob,v0.2.3,h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=,0cfe486cd63d45ed4cb5863ff1cbd14b15e4b9380dcbf80ff26991b4049f4fdf
+github.com/gobwas/httphead,v0.0.0-20180130184737-2c6c146eadee,h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=,5a43ed4a7cd2b063b634f0df5311c0dfa6576683bfc1339f2c5b1b1127fc392b
+github.com/gobwas/pool,v0.2.0,h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=,52604b1456b92bb310461167a3e6515562f0f4214f01ed6440e3105f78be188f
+github.com/gobwas/ws,v1.0.2,h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=,f9e5c26e83278f19958c68be7b76ad6711c806b6dae766fad7692d2af867bedd
+github.com/gocolly/colly,v1.2.0,h1:qRz9YAn8FIH0qzgNUw+HT9UN7wm1oF9OBAilwEWpyrI=,82f210242c4efda461bb6d2cd0543bbadf322c23b840043f236dc1fd74af9325
+github.com/gocql/gocql,v0.0.0-20191018090344-07ace3bab0f8,h1:ZyxBBeTImqFLu9mLtQUnXrO8K/SryXE/xjG/ygl0DxQ=,d38e5bd51d411bc942f295950d87d80e607a8eb186d51b445cc6c2b985681b18
+github.com/godbus/dbus,v4.1.0+incompatible,h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4=,107ef979cca9f2720633f118263afeb9acb0bf0703cc1e860098d5ec48efccb8
+github.com/gofrs/flock,v0.7.1,h1:DP+LD/t0njgoPBvT5MJLeliUIVQR03hiKR6vezdwHlc=,ee433032ec18df1e38d2385d7f9448820c5a017d895cb930cd8801401940137c
+github.com/gofrs/uuid,v3.2.0+incompatible,h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=,4139fd148a7a9389629659253722b302791146583e0db94e351a325ecd06abbf
+github.com/gogf/gf,v1.9.10,h1:lPBf0EOxv6AXVWN46EKLID0GMHDGOrs4ZAi/RUJbt+c=,83a8cf0cc2557c1e1b3cdb2112953ca303a09cb6d457d2102b3921db1bfd6fe5
+github.com/gogits/chardet,v0.0.0-20150115103509-2404f7772561,h1:deE7ritpK04PgtpyVOS2TYcQEld9qLCD5b5EbVNOuLA=,4b5c6d4b26d381d37b9a5538b9f2dc29d11f422653b19a2047e439a268c3f5ba
+github.com/gogits/cron,v0.0.0-20160810035002-7f3990acf183,h1:EBTlva3AOSb80G3JSwY6ZMdILEZJ1JKuewrbqrNjWuE=,746b3b98243fc5ae7127c5102f9ba4f0b88238d081e9cb113d61be2ec16a6241
+github.com/gogo/googleapis,v1.3.0,h1:M695OaDJ5ipWvDPcoAg/YL9c3uORAegkEfBqTQF/fTQ=,ee9e1dda02a5a415c41b5bdff7f6835e929ea89ff3dc1c766510ee909e03c6c3
+github.com/gogo/protobuf,v1.3.1,h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=,4b63e18981e30565f60b7305e3de71ff9aa42cfccf15b88b3813dd2ba6c27be1
+github.com/gogs/chardet,v0.0.0-20150115103509-2404f7772561,h1:aBzukfDxQlCTVS0NBUjI5YA3iVeaZ9Tb5PxNrrIP1xs=,53b6234983c0828d620ba418be5b4e467ef8c9d634bb3d0a2bd4056e3dfa38b3
+github.com/gogs/cron,v0.0.0-20171120032916-9f6c956d3e14,h1:yXtpJr/LV6PFu4nTLgfjQdcMdzjbqqXMEnHfq0Or6p8=,913889f3018853808015c9198e6d3a25f586d88d88493c3de36530eef967664c
+github.com/gogs/git-module,v0.8.2,h1:fCi0Lt8VZuFgjCXeLpkhC3irKLArK4oZ69gFvrDXx/s=,e4010dd8fdfe88a65fa8af6ecf97d7e16d4235d0eeb6a0b4b1f4e4d201c70d23
+github.com/gogs/go-gogs-client,v0.0.0-20190710002546-4c3c18947c15,h1:tgEyCCe4+o8A2K/PEi9lF0QMA6XK+Y/j/WN01LnNbbo=,cc5dcea1cca3d3d3e90a0ad548a660250b1299a61519f6dda5dcd7f2f1412daf
+github.com/gogs/go-libravatar,v0.0.0-20161120025154-cd1abbd55d09,h1:UdOSIHZpkYcajRbfebBYzFDsL3SuqObH3bvKYBqgKmI=,f81991af4a649aa273bc0c3e7251f107ba0967f5d83553f5a18ed688d937eff0
+github.com/gogs/gogs,v0.11.91,h1:p8kTD9Sn6a/14u6ain6j0dPENMZ0gVEiM7phSIAL29E=,b41695c115f4e2dfc96bfbc7443fa6f91a6d2c8b32d32db4262e6977f5d55fa7
+github.com/gogs/minwinsvc,v0.0.0-20170301035411-95be6356811a,h1:8DZwxETOVWIinYxDK+i6L+rMb7eGATGaakD6ZucfHVk=,fb48a56a9f610b061af186008072fbd6e51055a12c168e1e347ecf9a05f25767
+github.com/gohugoio/hugo,v0.59.1,h1:nxaeKEY52cdpx3wZN/EcY6dEqbgeFsZaeNkDL8azeZ8=,508257b11bfc1ec77d3993a13929de63fa08e70ae26cd7c53f03857b3db9bbdf
+github.com/gohugoio/testmodBuilder/mods,v0.0.0-20190520184928-c56af20f2e95,h1:sgew0XCnZwnzpWxTt3V8LLiCO7OQi3C6dycaE67wfkU=,0d6eabbeb381b08c84e7191fcecc49027ad3382997441180b2d6eea3fafc81b6
+github.com/goji/httpauth,v0.0.0-20160601135302-2da839ab0f4d,h1:lBXNCxVENCipq4D1Is42JVOP4eQjlB8TQ6H69Yx5J9Q=,8467ed1df8ffba8da7ead144b656b6281469ab4d122adf3edf496175ad870192
+github.com/goki/freetype,v0.0.0-20181231101311-fa8a33aabaff,h1:W71vTCKoxtdXgnm1ECDFkfQnpdqAO00zzGXLA5yaEX8=,80884151cd73d38904e4370afba3b870345a883a77c395194582202d805d7d74
+github.com/goki/ki,v0.9.8,h1:SzVTxJrd0ZcnkRTinZdbc41nIFmocJ7pyllEyBzNmys=,ce62e162090d566e2f9cb5b1659327a84c646dced32729e24b420cde4d5cb714
+github.com/goki/prof,v0.0.0-20180502205428-54bc71b5d09b,h1:3zU6niF8uvEaNtRBhOkmgbE/Fx7D6xuALotArTpycNc=,f46b93b6c42a97f06a2f658e49243972f4bd469b296f1010609c8d649163b73f
+github.com/golang-collections/collections,v0.0.0-20130729185459-604e922904d3,h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4=,7847b09c355215616db6309597757ff6be2cf44781d800cdad1628f141dc82ee
+github.com/golang-migrate/migrate/v3,v3.5.2,h1:SUWSv6PD8Lr2TGx1lmVW7W2lRoQiVny3stM4He6jczQ=,5086537ee116e958cf9647e28f843a0ac17f5de75ab642e5aef1fe2b360b0e30
+github.com/golang-sql/civil,v0.0.0-20190719163853-cb61b32ac6fe,h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=,22fcd1e01cabf6ec75c6b6c8e443de029611c9dd5cc4673818d52dac465ac688
+github.com/golang/freetype,v0.0.0-20170609003504-e2365dfdc4a0,h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=,cdcb9e6a14933dcbf167b44dcd5083fc6a2e52c4fae8fb79747c691efeb7d84e
+github.com/golang/gddo,v0.0.0-20180828051604-96d2a289f41e,h1:8sV50nrSGwclVxkCGHxgWfJhY6cyXS2plGjGvUzrMIw=,9a0683005c7700bb1b7ac155597592d15d02f510a0d2c334f8564c43b9072107
+github.com/golang/glog,v0.0.0-20160126235308-23def4e6c14b,h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=,36b3c522c8102dfe74ca96e474c4c361750bf2bb85bc3cefe4f074c07d6825a9
+github.com/golang/groupcache,v0.0.0-20191027212112-611e8accdfc9,h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=,a4815d7048e9a1dd79a72a09d4c9a946ccff837695d046c7f0f5c24037ce18b3
+github.com/golang/lint,v0.0.0-20180702182130-06c8688daad7,h1:2hRPrmiwPrp3fQX967rNJIhQPtiGXdlQWAxKbKw3VHA=,66e95adf2c1feb4de316d2c0ba9e04a22322df010a67b1054ad3d4fb2f9a1791
+github.com/golang/mock,v1.3.1,h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=,3209f2030646855a3644736b5d7ce2cd9076856cac2f50360805a19c38b7bc45
+github.com/golang/protobuf,v1.3.2,h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=,a004ba3acb85e012cb9e468e1d445a81cfeeb4b4db7e9802f30aa500a8341851
+github.com/golang/snappy,v0.0.1,h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=,0a9a73d55340a8e6d17e72684cf90618b275b6034ce83299abb55ed8fb3860bd
+github.com/golangplus/bytes,v0.0.0-20160111154220-45c989fe5450,h1:7xqw01UYS+KCI25bMrPxwNYkSns2Db1ziQPpVq99FpE=,2904c49772d1bade7c81ddae2fa70e42bdce7b006c871c8106d1feb14fe2982b
+github.com/golangplus/fmt,v0.0.0-20150411045040-2a5d6d7d2995,h1:f5gsjBiF9tRRVomCvrkGMMWI8W1f2OBFar2c5oakAP0=,2afd341a4d32c84532d6d44574718e1b8000aa57cfc21ced284612fc92b61217
+github.com/golangplus/testing,v0.0.0-20180327235837-af21d9c3145e,h1:KhcknUwkWHKZPbFy2P7jH5LKJ3La+0ZeknkkmrSgqb0=,fc111aa59d03741dad00f05ce869fcb44f5d75b841413e21e7301bc538a0255e
+github.com/gomodule/redigo,v2.0.0+incompatible,h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=,77342da7b962489363b3661803ee2fba72b23b8e97af0241877ce6ab8a95d194
+github.com/gonum/blas,v0.0.0-20181208220705-f22b278b28ac,h1:Q0Jsdxl5jbxouNs1TQYt0gxesYMU4VXRbsTlgDloZ50=,bfcad082317ace0d0bdc0832f0835d95aaa90f91cf3fce5d2d81ccdd70c38620
+github.com/gonum/floats,v0.0.0-20181209220543-c233463c7e82,h1:EvokxLQsaaQjcWVWSV38221VAK7qc2zhaO17bKys/18=,52afb5e33a03b027f8f451e23618c2decbe4443f996a203e332858c1a348a627
+github.com/gonum/graph,v0.0.0-20190426092945-678096d81a4b,h1:LilU5ERRFWL+2D6yR1PL2oeS4n+xyTq1vfv39LFVaeE=,411fd86d898ad7ea8c1145610a27f0f13153c86b3ef5e78cb80431125082b5a6
+github.com/gonum/internal,v0.0.0-20181124074243-f884aa714029,h1:8jtTdc+Nfj9AR+0soOeia9UZSvYBvETVHZrugUowJ7M=,e7f40a97eee3574c826a1e75f80ecd94c27853feaab5c43fde7dd95ba516c9dc
+github.com/gonum/lapack,v0.0.0-20181123203213-e4cdc5a0bff9,h1:7qnwS9+oeSiOIsiUMajT+0R7HR6hw5NegnKPmn/94oI=,f38b72e072728121b9acf5ae26d947aacc0024dddc09d19e382bacd8669f5997
+github.com/gonum/matrix,v0.0.0-20181209220409-c518dec07be9,h1:V2IgdyerlBa/MxaEFRbV5juy/C3MGdj4ePi+g6ePIp4=,9cea355e35e3f5718b2c69f65712b2c08a1bec13b3cfadf168d98b41b043dd63
+github.com/google/btree,v1.0.0,h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=,8dbcb36f92c7a6dc5f6aef5c26358d98b72caee69829b5b33dddabada2047785
+github.com/google/cadvisor,v0.34.0,h1:No7G6U/TasplR9uNqyc5Jj0Bet5VSYsK5xLygOf4pUw=,5a3807f43a14e6a03b7ceb9ea11f8ac241a42286be90c3b2cba49ee811111848
+github.com/google/certificate-transparency-go,v1.0.21,h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE=,7ddb21b272632236d5fb35b35c837f39d38390ea8dcb97c9f0f5d5aa561c3366
+github.com/google/flatbuffers,v1.11.0,h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A=,ff61e5077ecc7d46a2020c1b42e0a6405b50271f396d4dcc50c683345059af76
+github.com/google/go-cmp,v0.3.2-0.20191028172631-481baca67f93,h1:VvBteXw2zOXEgm0o3PgONTWf+bhUGsCaiNn3pbkU9LA=,6682f890f076aaa03f2c2afb6bc7304c9d602b9e23ff212f8a9a64f44f432dbc
+github.com/google/go-containerregistry,v0.0.0-20191029173801-50b26ee28691,h1:9fkqC5Bq8l2FQgcW6FQbPDUeZvExyg7okl+s4Gg9Jrs=,7bef2c87f7ca8a39e04c770b38160dd5cfdd508546f96fab427225d12d40d85a
+github.com/google/go-github,v17.0.0+incompatible,h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=,9831222a466bec73a21627e0c3525da9cadd969468e31d10ecae8580b0568d0e
+github.com/google/go-github/v21,v21.0.0,h1:tn4/tmCgPAsezJFwZcMnE7U0R9/AtKRBGX4s4LFdDzI=,0b25aebca5386cdb52515402b81a8e0a676ac30f9843feb0a47a1944b7c8b527
+github.com/google/go-github/v24,v24.0.1,h1:KCt1LjMJEey1qvPXxa9SjaWxwTsCWSq6p2Ju57UR4Q4=,4dd0a57a527a1cc52e6619e9d2e1936534439426f0eb065bfbe1e7c03b60d465
+github.com/google/go-github/v28,v28.1.1,h1:kORf5ekX5qwXO2mGzXXOjMe/g6ap8ahVe0sBEulhSxo=,621cca7f4889897317c18ed021fe0f55c279769f11357d90eb21a29c5ea78d04
+github.com/google/go-querystring,v1.0.0,h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=,1c0a0b81b921ee270e47e05cf0bf8df4475de850671e553c07740849068d4f9f
+github.com/google/go-replayers/grpcreplay,v0.1.0,h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic=,794ad7fb2669ea1d1305cf7717a1329146635637739bf2e26d858a318e87f99b
+github.com/google/go-replayers/httpreplay,v0.1.0,h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk=,cf6d3e2262e94db5bad86d944f2f97507b1ffc2943e4385f140eb6f9a01f8e7b
+github.com/google/go-tpm,v0.2.0,h1:3Z5ZjNRQ0CsUj3yWXtbbx4Vfb/sQapdSeZJvuaKuQzc=,7e90cb155fa3e7759caa1fe5df1ca43520a7f8e1a31e540573cc8290ff523a23
+github.com/google/go-tpm-tools,v0.0.0-20190906225433-1614c142f845,h1:2WNNKKRI+a5OZi5xiJVfDoOiUyfK/BU1D4w+N6967F4=,2e41ca1e24a1ba5eedf980331527d6a5ad09b8ef653bbd040321572899eff8a2
+github.com/google/gofuzz,v1.0.0,h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=,752570262575bbcb5f0107dbd80a463abacaf51e94e15f96f5bc4166ff2d33e1
+github.com/google/gopacket,v1.1.17,h1:rMrlX2ZY2UbvT+sdz3+6J+pp2z+msCq9MxTU6ymxbBY=,008645038244e12a1bfbda2317372ec34a514250741139b8e4842de7f98639d4
+github.com/google/gxui,v0.0.0-20151028112939-f85e0a97b3a4,h1:OL2d27ueTKnlQJoqLW2fc9pWYulFnJYLWzomGV7HqZo=,be209ad45b16077b010faef4a7bcbf0723dfbe47869a6f4c0aacd534e7fcbfb1
+github.com/google/martian,v2.1.1-0.20190517191504-25dcb96d9e51+incompatible,h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE=,dfc5eac3877863c1f231457f96c54c915ea1c86f86c590710b7477f96e1ba0f3
+github.com/google/netstack,v0.0.0-20191031000057-4787376a6744,h1:wKeh74w+ydKcE1Eo44WDzIOcPHWmxxmtAzkAL0Mlspc=,dd74d0c9fadfb29db3bd09da657cb95300255d562ce596e88c865a71ee5d2519
+github.com/google/pprof,v0.0.0-20191028172815-5e965273ee43,h1:59gkLC5pLENSgzw9Gx73BQQho5i//80XwgIIYWxZjp4=,667012da0f67eb7822d16f532e850091a58c1efebeef5047df9a02e972112484
+github.com/google/readahead,v0.0.0-20161222183148-eaceba169032,h1:6Be3nkuJFyRfCgr6qTIzmRp8y9QwDIbqy/nYr9WDPos=,3a2435123538463dc3412a2eb1be033b7cf8105775c1ff3524351ec405fa1469
+github.com/google/renameio,v0.1.0,h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=,b8510bb34078691a20b8e4902d371afe0eb171b2daf953f67cb3960d1926ccf3
+github.com/google/rpmpack,v0.0.0-20191101142923-13d81472ccfe,h1:P1WflKHEgTAYe39btxYzeds84DhxQSLj4hfoNn0tCyQ=,5144bdeda051f10f407f1f798502ec0d7599f9c4a7e0a79c3711fe2b79f5cae4
+github.com/google/shlex,v0.0.0-20181106134648-c34317bd91bf,h1:7+FW5aGwISbqUtkfmIpZJGRgNFg2ioYPvFaUxdqpDsg=,250fc48c105475c54cc8c9fe5c110e31986590433de2608740d6592d0dc0a4c6
+github.com/google/subcommands,v1.0.1,h1:/eqq+otEXm5vhfBrbREPCSVQbvofip6kIz+mX5TUH7k=,de4249d9823a0509df32ebad2787d5e54c9b53c1059592bd9a3bb0c4cf58034d
+github.com/google/uuid,v1.1.1,h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=,2b0cbc45fb0e21c8bfebbae9b04babc196d9f06d9f3b9dec5e2adc8cfd0c1b81
+github.com/google/wire,v0.3.0,h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60=,38eb402dbe84aee2f891df0e62623f9ff5615dfeb1e4f631eaac5cf1859c9ea6
+github.com/googleapis/gax-go,v2.0.2+incompatible,h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww=,36fe8c993c8f90067bffbba78f1325ff45ae60c8a85b778d798c56067e55c19e
+github.com/googleapis/gax-go/v2,v2.0.5,h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=,846b017e21fc01f408774051d4a10bfccd7c294e10a1ad5d725278889d5f1d42
+github.com/googleapis/gnostic,v0.3.1,h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=,33277bd9aab84cf04d058a5e2e1dbb5f3c023ba30c6127b4cc8a6662a776de53
+github.com/gopackage/ddp,v0.0.0-20170117053602-652027933df4,h1:4EZlYQIiyecYJlUbVkFXCXHz1QPhVXcHnQKAzBTPfQo=,76b2493aae8a5513b707e4f6c529f57175cca6c834dd19072a51ed3974cd77bc
+github.com/gophercloud/gophercloud,v0.6.0,h1:Xb2lcqZtml1XjgYZxbeayEemq7ASbeTp09m36gQFpEU=,f5be75a3b128c9de7385dd7e2a8ec9fba18fb46dcf57624d88249ae99e188ed2
+github.com/gophercloud/utils,v0.0.0-20190128072930-fbb6ab446f01,h1:OgCNGSnEalfkRpn//WGJHhpo7fkP+LhTpvEITZ7CkK4=,c98b6d529b47679302d175f04d7b635824c292edc8a5ede807f9ba8145517ce7
+github.com/gopherjs/gopherjs,v0.0.0-20190915194858-d3ddacdb130f,h1:TyqzGm2z1h3AGhjOoRYyeLcW4WlW81MDQkWa+rx/000=,ff395ad20350783713974a6b4d03254b811d83c0c0caa13bcb329462a7263f70
+github.com/gopherjs/jquery,v0.0.0-20180404123100-3ba2b901425e,h1:Tf0PnEo36tq56/JezxbbiFpEce0pmK6tY7hS6PNS7tI=,26fb481ef7f7010ec901990527d7ef7b06bc18c38cb617db77f8b61263b5b453
+github.com/gopherjs/jsbuiltin,v0.0.0-20180426082241-50091555e127,h1:atBEgNR1C5+LFkl8ipQtLee9RStheS8YeCSkiYqBhOg=,603151a77e4be25c8389014b06449520c2ad5856f0161590a5de5f01bee28912
+github.com/goreleaser/goreleaser,v0.120.5,h1:N3VirNAK9u30Wj7xulfE9/cCvptO0vl+CLhaMEVGbGs=,9c516d6e8db8c6800102ca68e3f674a62dd42877d7785607f56c22f6dc9b5a9e
+github.com/goreleaser/nfpm,v1.1.2,h1:9+hnNm/h/ANQWLxZixNO562w4tIO/8VlgCwOKwwZTX4=,9781a05527458d352a744a524c94473d2a72694fd54bc559a5888158bb4fa1fb
+github.com/gorhill/cronexpr,v0.0.0-20180427100037-88b0669f7d75,h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY=,742d8957d3f9fe773150fb3164868a755b2af5b705b38c72c45ca5386715c617
+github.com/gorilla/context,v1.1.1,h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=,4ec8e01fe741a931edeebdee9348ffb49b5cc565ca245551d0d20b67062e6f0b
+github.com/gorilla/csrf,v1.6.0,h1:60oN1cFdncCE8tjwQ3QEkFND5k37lQPcRjnlvm7CIJ0=,6fa6b9d34ba1c2409e6575db396f57607c5283e397d38a271b6930c666f166b0
+github.com/gorilla/handlers,v1.4.2,h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=,9e47491112a46d32e372be827899e8678a881f6407f290564c63e8725b5e9a19
+github.com/gorilla/mux,v1.7.3,h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=,9ffc6c6c1194cb2b9f39237ff90b20eb4a55273404c97364ed9a6500e9571fe3
+github.com/gorilla/pat,v1.0.1,h1:OeSoj6sffw4/majibAY2BAUsXjNP7fEE+w30KickaL4=,e0dedacf6f405854b94932a59b410bbda64d4fff8111b674db987ce242bc9d57
+github.com/gorilla/rpc,v1.1.0,h1:marKfvVP0Gpd/jHlVBKCQ8RAoUPdX7K1Nuh6l1BNh7A=,0e83ae0cbc4164cdaf0b808413f97fed7a90e2096095c14f5495b6dbfaa34266
+github.com/gorilla/schema,v1.1.0,h1:CamqUDOFUBqzrvxuz2vEwo8+SUdwsluFh7IlzJh30LY=,42a6d7dc873e8ba1822551b4e15304d5654a11f6da3cccdc270be847148bbfaf
+github.com/gorilla/securecookie,v1.1.1,h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=,dd83a4230e11568159756bbea4d343c88df0cd1415bbbc7cd5badad6cd2ed903
+github.com/gorilla/sessions,v1.2.0,h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ=,8753d00ae6cf8ea0e28c195d4b87875384e2ed79df7eba4cf210fdf9ab0294df
+github.com/gorilla/websocket,v1.4.1,h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=,86eb427567de9e2dc84da52ee4f4315496c5ffc2152928df0e3ac4ce8a359ff7
+github.com/gosimple/slug,v1.9.0,h1:r5vDcYrFz9BmfIAMC829un9hq7hKM4cHUrsv36LbEqs=,0f72d897e3decea434cdc68c7d0226afbda7d6b1908e955bf406333e7d6bb4a7
+github.com/gosuri/uitable,v0.0.3,h1:9ZY4qCODg6JL1Ui4dL9LqCF4ghWnAOSV2h7xG98SkHE=,1316f88b6b2689d941a4727889818705a289c72d7f1f4d2d9cf5cd06fecd0b7b
+github.com/gotestyourself/gotestyourself,v2.2.0+incompatible,h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=,653f8ec3ed62f8d235ab67cfc56e7c814d4ac6f56f24000802b32728523c074c
+github.com/gotnospirit/makeplural,v0.0.0-20180622080156-a5f48d94d976,h1:b70jEaX2iaJSPZULSUxKtm73LBfsCrMsIlYCUgNGSIs=,5750c916115b851f4881b76d90128802d090558958aa821c691d4fa378018093
+github.com/gotnospirit/messageformat,v0.0.0-20180622080451-0eab1176a3fb,h1:akgcoKcMcMOlzb6fdycEck1Vc3+y7ubUjO6hgAOyqC8=,7189231c806aa1988b50a82019c5f972a5f1b82e61c94776999728ec1894cd29
+github.com/graarh/golang-socketio,v0.0.0-20170510162725-2c44953b9b5f,h1:utzdm9zUvVWGRtIpkdE4+36n+Gv60kNb7mFvgGxLElY=,f41faefdf625d1c04113636d467a9fa47fe083148d7393fa65c0f08e3a4078c3
+github.com/grafana/globalconf,v0.0.0-20181214112547-7a1aae0695d9,h1:2/Bz5A5zR4TMGd9yvgGMal7nhQwHBt5/dfp0sbJFfes=,0393f4fa690096ea26c76373e99f9d9f3bfc9b34e5acd08d639b68f68af7b5e2
+github.com/grandcat/zeroconf,v0.0.0-20190424104450-85eadb44205c,h1:svzQzfVE9t7Y1CGULS5PsMWs4/H4Au/ZTJzU/0CKgqc=,2d364bea1939e3ec55b732cae452feb3182fc1d8ffa30f35aa42c0181709d138
+github.com/graph-gophers/graphql-go,v0.0.0-20190225005345-3e8838d4614c,h1:YyFUsspLqAt3noyPCLz7EFK/o1LpC1j/6MjU0bSVOQ4=,fad60e1061e15848aff79c6620f1cf55a9dd87d58ca2f57fea50c35322c817ac
+github.com/graphql-go/graphql,v0.7.9-0.20190403165646-199d20bbfed7,h1:E45QFM7IqRdFnuyFk8GSamb42EckUSyJ55rtVB/w8VQ=,6e9d51c4dc431d2d7c1348fa2b3358ed8e57338a07750177698bde29c913e786
+github.com/gravitational/trace,v0.0.0-20190726142706-a535a178675f,h1:68WxnfBzJRYktZ30fmIjGQ74RsXYLoeH2/NITPktTMY=,6fb8317692ac3aa8280cd4b4749970ec6652ecbe2c629cd43b52005f9a992197
+github.com/graymeta/stow,v0.2.4,h1:qDGstknYXqcnmBQ5TRJtxD9Qv1MuRbYRhLoSMeUDs7U=,67b4e728448b89c2233da14c22f18fe6c720e88a858dff2cd3c7405c7ea10493
+github.com/gregjones/httpcache,v0.0.0-20190611155906-901d90724c79,h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=,73d773791d582cad0d90942e7d92f52d82f13119dd78e849bbd77fae2acc0276
+github.com/grokify/html-strip-tags-go,v0.0.0-20190921062105-daaa06bf1aaf,h1:wIOAyJMMen0ELGiFzlmqxdcV1yGbkyHBAB6PolcNbLA=,0bb5eaff16e4119a9251bb0a26b4190a8e36cbacce8daee8c77df76022e1087c
+github.com/grpc-ecosystem/go-grpc-middleware,v1.1.0,h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg=,def2c3ec1d07264489b79fa0e8e7a5c23545f16ba3c6e613f5cdba2ae8fe2768
+github.com/grpc-ecosystem/go-grpc-prometheus,v1.2.1-0.20191002090509-6af20e3a5340,h1:uGoIog/wiQHI9GAxXO5TJbT0wWKH3O9HhOJW1F9c3fY=,bca256c9eee3d43fe310c205866c69de454e71346f18ea2b05a32bd2f6018c84
+github.com/grpc-ecosystem/grpc-gateway,v1.11.3,h1:h8+NsYENhxNTuq+dobk3+ODoJtwY4Fu0WQXsxJfL8aM=,d96a88c820576b8b6989944cbe15f4f2d94d2884f29f2f683b975a03a5bdc5fc
+github.com/grpc-ecosystem/grpc-opentracing,v0.0.0-20180507213350-8e809c8a8645,h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=,0606bde24e978e9cd91ae45ca9e5222ce695c21a07ae02e77546496bf23b1c62
+github.com/gucumber/gucumber,v0.0.0-20180127021336-7d5c79e832a2,h1:iR8wSrr/JCzL1Ul+dRVxtIOnP8DGg/m02nHZJ9PH6P0=,4feb5116e650552868f056ee74d179e91239bf166d365267f32e903ccc495dbb
+github.com/guptarohit/asciigraph,v0.4.1,h1:YHmCMN8VH81BIUIgTg2Fs3B52QDxNZw2RQ6j5pGoSxo=,976279cdbc5425609c272b2116a92fb5871a40164ae64c51dedffea7b550d2d4
+github.com/guregu/null,v2.1.3-0.20151024101046-79c5bd36b615+incompatible,h1:SZmF1M6CdAm4MmTPYYTG+x9EC8D3FOxUq9S4D37irQg=,1adcbf87f6c55963b0d020ccbac0ebd07e8aca5e0ff22469ac708c6574d7333f
+github.com/gxed/go-shellwords,v1.0.3,h1:2TP32H4TAklZUdz84oj95BJhVnIrRasyx2j1cqH5K38=,c63674c66949c0442402bceca8b7768684875a667140ea0b32afdd46fc094a7f
+github.com/gxed/hashland/keccakpg,v0.0.1,h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU=,c77522ff0820feb7b5be4e1c74d7c64b3aa5afe3452e1dd2f54d1ffa067c6b2d
+github.com/gxed/hashland/murmur3,v0.0.1,h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc=,4576d7ae9b5d2f4ebd238de84f3b52b9d4ae4d41822ac0eabd404d346eace067
+github.com/gxed/pubsub,v0.0.0-20180201040156-26ebdf44f824,h1:TF4mX7zXpeyz/xintezebSa7ZDxAGBnqDwcoobvaz2o=,718b183cca4e30a97d3fa06457060b4d3be66742838d98a39b02ea710693d9eb
+github.com/h2non/filetype,v1.0.8,h1:le8gpf+FQA0/DlDABbtisA1KiTS0Xi+YSC/E8yY3Y14=,534a477c811032fceb0c8e1ad7a15f35ff95f1d038d41164bb4d265860cc42c3
+github.com/h2non/gock,v1.0.9,h1:17gCehSo8ZOgEsFKpQgqHiR7VLyjxdAG3lkhVvO9QZU=,ab5679329b0c26b523254dd728cad1b4e6e2e7bf11569df73a1dcaa468a46cd6
+github.com/h2non/parth,v0.0.0-20190131123155-b4df798d6542,h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=,3b7b7e4bb3c2d0e22075e13443af78d03fb2ed54b3eb5bb1fa6f528c7ebe3ac0
+github.com/hailocab/go-hostpool,v0.0.0-20160125115350-e80d13ce29ed,h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=,faf2b985681cda77ab928976b620b790585e364b6aff351483227d474db85e9a
+github.com/hanwen/go-fuse,v1.0.0,h1:GxS9Zrn6c35/BnfiVsZVWmsG803xwE7eVRDvcf/BEVc=,4b94d038e80959f816a18b34cdcbb5244e87b73956b220aac213483999b54c84
+github.com/hashicorp/aws-sdk-go-base,v0.4.0,h1:zH9hNUdsS+2G0zJaU85ul8D59BGnZBaKM+KMNPAHGwk=,967c057aecede32de140c88b6527149d2441216569620b9d9350522d0f309bdc
+github.com/hashicorp/consul,v1.6.1,h1:ISPgwOO8/vPYrCXQNyx63eJAYjPGRnmFsXK7aj2XICs=,0ca8c5046df99a7a6607ab68b6604340af58d1696c7901088adfd9618850629f
+github.com/hashicorp/consul/api,v1.2.0,h1:oPsuzLp2uk7I7rojPKuncWbZ+m5TMoD4Ivs+2Rkeh4Y=,2833a78c39a4fa869a928e1218f3aa83130e4f5c03b4d4e355fb76b91fa75946
+github.com/hashicorp/consul/sdk,v0.2.0,h1:GWFYFmry/k4b1hEoy7kSkmU8e30GAyI4VZHk0fRxeL4=,3f0b677061f7e79191cc0d2f8184895c20051166959566a2e48e511b1fab222c
+github.com/hashicorp/errwrap,v1.0.0,h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=,ccdf4c90f894d8a5fde4e79d5828c5d27a13e9f7ce3006dd72ce76e6e17cdeb2
+github.com/hashicorp/go-azure-helpers,v0.0.0-20190129193224-166dfd221bb2,h1:VBRx+yPYUZaobnn5ANBcOUf4hhWpTHSQgftG4TcDkhI=,dd17ed56e4b541cffa69679557074071372ab70682f695d8b61126c9393f92dc
+github.com/hashicorp/go-bexpr,v0.1.2,h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs=,ac79086a2900ebf2f5414fe54b5799f24b3ddf953a28299f46831a11b10b1df0
+github.com/hashicorp/go-checkpoint,v0.5.0,h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=,1baf63010271d6c8abc0f4edc9e9d41483cb55218e4e399ca4c70ef225415f36
+github.com/hashicorp/go-cleanhttp,v0.5.1,h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=,e3cc9964b0bc80c6156d6fb064abcb62ff8c00df8be8009b6f6d3aefc2776a23
+github.com/hashicorp/go-discover,v0.0.0-20190403160810-22221edb15cd,h1:SynRxs8h2h7lLSA5py5a3WWkYpImhREtju0CuRd97wc=,c58ed5375890c98a836234f5166cf88b73ad7595899edaa43c775d650043b4b3
+github.com/hashicorp/go-gcp-common,v0.5.0,h1:kkIQTjNTopn4eXQ1+lCiHYZXUtgIZvbc6YtAQkMnTos=,a1fee55619b3579e5fe89b6f944dce87e190b8ea1526f24622ba5941d664b639
+github.com/hashicorp/go-getter,v1.4.0,h1:ENHNi8494porjD0ZhIrjlAHnveSFhY7hvOJrV/fsKkw=,cbae7b8a5f018c78bb304c47840c390b3c3be98b712b90b33d16304f1b427eb1
+github.com/hashicorp/go-hclog,v0.9.2,h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=,e1a873d9fa828038b5b2c93e0f49f9e8187b4f5255d0a3d7989d3ac178807af4
+github.com/hashicorp/go-immutable-radix,v1.1.0,h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc=,c23ca92f0fb7dce35b86d35ccf9cfa871db97379d2ca8a0fcc15fde32ff369bb
+github.com/hashicorp/go-memdb,v1.0.4,h1:sIdJHAEtV3//iXcUb4LumSQeorYos5V0ptvqvQvFgDA=,c3eedd68e60f3db16499dff27fe4d4e874978c250bab152044965a475cb47c72
+github.com/hashicorp/go-msgpack,v0.5.5,h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=,fb47605669b0ddd75292aac788208475fecd54e0ea3e9a282d8a98ae8c60d1f5
+github.com/hashicorp/go-multierror,v1.0.0,h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=,a66a1b9dff26a9a7fcaa5aa5e658c13f94c0daeb572536b1ecc7ebe51f4d0be7
+github.com/hashicorp/go-oracle-terraform,v0.0.0-20181016190316-007121241b79,h1:RKu7yAXZTaQsxj1K9GDsh+QVw0+Wu1SWHxtbFN0n+hE=,5b3ab30e1aef56e38d750a5dc344f1ab996859408a6b76a9f48f5f75747fd712
+github.com/hashicorp/go-plugin,v1.0.1,h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE=,0853effcccdb7bfac1c122f72cd3a1241b4e0934609541c409e9f59b441ae01e
+github.com/hashicorp/go-raftchunking,v0.6.2,h1:imj6CVkwXj6VzgXZQvzS+fSrkbFCzlJ2t00F3PacnuU=,f5c55a3679c8a8f63d798d2b67552bfcd198dc5b9473d81c3ce1b353a055bc5c
+github.com/hashicorp/go-retryablehttp,v0.6.3,h1:tuulM+WnToeqa05z83YLmKabZxrySOmJAd4mJ+s2Nfg=,69cb67f4821e97ca8f04b0cb710c61a5acfaa948dda59b949b40fd6fae8e7dec
+github.com/hashicorp/go-rootcerts,v1.0.1,h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8=,3f558b1a436ed6fb15872383545109227f9552bf5daa95583e9402bbd3a24fff
+github.com/hashicorp/go-safetemp,v1.0.0,h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=,6843a6b60d650ae9be836add0ab5ac1b1719a101bf12fe4ca6678fcd87baa19a
+github.com/hashicorp/go-slug,v0.4.0,h1:YSz3afoEZZJVVB46NITf0+opd2cHpaYJ1XSojOyP0x8=,b6a027a2d69ae8786a6830239a79ceac487463237b49e03250a9b1e116f0a5ac
+github.com/hashicorp/go-sockaddr,v1.0.2,h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=,50c1b60863b0cd31d03b26d3975f76cab55466666c067cd1823481a61f19af33
+github.com/hashicorp/go-syslog,v1.0.0,h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=,a0ca8b61ea365e9ecdca513b94f200aef3ff68b4c95d9dabc88ca25fcb33bce6
+github.com/hashicorp/go-tfe,v0.3.25,h1:4rPk/9rSYuRoujKk5FsxSvtC/AjJCQphLS/57yr6wUM=,5ade1d16517697c7bd04b556f852264eef33906c52d32bd6702c47838c1c1c04
+github.com/hashicorp/go-uuid,v1.0.1,h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=,a05417b988b047d55fca8ad4fec6bde56c3907f679fece48f97d608e61e82a5c
+github.com/hashicorp/go-version,v1.2.0,h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=,a3231adb6bf029750970de2955e82e41e4c062b94eb73683e9111aa0c0841008
+github.com/hashicorp/go.net,v0.0.1,h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw=,71564aa3cb6e2820ee31e4d9e264e4ed889c7916f958b2f54c6f3004d4fcd8d2
+github.com/hashicorp/golang-lru,v0.5.3,h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=,ac6e8bdc76a1275e3496f1ab2484e28ab4be2c81e2da78b8cdd1c2d269b931e4
+github.com/hashicorp/hcl,v1.0.0,h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=,54149a2e5121b3e81f961c79210e63d6798eb63de28d2599ee59ade1fa76c82b
+github.com/hashicorp/hcl/v2,v2.0.0,h1:efQznTz+ydmQXq3BOnRa3AXzvCeTq1P4dKj/z5GLlY8=,6275e2af8b3247c6de72baab13b3be531431f695e001e4d36c920e412a715032
+github.com/hashicorp/hcl2,v0.0.0-20191002203319-fb75b3253c80,h1:PFfGModn55JA0oBsvFghhj0v93me+Ctr3uHC/UmFAls=,42811f77c4da1d31371c51076cbcecc99042fc7a74c6e2622b11bea96043a777
+github.com/hashicorp/hil,v0.0.0-20190212112733-ab17b08d6590,h1:2yzhWGdgQUWZUCNK+AoO35V+HTsgEmcM4J9IkArh7PI=,cb2b110c86a312b7c60094c9b11853ae288945c34fa5861b67ff2d97edaab292
+github.com/hashicorp/logutils,v1.0.0,h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=,0e88424578d1d6b7793b63d30c180a353ce8041701d25dc7c3bcd9841c36db5b
+github.com/hashicorp/mdns,v1.0.1,h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8=,0f4b33961638b1273ace80b64c6fc7e54a1064484b2a1e182ab3d38a35dbc94f
+github.com/hashicorp/memberlist,v0.1.5,h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM=,51054573cad1655b1b349553a8d455eedc15b49f0277edd2e693bc5d0503af62
+github.com/hashicorp/net-rpc-msgpackrpc,v0.0.0-20151116020338-a14192a58a69,h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE=,b0c3a5ec955b0dfb85b39a6aa1d10fe0e810dd78493c0a14ea5760bac1cadd32
+github.com/hashicorp/nomad/api,v0.0.0-20190412184103-1c38ced33adf,h1:U/40PQvWkaXCDdK9QHKf1pVDVcA+NIDVbzzonFGkgIA=,b9e994cd47eed80531b93d9f64be426cbdc6fc6e58323f6b26ae53b1fd692bbd
+github.com/hashicorp/packer,v1.4.4,h1:ee+jewbEfTKV77+YtRR0m2Q8suTiXnr010bBFt5vJSA=,d2fc7c22b3528a4acb321fda24575cf2f88df8f5085b3b5da559e44d8b12295a
+github.com/hashicorp/raft,v1.1.1,h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs=,b6a10aa04b5f45486a6111d4a50cb65ee179b091f04a047e316b85f38ebbf873
+github.com/hashicorp/raft-boltdb,v0.0.0-20191021154308-4207f1bf0617,h1:CJDRE/2tBNFOrcoexD2nvTRbQEox3FDxl4NxIezp1b8=,e2008570aed06ba72cd783d6bc729b67b7e0cecd2219a8420dd24dcef82e64f8
+github.com/hashicorp/raft-snapshot,v1.0.1,h1:cx002JsTEAfAP0pIuANlDtTXg/pi2Db6YbRRmLQTQKw=,3d40d03f6793fe87464359f28b136b920daf7aa8544a98270470d04cef132a77
+github.com/hashicorp/serf,v0.8.5,h1:ZynDUIQiA8usmRgPdGPHFdPnb1wgGI9tK3mO9hcAJjc=,88623d0f1a155bb2fe254210f68f1603b42162f031fbf51256f1465b36bc7769
+github.com/hashicorp/terraform,v0.12.13,h1:LACXUTZvAGf8W/6wehHjOgi6YEMN7ejDUpnpll2qbJ0=,4dbe6d0c15f4d934fd583fc20bec55326ffc79cf0d5b7fd28978ba14d178fe8d
+github.com/hashicorp/terraform-config-inspect,v0.0.0-20190821133035-82a99dc22ef4,h1:fTkL0YwjohGyN7AqsDhz6bwcGBpT+xBqi3Qhpw58Juw=,1261dc9b65805f9be029f6a42d9e0ddccc89c4d0c50e5fa2895b1b53198195c3
+github.com/hashicorp/terraform-svchost,v0.0.0-20191011084731-65d371908596,h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg=,8055e9f82b0484eb70594ca682bcf4401d2286c2021ffc72c6c3b6ad9ac9a024
+github.com/hashicorp/vault,v1.2.0-rc1,h1:GFYP6ck5f0EaJsGMD4PARIX5HaHREUxMbTaVPy+dFEg=,89c84474c97b1400ca858fe1b6e0eb3bd91dac17a4aff4336bd95104381e8b2b
+github.com/hashicorp/vault-plugin-auth-alicloud,v0.5.2-0.20190725165955-db428c1b0976,h1:f+r1gXVvQJ0+2pfxgBDP1zZUC6lUmPNM0xp7AKupyBg=,3bc95606713215c3ae25f9be06ed2186f8f2e5e9ad8e025fafd332d97045ed09
+github.com/hashicorp/vault-plugin-auth-azure,v0.5.2-0.20190725170003-0fc4bd518aa2,h1:Ua6AFhJYkdNGC5s4uDL7EGVBD/jPUOcnubDkPsaG7K8=,bfa988cf4a3e33e7db3caf2ecad55c2f7c2e3f39088243767927ac8ed8d2556e
+github.com/hashicorp/vault-plugin-auth-centrify,v0.5.2-0.20190725170023-9d8d5e0b03e6,h1:UXM3yxzNaruvgaccRjFXKcKnsTTHzp213MJ045wto6A=,165cf5f7daa0e4c286bad12f09eeac648062554399f18890bf35789b6b16e9c7
+github.com/hashicorp/vault-plugin-auth-gcp,v0.5.2-0.20190725170032-0aa7d1c92039,h1:uqYbah1dntV8OccHCbY3bBzYX/zLtjmG0ZIZPV+x6EM=,e9f9ccc7ca02c40291bb27012f2dd1fead86d2de2ab85a6d07b0aa7d98533f49
+github.com/hashicorp/vault-plugin-auth-jwt,v0.5.2-0.20190725170041-1cfee03e8d3a,h1:zdhacnLMH4P47PdSPJo0omNh+IkSvPj0LbiHLQu0aVk=,2b04c80c6d2000558b63ced2c9ba60a4a2ffe4c76d2d58d5fe121f714a3cf291
+github.com/hashicorp/vault-plugin-auth-kubernetes,v0.5.2-0.20190725170047-354505be0ecf,h1:il4UUQC9zfsSRNR2EAQVqC+DzrvzZpFmJReQ7p6/bKw=,d95794ab78e644a95799a3505a83af58a83c6bf775e69832b3660ca47c042d5a
+github.com/hashicorp/vault-plugin-auth-oci,v0.0.0-20190904175623-97c0c0187c5c,h1:z6LQZvs1OtoVy2XgbgNhiDgp0U62Xbstn7/cgNZvh6g=,b23f2afa7fab5368d83a01be865e2dddf7ba6c7e8804ac205ccc1701a9239d51
+github.com/hashicorp/vault-plugin-auth-pcf,v0.0.0-20190725170053-826a135618c1,h1:mPyQ1+jB/ztcqebEdmNhSuYq4XVOpB5TUyyi0118T40=,c444159df670a1aba7e59029bf928989091886fa45970f751fe644d243d43744
+github.com/hashicorp/vault-plugin-database-elasticsearch,v0.0.0-20190725170059-5c0d558eb59d,h1:VUD1T3aI5GL8uoSSDhHncHP8ksgepZsvSLhsRG8MJ3s=,b151c27f632b8e05686473b4936b480cec694498c1e446dc5208b4db05c559f1
+github.com/hashicorp/vault-plugin-secrets-ad,v0.5.3-0.20190725170108-e1b17ad0c772,h1:N219G3MUxPRhtOBMFVdsSQWU47MrvivSHLmTAPpHcs4=,c8801ee5f030fa6cb36045f1e321d964224f9a2b4a17100140cafca7a6d8daf5
+github.com/hashicorp/vault-plugin-secrets-alicloud,v0.5.2-0.20190725170114-7d66a3fa0600,h1:kyHR0JOKFDAaC4sjQ3iD1lTH6uaIfmTk4rQ+JOGW5Zo=,f816c029601c9e7235f798e9591246ac935d3b1e330abfb23af59afa6bc08e0d
+github.com/hashicorp/vault-plugin-secrets-azure,v0.5.2-0.20190725170121-541440395211,h1:hZ21h0DWWKkoeMW7zkYaPVLxGZtKfYyIcE9G8xug4YQ=,5d71ad3ef26fd40b3afaf852913f38e5be0a1db8844bf21cb787e204cdbc48e4
+github.com/hashicorp/vault-plugin-secrets-gcp,v0.5.3-0.20190725170127-aa49df112140,h1:gSvWU9aYAsHxqKU0ohJD9njlNQ1/qLFPRs85u+xJFv4=,9b209e3ef7b8d7c41e823705cc190699540bbd2076f82344a83c106fa7e4ac98
+github.com/hashicorp/vault-plugin-secrets-gcpkms,v0.5.2-0.20190725170135-aaf270943731,h1:zP2vqetYhON59Mf5FTV9KmyKSnY1cLFzdNW0YYnNKbo=,5d3bc6de4bdad4725c4348a0d6861bce3e80a9eb13d4b05179cd663b47f46545
+github.com/hashicorp/vault-plugin-secrets-kv,v0.5.2-0.20190725170141-1c4dac87f383,h1:4IqT7JQt/GyYKr0HGemkUlYpF45ZALHSN9rHy7Sipos=,10f03c6d8a51714692b43ab69c2cb5f041ac611210aa9804237a9345e930f018
+github.com/hashicorp/vault/api,v1.0.4,h1:j08Or/wryXT4AcHj1oCbMd7IijXcKzYUGw59LGu9onU=,a885d16e067a5586e55914cd8e40f250a28fe94b3b864de47d495ad1f71c4251
+github.com/hashicorp/vault/sdk,v0.1.14-0.20190909201848-e0fbf9b652e2,h1:b65cSyZqljnCPzzsUXvR4P0eXypo1xahQyG809+IySk=,0aca8708570b724605514cab6dbbc9cc7bce5d27786a4b2da87553c437c42463
+github.com/hashicorp/vic,v1.5.1-0.20190403131502-bbfe86ec9443,h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw=,9c09a35b14d797812e6714073471b3472c16f9cb4deb430f9e2dd15fa8d25e32
+github.com/hashicorp/yamux,v0.0.0-20190923154419-df201c70410d,h1:W+SIwDdl3+jXWeidYySAgzytE3piq6GumXeBjFBG67c=,d8a888d6a4ecbc09f2f3663cb47aa2d064298eeb1491f4761a43ae95e93ba035
+github.com/herenow/go-crate,v0.0.0-20190617151714-6f2215a33eca,h1:kk1qCxy+FS5McLJ69dSpB6Y6kHCMa23UwHyglIzJ/bk=,aa618858b9c03e47962afb2a4098ad6cca8ecd09904cdbc5eb62c5a1d74befca
+github.com/hetznercloud/hcloud-go,v1.15.1,h1:G8Q+xyAqQ5IUY7yq4HKZgkabFa0S/VXJXq3TGCeT8JM=,028402928c1bc1db686cab5738e6fb91a61252c1236258e2d911dd8da21f8af5
+github.com/hinshun/vt10x,v0.0.0-20180616224451-1954e6464174,h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ=,4afc77bd4950db746c68d23e6ed681d31cd952559d712c1400da476084567cf6
+github.com/hjfreyer/taglib-go,v0.0.0-20151027170453-0ef8bba9c41b,h1:Q4OOFmH18aIjnDJlvYm4BXmpHKXk1zTJP0QZ0otNwPs=,e7735f2cdbb7441dbe6bbc303cff9b9a20d9845dc901e31f6e29e3ef83613390
+github.com/howeyc/fsnotify,v0.9.0,h1:0gtV5JmOKH4A8SsFxG2BczSeXWWPvcMT0euZt5gDAxY=,a72f2f092433c8b53e095d6db3d3e18517db1a5a9814a78ed97194239145740f
+github.com/howeyc/gopass,v0.0.0-20190910152052-7cb4b85ec19c,h1:aY2hhxLhjEAbfXOx2nRJxCXezC6CO2V/yN+OCr1srtk=,83560b6c9a6220bcbb4ad2f043e5a190ab11a013b77c1bbff9a3a67ed74d4b37
+github.com/hpcloud/tail,v1.0.0,h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=,3cba484748e2e2919d72663599b8cc6454058976fbca96f9ac78d84f195b922a
+github.com/huandu/xstrings,v1.2.0,h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0=,fe7011ad569e464d6ff81bdb1d80c4ebdb5baac5c89d17c1644a23cac0c48828
+github.com/huin/goupnp,v1.0.0,h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=,9685536729d9860766846ad4e56fb961b246d5afa209e4058ee0d021aec37827
+github.com/huin/goutil,v0.0.0-20170803182201-1ca381bf3150,h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=,d887199bd2f388075ff7aaf1d3061b13b92c20e01ccd6337c864fd409fe78831
+github.com/hybridgroup/go-ardrone,v0.0.0-20140402002621-b9750d8d7b78,h1:7of6LJZ4LF9AvF4bTiMr2I72KxodBf1BXrSD9Tz0lWU=,997e0efef1b73cc1930ad67cd649268ff864393fa85dedf32672ecca78647021
+github.com/hybridgroup/mjpeg,v0.0.0-20140228234708-4680f319790e,h1:xCcwD5FOXul+j1dn8xD16nbrhJkkum/Cn+jTd/u1LhY=,d9134203da596f895c55c3a9fd0aea32ad26501ca88e646cbe9f82136f592c0f
+github.com/hyperledger/fabric,v1.4.3,h1:6MmYhcDbxhd0TvpvHLR3c5m3fVjaX97690H8TRjpJNA=,067d2bd69094dc9f693d9b00c8bea810f61f6a8a3d0ac640830b468934e22023
+github.com/hyperonecom/h1-client-go,v0.0.0-20190122232013-cf38e8387775,h1:MIteIoIQ5nFoOmwEHPDsqng8d0dtKj3lCnQCwGvtxXc=,135625f81c1c6c62b296269829a74f1266928600545fedec0825cb97284264f6
+github.com/iancoleman/strcase,v0.0.0-20190422225806-e506e3ef7365,h1:ECW73yc9MY7935nNYXUkK7Dz17YuSUI9yqRqYS8aBww=,f93e74faf2e05699180c40ef21204629a1c6bd382658f1059c80631c377c5246
+github.com/ianlancetaylor/demangle,v0.0.0-20181102032728-5e5cf60278f6,h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=,73ae40ed96af2703f85cd4c552cf6b14551ceb782348be8185b730f44c842ab9
+github.com/iij/doapi,v0.0.0-20190504054126-0bbf12d6d7df,h1:MZf03xP9WdakyXhOWuAD5uPK3wHh96wCsqe3hCMKh8E=,7e33155961c2cba072047deb34d19a7d863a713e502abe8bdc31ab91424bd226
+github.com/ijc/Gotty,v0.0.0-20170406111628-a8b993ba6abd,h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM=,b8b9a99b3632feb3449d1fb8950d292333f8a7f494b182320ecdb0479d78442f
+github.com/imdario/mergo,v0.3.8,h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=,579cad1ed913cfcb424deb97e7016749abcc9d585bad07d14f19550df052cec5
+github.com/imkira/go-interpol,v1.1.0,h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk=,de5111f7694700ea056beeb7c1ca1a827075d423422f251076ee17bd869477d9
+github.com/improbable-eng/grpc-web,v0.9.1,h1:tenDg9Lg+zYXeS/ojbKyfwVO5TVYh5FFGsrXNAblF1o=,3a287ae758b41feea9f26ec1b8757628d4742b87376fa40b29d878ee651bfe62
+github.com/imroc/req,v0.2.3,h1:ElMCifcqg/1GonGloyyTUrj6D6IITL6EiNEKHUl4xZM=,951172f0969fa0bad31ebbe9b17699ea3909b09eaf8df39ccd78e48097682c78
+github.com/inconshreveable/go-update,v0.0.0-20160112193335-8152e7eb6ccf,h1:WfD7VjIE6z8dIvMsI4/s+1qr5EL+zoIGev1BQj1eoJ8=,adf856fb49e7c5059b2edb42a31daf4a536dc698fe0728835b018150a884b678
+github.com/inconshreveable/log15,v0.0.0-20180818164646-67afb5ed74ec,h1:CGkYB1Q7DSsH/ku+to+foV4agt2F2miquaLUgF6L178=,31875747bcd198c39714d38747ac77e585620f2f37d1b1e1a03b164af6762995
+github.com/inconshreveable/mousetrap,v1.0.0,h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=,c3fa0813e78f5cef10dc0e9912c43e68e06ff970a98e98c4050fe14dbbfd18c7
+github.com/influxdata/changelog,v1.1.0,h1:HXhmLZDrbuC+Ca5YX7g8B8cH5DmJpaOjd844d9Y7aTQ=,19e60d9b658aaecca4e075126c996c1abd5e369003c14bbe575edc4ba2b9c182
+github.com/influxdata/flux,v0.52.0,h1:R91uUXbHzoiyYF7Xhm+wP3a0iSnl43iYJrN93nBhuP0=,e0121889c46cc4ad22f1662e68df7dbdfbb361c3da6809add4d1409cef764be9
+github.com/influxdata/influxdb,v1.7.9,h1:uSeBTNO4rBkbp1Be5FKRsAmglM9nlx25TzVQRQt1An4=,b49a72374a14f726229e71152e74e8a132c2913137c4457f31bae8c7735e812c
+github.com/influxdata/influxdb1-client,v0.0.0-20190809212627-fc22c7df067e,h1:txQltCyjXAqVVSZDArPEhUTg35hKwVIuXwtQo7eAMNQ=,fc41ea93bf2b06b231823b116dc11b0ed89badf1ce6a4c848a33c77dcf2c123a
+github.com/influxdata/influxql,v1.0.1,h1:6PGG0SunRmptIMIreNRolhQ38Sq4qDfi2dS3BS1YD8Y=,2a697984d1cd82656f69901bfe1771676493411c1370d77271bde3ab3c917a1e
+github.com/influxdata/line-protocol,v0.0.0-20180522152040-32c6aa80de5e,h1:/o3vQtpWJhvnIbXley4/jwzzqNeigJK9z+LZcJZ9zfM=,6111b5e459106f7003477186aa2e34423dbe0c53983944a07d8b835ff8c7757c
+github.com/influxdata/promql/v2,v2.12.0,h1:kXn3p0D7zPw16rOtfDR+wo6aaiH8tSMfhPwONTxrlEc=,b928626f2eb81eed0046ef23a83a77a28dd140d369a0d2538c94e85d1055877f
+github.com/influxdata/tdigest,v0.0.0-20181121200506-bf2b5ad3c0a9,h1:MHTrDWmQpHq/hkq+7cw9oYAt2PqUw52TZazRA0N7PGE=,5d6b056d98d1e7e9cd884aea4e73934cc8ea89218eb43ee1d5140d3ccb34ed52
+github.com/influxdb/influxdb,v1.7.9,h1:KMBwwvyJyBppIwrg5t0662p+Yei/ucnIkqUl8txiQdQ=,ad251d4cc00aec767465dc60d6b702a3635b68402123a4ee5d1ee2b5006310b3
+github.com/iotexproject/go-pkgs,v0.1.1,h1:AyWJf8jqOg4aMSrxi+MInFFBZhTvSm0LCu1o08heijk=,c5099edde7450b4f8b9a0f49c42697f5e9bcb92d2bf58395aa0681f3ef6b583d
+github.com/iotexproject/iotex-address,v0.2.1,h1:ZJH2ajx5OBrbaRJ0ZWlWUo685zr5kjWijVjtmUrm42E=,53c7ce4d7fbc55ee79e92e9e0b31ee3b3ba0e6e5d3e24cd43e0a58c766568c9d
+github.com/iotexproject/iotex-proto,v0.2.5,h1:SYdl9Lqb0LYfFf3sfw92fN8GY3bthfCvGmltz+2uvDQ=,546cb070e92286601aee16d03383712172061c8fe78e53cf04498a9358470a78
+github.com/ipfs/bbloom,v0.0.4,h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=,92993c175552cc626ef6b1ab6cf887f0f640311748c47e7615df29a966c1b774
+github.com/ipfs/go-bitswap,v0.1.3,h1:jAl9Z/TYObpGeGATUemnOZ7RYb0F/kzNVlhcYZesz+0=,ee26d57b2765f808ebebca8aa18695bfa02b738f47b4b5db5efce5c91f28fbcd
+github.com/ipfs/go-block-format,v0.0.2,h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE=,02ad9fa29f97073ece45a5da7a92e59e6c6b856e9a03bd853361b8107296c020
+github.com/ipfs/go-blockservice,v0.1.2,h1:fqFeeu1EG0lGVrqUo+BVJv7LZV31I4ZsyNthCOMAJRc=,31c5ff02d71ee454bebea3944d7e06c2ffd6f1c4cfdddf71c5122e982f261c7d
+github.com/ipfs/go-cid,v0.0.3,h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms=,f8bd60f8bbd79ed1fa5c8c113f6e17addb12257b0d925d3327ee7c25a7733591
+github.com/ipfs/go-datastore,v0.1.1,h1:F4k0TkTAZGLFzBOrVKDAvch6JZtuN4NHkfdcEZL50aI=,be724f5e3a459cf6ae9e68d2fa14e27cc92c53ae775979f2412b4f5b3f2b0336
+github.com/ipfs/go-detect-race,v0.0.1,h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=,c00c97cacb355cb0569bee75775eff6b656d95dd7d0855ed97c2ee44666b72cd
+github.com/ipfs/go-ds-badger,v0.0.7,h1:NMyh88Q50HG6/S2YD58DLkq0c0/ZQPMbSojONH+PRf4=,26a453fc19eb26fe6077f12310ff1ad7230fe31b31a0c17fb47abba75379ee61
+github.com/ipfs/go-ds-leveldb,v0.1.0,h1:OsCuIIh1LMTk4WIQ1UJH7e3j01qlOP+KWVhNS6lBDZY=,43085f79b999edef0b8b49dea1ed35d47cc1c453ef401634825c0be5b62ac6d9
+github.com/ipfs/go-hamt-ipld,v0.0.13,h1:Jbt5ALTYnrzbcOBka11kAkgn3auvkQBGkKWjGRsQrio=,e16acbc3f203616ccd9119415b9db28a6f18c72f053259842f7db50aa1193cf8
+github.com/ipfs/go-ipfs-blockstore,v0.1.0,h1:V1GZorHFUIB6YgTJQdq7mcaIpUfCM3fCyVi+MTo9O88=,19a45734b2615632b180b59032d39c04c50fc735c7f9fd27c5547b0facb4ef8f
+github.com/ipfs/go-ipfs-blocksutil,v0.0.1,h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=,3fcf4221d4d59af5807040f209ff0d28d81f6974d61ac279b43a44b2f46d8182
+github.com/ipfs/go-ipfs-chunker,v0.0.1,h1:cHUUxKFQ99pozdahi+uSC/3Y6HeRpi9oTeUHbE27SEw=,02a0e4766162345a5bea8962c315b4bab8f2550aa1b760dcece96794b3ba22ef
+github.com/ipfs/go-ipfs-config,v0.0.11,h1:5/4nas2CQXiKr2/MLxU24GDGTBvtstQIQezuk7ltOQQ=,e26bdd6db98c4ccf932440aa22a1aa2d550903a0f6f9da82f1ff5902ebbe260e
+github.com/ipfs/go-ipfs-delay,v0.0.1,h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=,bc3a4494d27cd7fabdeb7036e2edadd27f0edbd2b7d3cf49d14e3402c17c3ab6
+github.com/ipfs/go-ipfs-ds-help,v0.0.1,h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo0OnVU=,52d0d886ebb65366abb35f19b76c4f6f349464eaedf092da95c661a451b2bf06
+github.com/ipfs/go-ipfs-exchange-interface,v0.0.1,h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM=,0a593df65586ff592255eb69923a43d413b24ad56454e14e94f5e722756fb102
+github.com/ipfs/go-ipfs-exchange-offline,v0.0.1,h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew=,04b69dc6dd34a2c5c2d1f0df8777fcaa8590aa528b960cc26178af6f609e29cf
+github.com/ipfs/go-ipfs-files,v0.0.6,h1:sMRtPiSmDrTA2FEiFTtk1vWgO2Dkg7bxXKJ+s8/cDAc=,442fa790aba0beff3a79503064a35dceab2a29dc4ab8edcca690c7f61ef6c6c0
+github.com/ipfs/go-ipfs-flags,v0.0.1,h1:OH5cEkJYL0QgA+bvD55TNG9ud8HA2Nqaav47b2c/UJk=,61ac13bc74f89286ac30db2ce79b26adfba63a0676cbc430ad750df2d516565a
+github.com/ipfs/go-ipfs-posinfo,v0.0.1,h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs=,149f52f33d8ffd4f82056b4ea1dae2f25024a2e8df0ff555789c549468d998e7
+github.com/ipfs/go-ipfs-pq,v0.0.1,h1:zgUotX8dcAB/w/HidJh1zzc1yFq6Vm8J7T2F4itj/RU=,4eda59f4f898933265b82d381cc1ea5a3d3c75752618f46496a2d150c09aeb2d
+github.com/ipfs/go-ipfs-routing,v0.1.0,h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ=,e2281e568eed0ee5621886d28802eefd8a9d0806cbd1db80c01550ad59ec54c7
+github.com/ipfs/go-ipfs-util,v0.0.1,h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50=,6d3af4d6dcb95047b64fc74972cdcd84f199c6bad467a7de3543c3eaa0d4ee49
+github.com/ipfs/go-ipld-cbor,v0.0.3,h1:ENsxvybwkmke7Z/QJOmeJfoguj6GH3Y0YOaGrfy9Q0I=,4087b9930a8e2899c3540e61bd8e7f04c8bdd8670f68ddbfcf10f45a0e619cef
+github.com/ipfs/go-ipld-format,v0.0.2,h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs=,3da08ede588080b6ec81c5ad8fbfb1c9ea306a038be41dc06b1f3a1a101ebe50
+github.com/ipfs/go-log,v0.0.1,h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc=,9165a91716b11b432f8bf303d59fc019bf91872f1b9ca7e12d666c11ba6e6676
+github.com/ipfs/go-merkledag,v0.2.4,h1:ZSHQSe9BENfixUjT+MaLeHEeZGxrZQfgo3KT3SLosF8=,ed269e045c613cc7b9bba3593797fe09cdf84c906726bef9261c74bd8c470404
+github.com/ipfs/go-metrics-interface,v0.0.1,h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=,e83f0c01b084000492db0c0e1a28ff900c3f6d11eea8defdbe8bdd1a04c33fd0
+github.com/ipfs/go-mfs,v0.1.1,h1:tjYEWFIl0W6vRFuM/EnySHaaYzPmDcQWwTjtYWMGQ1A=,1db35113aff60e645544cc64cbbddbf0608332b1f2208615744098af59b97fee
+github.com/ipfs/go-path,v0.0.7,h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho=,96c607c0253c24ed0cb37016007f34420a3a83c37cdd68b6d4391126418835c4
+github.com/ipfs/go-peertaskqueue,v0.1.1,h1:+gPjbI+V3NktXZOqJA1kzbms2pYmhjgQQal0MzZrOAY=,5fa92b0302d8e72e8c4d74517a68fad04c1d89d90f0a6314a5e30662dda5d359
+github.com/ipfs/go-unixfs,v0.2.2,h1:eTkDT9F0dn4qHmBMVRMZbziwyqLRcogjtPYqMgZYmQs=,77f7f6b2de604b592018dd914a6606084069d22efa70ea95e0dd623a04e4453c
+github.com/ipfs/go-verifcid,v0.0.1,h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E=,1f808a29fcd38406325435c7a6a02b253aee28832704f0032600c2b41ef3b8f1
+github.com/ipfs/interface-go-ipfs-core,v0.2.4,h1:oQiJ3Mj3rqVJohdi316K3+VSyiADto3Z35ukj7z+UGg=,e1030de5fc1ee1868a87386708be313fc0fcbbe137d5a71d71f28621393f70a2
+github.com/ipfs/iptb,v1.4.0,h1:YFYTrCkLMRwk/35IMyC6+yjoQSHTEcNcefBStLJzgvo=,0b00d0279c700ad687cfbba073f504cc4c8a17ff731550c3784fcb3e24b0c6d5
+github.com/iris-contrib/blackfriday,v2.0.0+incompatible,h1:o5sHQHHm0ToHUlAJSTjW9UWicjJSDDauOOQ2AHuIVp4=,936679f49251da75fde84b8f38884dbce89747b96f8206f7a4675bfcc7dd165d
+github.com/iris-contrib/formBinder,v5.0.0+incompatible,h1:jL+H+cCSEV8yzLwVbBI+tLRN/PpVatZtUZGK9ldi3bU=,6f1fef9e533a1f57a8b033f8c0a135ed038524d7535dd16ba22e9494e3096e3b
+github.com/iris-contrib/go.uuid,v2.0.0+incompatible,h1:XZubAYg61/JwnJNbZilGjf3b3pB80+OQg2qf6c8BfWE=,c6bae86643c2d6047c68c25226a1e75c5331c03466532ee6c943705743949bd9
+github.com/issue9/assert,v1.3.2,h1:IaTa37u4m1fUuTH9K9ldO5IONKVDXjLiUO1T9vj0OF0=,f4349cbd5af134fce10b399717aa4b455b5c73df6c20c1057c6e45973f24a06d
+github.com/issue9/identicon,v0.0.0-20160320065130-d36b54562f4c,h1:A/PDn117UYld5mlxe58EpMguqpkeTMw5/FCo0ZPS/Ko=,5a837560a10469ab524b185a092edf67be85aff5ed794e1fcaaa084cf4540336
+github.com/itchio/arkive,v0.0.0-20190702114012-1bb6c7241ec3,h1:UcZnU7qzWTmZf8v7F3mC79H98I0b77pZz+99vqHFwtI=,dad4e3a988e6834d4ce1c3fb650a8dbb9aedd116ba8b3556c8f8babecdd17ead
+github.com/itchio/dskompress,v0.0.0-20190702113811-5e6f499be697,h1:u3Q2WkrIPYlGEw4fjcImSOrkivWd6SVb0BF0Ehoih9c=,d8379b7e4219f001b61e5c2b3b34b2a6b69f8a55dc1acde2919be3050a7c84f5
+github.com/itchio/go-brotli,v0.0.0-20190702114328-3f28d645a45c,h1:Jf20xV/yR/O6eSUqLTuXhka/+54YR59sGwN7b3MkxYk=,6bab2adfb10a8ae7132e02ed10823df2e91c42dd08a1f3e1835679390ea69927
+github.com/itchio/headway,v0.0.0-20190702175331-a4c65c5306de,h1:RQW9xPqYtvjdHHRZR95XsaEA9B4URCuNHK78IuJcc+Y=,54e63fd6f25217e272e196f6213915515196a5b17a1923a666c05b4f49c82ef3
+github.com/itchio/httpkit,v0.0.0-20190702184704-639fe5edf1f1,h1:mViP/A8hAP04YWbbZR7Kcm7rTkUeT2HLcn3BBiK+CwM=,e56bf70a53a305f6866631d1272a3f3543abd45a68c50d202ddc80796b58c461
+github.com/itchio/kompress,v0.0.0-20190702090658-5e2558a00102,h1:QXEwRXrrx+7CxU+Y+G4GpDk4mUeHbP7grMXHhydk8qU=,cadec4996aed4026c0e0321f90b5bb11d9b8d1de3665752b2e862c6ddbfa229d
+github.com/itchio/ox,v0.0.0-20190925154941-b613e528fc7d,h1:EcmVffUYduCSFCEM12YpSXoVXvyeq8Ro4Q+rwc60TIo=,81c3dfe8e91eb13815bf5b7f159f24a3cb1bd7028a395f691e9cefc1c3a71d01
+github.com/itchio/randsource,v0.0.0-20190702184213-a7635a4cb94b,h1:fG+9RlMeggMG/C2FH80HTfJmm+eOjAve2pFSv6Uio8A=,2375b07785c2738527c864dfb1bee0082b1f89c51e200157165bcd36f5c2933f
+github.com/itchio/savior,v0.0.0-20190925162935-b92976a0b402,h1:a51wRxkLoJWu5NqnVDkI6cE50S0mDpJfOXkCp4ltvr8=,1454524a51fa6492ee593fb7d648dc037fec7c32d90e2d21c149b6e724a74838
+github.com/iwind/TeaGo,v0.0.0-20191007090339-daba0bb6607e,h1:bxD34HpyJWx6bnGdahZo6uN6XnuOvMa8LrzfC+eZqes=,bec78c179e2676d51bb1a07122896661d4ae7727d325e9fa91682361e0321161
+github.com/jackc/chunkreader,v1.0.0,h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=,e204c917e2652ffe047f5c8b031192757321f568654e3df8408bf04178df1408
+github.com/jackc/chunkreader/v2,v2.0.0,h1:DUwgMQuuPnS0rhMXenUtZpqZqrR/30NWY+qQvTpSvEs=,cae1df6cc4f52abdf31d9c7c9869714f5c2e2dddc8047eb6d335409489e76031
+github.com/jackc/fake,v0.0.0-20150926172116-812a484cc733,h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=,bf8b5b51ae03f572a70a0582dc663c5733bba9aca785d39bb0367797148e6d64
+github.com/jackc/pgconn,v1.0.1,h1:ZANo4pIkeHKIVD1cQMcxu8fwrwIICLblzi9HCjooZeQ=,4b7e033c80207f032275845f7d366b51b46e3434cafebd13599a351f01f68b86
+github.com/jackc/pgio,v1.0.0,h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=,1a83c03d53f6a40339364cafcbbabb44238203c79ca0c9b98bf582d0df0e0468
+github.com/jackc/pgmock,v0.0.0-20190831213851-13a1b77aafa2,h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA=,5d8117d8fb79d3a41998bec8dca93d450eba9edf3cf0b8c36881e0ea6140b406
+github.com/jackc/pgpassfile,v1.0.0,h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=,1cc79fb0b80f54b568afd3f4648dd1c349f746ad7c379df8d7f9e0eb1cac938b
+github.com/jackc/pgproto3,v1.1.0,h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=,e3766bee50ed74e49a067b2c4797a2c69015cf104bf3f3624cd483a9e940b4ee
+github.com/jackc/pgproto3/v2,v2.0.0,h1:FApgMJ/GtaXfI0s8Lvd0kaLaRwMOhs4VH92pwkwQQvU=,22635755552d1363817a9c9f192cf464034dfc31593e4975982a85de8295dcf4
+github.com/jackc/pgtype,v0.0.0-20190828014616-a8802b16cc59,h1:xOamcCJ9MFJTxR5bvw3ZXmiP8evQMohdt2VJ57C0W8Q=,30822259b27010e41850fde5f75166abc90028b9c57e2a77976cab119e01295f
+github.com/jackc/pgx,v3.6.0+incompatible,h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q=,07a0cc87069e38acac988cc48e5a6cfd1bfd02b4b843d0e8931e48bb8c25d821
+github.com/jackc/pgx/v4,v4.0.0-pre1.0.20190824185557-6972a5742186,h1:ZQM8qLT/E/CGD6XX0E6q9FAwxJYmWpJufzmLMaFuzgQ=,1782863d2118cd0e63cc50cca24bd79cbea5674bac3b798bf12148400590128d
+github.com/jackc/puddle,v0.0.0-20190608224051-11cab39313c9,h1:KLBBPU++1T3DHtm1B1QaIHy80Vhu0wNMErIFCNgAL8Y=,a780306bb3ad76174eca1d83a6d925fb3f7a13981cda6249e51be64476c76f15
+github.com/jackmordaunt/icns,v0.0.0-20181231085925-4f16af745526,h1:NfuKjkj/Xc2z1xZIj+EmNCm5p1nKJPyw3F4E20usXvg=,06f511df7637fd1424b6f099d7ce7ecf7378e62adc9d13133ce7df419e51faf0
+github.com/jackpal/gateway,v1.0.5,h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc=,adab846630d73763e5a3b984c8264d6503c8cb0b2914df559dacd41f6380e4ef
+github.com/jackpal/go-nat-pmp,v1.0.1,h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA=,d7f2409f72895a01e0d11b457eac015dbcd94c2657f95d508e53867ca6b07db1
+github.com/jacobsa/crypto,v0.0.0-20190317225127-9f44e2d11115,h1:YuDUUFNM21CAbyPOpOP8BicaTD/0klJEKt5p8yuw+uY=,ec4d2a1fc28e1d99c68557e38cd77527df5a9f5090aa12876ab4aa6f9137a3d5
+github.com/jacobsa/oglematchers,v0.0.0-20150720000706-141901ea67cd,h1:9GCSedGjMcLZCrusBZuo4tyKLpKUPenUUqi34AkuFmA=,bcd70357107c45c3177c913b718624376b692d39672c157708fe2cd9aa78fcb5
+github.com/jacobsa/oglemock,v0.0.0-20150831005832-e94d794d06ff,h1:2xRHTvkpJ5zJmglXLRqHiZQNjUoOkhUyhTAhEQvPAWw=,5159f5f22d0e130b1fbfdbc96eb9d4653b32bd463439cb0f3c98e179de5daf80
+github.com/jacobsa/ogletest,v0.0.0-20170503003838-80d50a735a11,h1:BMb8s3ENQLt5ulwVIHVDWFHp8eIXmbfSExkvdn9qMXI=,69d96e3ea6e055d68ed46c0c1044a5dfa18064c9d45bc68d5946aa55e048af6b
+github.com/jacobsa/reqtrace,v0.0.0-20150505043853-245c9e0234cb,h1:uSWBjJdMf47kQlXMwWEfmc864bA1wAC+Kl3ApryuG9Y=,a7efb54142e39f4acab39d22db692d5734f818723783646f6727269228deea83
+github.com/jaegertracing/jaeger,v1.14.0,h1:C0En+gfcxf3NsAriMAvQ6LcSFrQ5VQGXddqfty1EpTI=,5f6245d1b0c986c44cc37c7c950f3cf9c2cfd1e0d540905cd4fab9a164684ecd
+github.com/jarcoal/httpmock,v1.0.4,h1:jp+dy/+nonJE4g4xbVtl9QdrUNbn6/3hDT5R4nDIZnA=,5c7d051f237633573a168713760758005724c268242484d982cb0c76dc3f3ee7
+github.com/jaytaylor/html2text,v0.0.0-20190408195923-01ec452cbe43,h1:jTkyeF7NZ5oIr0ESmcrpiDgAfoidCBF4F5kJhjtaRwE=,2369830967f1c18c382cbee77a510431b42275f1f368e3b5cbbdaa782ae24c0d
+github.com/jbenet/go-base58,v0.0.0-20150317085156-6237cf65f3a6,h1:4zOlv2my+vf98jT1nQt4bT/yKWUImevYPJ2H344CloE=,e686d369d490d6728f6e63b1680db3b567c9e884545f8c47ca656f0d944299b7
+github.com/jbenet/go-cienv,v0.1.0,h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc=,3de5dadf2add50bf7fbdf88db4e6d008ba1848516585f7f9dfbf53cb6dc1705c
+github.com/jbenet/go-context,v0.0.0-20150711004518-d14ea06fba99,h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=,4cd0955abeea43dc4b5a08b8769e696109e0376f2a113a9b8eff13cc90cac1c7
+github.com/jbenet/go-temp-err-catcher,v0.0.0-20150120210811-aac704a3f4f2,h1:vhC1OXXiT9R2pczegwz6moDvuRpggaroAXhPIseh57A=,9299671a264400f8f0e145da442aa3216394f324c50f045ef2ed2b898b3945c9
+github.com/jbenet/goprocess,v0.1.3,h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10=,026bb36c2d4316ad327f8b2e623f172c01140f699d57ec8609f702df5cdf021d
+github.com/jcmturner/gofork,v1.0.0,h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=,5e015dd9b038f1dded0b2ded77e529d2f6ba0bed228a98831af5a3610eefcb52
+github.com/jdcloud-api/jdcloud-sdk-go,v1.9.1-0.20190605102154-3d81a50ca961,h1:a2/K4HRhg31A5vafiz5yYiGMjaCxwRpyjJStfVquKds=,93754c3fe6c00591fcd499cf73ad7f66e4ed864619579ff726872a2f50b53dfa
+github.com/jdkato/prose,v1.1.0,h1:LpvmDGwbKGTgdCH3a8VJL56sr7p/wOFPw/R4lM4PfFg=,4e07b4f2012b46465fcc262d907b1cb81699bc61e6fb7a59ee47ea262e4986d1
+github.com/jeffchao/backoff,v0.0.0-20140404060208-9d7fd7aa17f2,h1:mex1izRBCD+7WjieGgRdy7e651vD/lvB1bD9vNE/3K4=,e6daeed2ffbf793cbdab5e21e9ba47ced708e7c594d4155e1964109903bd199f
+github.com/jefferai/isbadcipher,v0.0.0-20190226160619-51d2077c035f,h1:E87tDTVS5W65euzixn7clSzK66puSt1H4I5SC0EmHH4=,c438b15316e4af2487ba2c818288aa15ba19e39b3bf2f83651dcc9d451af6c5b
+github.com/jefferai/jsonx,v1.0.0,h1:Xoz0ZbmkpBvED5W9W1B5B/zc3Oiq7oXqiW7iRV3B6EI=,e8ccf27ffc8d4560e7db02f8a1663fd4605c5996a025f90721f8157fde332be7
+github.com/jellevandenhooff/dkim,v0.0.0-20150330215556-f50fe3d243e1,h1:ujPKutqRlJtcfWk6toYVYagwra7HQHbXOaS171b4Tg8=,8a3ba94d93fb61070bee24ffca5043eb32b4a6aafa9b84e4950a5f8f34328659
+github.com/jessevdk/go-flags,v1.4.0,h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=,a26e72c3f4c220df8b65ac6eb3d358a8ad2efc300b212318582893ea882726f9
+github.com/jfrazelle/go,v1.5.1-1,h1:EJWkn/L/VOoena+VQryO7xEkxz7J6lHvPXAe+Z3Q6Gc=,ff67181f47086da85e0d0896aeffb52142f6f45bd3bbf75b94cd7546365bf140
+github.com/jfrog/gofrog,v1.0.5,h1:pEJmKZ9XgvQH2a8WCqAEeUDSXBCKBMN90QzOiOhBTIs=,bb6267655de882922977dca0860020c4c781bf7b3d6aba3fddc206a21c13784c
+github.com/jfrog/jfrog-client-go,v0.5.5,h1:dYoajyMXcmc13YpZ/NLye0KL7r+QfpP9l8+WriZNZbE=,3d62cf613d821eb41b8b62ff01e09d8d4eed781f4deb52d3dd96e5a636967732
+github.com/jhump/protoreflect,v1.5.0,h1:NgpVT+dX71c8hZnxHof2M7QDK7QtohIJ7DYycjnkyfc=,a6f0926d31ed98d63d04f2aa60a5579cca471e7544cb701202ba5a5fd3134256
+github.com/jimstudt/http-authentication,v0.0.0-20140401203705-3eca13d6893a,h1:BcF8coBl0QFVhe8vAMMlD+CV8EISiu9MGKLoj6ZEyJA=,0bcf35e1ca69658b70fe05050f436b18ae141a08863cf6011afb39edef5c4013
+github.com/jinzhu/copier,v0.0.0-20190924061706-b57f9002281a,h1:zPPuIq2jAWWPTrGt70eK/BSch+gFAGrNzecsoENgu2o=,c05742c031370bace7c0d5b4101d437e59ad4613bb707fda49c365b3e6af8ad2
+github.com/jinzhu/gorm,v1.9.11,h1:gaHGvE+UnWGlbWG4Y3FUwY1EcZ5n6S9WtqBA/uySMLE=,87f36225e1108c93f299d9b7e4cda23c2f9469ce3db0de59df90691c1e740565
+github.com/jinzhu/inflection,v1.0.0,h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=,cf1087a6f6653ed5f366f85cf0110bbbf581d4e9bc8a4d1a9b56765d94b546c3
+github.com/jinzhu/now,v1.0.1,h1:HjfetcXq097iXP0uoPCdnM4Efp5/9MsM0/M+XOTeR3M=,5900b34a1d8daa959798e342e684c4237f60ffaebd1aa4201e29a7d3a98d32b7
+github.com/jlaffaye/ftp,v0.0.0-20190126081051-8019e6774408,h1:9AeqmB6KVEJ7GQU985MGQc7Mtxz1+C+JZkgqBnUWqMU=,b1b8b0e10084219eaf1a829778c1b53c049eeb77249a5660b62291cc3b454e6b
+github.com/jmcvetta/neoism,v1.3.1,h1:GCFSl/90OYwEQH5LML/Vy6UlwK4SZ2OIO278UI4K7DE=,93e9ce5946ab71d9d0970e3709716a2b9cc96b4d03cfc708dfba8f062e870885
+github.com/jmcvetta/randutil,v0.0.0-20150817122601-2bb1b664bcff,h1:6NvhExg4omUC9NfA+l4Oq3ibNNeJUdiAF3iBVB0PlDk=,742cb157c8eb74da05a7972de646034cf0ddaba7c89d8aac625ed73027e778c1
+github.com/jmespath/go-jmespath,v0.0.0-20180206201540-c2b33e8439af,h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=,5c18f15c2bcfbbdb4fd15c0598ea5d3a373991a7b46a8f2405d00ac8b6121629
+github.com/jmhodges/clock,v0.0.0-20160418191101-880ee4c33548,h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=,f66a541ce3f97b4696d65282a332e8d08dee3f15271b7c2066050aeb5b7334b7
+github.com/jmhodges/levigo,v1.0.0,h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U=,7f43feb409c9650336152a959d7dc4d8e5a260c92e0212b1d2e0f0a7d3de6d87
+github.com/jmoiron/sqlx,v1.2.0,h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=,c8000fe80e86eea575e0d3dd0737f6399c1880a420ce2a9d833ca0e0cfc9c875
+github.com/joefitzgerald/rainbow-reporter,v0.1.0,h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo=,889ea7a751c043bd0ea0ee31734011938be19ecbf08e652d53fc41f3eade9435
+github.com/joeshaw/multierror,v0.0.0-20140124173710-69b34d4ec901,h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4=,e31f735c5f42ac65aef51a70ba1a32b5ac34067a7ba0624192dd41e5ea03aa1e
+github.com/joho/godotenv,v1.3.0,h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=,acef5a394fbd1193f52d0d19690b0bfe82728d18dd3bf67730dc5031c22d563f
+github.com/jonas-p/go-shp,v0.1.1,h1:LY81nN67DBCz6VNFn2kS64CjmnDo9IP8rmSkTvhO9jE=,ac1706c486b7ea7e83eecd1f773259098569d2fe3ad2a53cc32ff89a68915a8f
+github.com/jonboulle/clockwork,v0.1.0,h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=,930d355d1ced60a668bcbca6154bb5671120ba11a34119505d1c0677f7bbbf97
+github.com/joncalhoun/qson,v0.0.0-20170526102502-8a9cab3a62b1,h1:lnrOS18wZBYrzdDmnUeg1OVk+kQ3rxG8mZWU89DpMIA=,062b14a6986be3fb833eb9dd907acb7e563d5b6cfcaee1a04120a9b1fcc2d451
+github.com/josephspurrier/goversioninfo,v0.0.0-20190124120936-8611f5a5ff3f,h1:wBb8/KQrr2tWYffdugrpxOdWyOPSBRNzAR76aF9Nn3Y=,50be4b48f9fb8fbe79a013a791c015c13d7294c5de8f9bee586eaadd6f479459
+github.com/joyent/triton-go,v0.0.0-20190112182421-51ffac552869,h1:BvV6PYcRz0yGnWXNZrd5wginNT1GfFfPvvWpPbjfFL8=,5e875a04efd7f844211b68657d21313ae16b479cb01dc7161811c2c39ac19b18
+github.com/jpillora/backoff,v1.0.0,h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=,f856692c725143c49b9cceabfbca8bc93d3dbde84a0aaa53fb26ed3774c220cc
+github.com/jrick/logrotate,v1.0.0,h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI=,b87ee434f9e2cfda719b639cd5bd0a52523f920f64d23336f88070e9d3765d54
+github.com/jsimonetti/rtnetlink,v0.0.0-20190606172950-9527aa82566a,h1:84IpUNXj4mCR9CuCEvSiCArMbzr/TMbuPIadKDwypkI=,97d995d4ca858da8955aefcead01425d12a91188d6f9b36b5cb63aa35a4ea674
+github.com/json-iterator/go,v1.1.8,h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=,0de8f316729fb05ba608361323b178aa32944154e77aa208ad2818848b0628e2
+github.com/jstemmer/go-junit-report,v0.0.0-20190106144839-af01ea7f8024,h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=,b623acfae0dcc440f81ae14f3c5bc3ca40b1a674660ad549127980f892ab165e
+github.com/jteeuwen/go-bindata,v3.0.7+incompatible,h1:91Uy4d9SYVr1kyTJ15wJsog+esAZZl7JmEfTkwmhJts=,03f794b47c49da98a4eab6c3a7cc49d286f012d64ab832f783b76b9fcd3bd8b2
+github.com/jtolds/gls,v4.20.0+incompatible,h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=,2f51f8cb610e846dc4bd9b3c0fbf6bebab24bb06d866db7804e123a61b0bd9ec
+github.com/jtolds/go-luar,v0.0.0-20170419063437-0786921db8c0,h1:UyVaeqfY1fLPMt1iUTaWsxUNxYAzZVyK+7G+a3sRfhk=,1ed97930b5dfc7f89c84ff3c5ea5a7de9964ccca970f45853d42a13a138b644e
+github.com/jtolds/monkit-hw,v0.0.0-20190108155550-0f753668cf20,h1:XK96humQhnPbQ24uKtSHKbdShDgrKYqlWBNKJTcIKbg=,5d84e6f3f559b67e00b08a5e93e1017866695a4590b97ccb23a82e3ce792ad04
+github.com/juju/ansiterm,v0.0.0-20180109212912-720a0952cc2a,h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU=,17d1e05fd6f1c8fdce7ba7495af54f4dac1e155febff56bd6450593b016655c2
+github.com/juju/clock,v0.0.0-20190205081909-9c5c9712527c,h1:3UvYABOQRhJAApj9MdCN+Ydv841ETSoy6xLzdmmr/9A=,f57579c0c104add5228b279c4673f592d5756033d33b085185ef72a3d2f83bfe
+github.com/juju/cmd,v0.0.0-20190815094254-0c5c82a8dfc6,h1:rPqkdymtMRLcCSYKOeIxuw5mmd8dWx8jSq+t9EGBgtA=,c603f2311cf6524a74535eb9d416a83959c03e6ad52ab4ec081cbc7343734af6
+github.com/juju/collections,v0.0.0-20180717171555-9be91dc79b7c,h1:m/Uo8B7nrH3K6nvk66Y67T7cbHcyY101rW24vGuMON8=,18275066d75835f37845565980c0ac818f9c29145f756b1eeacb6496dac3ebd3
+github.com/juju/errors,v0.0.0-20190930114154-d42613fe1ab9,h1:hJix6idebFclqlfZCHE7EUX7uqLCyb70nHNHH1XKGBg=,2519c885f89cfba663da3bd9a1ff2532e3ae948bdea3e44b42603d8f91cc0796
+github.com/juju/gnuflag,v0.0.0-20171113085948-2ce1bb71843d,h1:c93kUJDtVAXFEhsCh5jSxyOJmFHuzcihnslQiX8Urwo=,47cdfb1bf94a2719e97e03caf4e0dc1cb89ba27c35ed7ce7020701fe8ee2c353
+github.com/juju/gojsonpointer,v0.0.0-20150204194629-afe8b77aa08f,h1:QzpKmMsaP06HVZnYNlcy1CLIXPytsj2NuzfCHitxuus=,0e75303c5dc230f30a629963589376030c3c2a1152a40b9e2075a084224eb173
+github.com/juju/gojsonreference,v0.0.0-20150204194633-f0d24ac5ee33,h1:huRsqE0iXmVPTML75YvFBOiaNj4ZiCZgKVnkRQ06d3w=,d1648b2f71dfbb02acc4a18c55711c721e0f6b50a5280d852cd9c0a639e8ebe6
+github.com/juju/gojsonschema,v0.0.0-20150312170016-e1ad140384f2,h1:VqIDC6dRE0C7wEtTdT6zx2zP5omaoJiZXp2g/dBHRcE=,a9f736e7cb462ccf3b2cb03aa8a133db13dc8d938a2753329a7a1274bdca2656
+github.com/juju/httpprof,v0.0.0-20141217160036-14bf14c30767,h1:COsaGcfAONDdIDnGS8yFdxOyReP7zKQEr7jFzCHKDkM=,9a8c77f887765536c312c89d73d7568126393c7d38c473a50addbec30f8c80ec
+github.com/juju/httprequest,v2.0.0+incompatible,h1:+WtiSbRkEwdqKRBi+4JH8PTdNxBa/h8U8RIzdYaMENI=,0d2ae765c01f7956da6896b7c7d8bb1ad4065e960b93c09a644e2e61a0acaa52
+github.com/juju/jsonschema,v0.0.0-20161102181919-a0ef8b74ebcf,h1:SGTxyCG74uh2dYdBJCUJOo2FSx0fRHP7nMRH7s5JVeQ=,a5681c88d87b34d10dcf701b10d149303fa6d152ee224dc1bd7bd7680da80bfa
+github.com/juju/loggo,v0.0.0-20190526231331-6e530bcce5d8,h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI=,3db058c07ced25b8689f5d3e462d344ffb965c6f371eabc0396ce94d927e6206
+github.com/juju/lru,v0.0.0-20190314140547-92a0afabdc41,h1:/ucixsNZ+l94agL5LZioJ4ECyOz7kOYY+DKb/0NN6ME=,8f41907249beb66ba4dee5df1f53c37f387506e21568cc89db4b72493b970e85
+github.com/juju/os,v0.0.0-20191022170002-da411304426c,h1:iJZl5krsl2AqkgU7IiJ2/jNAchctLFa3BiKdyOUvK+g=,b236cb3d90b3fae0f83e767feef3a17b472ab0fe238ac08810c4f9c1d683c14d
+github.com/juju/proxy,v0.0.0-20180523025733-5f8741c297b4,h1:y2eoq0Uof/dWLAXRyKKGOJuF0TEkauPscQI7Q1XQqvM=,443cd58a22392e66576d883b9d04c17faebafa37a406a346b671f7e994436c34
+github.com/juju/pubsub,v0.0.0-20190419131051-c1f7536b9cc6,h1:2aARJxmMC2IF9GqVtt5PYcIy4jyuAcR44byqwXKTK0o=,b908f7985f6250270708c2c46ca0ccfc17a3705fea4a27da6f1277a9f6b5404c
+github.com/juju/qthttptest,v0.0.1,h1:pR8nTl6Uo/iI6/ynQf5Cxy9FEICXzaa83NtrBdGMCVQ=,4ba292a46e27af468c181118214f7eb1bfc015f289e90841d7746b954f20ba49
+github.com/juju/ratelimit,v1.0.1,h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=,c9af5c6719ce3b6912579a029cb2a651707aa25daa1921488f9cae9c4f8ed334
+github.com/juju/retry,v0.0.0-20180821225755-9058e192b216,h1:/eQL7EJQKFHByJe3DeE8Z36yqManj9UY5zppDoQi4FU=,c5b2437ff128cf13f2d6f3cc3b7e226f2c0119e22caed286946245150b9428e7
+github.com/juju/schema,v1.0.0,h1:sZvJ7iQXHhMw/lJ4YfUmq+fe7R2ZSUzZzd/eSokaB3M=,746bcab557bed4e05456419e5012573dc8481dc8740309100e4bd901ff282a39
+github.com/juju/testing,v0.0.0-20191001232224-ce9dec17d28b,h1:Rrp0ByJXEjhREMPGTt3aWYjoIsUGCbt21ekbeJcTWv0=,317de254f343f9aff6e1226b4ea225cab92fee84667db8e72d541667715ea610
+github.com/juju/txn,v0.0.0-20190612234757-afeb83d59782,h1:FcaMWAFKHuxS7UAaB/GuLWrqI9L7f20m6aXaxg+t5lY=,4656c1c5f0e3dac641999feba77879c7206aff1d606513d7bdb3be7d17a6635c
+github.com/juju/utils,v0.0.0-20180820210520-bf9cc5bdd62d,h1:irPlN9z5VCe6BTsqVsxheCZH99OFSmqSVyTigW4mEoY=,8edd8a74c692eb717156a2bb689e1e24a446656677760dc7dc06b761ee451df5
+github.com/juju/version,v0.0.0-20180108022336-b64dbd566305,h1:lQxPJ1URr2fjsKnJRt/BxiIxjLt9IKGvS+0injMHbag=,73312c50c8b4f6f8644aaccc09b71a2235c8083cfc6c99425540f3c0a3c29e64
+github.com/juju/webbrowser,v1.0.0,h1:JLdmbFtCGY6Qf2jmS6bVaenJFGIFkdF1/BjUm76af78=,7b38f053656e4a883bc122589994e4ec34eae3f833e899450650752d5b72eec8
+github.com/juliangruber/go-intersect,v1.0.0,h1:0XNPNaEoPd7PZljVNZLk4qrRkR153Sjk2ZL1426zFQ0=,e7f539e6b13470da34009d3ab44c6ba84a6b9bb9f6e92d315551919287a25e3c
+github.com/julienschmidt/httprouter,v1.3.0,h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=,e457dccd7015f340664e3b8cfd41997471382da2f4a743ee55be539abc6ca1f9
+github.com/jung-kurt/gofpdf,v1.0.3-0.20190309125859-24315acbbda5,h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0=,f0fa70ade137185bbff2f016831a2a456eaadc8d14bc7bf24f0229211820c078
+github.com/justinas/alice,v0.0.0-20171023064455-03f45bd4b7da,h1:5y58+OCjoHCYB8182mpf/dEsq0vwTKPOo4zGfH0xW9A=,3d6623831901bb973db882bbaffcff3f55849724100ee72c5bf8d0fdfa927ae4
+github.com/jzelinskie/whirlpool,v0.0.0-20170603002051-c19460b8caa6,h1:RyOL4+OIUc6u5ac2LclitlZvFES6k+sg18fBMfxFUUs=,ca0115fcfaaa03f1973f65d05c6d6aefdbdeca6507cdda4359fdf55fd0be2c48
+github.com/k0kubun/colorstring,v0.0.0-20150214042306-9440f1994b88,h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=,32a2eac0ffb69c6882b32ccfcdd76968cb9dfee9d9dc3d469fc405775399167c
+github.com/k0kubun/pp,v3.0.1+incompatible,h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40=,2b91f559df17a49554094e4befd7e1c7d32ba4519417b1b36796d9b49d7328c5
+github.com/kami-zh/go-capturer,v0.0.0-20171211120116-e492ea43421d,h1:cVtBfNW5XTHiKQe7jDaDBSh/EVM4XLPutLAGboIXuM0=,fb1ef7d18f4cec39e9115fb200fbf7d5cff65674afe6ecc63ad57d413f503830
+github.com/kamilsk/retry/v4,v4.3.1,h1:hNQmK1xAgybAVsadNAGvCNutFLS2h+Ycpw317u4d+i0=,74181d82f9bba5b7c313c6b338f127668fffbede70f5495a4a2ef8fddaa6c20f
+github.com/kardianos/osext,v0.0.0-20190222173326-2bc1f35cddc0,h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=,10976c39b58f218a6e29687d19763845e7650d04ac86096cd67ace58f4e56346
+github.com/karrick/godirwalk,v1.13.0,h1:GJq8GHQEAPsjwqfGhLNXBO5P0dS2HYdDRVWe+P4E/EQ=,9652ac9eb85bf13594ba9c41a86864ec5236e429a65f6bbb19c6897d1e335092
+github.com/kataras/golog,v0.0.9,h1:J7Dl82843nbKQDrQM/abbNJZvQjS6PfmkkffhOTXEpM=,bb4d1476d5cbe33088190116a5af7b355fd62858127a8ea9d30d77701279350e
+github.com/kataras/iris,v11.1.1+incompatible,h1:c2iRKvKLpTYMXKdVB8YP/+A67NtZFt9kFFy+ZwBhWD0=,9aba6b1128d42ee2b63a9319e28c1b665b7e82dde1b10763ee7510bcc6427a25
+github.com/kataras/pio,v0.0.0-20190103105442-ea782b38602d,h1:V5Rs9ztEWdp58oayPq/ulmlqJJZeJP6pP79uP3qjcao=,70a50855f07ff59d96db9633a0cf729280a8b9f7af72b936fe8a28e48406432f
+github.com/kavu/go_reuseport,v1.4.0,h1:YIp/96RZ3sJfn0LN+FFkkXIq3H3dfVOdRUtNejhDcxc=,b08d4f774766e1136fd256484f2584d42cd568b5edc7dbc7b19e1259b5dbb75c
+github.com/kballard/go-shellquote,v0.0.0-20180428030007-95032a82bc51,h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=,ae4cb7b097dc4eb0c248dff00ed3bbf0f36984c4162ad1d615266084e58bd6cc
+github.com/kellydunn/golang-geo,v0.7.0,h1:A5j0/BvNgGwY6Yb6inXQxzYwlPHc6WVZR+MrarZYNNg=,4f4699636a450e20bd107fb81894fcdcc8ceeddbac7062e9457c67326c1fb036
+github.com/kelseyhightower/envconfig,v1.4.0,h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=,af674112c38290862e5f59fc2867b81f7b0e623ec2fd1465cd3812e538b351d3
+github.com/kennygrant/sanitize,v1.2.4,h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o=,733211913a22ff6eb5843455345fde8c0c3cff25cc5e8e8225c330fb4c6a72df
+github.com/kevinburke/ssh_config,v0.0.0-20190725054713-01f96b0aa0cd,h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=,ebd98d4bfd0deb1825d9a54689560b42a17d87385222971117ad72e7ad2f36fa
+github.com/keybase/go-crypto,v0.0.0-20190403132359-d65b6b94177f,h1:Gsc9mVHLRqBjMgdQCghN9NObCcRncDqxJvBvEaIIQEo=,a839bacd8eb0a61a72f84678d568d8df899b512510a326e06db0f191e8c1c5a1
+github.com/kisielk/errcheck,v1.2.0,h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=,709eeca978804f41720a94bc69ee3cfa8277f7d15016478a3ebda86606a286c5
+github.com/kisielk/gotool,v1.0.0,h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=,089dbba6e3aa09944fdb40d72acc86694e8bdde01cfc0f40fe0248309eb80a3f
+github.com/kisielk/sqlstruct,v0.0.0-20150923205031-648daed35d49,h1:o/c0aWEP/m6n61xlYW2QP4t9424qlJOsxugn5Zds2Rg=,dbff9241f676de69e88bc006004da6087576433457b306f53cb952d0313ccb78
+github.com/kisom/goutils,v1.1.0,h1:z4HEOgAnFq+e1+O4QdVsyDPatJDu5Ei/7w7DRbYjsIA=,a0b58731f8e1144c013107294885891c44b7fd3235da0ec20776f4d644b4eaa4
+github.com/kkdai/bstream,v1.0.0,h1:Se5gHwgp2VT2uHfDrkbbgbgEvV9cimLELwrPJctSjg8=,dc1d546e0df6ef040963bc9d483834d6e56c77e0e4f6c48e574ac360e7723121
+github.com/klauspost/compress,v1.8.2,h1:Bx0qjetmNjdFXASH02NSAREKpiaDwkO1DRZ3dV2KCcs=,4dc2632696a9cd93cc32c1564e1a6aa4aecfcb5c995a077d45c6f92116e1711d
+github.com/klauspost/cpuid,v1.2.1,h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=,8367d6c97e74f88b149ba9de708ff321273e0114aeb71a45e62e5ac296412420
+github.com/klauspost/crc32,v0.0.0-20161016154125-cb6bfca970f6,h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=,6b632853a19f039138f251f94dbbdfdb72809adc3a02da08e4301d3d48275b06
+github.com/klauspost/pgzip,v1.2.1,h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=,a482336aa4b0e4e9368b15d75629ae741b44ef290b7d16430ba05ce561846213
+github.com/klauspost/reedsolomon,v1.9.2,h1:E9CMS2Pqbv+C7tsrYad4YC9MfhnMVWhMRsTi7U0UB18=,ea8a4d6d994088dae0308843fd6bddb7541cf36306463a696fd4a29097496705
+github.com/knative/pkg,v0.0.0-20191031171713-d4ce00139499,h1:ha5eqzJaPg1CZroomqWxHqspOqpqpRMO3fDtgF1fvIM=,a8d19fc2196a1aec7869ca45df44ba9c5de5b81b6094f0579d25989eb7967660
+github.com/kniren/gota,v0.9.0,h1:ywFrdNxkBD5Xypk5BxjCaKiH507oQVXIf31pTvRhC4I=,062182a345c456c9c0fd7ce9644900708f7f9c08707d64fe2438b9d295dad6dd
+github.com/knq/sysutil,v0.0.0-20191005231841-15668db23d08,h1:V0an7KRw92wmJysvFvtqtKMAPmvS5O0jtB0nYo6t+gs=,81ec4ac93dba6a6161264a0575f20235d8932abab0cd6b9777b4be936f5c2af5
+github.com/knqyf263/berkeleydb,v0.0.0-20190501065933-fafe01fb9662,h1:UGS0RbPHwXJkq8tcba8OD0nvVUWLf2h7uUJznuHPPB0=,1e575b5fdc170e0318ab06841873ae6d115978fbaffc3779290d7ba3aadbdf0e
+github.com/knqyf263/go-deb-version,v0.0.0-20190517075300-09fca494f03d,h1:X4cedH4Kn3JPupAwwWuo4AzYp16P0OyLO9d7OnMZc/c=,4a09d0533768cf6f9d929858aa2e79b6942685569c2db00b8d4688590a89ba3d
+github.com/knqyf263/go-rpmdb,v0.0.0-20190501070121-10a1c42a10dc,h1:pumO9pqmRAjvic6oove22RGh9wDZQnj96XQjJSbSEPs=,33a3568289d22672dfcb0ba7c5b8aa7f9223d5303003368e7dbe8c9718a803b4
+github.com/knqyf263/nested,v0.0.1,h1:Sv26CegUMhjt19zqbBKntjwESdxe5hxVPSk0+AKjdUc=,c0e123844a174b1e9929d4368d8a8bb2f5ecef578ee9dee692c5971a47a633ff
+github.com/koki/structurederrors,v0.0.0-20180506174113-6b997eb5e2ca,h1:KmXUVzyPjXzd3kY0feNFsWOGVDYFT4MjjgG8QJx0m6k=,1efa717c181722fd1c6807919571dc559b48d17120f5eeb4638a322fb882411a
+github.com/kolo/xmlrpc,v0.0.0-20190717152603-07c4ee3fd181,h1:TrxPzApUukas24OMMVDUMlCs1XCExJtnGaDEiIAR4oQ=,9d37c94f50784536aa8ef9a7623ec7bcac9e5bc67b18f7a801efc7cbbe6b1ab0
+github.com/konsorten/go-windows-terminal-sequences,v1.0.2,h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=,4d00d71b8de60bcaf454f8f867210ebcd05e75c0a7c2725904f71aa2f20fb08e
+github.com/koron/go-ssdp,v0.0.0-20180514024734-4a0ed625a78b,h1:wxtKgYHEncAU00muMD06dzLiahtGM1eouRNOzVV7tdQ=,3a99f050b7a668291942cada4e38213965fa0ae3794469bb29ad0d6d9677db23
+github.com/kr/fs,v0.1.0,h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=,d376bd98e81aea34585fc3b04bab76363e9e87cde69383964e57e9779f2af81e
+github.com/kr/logfmt,v0.0.0-20140226030751-b84e30acd515,h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=,ebd95653aaca6182184a1b9b309a65d55eb4c7c833c5e790aee11efd73d4722c
+github.com/kr/pretty,v0.1.0,h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=,06063d21457e06dc2aba4a5bd09771147ec3d8ab40b224f26e55c5a76089ca43
+github.com/kr/pty,v1.1.8,h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI=,d66e6fbc65e772289a7ff8c58ab2cdfb886253053b0cea11ba3ca1738b2d6bc6
+github.com/kr/text,v0.1.0,h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=,9363a4c8f1f3387a36014de51b477b831a13981fc59a5665f9d21609bea9e77c
+github.com/kshvakov/clickhouse,v1.3.4,h1:p/yqvOmeDRH+KyCH6NtwExelr4rimLBBfKW2a/wBN94=,01a0d1a90e0545da94350319a52c051257fee64c838e2632ec40ef8d89a2f153
+github.com/kylelemons/go-gypsy,v0.0.0-20160905020020-08cad365cd28,h1:mkl3tvPHIuPaWsLtmHTybJeoVEW7cbePK73Ir8VtruA=,321087246482a680bd3f06de64075fb843430da544596ad216a4a63d5b8dafa3
+github.com/kylelemons/godebug,v1.1.0,h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=,dbbd0ce8c2f4932bb03704d73026b21af12bd68d5b8f4798dbf10a487a2b6d13
+github.com/kyokomi/emoji,v2.1.0+incompatible,h1:+DYU2RgpI6OHG4oQkM5KlqD3Wd3UPEsX8jamTo1Mp6o=,0721a2fc643e49e002bd8a3e604b5d2f0f3e242cc279d14d76f90a55f8aeebf7
+github.com/labbsr0x/bindman-dns-webhook,v1.0.2,h1:I7ITbmQPAVwrDdhd6dHKi+MYJTJqPCK0jE6YNBAevnk=,d1a327ab22f62486250f50f98990c0d9e1a5fdece6a496fbbb85d4e123df3244
+github.com/labbsr0x/goh,v1.0.1,h1:97aBJkDjpyBZGPbQuOK5/gHcSFbcr5aRsq3RSRJFpPk=,84c91135623961c7c400bf8b646da76c0ce2941fe8706d5aef5650be9a5e37dd
+github.com/labstack/echo,v3.3.10+incompatible,h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8bbnE7CX5OEgg=,29634743cf44c47079b74812ecf5aa7074630507886c4ff40b60c397c45af524
+github.com/labstack/gommon,v0.3.0,h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=,2783ed1c24d09a5539bc35954f71f41d270d78dc656be256c98a8ede2cbbe451
+github.com/lafriks/xormstore,v1.0.0,h1:P/IJzNSIpjXl/Up3o2Td5ZU/x4v6DEKLMaPQJGtmJCk=,0e347e24ab91f62e1b69bab5d78cbba77569f087b483569ef37761e1f93a3f46
+github.com/lann/builder,v0.0.0-20180802200727-47ae307949d0,h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=,1fe7a88079ff2bbe90fb4724fb5c353ecb6af4cd7e011440354c804f678895ee
+github.com/lann/ps,v0.0.0-20150810152359-62de8c46ede0,h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=,76756d46634f44edd3facdb01e7271ddf23a1b51a8423de55d3a2bf685ff032a
+github.com/leanovate/gopter,v0.2.4,h1:U4YLBggDFhJdqQsG4Na2zX7joVTky9vHaj/AGEwSuXU=,99b27788411d478764bf7c51e4f6e84e5ccd60f3959a88a03e96b2a1d519a45d
+github.com/leodido/go-urn,v1.2.0,h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=,8a854d784171000a69b79babb2cd3da9b8fccb1e1b6bb102c7a6d2b52380d08a
+github.com/lestrrat-go/jspointer,v0.0.0-20181205001929-82fadba7561c,h1:pGh5EFIfczeDHwgMHgfwjhZzL+8/E3uZF6T7vER/W8c=,a64de11dd2840c3251906c5fe5f61719713af52a41287411007434684745af39
+github.com/lestrrat-go/jsref,v0.0.0-20181205001954-1b590508f37d,h1:1eeFdKL5ySmmYevvKv7iECIc4dTATeKTtBqP4/nXxDk=,1acee9b59501460f5063a82bc2c05f1a11cd24077198fc08ba100ee642d3db72
+github.com/lestrrat-go/jsschema,v0.0.0-20181205002244-5c81c58ffcc3,h1:TSKrrGm89gmmVlrG34ZzCIOMNVk5kkSV1P88Dt38DiE=,1b7552a5ecd193bdd07995226f58fe48de0aadedbcb42f3a5b135fd7b3538ea4
+github.com/lestrrat-go/jsval,v0.0.0-20181205002323-20277e9befc0,h1:w4rIjeCV/gQpxtn3i1voyF6Hd7v1mRGIB63F7RZOk1U=,f060af1b36e0f156546436dcc9b1569600871185d69e9daf214f24e0e2934784
+github.com/lestrrat-go/pdebug,v0.0.0-20180220043849-39f9a71bcabe,h1:S7XSBlgc/eI2v47LkPPVa+infH3FuTS4tPJbqCtJovo=,17690c72219264e0a195dac69ae6ed12bbadf309242dbaa21609339dfa74b3a5
+github.com/lestrrat-go/structinfo,v0.0.0-20190212233437-acd51874663b,h1:YUFRoeHK/mvRjBR0bBRDC7ZGygYchoQ8j1xMENlObro=,8dd77f51595dea974553558e0d249059b9047a39354548b5bbd88b32cf3df75a
+github.com/lestrrat/go-jsschema,v0.0.0-20181205002244-5c81c58ffcc3,h1:UaOmzcaCH2ziMcSbQFBq/3Iuz/E/Jr/GOGtV80jpFII=,ce0f1e04d70eadcc75f96d70703b53231e1c5be7d9fd832c144e0135bfd5afb4
+github.com/lib/pq,v1.2.0,h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=,cb1028c395747cacafb6c3c6ad5fa244563ce641aae45cf7742f98b6764b1fde
+github.com/libp2p/go-addr-util,v0.0.1,h1:TpTQm9cXVRVSKsYbgQ7GKc3KbbHVTnbostgGaDEP+88=,d49a37e15540c8b95f845dde6cdf802e7af490bc13fd88fec3da318d08464f7b
+github.com/libp2p/go-buffer-pool,v0.0.2,h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs=,fef932705b72198df3d50befd9d2aa157aea1b5f3d23712b09d627d02cfe841e
+github.com/libp2p/go-conn-security,v0.0.1,h1:4kMMrqrt9EUNCNjX1xagSJC+bq16uqjMe9lk1KBMVNs=,e7b58f887c8a8a2ed0178d2f0d6b4ad36bdd7b8cf52ca4d66bafc108b80d095c
+github.com/libp2p/go-conn-security-multistream,v0.1.0,h1:aqGmto+ttL/uJgX0JtQI0tD21CIEy5eYd1Hlp0juHY0=,597b249bd51de097142815318b13c339752532f15131887492d9d3e3407ab92e
+github.com/libp2p/go-eventbus,v0.1.0,h1:mlawomSAjjkk97QnYiEmHsLu7E136+2oCWSHRUvMfzQ=,1b02c8340d2740f99d67078a8c8823c6b9212b92dd9ca7eaf2a38adf2bfd6b56
+github.com/libp2p/go-flow-metrics,v0.0.1,h1:0gxuFd2GuK7IIP5pKljLwps6TvcuYgvG7Atqi3INF5s=,f783542a7fce8382de9cea6940049b106cc35f9714126a1e3d61925c29db8617
+github.com/libp2p/go-libp2p-autonat,v0.1.0,h1:aCWAu43Ri4nU0ZPO7NyLzUvvfqd0nE3dX0R/ZGYVgOU=,11e86ef0b36125a7cd6aa447ffe488f7f3ab00e441bdf8cf30a832a41da4342c
+github.com/libp2p/go-libp2p-blankhost,v0.1.4,h1:I96SWjR4rK9irDHcHq3XHN6hawCRTPUADzkJacgZLvk=,9cd5abe8ad2f137c13309a9dbdd213376bbec03f9685cf8cde7fbfe2e5783e7d
+github.com/libp2p/go-libp2p-circuit,v0.1.0,h1:eniLL3Y9aq/sryfyV1IAHj5rlvuyj3b7iz8tSiZpdhY=,24ee6c7851f4f0072922ae497c230718a0f44beab890d0403261b38a2946a866
+github.com/libp2p/go-libp2p-core,v0.2.4,h1:Et6ykkTwI6PU44tr8qUF9k43vP0aduMNniShAbUJJw8=,d521cc1bffba8afc8b8057901cf22c2f6ffd88faec0274426e13c4e7c12c756c
+github.com/libp2p/go-libp2p-crypto,v0.1.0,h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ=,14ef1867bd8b0ef8fc528f5069ef267270dd0de8cf89a235beb9fbd79e4bed8d
+github.com/libp2p/go-libp2p-discovery,v0.2.0,h1:1p3YSOq7VsgaL+xVHPi8XAmtGyas6D2J6rWBEfz/aiY=,d1c0800b601cbe6833522727b249567379422e9f324b7d0a0866bd86c74fb930
+github.com/libp2p/go-libp2p-host,v0.1.0,h1:OZwENiFm6JOK3YR5PZJxkXlJE8a5u8g4YvAUrEV2MjM=,d26bf1db299917f080a13ace37ef4363c08c2407c126cee59e642b1372d2b211
+github.com/libp2p/go-libp2p-interface-connmgr,v0.0.5,h1:KG/KNYL2tYzXAfMvQN5K1aAGTYSYUMJ1prgYa2/JI1E=,fe1e74365cc5c155161e5500671a8e9a85a90efdad9f5630bdfdc15bdfc52fe5
+github.com/libp2p/go-libp2p-interface-pnet,v0.0.1,h1:7GnzRrBTJHEsofi1ahFdPN9Si6skwXQE9UqR2S+Pkh8=,9767f78f87f54bdf3fb1f0f9b5f67e907463b445a00a566402bacca85749c8fe
+github.com/libp2p/go-libp2p-loggables,v0.1.0,h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8=,351c87c02c2b147193fac5c441d8767d2b247cd3f3c420fa205da2ccd1c3f00f
+github.com/libp2p/go-libp2p-metrics,v0.1.0,h1:v7YMUTHNobFaQeqaMfJJMbnK3EPlZeb6/KFm4gE9dks=,a86fe0ae6cda820fd6a0e576bcd94a22360439819f344a9121086b31c651caaf
+github.com/libp2p/go-libp2p-mplex,v0.2.1,h1:E1xaJBQnbSiTHGI1gaBKmKhu1TUKkErKJnE8iGvirYI=,f11961ef5114e57eb176740a066e1535132c8c238bd444ed53d94fad36ba7708
+github.com/libp2p/go-libp2p-nat,v0.0.4,h1:+KXK324yaY701On8a0aGjTnw8467kW3ExKcqW2wwmyw=,4c3db4e0f7f714439364ca0853f63d426bba67924da6fd050fd0184abdfec2df
+github.com/libp2p/go-libp2p-net,v0.1.0,h1:3t23V5cR4GXcNoFriNoZKFdUZEUDZgUkvfwkD2INvQE=,4140afd418393c2a4ecccca97d80b4752d20da6f34fac15fbdc4f0566f7b8cea
+github.com/libp2p/go-libp2p-netutil,v0.1.0,h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ=,c98ad0a3ffab37b6a0bc80aefba4e4cb442b09c01277a7dcc0086c4a004e649a
+github.com/libp2p/go-libp2p-peer,v0.2.0,h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY=,5b400d6b6337cc759846d7ddf50ec2d761148e1447a86982696687ef1e792c1a
+github.com/libp2p/go-libp2p-peerstore,v0.1.4,h1:d23fvq5oYMJ/lkkbO4oTwBp/JP+I/1m5gZJobNXCE/k=,1606c0bb56c31d0249980b8a0c0e5dda9212687b1994ef47cfd42039d4cf1847
+github.com/libp2p/go-libp2p-protocol,v0.1.0,h1:HdqhEyhg0ToCaxgMhnOmUO8snQtt/kQlcjVk3UoJU3c=,4560018136a73817e03eed49af46d97dd561b3eeffb1ff00559152acf9a74627
+github.com/libp2p/go-libp2p-pubsub,v0.2.0,h1:4UXcjpQdpam/RsGhfWyT/4u5f6F42ods/WgDAaocYxA=,bde7bb50d950b8ea7902c523a696eff4ad7b5d0daac808356358ff6d53aecb14
+github.com/libp2p/go-libp2p-record,v0.1.1,h1:ZJK2bHXYUBqObHX+rHLSNrM3M8fmJUlUHrodDPPATmY=,27a3e94e144b893cbb5ceaddfa7a4e456052e173f807db52945e06920f62d0b3
+github.com/libp2p/go-libp2p-routing,v0.1.0,h1:hFnj3WR3E2tOcKaGpyzfP4gvFZ3t8JkQmbapN0Ct+oU=,4241980dadf216e937a42a572a9c5b5eb28ff62458380ad37892c5b5095de270
+github.com/libp2p/go-libp2p-secio,v0.2.0,h1:ywzZBsWEEz2KNTn5RtzauEDq5RFEefPsttXYwAWqHng=,979a82829f3188d4ca8d20d194923c5620ff12a161d13c945c1630b7b9d050ff
+github.com/libp2p/go-libp2p-swarm,v0.2.2,h1:T4hUpgEs2r371PweU3DuH7EOmBIdTBCwWs+FLcgx3bQ=,b920f69fbfaa8805047b958c6d45d944a195181dd6dddab36bead5fe68f2f1e4
+github.com/libp2p/go-libp2p-testing,v0.1.0,h1:WaFRj/t3HdMZGNZqnU2pS7pDRBmMeoDx7/HDNpeyT9U=,e1c7fa467d88b33f2fc519542cc19aa48bcade304f579f10ab402a19c38d0aa6
+github.com/libp2p/go-libp2p-transport,v0.0.5,h1:pV6+UlRxyDpASSGD+60vMvdifSCby6JkJDfi+yUMHac=,df7bc96a5d76c351fd3a6ee29995f4974013d9709904edd9608b86f4fa089ad2
+github.com/libp2p/go-libp2p-transport-upgrader,v0.1.1,h1:PZMS9lhjK9VytzMCW3tWHAXtKXmlURSc3ZdvwEcKCzw=,60ea73fa42536178798c3d4a36c5f9cffb185b6c1629c23c3faff5919f9e9cad
+github.com/libp2p/go-libp2p-yamux,v0.2.1,h1:Q3XYNiKCC2vIxrvUJL+Jg1kiyeEaIDNKLjgEjo3VQdI=,849f0097fd7203b5c6d590463b7fb17573af8d12136413768706188a39b34b21
+github.com/libp2p/go-maddr-filter,v0.0.5,h1:CW3AgbMO6vUvT4kf87y4N+0P8KUl2aqLYhrGyDUbLSg=,19c76e021879aab85a8858b53d706220e9e3277a96dead161db152f5a1d17219
+github.com/libp2p/go-mplex,v0.1.0,h1:/nBTy5+1yRyY82YaO6HXQRnO5IAGsXTjEJaR3LdTPc0=,3340a423ea89310360810973a77a97c217fe7b35e1c18189a3628e35fe1275e0
+github.com/libp2p/go-msgio,v0.0.4,h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA=,ec22f703203a2a443c57896b2082c02fe9c54d372aad091cdca144709d244721
+github.com/libp2p/go-nat,v0.0.3,h1:l6fKV+p0Xa354EqQOQP+d8CivdLM4kl5GxC1hSc/UeI=,d642c9dd697176ec69c4a5faeff1fc3b5472ef9f32c2c40e21c42f81ceef86b9
+github.com/libp2p/go-openssl,v0.0.3,h1:wjlG7HvQkt4Fq4cfH33Ivpwp0omaElYEi9z26qaIkIk=,f2eb05d710fe960ba12d5f640cefe7d31d24f1fab0d9a52faf5f2923a19c6f13
+github.com/libp2p/go-reuseport,v0.0.1,h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw=,274ade934c7f26ffae86d3f4d34352371c3eca7ead080392f6f35698ec5f0a3f
+github.com/libp2p/go-reuseport-transport,v0.0.2,h1:WglMwyXyBu61CMkjCCtnmqNqnjib0GIEjMiHTwR/KN4=,866f45bfa6c2e65d563955a28050bcfbc6ed11df6ded7c551e92ff98ba98a2d8
+github.com/libp2p/go-stream-muxer,v0.1.0,h1:3ToDXUzx8pDC6RfuOzGsUYP5roMDthbUKRdMRRhqAqY=,d42dab9fb102b3e56cc555eb9aacb742e4230120dd356078cc723f8817200d43
+github.com/libp2p/go-stream-muxer-multistream,v0.2.0,h1:714bRJ4Zy9mdhyTLJ+ZKiROmAFwUHpeRidG+q7LTQOg=,a4ca5d0422d55ee7b4e74b040ca85799365b05684b7b6687adfa79a345049a9d
+github.com/libp2p/go-tcp-transport,v0.1.1,h1:yGlqURmqgNA2fvzjSgZNlHcsd/IulAnKM8Ncu+vlqnw=,147dc8d50aab944666c1a7a371ba3e351480506313be298ebf8dcdb9dc51b1b4
+github.com/libp2p/go-testutil,v0.1.0,h1:4QhjaWGO89udplblLVpgGDOQjzFlRavZOjuEnz2rLMc=,9fa6fa5741f541a6309e8a5fa6031c51f97fcd3086fe3a3b371b74f9d8e9a4b8
+github.com/libp2p/go-ws-transport,v0.1.0,h1:F+0OvvdmPTDsVc4AjPHjV7L7Pk1B7D5QwtDcKE2oag4=,30cfd8011bb8de03c23680d2249120ea9ba29879e855ca5c35311f8fa874d094
+github.com/libp2p/go-yamux,v1.2.3,h1:xX8A36vpXb59frIzWFdEgptLMsOANMFq2K7fPRlunYI=,97947a07c9430184c3be45e87580abcdea18c9b7435adb8048b08aebce0fea50
+github.com/liggitt/tabwriter,v0.0.0-20181228230101-89fcab3d43de,h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=,41b6869255915ffdfd32575ba14d52732d62d34b47d904df4890e165489ec77d
+github.com/linkedin/goavro,v2.1.0+incompatible,h1:DV2aUlj2xZiuxQyvag8Dy7zjY69ENjS66bWkSfdpddY=,25d4ccde4ece770196fbf6f09ca4184df581944224be5d64a263eb2c7f9a24fc
+github.com/linode/linodego,v0.10.0,h1:AMdb82HVgY8o3mjBXJcUv9B+fnJjfDMn2rNRGbX+jvM=,4c4e8829c0290c473e36bacdce8b490833d1f6247b1a4290062db30ba2b21568
+github.com/liquidweb/liquidweb-go,v1.6.0,h1:vIj1I/Wf97fUnyirD+bi6Y63c0GiXk9nKI1+sFFl3G0=,19e08fe2aa62655eb3cb209b37d532a267dd3078e5d262c4c45e7e09134b079c
+github.com/lithammer/dedent,v1.1.0,h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=,4ec56a3fef0d7dd1536046e540827e60419a935dde49d87d21f5856174cadba2
+github.com/logrusorgru/aurora,v0.0.0-20180419164547-d694e6f975a9,h1:KQdwUNlTDGyS6e+2rjAxfHSpBFIOHXqgDceNDqb55+4=,3b9d5caeede8553ead48405de57cd25bf6276b12531dae582c3ee089474aaf95
+github.com/loov/hrtime,v0.0.0-20181214195526-37a208e8344e,h1:UC+nLCm+w3WL+ibAW/wsWbQC3KAz7LLawR2hgX0eR9s=,f077796a9f39c579d356ac8f99831c56b3b2c52b70526f97730eccdc5ce558b2
+github.com/loov/plot,v0.0.0-20180510142208-e59891ae1271,h1:51ToN6N0TDtCruf681gufYuEhO9qFHQzM3RFTS/n6XE=,eb57dc24113d92cda1d0eecd6280603a2f1a98eececde895db4b060a7208659a
+github.com/lovoo/gcloud-opentracing,v0.3.0,h1:nAeKG70rIsog0TelcEtt6KU0Y1s5qXtsDLnHp0urPLU=,7bead4937d23976e07caf4bf7a7f302724cda9155aa8ac4de7baa2e10976eacc
+github.com/lsegal/gucumber,v0.0.0-20180127021336-7d5c79e832a2,h1:Gg0dt1q5bB+3R3qu+BucR+1f5ZhKm3OzPPo53dZ3Hxs=,2e5cd235f8c80ae078b3115b41fb765682c796d62fa54ecbb2096b159b0294bd
+github.com/lucas-clemente/aes12,v0.0.0-20171027163421-cd47fb39b79f,h1:sSeNEkJrs+0F9TUau0CgWTTNEwF23HST3Eq0A+QIx+A=,074a3c40044c8f07dbe93129fe30bfd4a12f6283f393e7300664d59924a8af2b
+github.com/lucas-clemente/quic-clients,v0.1.0,h1:/P9n0nICT/GnQJkZovtBqridjxU0ao34m7DpMts79qY=,b916edbd87d45fd375b0f81f905453102eb4e7e724ca0fc8ac5be323fe5958b8
+github.com/lucas-clemente/quic-go,v0.12.1,h1:BPITli+6KnKogtTxBk2aS4okr5dUHz2LtIDAP1b8UL4=,144443ffb6231cabbe6da1496c5851eb73f03fff33d7bd94aa394f8d1e3c73b3
+github.com/lucas-clemente/quic-go-certificates,v0.0.0-20160823095156-d2f86524cced,h1:zqEC1GJZFbGZA0tRyNZqRjep92K5fujFtFsu5ZW7Aug=,d9eff929a62711fc36f9655008e144863cd816ad2b59d25eb00a248c96178ce5
+github.com/lucasb-eyer/go-colorful,v1.0.2,h1:mCMFu6PgSozg9tDNMMK3g18oJBX7oYGrC09mS6CXfO4=,c0e388db91f217be87f8d508ac9f495adc5a33ffda78849e2d0a89a8e8dae28c
+github.com/lunixbochs/struc,v0.0.0-20190916212049-a5c72983bc42,h1:PzBD7QuxXSgSu61TKXxRwVGzWO5d9QZ0HxFFpndZMCg=,8a7db31161ec3a3bcc7b52e25975d0299b9c0bb465f076014d303f112b5cb9e1
+github.com/lunixbochs/vtclean,v1.0.0,h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8=,4d73f9678abde21c67dd8cb4ed8d7f63bcdd9413b6093b53cec4d26ce1be5b88
+github.com/lunny/dingtalk_webhook,v0.0.0-20171025031554-e3534c89ef96,h1:uNwtsDp7ci48vBTTxDuwcoTXz4lwtDTe7TjCQ0noaWY=,b94d4c7cacca0c289b3fbbeae6cc9e66f2eec4a3210fbbfd208316337ff2f1e3
+github.com/lunny/levelqueue,v0.0.0-20190217115915-02b525a4418e,h1:GSprKUrG9wNgwQgROvjPGXmcZrg4OLslOuZGB0uJjx8=,8f62ece23811c3c2be0d1c8d10057ab564641b2f73dc5a9910dd5f8462954f19
+github.com/lunny/log,v0.0.0-20160921050905-7887c61bf0de,h1:nyxwRdWHAVxpFcDThedEgQ07DbcRc5xgNObtbTp76fk=,0d551b83dcb0c4a3e0f97febf74e8f69b58a419791e217a7d2fd3d79a1e5877b
+github.com/lunny/nodb,v0.0.0-20160621015157-fc1ef06ad4af,h1:UaWHNBdukWrSG3DRvHFR/hyfg681fceqQDYVTBncKfQ=,a0f6632294f1eec60e2651fa2d4b3590f3a1a8e2f7692dcc77251b945906a701
+github.com/lusis/go-artifactory,v0.0.0-20160115162124-7e4ce345df82,h1:wnfcqULT+N2seWf6y4yHzmi7GD2kNx4Ute0qArktD48=,487d2ef1720bd49c5a36efc8893fdb0a76bd5f8b064c2a98974a78b3e35f5763
+github.com/lusis/go-slackbot,v0.0.0-20180109053408-401027ccfef5,h1:AsEBgzv3DhuYHI/GiQh2HxvTP71HCCE9E/tzGUzGdtU=,0bb7feaeb5a4e83486234c1c8fbe2f73b94213f511aaf6b8ef1f0fc96dd7b4fa
+github.com/lusis/outputter,v0.0.0-20171130132426-5a3b464a163f,h1:JY0YSH+YvMGmq83g5qILMAkJDFv7qIiHalhlQXal9V0=,e3b54ad36707730681b10a3838d89c346bf2d2c52cb61a241b178bcb0fc96e0f
+github.com/lusis/slack-test,v0.0.0-20190426140909-c40012f20018,h1:MNApn+Z+fIT4NPZopPfCc1obT6aY3SVM6DOctz1A9ZU=,019aa5a65d7fc369730c089a8af985f8d4760297a0058dd0c352fb662e8a0cfc
+github.com/lyft/protoc-gen-star,v0.4.11,h1:zW6fJQBtCtVeSiO/Kbpzv32GO0J/Z8egSLeohES202w=,673c0c53ce301a5589d4aab2b389c6ab52c8312193bae9b491e75e4938475277
+github.com/lyft/protoc-gen-validate,v0.1.0,h1:NytKd9K7UW7Szxn+9PYNsaJ/98TL/WsDq4ro4ZVuh5o=,2e452d4298aa5f2be8d4eda3e55522a4c020d0f23dac6b33ecf9942be09bf082
+github.com/magefile/mage,v1.4.0,h1:RI7B1CgnPAuu2O9lWszwya61RLmfL0KCdo+QyyI/Bhk=,55862155e89367536d665080ac028decc98ce68c5651ccc4238d7e34ddf1cbc2
+github.com/magiconair/properties,v1.8.1,h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=,c0f0378f5949db2e7976d6822a0dfac1786acd34190e83ab253d6505542d0128
+github.com/mailgun/mailgun-go,v0.0.0-20171127222028-17e8bd11e87c,h1:5huPh/MfWW65cx8KWNVD4mCCnwIrNiX4bFJR5OeONg0=,33250edd00795e387f2de671003b8ef8f2d940d24b12a9ce90c6b49dd6094231
+github.com/mailgun/minheap,v0.0.0-20170619185613-3dbe6c6bf55f,h1:aOqSQstfwSx9+tcM/xiKTio3IVjs7ZL2vU8kI9bI6bM=,26930b2a6dc2f2b442e28ecc5dcbb22c2e7da3d151b3388d0bc604370bd9df77
+github.com/mailgun/multibuf,v0.0.0-20150714184110-565402cd71fb,h1:m2FGM8K2LC9Zyt/7zbQNn5Uvf/YV7vFWKtoMcC7hHU8=,7dbb280e8bc981732510ee72e124e931991d06c317531de709fd7922e38a5339
+github.com/mailgun/timetools,v0.0.0-20170619190023-f3a7b8ffff47,h1:jlyJPTyctWqANbaxi/nXRrxX4WeeAGMPaHPj9XlO0Rw=,a4d961cefbfbe858f4ba5a5824d91ad8713a736707f5c259cf0d7307a07ac83e
+github.com/mailgun/ttlmap,v0.0.0-20170619185759-c1c17f74874f,h1:ZZYhg16XocqSKPGNQAe0aeweNtFxuedbwwb4fSlg7h4=,35308e95ed02635049d1804b85f16407f3109fc60c38df541f0401dbba66dc8d
+github.com/mailru/easyjson,v0.7.0,h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=,c36c8ab36aab9ba2ca776d1c71cbd9c30fce7c4e8e62be6611f4c2d1e98e86ae
+github.com/manucorporat/sse,v0.0.0-20160126180136-ee05b128a739,h1:ykXz+pRRTibcSjG1yRhpdSHInF8yZY/mfn+Rz2Nd1rE=,cd90f350cca3a6536432afb4cd2355ff25124ef89fc23a52392e5189733b0359
+github.com/manveru/faker,v0.0.0-20171103152722-9fbc68a78c4d,h1:Zj+PHjnhRYWBK6RqCDBcAhLXoi3TzC27Zad/Vn+gnVQ=,80bc3e8ca50e89d3a6139d1709fbf4680c26231079d297d237902d3c23f4c1e8
+github.com/manveru/gobdd,v0.0.0-20131210092515-f1a17fdd710b,h1:3E44bLeN8uKYdfQqVQycPnaVviZdBLbizFhU49mtbe4=,39811c3d6c7de66195a29a78b235dead57fb866e61082301fe68d51cf04a5200
+github.com/markbates/deplist,v1.3.0,h1:uPgoloPraPBPYtNSxj2UwZBh2EHW9TmMvQCP2FBiRlU=,e0b1903fb33c324721565076e2061d7f54e29ba098afb80af4fe2ccdd02ed178
+github.com/markbates/going,v1.0.3,h1:mY45T5TvW+Xz5A6jY7lf4+NLg9D8+iuStIHyR7M8qsE=,61efe687a56d3141284be7bdb83bb5ae86e1df694ababa5937c4d3e30f3b60f1
+github.com/markbates/goth,v1.49.0,h1:qQ4Ti4WaqAxNAggOC+4s5M85sMVfMJwQn/Xkp73wfgI=,39a0244d07f47d7b91215590900a7754c4700e875c0866b1e65568133471478a
+github.com/markbates/grift,v1.1.0,h1:DsljFKUSK1ELpU22ZE+Gi93jiQI3cYD/RQ+vHM/PpY8=,29aa2fa782f9d8730bde2df024c40ba749f1812dd3bbab489b4197a1faa78627
+github.com/markbates/hmax,v1.1.0,h1:MswE0ks4Iv1UAQNlvAyFpsyFQSBHolckas95gRUkka4=,8c7557798a88c74594f27137be859e99195427e2e04f0835f48781b0bde5c73a
+github.com/markbates/inflect,v1.0.4,h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g=,0da6e75f6cd27672255a41f5dfab418d2746897239ad601e5d8d78d6354b5665
+github.com/markbates/oncer,v1.0.0,h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY=,9a774885bfa4c9a96c438fdb51768833e1c7003f35cd27961137ff4096b1a764
+github.com/markbates/refresh,v1.8.0,h1:ELMS9kKyO/H6cJrqFo6qCyE0cRx2JeHWC9yusDkVeM8=,7ac81390a898cfd1cdc097ffb1e05321c415183165b7341749de41160c47e504
+github.com/markbates/safe,v1.0.1,h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=,d5a98e8242318d4e88844ddbbfebe91f67f41e5aa1f6a96a58fa2fa94e0ae9ef
+github.com/markbates/sigtx,v1.0.0,h1:y/xtkBvNPRjD4KeEplf4w9rJVSc23/xl+jXYGowTwy0=,e3b591a1a2b4dcec7b86d59e504b0bbf87ec3663efad818cd9b00471a33a0345
+github.com/markbates/willie,v1.0.9,h1:394PpHImWjScL9X2VRCDXJAcc77sHsSr3w3sOnL/DVc=,a6c3eda44d765eeb1370b0ddeb739df86e900b78eb365688da143f1c0c0e9bc0
+github.com/marstr/guid,v1.1.0,h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=,7db3cd8020c72ba260d1a20183bf5a030c696d6442eccaff2b31f72b194fc571
+github.com/marten-seemann/qpack,v0.1.0,h1:/0M7lkda/6mus9B8u34Asqm8ZhHAAt9Ho0vniNuVSVg=,46c42087e554edae4e19f79b785722d27316e23278889bf78a0c8f43fc387f2e
+github.com/marten-seemann/qtls,v0.3.2,h1:O7awy4bHEzSX/K3h+fZig3/Vo03s/RxlxgsAk9sYamI=,ff5245b3d5a1e65754d4a740e09ff02c738e9043c6e2bc02c59d5851c1fc1e2d
+github.com/martini-contrib/render,v0.0.0-20150707142108-ec18f8345a11,h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw=,2edd7f64b2f1f053f86a51856cd0f02b1f762af61a458a2e282dab76ad093d70
+github.com/martinlindhe/unit,v0.0.0-20190604142932-3b6be53d49af,h1:4bEyeobv/dO+lT1Qp1hr+/DcNjy6Ob8BDaSrxX6nQsQ=,ee5001e908fb9997e5918c909dcb0cc078f1a91719f4df3d62243d5e88dc07c6
+github.com/martinusso/go-docs,v0.0.0-20161215163720-81905d575a58,h1:VmcrkkMjTdCGOsuuMnn7P2X9dGh3meUNASx6kHIpe7A=,70ad43a3172287882f904657184af77133a578c6d1ec968c5ce3e27259100a06
+github.com/maruel/panicparse,v0.0.0-20171209025017-c0182c169410,h1:1ROIrlLvFoHKX+i48KdRauq21irSOXPyfQw4T/PrINY=,5fd98b2b0a8346ffcba1858775e93db0582ead6b3329b974595d5ab448c95f28
+github.com/maruel/ut,v1.0.0,h1:Tg5f5waOijrohsOwnMlr1bZmv+wHEbuMEacNBE8kQ7k=,a7c90a5020071c66efe2ccae7f3859c60f17840d4ae2972ee9c9a38ae071fb3e
+github.com/masterzen/azure-sdk-for-go,v0.0.0-20161014135628-ee4f0065d00c,h1:FMUOnVGy8nWk1cvlMCAoftRItQGMxI0vzJ3dQjeZTCE=,de40198aee773ecaf502d59b8f29fe5d1564fb9a68900b6bfed2369e169e193a
+github.com/masterzen/simplexml,v0.0.0-20190410153822-31eea3082786,h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg=,a9e4548a5c7e098c89273c470e4e9d18cb0beb530629f2e512f6f105fd9cbc88
+github.com/masterzen/winrm,v0.0.0-20190223112901-5e5c9a7fe54b,h1:/1RFh2SLCJ+tEnT73+Fh5R2AO89sQqs8ba7o+hx1G0Y=,28f8e69baadf7f220842a5cd4269ccebdb175a835c0b43819a6b15670ae5403c
+github.com/matryer/moq,v0.0.0-20190312154309-6cfb0558e1bd,h1:HvFwW+cm9bCbZ/+vuGNq7CRWXql8c0y8nGeYpqmpvmk=,b9fb2bc3d0894dfaa3cc4298f49c97346ccb66f2f0e6911f4f224ffc9acc3972
+github.com/matryer/try,v0.0.0-20161228173917-9ac251b645a2,h1:JAEbJn3j/FrhdWA9jW8B5ajsLIjeuEHLi8xE4fk997o=,f1afa36a4bd0bf09a1290f3afef954058e334d6b275aae6a591d8dad276f5e2f
+github.com/mattbaird/elastigo,v0.0.0-20170123220020-2fe47fd29e4b,h1:v29yPGHhOqw7VHEnTeQFAth3SsBrmwc8JfuhNY0G34k=,f6a94deccbe4d008d265bb4b5cbaee7893e5994a82bc49b44438675a0ca8d8f3
+github.com/mattbaird/jsonpatch,v0.0.0-20171005235357-81af80346b1a,h1:+J2gw7Bw77w/fbK7wnNJJDKmw1IbWft2Ul5BzrG1Qm8=,55abaf4d26d8ad7f81c230f38a6e482b6b416d9b5777a6c3b1a5c140465a5235
+github.com/mattermost/mattermost-server,v5.11.1+incompatible,h1:LPzKY0+2Tic/ik67qIg6VrydRCgxNXZQXOeaiJ2rMBY=,1f601d79e647a248f9e711891e015b1709f3af37e6a45d5e97827f074c40398e
+github.com/mattn/go-colorable,v0.1.4,h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=,02ad42bc54adf7c52030b6ab903277af8fb7163aad4f7f8d8703ecfdc62597de
+github.com/mattn/go-ieproxy,v0.0.0-20190805055040-f9202b1cfdeb,h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=,5914c18852b0be63008f7ccaf1bd3a8214a82fae78f8afe2e7d774ff96a410ff
+github.com/mattn/go-isatty,v0.0.10,h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=,dca893515dccb58e21f9b08837470c5512e0ecd1275767ed996912bb46933c91
+github.com/mattn/go-mastodon,v0.0.5-0.20190517015615-8f6192e26b66,h1:TbnaLJhq+sFuqZ1wxdfF5Uk7A2J41iOobCCFnLI+RPE=,b290b77b6e5556bba70cf18ac815c13ed9a80ffa4cb03627d73187e99cd15d42
+github.com/mattn/go-oci8,v0.0.0-20190320171441-14ba190cf52d,h1:m+dSK37rFf2fqppZhg15yI2IwC9BtucBiRwSDm9VL8g=,eb3bd1fa93c8a341ad43176cb6e4d8540d7a91d3edd7eb98c1388cf2f4c3515c
+github.com/mattn/go-runewidth,v0.0.5,h1:jrGtp51JOKTWgvLFzfG6OtZOJcK2sEnzc/U+zw7TtbA=,3b34033634b059bfa31ac552d2150d8c0d6e530dd1c0ead2ce0806e1d7cc754a
+github.com/mattn/go-shellwords,v1.0.6,h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI=,374285b205f0659ab4be3f8ce346cfd3291cd42f47b12bda15174c42c462b1a6
+github.com/mattn/go-sqlite3,v1.11.0,h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=,7fec79c50206f5faa759d1b64500fb0d082e22ef23f10e2d4cbce24e4fc2d5c1
+github.com/mattn/go-tty,v0.0.0-20190424173100-523744f04859,h1:smQbSzmT3EHl4EUwtFwFGmGIpiYgIiiPeVv1uguIQEE=,76f28f59927667d2d750fa6ffdefeb3f0c41034cb593e4545a206995c76c619f
+github.com/mattn/go-xmpp,v0.0.0-20190124093244-6093f50721ed,h1:A1hEQg5M0b3Wg06pm3q/B0wdZsPjVQ/a2IgauQ8wCZo=,2c39b78184ea27890be56f593353c8fe6b3d6efa53db20e800ff8793bc665199
+github.com/mattn/go-zglob,v0.0.1,h1:xsEx/XUoVlI6yXjqBK062zYhRTZltCNmYPx6v+8DNaY=,8decd6c1916188ab4fa1001e3da3f22d7c9fb6218215fd25053c901979930feb
+github.com/mattn/goveralls,v0.0.2,h1:7eJB6EqsPhRVxvwEXGnqdO2sJI0PTsrWoTMXEk9/OQc=,3df5b7ebfb61edd9a098895aae7009a927a2fe91f73f38f48467a7b9e6c006f7
+github.com/matttproud/golang_protobuf_extensions,v1.0.1,h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=,e64dc58023f4b8c4472d05a44f2719b84d6c2cc364cc682820c9f72b233c9cdc
+github.com/maxbrunsfeld/counterfeiter/v6,v6.2.2,h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE=,c185793a7e749ff2557f4557628f5b5d8d9edbf72ca6bd2cb94503f4817c01d2
+github.com/mcuadros/go-version,v0.0.0-20190830083331-035f6764e8d2,h1:YocNLcTBdEdvY3iDK6jfWXvEaM5OCKkjxPKoJRdB3Gg=,ff2364bda8605ad94051c576ffa601e1a9aedabc8a1fda588eb04c3371a845ea
+github.com/mdlayher/dhcp6,v0.0.0-20190311162359-2a67805d7d0b,h1:r12blE3QRYlW1WBiBEe007O6NrTb/P54OjR5d4WLEGk=,fba7b2f01311e2d41bb4ebe15409d4e0a605a79d2f05156bb0f4adbc20f557bc
+github.com/mdlayher/netlink,v0.0.0-20191009155606-de872b0d824b,h1:W3er9pI7mt2gOqOWzwvx20iJ8Akiqz1mUMTxU6wdvl8=,9be201b393fe866f855e5ebb20ef33e86a0e6a99b6b76209531b93615fcbac7c
+github.com/mesos/mesos-go,v0.0.10,h1:+M/7Zlkvw4MolkLvXHfj6hkDsLLHOOU54CmOkOUaNBc=,f18d5601dc6a5234b9c2d65cb96b8d30ab877e3117dd52dd47e31a353ed887d1
+github.com/mgutz/ansi,v0.0.0-20170206155736-9520e82c474b,h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=,d7c0ff88c53dfca384bb82108a6e5fdc9e11b358d68b67144ff6a285be20a16a
+github.com/mgutz/logxi,v0.0.0-20161027140823-aebf8a7d67ab,h1:n8cgpHzJ5+EDyDri2s/GC7a9+qK3/YEGnBsd0uS/8PY=,0a7837d5246591fe1fd341e48a72786c0b61fff8d3ebfea0e9c789176c3e75d5
+github.com/mgutz/str,v1.2.0,h1:4IzWSdIz9qPQWLfKZ0rJcV0jcUDpxvP4JVZ4GXQyvSw=,bf640c2048957f183e72664ff08745ae3d016f64072a5967f5269ccb5fc4b318
+github.com/mholt/archiver,v3.1.1+incompatible,h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU=,6cbad83ecd8a2bcb013fb1ac163a6551e6f948b103df9b258788612c72551184
+github.com/mholt/certmagic,v0.7.5,h1:1ZGHwUI4+zg1S17tPUj5Xxb9Q1ghTjLcUZE5G4yV5SM=,a85c14ecbb135636c8e4701a25b8d2884f091d948269c0a3187918af83e11db3
+github.com/michaelklishin/rabbit-hole,v1.5.0,h1:Bex27BiFDsijCM9D0ezSHqyy0kehpYHuNKaPqq/a4RM=,1fdb62e985c4b1be24632875668720ed687455ece54cb2c77079488784e06e69
+github.com/micro-plat/gmq,v1.0.1,h1:ai1PiCEfgBmiqzmZ4iWE3l2Vuz7rOTWOakqRWqi/Hgo=,63c4a02b87b31c0f5cfcdfee5df2fa05e77eeaa2aab93b0ef217c57f6b37b38a
+github.com/micro-plat/lib4go,v0.2.1,h1:NBTIq0DvpRzTChnYShBagPmsYM4k1NgvkE8OYhgMDt8=,ae1056cc76eee3fccb14b0d8723b6444d8f31d2575a0caa1d3723bc54b91496b
+github.com/micro/cli,v0.2.0,h1:ut3rV5JWqZjsXIa2MvGF+qMUP8DAUTvHX9Br5gO4afA=,09e532e4616aa7827d1a1f249bc80ebb01fe8c63978f4b14605246c6be596b82
+github.com/micro/go-log,v0.1.0,h1:szYSR+yyTsomZM2jyinJC5562DlqffSjHmTZFaeZ2vY=,5ec9ba1cfb781edd3695dc9c28afb520cced5e1cf7eabb5faafd4bd8db6953ea
+github.com/micro/go-micro,v1.14.0,h1:lptn9DBbsNCB3RC3PMwxTJGqCUgU8Rf23nAMaRuOcOA=,2278cfa86f7bf97df81ea79535127cf87bf03aba29e7603f2feeb48b2d1a3334
+github.com/micro/go-rcache,v0.2.0,h1:g51QJW+lj+dAOXwRlYNZPQQ8ueHLptgoUzZE3iRwJMg=,fa96add40dac8fb14cf08f7a8c96d05c902da40b27b2c4e586cf3304e4ef6533
+github.com/micro/h2c,v1.0.0,h1:ejw6MS5+WaUoMHRtqkVCCrrVzLMzOFEH52rEyd8Fl2I=,6fea0303cbaa2bc6c45098ce5ad0ae2aa7f9c54ce2ff90160549756f8c7a2b07
+github.com/micro/mdns,v0.3.0,h1:bYycYe+98AXR3s8Nq5qvt6C573uFTDPIYzJemWON0QE=,a40ecbd32a2170698f0f49f8961b39e88e7c3e958546a401a59653231b51f1b2
+github.com/micro/micro,v1.14.0,h1:Uol1+Yg5frzneACpzoHEDsyNTN+/+yLrlGMuxR3RVRQ=,0fd330788ad610cc2cb3eb2224f1ca403d9888ad40e78628f250c885373d739c
+github.com/micro/util,v0.2.0,h1:6u0cPj1TeixEk5cAR9jbcVRUWDQsmCaZvDBiM3zFZuA=,3e61d5232a3a91d521ade483ab64b53a7b8760d0635978d72b4920eba52f8f79
+github.com/microcosm-cc/bluemonday,v1.0.2,h1:5lPfLTTAvAbtS0VqT+94yOtFnGfUWYyx0+iToC3Os3s=,9cfac37098da75ab1c278740e8f0f7741891d8843e14afb256574596ad786f83
+github.com/miekg/dns,v1.1.22,h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc=,54f1f62de314150df163bbe1de91acc922cdce70c5c8a43dfeb7f4af24711d38
+github.com/miekg/mmark,v1.3.6,h1:t47x5vThdwgLJzofNsbsAl7gmIiJ7kbDQN5BxwBmwvY=,8d1b05ee1c0a28093c678af2ed9d0aac9dfc30dce728ccd21fe1506762b54cee
+github.com/mindprince/gonvml,v0.0.0-20190828220739-9ebdce4bb989,h1:PS1dLCGtD8bb9RPKJrc8bS7qHL6JnW1CZvwzH9dPoUs=,6702f94187c4e2994ffbdc318c94a04d4bc67081a402e968a2c362a74c81263f
+github.com/minio/blake2b-simd,v0.0.0-20160723061019-3f5f724cb5b1,h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=,ab10edfe994b513e2d03cdd8122b352f31a1eb246fe884617b3f2f6195a3ca0c
+github.com/minio/cli,v1.22.0,h1:VTQm7lmXm3quxO917X3p+el1l0Ca5X3S4PM2ruUYO68=,33533a4e0a2b1a698d0f899cb5b84d9fc199e7723b971d1408e4b5ee797c9a50
+github.com/minio/dsync,v0.0.0-20180124070302-439a0961af70,h1:pRHQdPOlUhelWqNUF3icFrBSC6VYH1hvF6HigVfgMoI=,850e5b400afc4301a1860debf934c5e8e67565d4937ac45f9a37132b31a09941
+github.com/minio/highwayhash,v0.0.0-20180501080913-85fc8a2dacad,h1:L+8skVz2lusCbtlalLXmJp+TK8XaGAsZ3utSC3k5Jc0=,7393dfe736668f9ab98fcf2d264f9bd20bbf4f98538f02ff15df9604f747cdb1
+github.com/minio/lsync,v0.0.0-20180328070428-f332c3883f63,h1:utJHim4C0K4CmD+Qgod/tgHvo7QNOlH6HN5O8QUvPEI=,417c4bdd4fc5d50da2d81e8890b03af4b80ce9fbd5e4c196731a3d76a09913c1
+github.com/minio/mc,v0.0.0-20180926130011-a215fbb71884,h1:co3kRW9cEI65yolYtcLcNxp2a9yk5T/eEt7gw14tJVs=,37300de5179e1085559c6f317b331d261cc4508ba0e4febbd93cbbfef42d7fc9
+github.com/minio/minio,v0.0.0-20180508161510-54cd29b51c38,h1:F7p0ZU9AQuxlA6SWwhXr0H/rYrA9fOiBk2OzOj7GtfM=,6421e5cf72b35a2948e5edd2b189f37ad1896b8637d5b9bcf7cd40b7ab63dfd4
+github.com/minio/minio-go,v6.0.14+incompatible,h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o=,3bc396d5e1c0c6f3497743140eaf16ebb97c5f1ca815ba12c4f431e804fb737d
+github.com/minio/minio-go/v6,v6.0.27-0.20190529152532-de69c0e465ed,h1:g3DRJpu22jEjs14fSeJ7Crn9vdreiRsn4RtrEsXH/6A=,34d85b6b915ef5876f9c262f260583fabec147c37dcb82e1f42374dd088b9096
+github.com/minio/sha256-simd,v0.1.1,h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=,0ecfa6532265e139d5d9406c0a803c7ef45b1d8d0f0c1b1d55f7b81969294bfc
+github.com/minio/sio,v0.0.0-20180327104954-6a41828a60f0,h1:ys4bbOlPvaUBlA0byjm6TqydsXZu614ZIUTfF+4MRY0=,6c46bc4a68353d7b41f6e91eb276c9b21560cad4f75419baaee01764927fb7e8
+github.com/mistifyio/go-zfs,v2.1.1+incompatible,h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=,545764e34ed40473380ea1b08af9f0aea1715d15a0a56fc937e6c3b1bda0d9a3
+github.com/mitchellh/cli,v1.0.0,h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=,74199f2c2e1735a45e9f5c2ca049d352b0cc73d945823540e54ca9975ce35752
+github.com/mitchellh/colorstring,v0.0.0-20190213212951-d06e56a500db,h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=,d0733284b20567055e374b420373f5508fa47e95204e59e4b8a66834e7e3964d
+github.com/mitchellh/copystructure,v1.0.0,h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=,4a2c9eb367a7781864e8edbd3b11781897766bcf6120f77a717d54a575392eee
+github.com/mitchellh/go-fs,v0.0.0-20180402234041-7b48fa161ea7,h1:PXPMDtfqV+rZJshQHOiwUFqlqErXaAcuWy+/ZmyRfNc=,21c34fee3df3dc1ddad5e774ddf9e05998061177420709fb68a958c6c113a90b
+github.com/mitchellh/go-homedir,v1.1.0,h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=,fffec361fc7e776bb71433560c285ee2982d2c140b8f5bfba0db6033c0ade184
+github.com/mitchellh/go-linereader,v0.0.0-20190213213312-1b945b3263eb,h1:GRiLv4rgyqjqzxbhJke65IYUf4NCOOvrPOJbV/sPxkM=,7b83ef857c71fe8d4937b57923923176dd43c7b1b7632a9779bac411924e87e1
+github.com/mitchellh/go-ps,v0.0.0-20190716172923-621e5597135b,h1:9+ke9YJ9KGWw5ANXK6ozjoK47uI3uNbXv4YVINBnGm8=,06090b6c22dedf800259eb5d9b5f35bfb7b38e22888c0345631dc54366b21f89
+github.com/mitchellh/go-testing-interface,v1.0.0,h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=,255871a399420cd3513b12f50738d290e251637deb23e21a4332192584ecf9c7
+github.com/mitchellh/go-vnc,v0.0.0-20150629162542-723ed9867aed,h1:FI2NIv6fpef6BQl2u3IZX/Cj20tfypRF4yd+uaHOMtI=,2d65ac584e1a17421265fe97f83bd1cbff447ca6a911fa8d91414fa2115e3e74
+github.com/mitchellh/go-wordwrap,v1.0.0,h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=,9ea185f97dfe616da351b63b229a5a212b14ac0e23bd3f943e39590eadb38031
+github.com/mitchellh/gox,v1.0.1,h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI=,30a69e17ba5cafe6f1ac436bcc99368a5a34f0a0763926d2c6780a781f8e9e95
+github.com/mitchellh/hashstructure,v1.0.0,h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=,3b79b07860631d05645ea3f54830b7e1997dbcf477e84a8adfe4979be3abdfde
+github.com/mitchellh/iochan,v1.0.0,h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=,f3eede01adb24c22945bf71b4f84ae25e3744a12b9d8bd7c016705adc0d778b8
+github.com/mitchellh/mapstructure,v1.1.2,h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=,cd86d8586cbc333de28f6a46989069487877fae437df4c2cc417668d203c7305
+github.com/mitchellh/panicwrap,v0.0.0-20190213213626-17011010aaa4,h1:jw9tsdJ1FQmUkyTXdIF/nByTX+mMnnp16glnvGZMsC4=,b9ab07bbacf733cc24f9f7f53eec19f9bf999cbb35180ad0b615fe437640de6e
+github.com/mitchellh/pointerstructure,v0.0.0-20190430161007-f252a8fd71c8,h1:1CO5wil3HuiVLrUQ2ovSTO+6AfNOA5EMkHHVyHE9IwA=,658a3e14e4983f3c8a04c8da4a56d4d8a86e2b4fcaa6b1eefab150efcd742848
+github.com/mitchellh/prefixedio,v0.0.0-20190213213902-5733675afd51,h1:eD92Am0Qf3rqhsOeA1zwBHSfRkoHrt4o6uORamdmJP8=,d3209d88b3b5b05ecd48f469bc16811666f786685c49273664a5496d5dd69018
+github.com/mitchellh/reflectwalk,v1.0.1,h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=,bf1d4540bf05ea244e65fca3e9f859d8129c381adaeebe7f22703959aadc4210
+github.com/mjibson/esc,v0.2.0,h1:k96hdaR9Z+nMcnDwNrOvhdBqtjyMrbVyxLpsRCdP2mA=,9f090786bd43dddb5c0d798b449d5e8aede4cb7d106f56dcac0aebd8fd1929cc
+github.com/mndrix/ps,v0.0.0-20131111202200-33ddf69629c1,h1:kCroTjOY+wyp+iHA2lZOV5aJ6WfBVjGnW8bCYmXmLPo=,30b12b7a2467d4a1aa64aa31c715cb45d570d36e31ae70719101d686363d2685
+github.com/mndrix/tap-go,v0.0.0-20171203230836-629fa407e90b,h1:Ga1nclDSe8gOw37MVLMhfu2QKWtD6gvtQ298zsKVh8g=,c6f65bd8d977e53fa083d9d0309cffb0dbfaaae69a5a64a352fb2f7d079ce73d
+github.com/modern-go/concurrent,v0.0.0-20180306012644-bacd9c7ef1dd,h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=,91ef49599bec459869d94ff3dec128871ab66bd2dfa61041f1e1169f9b4a8073
+github.com/modern-go/reflect2,v1.0.1,h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=,6af8268206d037428a4197bd421bbe5399c19450ef53ae8309a083f34fb7ac05
+github.com/mohae/deepcopy,v0.0.0-20170929034955-c48cc78d4826,h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=,41ba726508a213f4af89e7d58937263ff778e352d591edd422d3a3dc3272585c
+github.com/mongodb/grip,v0.0.0-20191008181606-ee248dc03622,h1:pPoJByX3B56ydhWGUMard1QQ2skLNTw/s1W5VuLLAtA=,08fcfea928382f428dc1fceeada1c264e7f6dc7256dbe05c5c0ba41dca16a42c
+github.com/monoculum/formam,v0.0.0-20190830100315-7ff9597b1407,h1:ZU5O9BawmEx9Mu1lxn9NLIwO9DrqRfjE+HWKU+e9GKQ=,5a04e3907fb1008c1e6640e8a0e9394c752aab4ebf7e3be01cd3ee55c2659121
+github.com/montanaflynn/stats,v0.5.0,h1:2EkzeTSqBB4V4bJwWrt5gIIrZmpJBcoIRGS2kWLgzmk=,05527945351f54f4e8c48666bce277fbace34026eed22ac7d88a50a6730767f1
+github.com/morikuni/aec,v1.0.0,h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=,c14eeff6945b854edd8b91a83ac760fbd95068f33dc17d102c18f2e8e86bcced
+github.com/moul/anonuuid,v0.0.0-20160222162117-609b752a95ef,h1:E/seV1Rtsnr2juBw1Dfz4iDPT3/5s1H/BATx+ePmSyo=,ec103e75b93231b5b858a2fc9985da39d6b7c35644a689a20e60f3a6ad6b1396
+github.com/moul/gotty-client,v0.0.0-20180327180212-b26a57ebc215,h1:y6FZWUBBt1iPmJyGbGza3ncvVBMKzgd32oFChRZR7Do=,265c4cbad4789e267f283b9012ad174c89e378e59ad9c64ac28729402eb60afe
+github.com/moul/http2curl,v0.0.0-20161031194548-4e24498b31db,h1:eZgFHVkk9uOTaOQLC6tgjkzdp7Ays8eEVecBcfHZlJQ=,2ff4e19b14d84f6d181afc79f28668c6171d6dea79c43a1918c0428a265137c1
+github.com/mozilla-services/heka,v0.10.0,h1:w+y6RPJkU6ZKeNbG1VvK9aSqJm0sru5TYcwOj6ejv8U=,f325891304f9acc654944d9a2297b8816a0a86440b2f035c4996ec38fcfa0eed
+github.com/mozillazg/go-cos,v0.12.0,h1:b9hUd5HjrDe10BUfkyiLYI1+z4M2kAgKasktszx9pO4=,5376eaf13e10fed6d73b713fbabc4a159d204239579120c410ea74de33dd6d71
+github.com/mozillazg/go-httpheader,v0.2.1,h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ=,50b7a36360fc1ec1a85fd40fe45f8db02fc734fc2af0514a60a068f0a2708122
+github.com/mozillazg/go-unidecode,v0.1.1,h1:uiRy1s4TUqLbcROUrnCN/V85Jlli2AmDF6EeAXOeMHE=,812d3bc9f03cb6a8552bfadd9e0d1b44a57807a3af2e8667a42861510bb2b20c
+github.com/mpvl/unique,v0.0.0-20150818121801-cbe035fff7de,h1:D5x39vF5KCwKQaw+OC9ZPiLVHXz3UFw2+psEX+gYcto=,af2bcc8a61a6881e0703afee2217dd1e75c8b34f4e49947c0d7f6e87af574e0e
+github.com/mr-tron/base58,v1.1.2,h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78=,c2b362db55d8266ce02a161b7f73cad646432d2dae98511385b88481380c4e86
+github.com/mreiferson/go-httpclient,v0.0.0-20160630210159-31f0106b4474,h1:oKIteTqeSpenyTrOVj5zkiyCaflLa8B+CD0324otT+o=,e94cbe43c052831323c59ff186c830ea2e271065f7f8b2794ade7aaf88a37a85
+github.com/mrjones/oauth,v0.0.0-20180629183705-f4e24b6d100c,h1:3wkDRdxK92dF+c1ke2dtj7ZzemFWBHB9plnJOtlwdFA=,4c1fef02b34241008ba6bc33fb5d01b4cfb3b7e7544fb7f70823fe74b9b21362
+github.com/mrunalp/fileutils,v0.0.0-20171103030105-7d4729fb3618,h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=,c32d691ce15012ba21fbe69db3558df0c97326426c14ef747b8a1e02652ca7b3
+github.com/mschoch/smat,v0.0.0-20160514031455-90eadee771ae,h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=,488e193897c7d8e3b3758cbeb8a5bc1b58b9619f3f14288a2ea9e0baa5ed9b3e
+github.com/msteinert/pam,v0.0.0-20151204160544-02ccfbfaf0cc,h1:z1PgdCCmYYVL0BoJTUgmAq1p7ca8fzYIPsNyfsN3xAU=,315d911c41d88a22bf8831b174bbd15310bc403626507095f98b9780ddcf9174
+github.com/muesli/smartcrop,v0.0.0-20180228075044-f6ebaa786a12,h1:l0X/8IDy2UoK+oXcQFMRSIOcyuYb5iEPytPGplnM41Y=,5857e4d0ed238d8c6f8f41294b98771f1c21874a80ea5f2e75b4a49cbcf1d3e0
+github.com/multiformats/go-base32,v0.0.3,h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=,658875e4980370db6180f99835b3a48158a697eef69e7c3eb86b0b4f5c1c19ed
+github.com/multiformats/go-multiaddr,v0.1.1,h1:rVAztJYMhCQ7vEFr8FvxW3mS+HF2eY/oPbOMeS0ZDnE=,ba4849fc68453c3e812e850f40e6d5acef671060ed79f203c2d179d395d20fc5
+github.com/multiformats/go-multiaddr-dns,v0.0.2,h1:/Bbsgsy3R6e3jf2qBahzNHzww6usYaZ0NhNH3sqdFS8=,219f855f485aa198d36305f2f43012a73bd40f15caa3e606324cee9f117e5b89
+github.com/multiformats/go-multiaddr-fmt,v0.1.0,h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=,d83537dc1f83185dfb60b190ea4b3c7b05c552a75ad7cfaddd0b987c00ff0cff
+github.com/multiformats/go-multiaddr-net,v0.1.1,h1:jFFKUuXTXv+3ARyHZi3XUqQO+YWMKgBdhEvuGRfnL6s=,241c47d621bcb9a40d33284f407a7fdf458cb3f87ef02db68735cc6b9002afed
+github.com/multiformats/go-multibase,v0.0.1,h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA=,ed39145efcf5e8c99deaa183071aed246239730f5781b291bad7de5d1fc12d81
+github.com/multiformats/go-multihash,v0.0.8,h1:wrYcW5yxSi3dU07n5jnuS5PrNwyHy0zRHGVoUugWvXg=,44fae6e8771331f54f267d9440a9d520e7daeb91817ff61e26b8494099ae046a
+github.com/multiformats/go-multistream,v0.1.0,h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ=,f720be6e29845f0a41c1241a24f19c08adf762f9e7e972b4096416776c603b15
+github.com/munnerz/goautoneg,v0.0.0-20191010083416-a7dc8b61c822,h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=,3d7ce17916779890be02ea6b3dd6345c3c30c1df502ad9d8b5b9b310e636afd9
+github.com/mwitkow/go-conntrack,v0.0.0-20190716064945-2f068394615f,h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=,d6fc513490d5c73e3f64ede3cf18ba973a4f8ef4c39c9816cc6080e39c8c480a
+github.com/mwitkow/go-grpc-middleware,v1.0.0,h1:XraEe8LhUuB33YeV4NWfLh2KUZicskSZ2lMhVRnDvTQ=,074f46f92d7a0043c5b283f1af224123cc48e21f96b259e62f77b6da72240812
+github.com/mxk/go-flowrate,v0.0.0-20140419014527-cca7078d478f,h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=,bd0701ef9115469a661c07a3e9c2e572114126eb2d098b01eda34ebf62548492
+github.com/myesui/uuid,v1.0.0,h1:xCBmH4l5KuvLYc5L7AS7SZg9/jKdIFubM7OVoLqaQUI=,3055c4b167daeb9984ccd7c8eeba154e3d84afa6fdf06a3151280ef120d1633d
+github.com/myitcv/gobin,v0.0.8,h1:hQORun03Mlnm8yp/OgKX8UYSIVZQ8ebTWf3aahY1u+s=,015311e9db646cb9e5f63a0586c466c9eb5bc5f45661282644f8a5b549607e72
+github.com/myitcv/vbash,v0.0.2,h1:8R+91eSlfcgoRjEbnUgvbXYOmfh+p0+7i5klFOM5VMA=,08dcf62b94843e7fd115cd0605158d948fb361ca8c958db1958c5d2feef9c2d1
+github.com/namedotcom/go,v0.0.0-20180403034216-08470befbe04,h1:o6uBwrhM5C8Ll3MAAxrQxRHEu7FkapwTuI2WmL1rw4g=,0c6ea2c994e982c25e44ccba2ead1a9655cd2f253986eedb73253c30ad21b42f
+github.com/naoina/go-stringutil,v0.1.0,h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=,4cfea6f0ebfecb5e6297f8a6eee0e9ef9fe254883eb75dd6179133995a219c58
+github.com/naoina/toml,v0.1.1,h1:PT/lllxVVN0gzzSqSlHEmP8MJB4MY2U7STGxiouV4X8=,8e34d510563d9e8b3f2dbdf0927bf5108b669144bdbe2fda4fcb44e7e2e55268
+github.com/natefinch/lumberjack,v2.0.0+incompatible,h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=,1f6e7c9e0b915c45151d8780a8711426b19d16d04c9cf0e7995b29035d6b500f
+github.com/nats-io/gnatsd,v1.3.0,h1:+5d80klu3QaJgNbdavVBjWJP7cHd11U2CLnRTFM9ICI=,85fa90b3eaef17698734d398a9939b8bb94df1b9f35bc92c8d31cb7a349c1e97
+github.com/nats-io/go-nats,v1.6.0,h1:FznPwMfrVwGnSCh7JTXyJDRW0TIkD4Tr+M1LPJt9T70=,8c63be6f10479802a40c66c0999f724e492bcb9863d5517038c6472e585a76aa
+github.com/nats-io/go-nats-streaming,v0.4.2,h1:e7Fs4yxvFTs8N5xKFoJyw0sVW2heJwYvrUWfdf9VQlE=,62dd1d6ba18f3b7686766116e3beaaf9f62b89b58a6efb0b8f1ad04d3ddfb026
+github.com/nats-io/jwt,v0.3.0,h1:xdnzwFETV++jNc4W1mw//qFyJGb2ABOombmZJQS4+Qo=,e131314c7cf6a714ec10ca3b6f95f8af6a41f5cdaf72a364f7c71b33e97314db
+github.com/nats-io/nats,v1.6.0,h1:U5b2apHOTZlUou+NGfCRWG4ZEeivbt2hpsZO4kHKIVU=,12cc70ed3477472d110d4b4bc109fbe20218e8199629669ad5f617c199fbf9d2
+github.com/nats-io/nats-server/v2,v2.1.0,h1:Yi0+ZhRPtPAGeIxFn5erIeJIV9wXA+JznfSxK621Fbk=,a5897b8f5302ae38894de2c240f31d33ab7b2f3d4e88a2c212fc9b31f2d4f444
+github.com/nats-io/nats-streaming-server,v0.12.2,h1:EpyLfUBZgwu5c0mdSSytQsapm615AyitPssq7jgafdw=,48605f61f74903ba1322f11aa17806b57f71cebf2557b7dd8620d4193abc868d
+github.com/nats-io/nats.go,v1.9.1,h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=,34a735d158d70685faad1fc3153f08da0ddc21c0ae42f6a0cb09430d638364b2
+github.com/nats-io/nkeys,v0.1.0,h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4=,dbc82abacf752e532ffd67db230a97f52a5f92070b04b4028cb79534d2ab0ef6
+github.com/nats-io/nuid,v1.0.1,h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=,809d144fbd16f91651a433e28d2008d339e19dafc450c5995e2ed92f1c17c1f3
+github.com/nats-io/stan.go,v0.5.0,h1:ZaSPMb6jnDXsSlOACynJrUiB3Evleg3ZyyX+rnf3TlQ=,1dcb14e2ef8ad30dd1ee61a63b0a3bfbaa48e9c3d13f69458a149956a14bbab7
+github.com/nbio/st,v0.0.0-20140626010706-e9e8d9816f32,h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=,e6cd27bd360be27d0f7efd3c4c41c4e14e659e60086b0bc4f09fb09cfd02a50d
+github.com/ncw/swift,v1.0.49,h1:eQaKIjSt/PXLKfYgzg01nevmO+CMXfXGRhB1gOhDs7E=,b2be24cad8923c9171835547df2d621d2aa2029ceb9fa770d6ecf3bf70c2c029
+github.com/neelance/astrewrite,v0.0.0-20160511093645-99348263ae86,h1:D6paGObi5Wud7xg83MaEFyjxQB1W5bz5d0IFppr+ymk=,815811c2140669e55e99d59d4bdd2fcf4c810610a9d278fd25cc2c3480c002d4
+github.com/neelance/sourcemap,v0.0.0-20151028013722-8c68805598ab,h1:eFXv9Nu1lGbrNbj619aWwZfVF5HBrm9Plte8aNptuTI=,ce5499f29779a604233bb76f36925c3326a8a8f270533df8d3dff1107b7aa066
+github.com/neurosnap/sentences,v1.0.6,h1:iBVUivNtlwGkYsJblWV8GGVFmXzZzak907Ci8aA0VTE=,9dbe86e291937eba92847454650d1c65338527ff89dec5daccb99aaf7e03865b
+github.com/newrelic/go-agent,v2.15.0+incompatible,h1:IB0Fy+dClpBq9aEoIrLyQXzU34JyI1xVTanPLB/+jvU=,4c541c5f7b10055c37cf22843edbb9b0fcb06ad3504e8d6eae3d9c37ff3c64c6
+github.com/nf/cr2,v0.0.0-20140528043846-05d46fef4f2f,h1:nyKdx+jcykIdxGNrbgo/TGjdGi99EY9FKBCjYAUS4bU=,665afbe7830424dd9815cae42aa7762b657484686d671f88704257ea7c9736be
+github.com/nfnt/resize,v0.0.0-20180221191011-83c6a9932646,h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=,b8e97cb14e5e5ef29d762d2dff890f6279a125990ddf9cb7ae5c4d2a015b109c
+github.com/ngaut/pools,v0.0.0-20180318154953-b7bc8c42aac7,h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c=,26342833d7a5b91a52f8451e8e34bc9ffc5069d342666ab0b478628c41a86d44
+github.com/ngaut/sync2,v0.0.0-20141008032647-7a24ed77b2ef,h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k=,2635d6120b6172c190f84b57b5fc878f9158b768b4bd6bd4468bfa98a73061a4
+github.com/nicksnyder/go-i18n,v2.0.2+incompatible,h1:Xt6dluut3s2zBUha8/3sj6atWMQbFioi9OMqUGH9khg=,687be9dc953545d390761e5464e07c38f313d19c1f695f7d7702d954afcf6b66
+github.com/nicolai86/scaleway-sdk,v1.10.2-0.20180628010248-798f60e20bb2,h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s=,a2e992324edd4396f24e0b6a165c4d1057eeefdecdc9f7472b0de8a30f3be729
+github.com/niklasfasching/go-org,v0.1.6,h1:F521WcqRNl8OJumlgAnekZgERaTA2HpfOYYfVEKOeI8=,c938afb1ad7f567524686395c9de66da75220eaa60fe8917c02b97aa1e2cbbb1
+github.com/nkovacs/streamquote,v0.0.0-20170412213628-49af9bddb229,h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg=,679a789b4b1409ea81054cb12e5f8441199f5fb17d4a2d3510c51f3aa5f3f0cc
+github.com/nlopes/slack,v0.6.0,h1:jt0jxVQGhssx1Ib7naAOZEZcGdtIhTzkP0nopK0AsRA=,048ddfddd4a66407f26b069a65d4d8f3d6d0368adcd52fd5a0dc6d86fe012f47
+github.com/nrdcg/auroradns,v1.0.0,h1:b+NpSqNG6HzMqX2ohGQe4Q/G0WQq8pduWCiZ19vdLY8=,81e3564b38ca27024b6e981a03ae70afcf435d5f8d35a2113321dfd3a220f00b
+github.com/nrdcg/goinwx,v0.6.1,h1:AJnjoWPELyCtofhGcmzzcEMFd9YdF2JB/LgutWsWt/s=,8e1e3ea7d38f5b9b21603350d97a583c9108d380f5cc08bf93a4c69d6968dc8a
+github.com/nrdcg/namesilo,v0.2.1,h1:kLjCjsufdW/IlC+iSfAqj0iQGgKjlbUUeDJio5Y6eMg=,e20a47d9257fcf7ce95254b14bb84ba290b5f4867e4d63027b669f5a55aaab6c
+github.com/nsf/jsondiff,v0.0.0-20160203110537-7de28ed2b6e3,h1:OqFSgO6CJ8heZRAbXLpT+ojX+jnnGij4qZwUz/SJJ9I=,9652618358184592fb7a4657e2c51748cbe0bf5bbf97150a2c6e95ecf65b126b
+github.com/nsf/termbox-go,v0.0.0-20190817171036-93860e161317,h1:hhGN4SFXgXo61Q4Sjj/X9sBjyeSa2kdpaOzCO+8EVQw=,a64e374836a25ab74ece4eb5314d79617d8b828bd6d13c654d95bed920c82784
+github.com/nsqio/go-nsq,v1.0.7,h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY=,5acb7902bf31355fa7d77f507ed42847368834eb378fbf407d82ae3e4211e248
+github.com/nu7hatch/gouuid,v0.0.0-20131221200532-179d4d0c4d8d,h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=,0889a0ac13cfa9f32f986a88a82bb24380070932299131ae7d7180a389d08ca7
+github.com/nullstyle/go-xdr,v0.0.0-20180726165426-f4c839f75077,h1:A804awGqaW7i61y8KnbtHmh3scqbNuTJqcycq3u5ZAU=,0ab4f958f0420027d40b53c98bcb8f3cbe1e106dfb49d3e91415cb1c512a552c
+github.com/nutmegdevelopment/sumologic,v0.0.0-20160817160817-42ed9b372fa3,h1:xOEJG5C3e8CvgAYsnkgoSBzCr0No+m++aB6v7A2WScY=,a33916e02e1159304145b621ffdf284120e50f618c684f38776a8bab7ae7b3fe
+github.com/nwaples/rardecode,v0.0.0-20171029023500-e06696f847ae,h1:UF9xsJn7AeQ72TCus3eRO1lh08Id3AoF37vl+qigL/w=,5598a02308af3b04418b15854ff940be49cf31ce7238ce23c10409110364d40f
+github.com/ogier/pflag,v0.0.1,h1:RW6JSWSu/RkSatfcLtogGfFgpim5p7ARQ10ECk5O750=,c4db0ecff32deb3205c705d72a616bce01e1f6a1948c851d30b52deeec3fbf91
+github.com/oklog/run,v1.0.0,h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=,108d409b7d235d61b82cfb6e1df139501123fcd8fa68fe94ddb024b53335cb48
+github.com/oklog/ulid,v1.3.1,h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=,40e502c064a922d5eb7f2bc2cda9c6a2a929ec0fc76c9aae4db54fb7b6b611ae
+github.com/olekukonko/tablewriter,v0.0.1,h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=,7e5cc8a9b5a51126a0cb46ac96b274d92a8b1cc24b2321832c38d60c0ea4cc9c
+github.com/oliamb/cutter,v0.2.2,h1:Lfwkya0HHNU1YLnGv2hTkzHfasrSMkgv4Dn+5rmlk3k=,9174c2374109a7d3aeb2c59b5f4b744ec5f65752aab797f0d50beb26cfc7d857
+github.com/olivere/elastic,v6.2.25+incompatible,h1:X34sPAlSpZVlnuSjOYwbMbiCMU+WKK7YUxrunuNSdG8=,bf3b4cc7ea89a716e91002a31b33f55ec3168ce5ab36ffe5c02ff68d94b9aad5
+github.com/olivere/env,v1.1.0,h1:owp/uwMwhru5668JjMDp8UTG3JGT27GTCk4ufYQfaTw=,f486deab73b3d7866e762e1ad34fe63c88e9ac38f41d811414361fb6490bbb2c
+github.com/onsi/ginkgo,v1.10.3,h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=,088314495acb90d1e520519b243f4dbdd17b43469e6fb83bd45d600796856e63
+github.com/onsi/gomega,v1.7.1,h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=,0a245e719f17cc2bc399aa7c2005cca84f1cfba5373b0c96f5c64673f758a712
+github.com/op/go-logging,v0.0.0-20160315200505-970db520ece7,h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88=,c506eace74028656eb28677a4c162f9c023ce2f9c0207354ba80cca89f11b461
+github.com/openconfig/gnmi,v0.0.0-20190823184014-89b2bf29312c,h1:a380JP+B7xlMbEQOlha1buKhzBPXFqgFXplyWCEIGEY=,f52967c7b194daa57252042f6ccf9d26f8c599a7e13aca26043f948d5139b91a
+github.com/openconfig/reference,v0.0.0-20190727015836-8dfd928c9696,h1:yHCGAHg2zMaW8olLrqEt3SAHGcEx2aJPEQWMRCyravY=,040cf32cee7256a08716313dd7ea4f8c44f1d644ae872ecf2dd381c35b12125c
+github.com/opencontainers/go-digest,v1.0.0-rc1,h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=,25fd455029e8a1bbe15ed2eeafc67222372c6f305a47b4ec157d8a1a2849c15c
+github.com/opencontainers/image-spec,v1.0.1,h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=,ebb2dca711a137fbfb717158b0368792f834000f4308d9ea259d06c6804c677c
+github.com/opencontainers/runc,v0.1.1,h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=,aa212163f009190d0f4f3dbe64f71fcda06d7896b67863d7f7b185fee6a68ea6
+github.com/opencontainers/runtime-spec,v1.0.1,h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=,1958458b00ce912425f5c7d2ee836431b296a3f9320d565512d8c96b107fffbf
+github.com/opencontainers/runtime-tools,v0.9.0,h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=,53c720dbb7452cfb2fd3945e37c26b5a0140cb1012d35a2b72a5e035f28a32c4
+github.com/opencontainers/selinux,v1.3.0,h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=,88286825b32cd46a0469e578f378a185032da2d5b03893623861ef3af59359d8
+github.com/openshift/client-go,v3.9.0+incompatible,h1:13k3Ok0B7TA2hA3bQW2aFqn6y04JaJWdk7ITTyg+Ek0=,661b7f28b4905f1936dd58e373374513d54663ec85aecafede1c7d9c260e9369
+github.com/openshift/library-go,v0.0.0-20191101161407-e7c97b468b83,h1:wwR+laNaFKVGiizoIDL/cAKIZVoKXJ9jbjUoUlq2p5I=,c74f8134013f978ef154d6accf9b4b0c5126941f2d45e6eb223db7098f7ab2a4
+github.com/opentracing-contrib/go-observer,v0.0.0-20170622124052-a52f23424492,h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU=,50023eee1ef04412410f43d8b5dcf3ef481c0fc39067add27799654705fa84b2
+github.com/opentracing-contrib/go-stdlib,v0.0.0-20190519235532-cf7a6c988dc9,h1:QsgXACQhd9QJhEmRumbsMQQvBtmdS0mafoVEBplWXEg=,966cdf6d869ff62c35edf1ea00113465cc9b90f34838c6a6990a1f776e7d1152
+github.com/opentracing/basictracer-go,v1.0.0,h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=,a908957c8e55b7b036b4761fb64c643806fcb9b59d4e7c6fcd03fca1105a9156
+github.com/opentracing/opentracing-go,v1.1.0,h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=,3e0f42d035019fa037991d340da9677a802f8182792770c38e87906d33e06629
+github.com/openzipkin-contrib/zipkin-go-opentracing,v0.4.4,h1:bzTJRoOZEN7uI1gq594S5HhMYNSud4FKUEwd4aFbsEI=,8a4688f80cd67140aa4edb91506d440ecea4d8ec01634caab5c95991af011c5d
+github.com/openzipkin/zipkin-go,v0.2.2,h1:nY8Hti+WKaP0cRsSeQ026wU03QsM762XBeCXBb9NAWI=,dfc610dc52d9299df49172a9e61fcc772d85450b6b6f82e8f43cf23562232a4c
+github.com/oracle/oci-go-sdk,v7.0.0+incompatible,h1:oj5ESjXwwkFRdhZSnPlShvLWYdt/IZ65RQxveYM3maA=,941cd26813b22873477f1c6bb86fed929bdc85379d435bd9707d923f57d070dc
+github.com/orcaman/concurrent-map,v0.0.0-20190826125027-8c72a8bb44f6,h1:lNCW6THrCKBiJBpz8kbVGjC7MgdCGKwuvBgc7LoD6sw=,ec80830c751199283290a8d398ebf28ca5169e866a70347b39856d2c1178f2cb
+github.com/ory/dockertest,v3.3.4+incompatible,h1:VrpM6Gqg7CrPm3bL4Wm1skO+zFWLbh7/Xb5kGEbJRh8=,cbcc7ba21c846d38229aa06a2d7cf35b99ac219eb2694bd9a1ceeac89667e475
+github.com/ory/herodot,v0.6.2,h1:zOb5MsuMn7AH9/Ewc/EK83yqcNViK1m1l3C2UuP3RcA=,caf465ffb73c7537212ba4fd58a4c2c41fe7ca69737404a28e84ceff90c340ea
+github.com/otiai10/copy,v0.0.0-20180813032824-7e9a647135a1,h1:A7kMXwDPBTfIVRv2l6XV3U6Su3SzLUzZjxnDDQVZDIY=,67d0e4f6ba369653e30257882fbbb20c28b560bc837e1847a42c48e868f1c81c
+github.com/otiai10/curr,v0.0.0-20150429015615-9b4961190c95,h1:+OLn68pqasWca0z5ryit9KGfp3sUsW4Lqg32iRMJyzs=,7cf2143067d9bb3e7d54d2906766bb24c11d76f1bb0b0c5069574e9a0d8ae93d
+github.com/otiai10/mint,v1.2.3,h1:PsrRBmrxR68kyNu6YlqYHbNlItc5vOkuS6LBEsNttVA=,0b82a05ca43810c9aa8299ddae1663feeb178d699aeb5242c3bdeb61cb5a54fb
+github.com/outscale/osc-go,v0.0.1,h1:hvBtORyu7sWSKW1norGlfIP8C7c2aegI2Vkq75SRPCE=,2a988384c564fdba8b8c496024aafc212140e4b996654be7a92a3b0c7a962632
+github.com/ovh/go-ovh,v0.0.0-20181109152953-ba5adb4cf014,h1:37VE5TYj2m/FLA9SNr4z0+A0JefvTmR60Zwf8XSEV7c=,0fa35e8026a9b3aebd804739f31ffe07e553b84e2b8ea145b2f2ebaa0dd7c08f
+github.com/oxtoacart/bpool,v0.0.0-20190530202638-03653db5a59c,h1:rp5dCmg/yLR3mgFuSOe4oEnDDmGLROTvMragMUXpTQw=,6816ec3a6f197cbee0ba6ddb9ec70958bc28870e59864b24e43da0c858079a1b
+github.com/packer-community/winrmcp,v0.0.0-20180921204643-0fd363d6159a,h1:A3QMuteviunoaY/8ex+RKFqwhcZJ/Cf3fCW3IwL2wx4=,4a48fa503853d129e7e32ca81f069b9e09a9e3249739781f61fae70bb02d098b
+github.com/packethost/packngo,v0.1.1-0.20180711074735-b9cb5096f54c,h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE=,6dac4e55c104df58ace636ef31d5dd6173a36747c4fd79299252ba8826127491
+github.com/parnurzeal/gorequest,v0.2.16,h1:T/5x+/4BT+nj+3eSknXmCTnEVGSzFzPGdpqmUVVZXHQ=,cc7b7d56e2e4c3fa0709e0e547875807746ac067b2a5c4b740b3088c1fdf941d
+github.com/pascaldekloe/goe,v0.1.0,h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=,37b73886f1eec9b093143e7b03f547b90ab55d8d5c9aa3966e90f9df2d07353c
+github.com/patrickmn/go-cache,v2.1.0+incompatible,h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=,d5d1c13e3c9cfeb04a943f656333ec68627dd6ce136af67e2aa5881ad7353c55
+github.com/pborman/uuid,v1.2.0,h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=,b888ff5d33651a1f5f6b8094acc434dd6dc284e2fe5052754a7993cebd539437
+github.com/pelletier/go-buffruneio,v0.2.0,h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=,70593688607f4d48192776fe257ab9298689267ebcdd7b155bfe40d893735f38
+github.com/pelletier/go-toml,v1.6.0,h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4=,cc6dce19df6c6c30abd67594d17ea6015d1210aa6dd8c6096c6429eec06fdab4
+github.com/peterbourgon/diskv,v2.0.1+incompatible,h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=,1eeff260bd1ad71cd1611078995db99e1c7eba28628e7d6f24c79039536ea1cb
+github.com/peterbourgon/g2s,v0.0.0-20170223122336-d4e7ad98afea,h1:sKwxy1H95npauwu8vtF95vG/syrL0p8fSZo/XlDg5gk=,41526f42b4fe3019581ab3745afea18271d7f037eb55a6e9fb3e32fd09ff9b8d
+github.com/petergtz/pegomock,v2.7.0+incompatible,h1:42rJ5wIOBAg9OGdkLaPW9PlF/RtqDc5aGl6PcTCXl3o=,dc93e4483e8de4eb429e007aad17348822197ea7a3adde283b7752bc4544dfbb
+github.com/peterh/liner,v1.1.0,h1:f+aAedNJA6uk7+6rXsYBnhdo4Xux7ESLe+kcuVUF5os=,5cdc45c19901db8d8295c139bb382d7eea150e8fd96bd26de10384685728a461
+github.com/peterhellberg/link,v1.0.0,h1:mUWkiegowUXEcmlb+ybF75Q/8D2Y0BjZtR8cxoKhaQo=,d320f4204fbe886e1cefc0b677af2bfaba855e9e6556a6e92e43bcd80c3bb7a5
+github.com/petermattis/goid,v0.0.0-20180202154549-b0b1615b78e5,h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=,5134a176e306f9b973ff670a33c7536b59bf4114d83fd94f74c736ff0cc10ef0
+github.com/phayes/freeport,v0.0.0-20180830031419-95f893ade6f2,h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=,4ac97358de55a9b1ac60f13fdb223c5309a129fb3fb7bf731062f9c095a0796c
+github.com/philhofer/fwd,v1.0.0,h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=,b4e79b1f5fdfe8c44bf6dae3dd593c62862930114411a30968f304084de1d0b3
+github.com/pierrec/lz4,v2.3.0+incompatible,h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk=,775487f2be5ddf23034b59bc862cb0d5767155c5e08d1186665d117092ceb50f
+github.com/pingcap/check,v0.0.0-20190102082844-67f458068fc8,h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=,b8eeddacc35915d8c40b42e9af4db468ed309a506412a767ba6bb03bb7ce4627
+github.com/pingcap/errors,v0.11.4,h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=,df62e548162429501a88d936a3e8330f2379ddfcd4d23c22b78bc1b157e05b97
+github.com/pingcap/gofail,v0.0.0-20181217135706-6a951c1e42c3,h1:04yuCf5NMvLU8rB2m4Qs3rynH7EYpMno3lHkewIOdMo=,444866a53b7429e80a8a16791e39555de8103c7514cd322fe191c902b8071360
+github.com/pingcap/goleveldb,v0.0.0-20171020122428-b9ff6c35079e,h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8=,08ec0ffe5d0d74bdc543695f975316af6a63c17e36644ae56d42e30b0d1f8777
+github.com/pingcap/kvproto,v0.0.0-20191101062931-76b56d6eb466,h1:C5nV9osqA+R/R2fxYxVfqAUlCi3Oo5yJ/JSKDeHSAOk=,0d834c10c217c5de2c9ef79049891a69e73e102c4dbcd130173c3650e96da570
+github.com/pingcap/log,v0.0.0-20191012051959-b742a5d432e9,h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA=,eaece6f27792a39ccff08152050d4eb7905c250bf36877cacdd7e74c79d80472
+github.com/pingcap/parser,v0.0.0-20191101070347-94a5ef60f10b,h1:TLljHrSTC9MCTiUA6nMhV68my/D/FI3VNkUs94Wo3DE=,94e6857f4d2bf653edf4c2881cb8fb6b3abdf9efaec7d1f49159deec77580df2
+github.com/pingcap/pd,v2.1.17+incompatible,h1:mpfJYffRC14jeAfiq0jbHkqXVc8ZGNV0Lr2xG1sJslw=,b75266cd20abe6b1ccbb777a2f71d74dfcf231a06276b602df08bf27a9ea36f1
+github.com/pingcap/tidb-tools,v2.1.4+incompatible,h1:dkB4FMJcSk9GYRB2ICupU/lsTLf4mHLfkBE6fAsLdJ4=,c5c8e2b5c69c21bba2050c75d3a4582eda26308a355557036f058365d4583e5f
+github.com/pingcap/tipb,v0.0.0-20191030045153-07a0962bbc64,h1:wUSHIp4dura5/YAepdgDBEdf2zz20MHXyNtMi1TcaDE=,8ac8e775e3d5fd255b7a8f07460f3b19bebb04cb50a3c0f5d6f64cc2fd585177
+github.com/pkg/browser,v0.0.0-20180916011732-0a3d74bf9ce4,h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98=,b845f84fbf08bba75401a4eff94c01c9e2c668fa1b43016e835bd60c6a8b4e87
+github.com/pkg/errors,v0.8.1,h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=,4e47d021340b7396a7dee454f527552faf7360a9fc34038b1dc32ba3b5a951d8
+github.com/pkg/profile,v1.3.0,h1:OQIvuDgm00gWVWGTf4m4mCt6W1/0YqU7Ntg0mySWgaI=,5f20c007ac81019900f06cf1e4d451ce8e1d981460e39e04794fbcc60639f851
+github.com/pkg/sftp,v1.10.1,h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=,4e30f0455865434be7b83d4010ab97667217dafd0017caa651faafa2cc6aed64
+github.com/pkg/term,v0.0.0-20180730021639-bffc007b7fd5,h1:tFwafIEMf0B7NlcxV/zJ6leBIa81D3hgGSgsE5hCkOQ=,165bb00eeab26fe65c64e0e13bc29abc7ea18ac28d288e2218c137cd0bd91d9b
+github.com/plaid/plaid-go,v0.0.0-20161222051224-02b6af68061b,h1:Don6I/E8nLCT6gdBi1sKB9hYxkx/24YD7XWwSly8IEo=,bd900ff0acd2968150f60770ab4e870d9f6b92c129a49eac0c9620a8043f901e
+github.com/pmezard/go-difflib,v1.0.0,h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=,de04cecc1a4b8d53e4357051026794bcbc54f2e6a260cfac508ce69d5d6457a0
+github.com/polydawn/refmt,v0.0.0-20190408063855-01bf1e26dd14,h1:2m16U/rLwVaRdz7ANkHtHTodP3zTP3N451MADg64x5k=,a92440a944006fd3e0b6f1717fce4c2ea490cf2c4af93b56675216204f138c3a
+github.com/portworx/kvdb,v0.0.0-20190911174000-a0108bddd091,h1:DqGiNhvCpvhWW/HJ1naJa0DudtlckvzQ9hEXSsOyv8Y=,d6fa957e1469a1b47ccbebc805034bafc5ed24798a1bef8675f751f9c4ed961e
+github.com/portworx/sched-ops,v0.0.0-20191101005636-ded833c86f1e,h1:emQnaLwLEYN3Hner2ekVuZfrcChdN3H3J4Lxu5mPe64=,43ff366e97ff640a34a566c81dd7d63537c2864da85d33b49d5261417cd8d4b0
+github.com/posener/complete,v1.2.1,h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI=,a97f73829e0b71ae7a8f17a4884d5dcbb2c3499d8d3a077c2a8d7c2596f68d37
+github.com/pquerna/cachecontrol,v0.0.0-20180517163645-1555304b9b35,h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU=,0e5185ab4dab1bb2241e9e23e36ebde5713f3fb1e47767c3eb44001b7e17644f
+github.com/pquerna/ffjson,v0.0.0-20190930134022-aa0246cd15f7,h1:xoIK0ctDddBMnc74udxJYBqlo9Ylnsp1waqjLsnef20=,377b4667540f620eae19722b5346f6f1efdea5688f9eedda97f2c659dad131f9
+github.com/pquerna/otp,v1.1.0,h1:q2gMsMuMl3JzneUaAX1MRGxLvOG6bzXV51hivBaStf0=,d46d289853f801387dfc514fd50133de30b684a6af34031b27caa877cbb7f687
+github.com/profitbricks/profitbricks-sdk-go,v4.0.2+incompatible,h1:ZoVHH6voxW9Onzo6z2yLtocVoN6mBocyDoqoyAMHokE=,b0baf185752eb96f8890f3e9adf856b13f5c43b5346387b659e2b1deb1d087c7
+github.com/project-flogo/core,v0.9.3,h1:uZXHR9j1Byqt+x3faNnOqB8NlEfwE2gpCh40iQ+44oA=,d1c43e3bc517bb438a9d313d976e327ba219232418064d439fb20671341832a2
+github.com/projectcalico/libcalico-go,v1.7.3,h1:qcbxAhsq/5zqZqpHE24VqMHfmoBVdXZV0Kf82+5rbqU=,4f638d56eb47ff8e1763f65131050294f7d2c828139276fe86127a803245ae8c
+github.com/prometheus/alertmanager,v0.18.0,h1:sPppYFge7kdf9O96KIh3fd093D1xN8JxIp03wW6yAEE=,45e122e7c2ac69577d63844313798060673a28b2e86ec8a0197f330c584b379b
+github.com/prometheus/client_golang,v1.2.1,h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI=,174c921fe3e154adddd8e0dc572323dd04901bcad0965de614174241981da57c
+github.com/prometheus/client_model,v0.0.0-20190812154241-14fe0d1b01d4,h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=,5d4719be47f4f69ab5bf36a04c75eb078a0f69b43a335f400c2d688ac9e61795
+github.com/prometheus/common,v0.7.0,h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=,f2640a94b18b115552df41ee33effa013e10536aca51e09a971d1503a20e186a
+github.com/prometheus/procfs,v0.0.5,h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=,f45b90c72f8c2e4c84e5314092ee1ccf7d6ace1cc14b2f483c82f7c1e6d0d0d4
+github.com/prometheus/prom2json,v1.1.0,h1:/fEL2DK7EEyHVeGMG4TV+gSS9Sw53yYKt//QRL0IIYE=,166f5f98c62d0b90139947d1464ee747f8143772b9e926c7b51c53a4420380ff
+github.com/prometheus/prometheus,v2.5.0+incompatible,h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg=,ede73f6ccabd60365549986a6c7ae152c1952129006c8ae521c86ff45c4aadcc
+github.com/prometheus/tsdb,v0.10.0,h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=,34e98f0e9ba55e7290774ee40569737745b395e32811e5940d2ed124a20f927c
+github.com/pyinx/gorocket,v0.0.0-20170810024322-78ae1353729f,h1:N1r6pSlez3lLsqaNHbtrHW9ZuzrilETIabr9jPNj3Zs=,dcd920b789a98157bbe1ed7fff249255c1dd4d2fd80f7edc39b3a49fc08db13a
+github.com/qor/admin,v0.0.0-20191021122103-f3db8244d2d2,h1:IWw22+hlihdss/qI93QH48jTBUEOD/fsBqj+0z61z/Y=,722e243550878791adcdb3bcea66ab8e7a185637c8a7ad0a752713951aef2a91
+github.com/qor/assetfs,v0.0.0-20170713023933-ff57fdc13a14,h1:JRpyNNSRAkwNHd4WgyPcalTAhxOCh3eFNMoQkxWhjSw=,7fe36875e7e59afd9154f827babbffaa7f67ac54b7790df5a4a4a376c78b2282
+github.com/qor/middlewares,v0.0.0-20170822143614-781378b69454,h1:+WCc1IigwWpWBxMFsmLUsIF230TakGHstDajd8aKDAc=,4c2ed9a2f7b24dfa64464091b2c01ce9fc947524bb834d77aeb9ceaf8610e5fc
+github.com/qor/qor,v0.0.0-20191022064424-b3deff729f68,h1:MSbP9P4HnmEyH+uGQAW+V0HoTzlZ9SRq7kdCaRiZEmU=,9053796b8a7afe21483262affaf5b35bac8bf3387e24531448a4833d7b758978
+github.com/qor/render,v1.1.1,h1:DaGaKlf0OzpOB+hJUEiOTbZ40mg+n+LlSJx20/KUfes=,8f957a13173ef1a22d0caeea1cc6d198b064d242676444e00e2f597c405928c9
+github.com/qor/responder,v0.0.0-20171031032654-b6def473574f,h1:sKELSAyL+z5BRHFe97Bx71z197cBEobVJ6rASKTMSqU=,b69784649ec65ec2580d7640af25ec66973d59d82ec5391498cfe4c3076e5f6f
+github.com/qor/roles,v0.0.0-20171127035124-d6375609fe3e,h1:F0BNcPJKfubM/+IIILu/GbrH9v2vPZWQ5/StSRKUfK4=,1a35a5480c7169e86025eb19dbcddc13fd00472e6b4ade7574e62c290cf09100
+github.com/qor/session,v0.0.0-20170907035918-8206b0adab70,h1:8l21EEdlZ9R0AA3FbeUAANc5NAx8Y3tn1VKbyAgjYlI=,7c759bc736c4936a602ca1f0ebad9a324d8332ffd342e1e3acd80355180fc858
+github.com/qor/validations,v0.0.0-20171228122639-f364bca61b46,h1:dRlsVUhwD1pwrasuVbNooGQITYjKzmXK5eYoEEvBGQI=,b29360c4a4e9cc8d0ff682d8bf1f446a5d61d5a4f8d3cf2fc6d8cc077e5d810f
+github.com/racker/perigee,v0.1.0,h1:8RjBm1YGJKVVjUfO02Uok+npegz8lSSEVqjimDqlFYc=,d43613102ed67445c9fc81b621959b58f827c187189b09cec236c3bac5ce1ccb
+github.com/raff/goble,v0.0.0-20190909174656-72afc67d6a99,h1:JtoVdxWJ3tgyqtnPq3r4hJ9aULcIDDnPXBWxZsdmqWU=,ef5dde1af55d451c37ddf13e17ae339d299903cb7e67567fc6d1e69688a789e1
+github.com/rai-project/config,v0.0.0-20190926180509-3bd01e698aad,h1:o0056EwcQBeyaVb2my+T0TvMR5FpEY0CGNgWkbj/xEo=,27c2311ad1fdc185e08f2e1703893482b7d26caf64854ed371bf38f3a9303f92
+github.com/rai-project/godotenv,v0.0.0-20180619160704-a501614c3b8d,h1:reVy+ViZcrx1ILo+L8wa3dGf6hSd4qlY62VqxZxEgWs=,f4d9ecb56f20667fbb09bd5256d0c6b81b9e8cbca8f6476240c5d1800ffb07ed
+github.com/rai-project/logger,v0.0.0-20190701163301-49978a80bf96,h1:GeXSVSRfXOBN5XHNA+wq5G+sfq99mhpwn6U5kcGwSeg=,53d7677e7d7dab6b1f83591ec10491301289752e337641403f8413c0749b84d8
+github.com/rai-project/utils,v0.0.0-20180619204045-c582bb171808,h1:cHOS6oMEt8wi93zm5V7cHVnWgOhaAUCpjRDEZHBsckg=,6d43ccc901ad2f19744696f6c3d04ee28b4496cef7fe72ce7eccb89af0d8bfac
+github.com/rai-project/vipertags,v0.0.0-20190404224953-d63b0a674aa9,h1:3o86f/tK0DBZdPcUBjzFu1mEZsRCzjSgi5PNHope4AQ=,9aa8cdd1a3369382d28bad0f4581250fbecae51602aa8566cbe68dfadc8f7785
+github.com/raintank/schema,v1.0.0,h1:tK0zKHceZd5nkCUI5Soip1pA2BAvoc4qzloVEsK0y+Q=,9ffc30e882b1cfed3152bab9c8c95e00c984dc0d8895426c95d96a184e09ffe3
+github.com/rainycape/memcache,v0.0.0-20150622160815-1031fa0ce2f2,h1:dq90+d51/hQRaHEqRAsQ1rE/pC1GUS4sc2rCbbFsAIY=,2d42bb018c6b0531f93e2dc862c87374966c64c9a88863612ab5e676a32661fa
+github.com/rainycape/unidecode,v0.0.0-20150907023854-cb7f23ec59be,h1:ta7tUOvsPHVHGom5hKW5VXNc2xZIkfCKP8iaqOyYtUQ=,0ab56010a3ef93c20bb6d8c486e3b447b4004052053e280ea6eabf2a5138bdce
+github.com/rakyll/statik,v0.1.6,h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs=,58cc0c07f8e9dd17ad5c4e0f89c03d8a3ed420aac0e76b79adf7ebd1d48c5893
+github.com/rcrowley/go-metrics,v0.0.0-20190826022208-cac0b30c2563,h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=,22e944d960aec1a1e62e8cc2daaa70abefbbe989dd9c233060ab533de5f6e724
+github.com/remyoudompheng/bigfft,v0.0.0-20190512091148-babf20351dd7,h1:FUL3b97ZY2EPqg2NbXKuMHs5pXJB9hjj1fDHnF2vl28=,73f78c7e36c32822221f9f676b65ebe7ccb92ab6ff221035ace35c184e165c0d
+github.com/renier/xmlrpc,v0.0.0-20170708154548-ce4a1a486c03,h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o=,f9c07652c6de1aecf5baaa3b93c1e6c23379458e30553400d5f96ac8b3ea85c4
+github.com/renstrom/fuzzysearch,v0.0.0-20160331204855-2d205ac6ec17,h1:4qPms2txLWMLXKzqlnYSulKRS4cS9aYgPtAEpUelQok=,01782a5d1682a72614126da402171253030c0de60485bc18a3e63b07d977c094
+github.com/retr0h/go-gilt,v0.0.0-20190206215556-f73826b37af2,h1:vZ42M1tDiMLtirFA1K5k2QVFhWRqR4BjdSw0IMclzH4=,e7956b01b3ccea41395f1f641a0f9045f214c1075d7ecc25553b72383009274e
+github.com/revel/config,v0.21.0,h1:Bw4iXLGAuD/Di2HEhPSOyDywrTlFIXUMbds91lXTtTU=,22842698f6c646b9b89649b432d0f24deae1c5a3779c49819ec99c5db6e4b5a0
+github.com/revel/log15,v2.11.20+incompatible,h1:JkA4tbwIo/UGEMumY50zndKq816RQW3LQ0wIpRc+32U=,28e4263b0320a07dd2ae71ba09aef1f9b4af44258a8c0f1dfb1d63300f93c401
+github.com/revel/pathtree,v0.0.0-20140121041023-41257a1839e9,h1:/d6kfjzjyx19ieWqMOXHSTLFuRxLOH15ZubtcAXExKw=,de658b8de908c9c090343447e66e6bbdfe99656fcfa5889997486b0594c2a719
+github.com/revel/revel,v0.21.0,h1:E6kDJmpJSDb0F8XwbyG5h4ayzpZ+8Wcw2IiPZW/2qSc=,c66570c338f37e95626646909af1086f0bf31d8432fe982d24c415d14bc1dc9c
+github.com/rivo/tview,v0.0.0-20191018125527-685bf6da76c2,h1:GVXSfgXOMAeLvFH7IrpY3yYM8H3YekZEFcZ14q9gQXM=,000538d9517bd5f28cfe377e63183f7093043acf8bb913eb493adb29518eb6b8
+github.com/rivo/uniseg,v0.1.0,h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY=,cb701df81f36acfbb2627a78662fdcaa150ee1ac00d0796a7f3eafbdb6218128
+github.com/rjeczalik/notify,v0.9.2,h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8=,e8b9b93870f7ed17f30c617acb55f5fa78e7931518c88999c3d1b5b048f51482
+github.com/rkt/rkt,v1.30.0,h1:ZI5RQtSibfjicSttV/HLiHuWreYClEJA2Or5XKAdJb0=,ca2e00335dbeae7e0fbe2c45535d2bb8fce72c2bb6045b0bdf25bc6b8b59179e
+github.com/robertkrimen/otto,v0.0.0-20180617131154-15f95af6e78d,h1:1VUlQbCfkoSGv7qP7Y+ro3ap1P1pPZxgdGVqiTVy5C4=,7adbe73b0db5319bae0421a0ed7fc5619002d6e9a2be87dc8c673c8541dfd949
+github.com/robfig/cron,v1.2.0,h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=,0811a1a5a4e1f45824ac520deb2002326a659dbb4918cdfea47d80560a23211d
+github.com/robfig/cron/v3,v3.0.0,h1:kQ6Cb7aHOHTSzNVNEhmp8EcWKLb4CbiMW9h9VyIhO4E=,5e29b4f7f4ba62293420b918fb2309823523a583c2adaf6eddb059f525f05496
+github.com/rogpeppe/fastuuid,v1.2.0,h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=,f9b8293f5e20270e26fb4214ca7afec864de92c73d03ff62b5ee29d1db4e72a1
+github.com/rogpeppe/go-charset,v0.0.0-20180617210344-2471d30d28b4,h1:BN/Nyn2nWMoqGRA7G7paDNDqTXE30mXGqzzybrfo05w=,a28b06534aa71873d08578d69b08512dab54caa0ffd9e2943b3479166049eddd
+github.com/rogpeppe/go-internal,v1.4.0,h1:LUa41nrWTQNGhzdsZ5lTnkwbNjj6rXTdazA1cSdjkOY=,fb7d843253301d3ea9793f90e6bea16a8f2970a01b361f490ee66b36f81e03a5
+github.com/rpcx-ecosystem/quic-conn,v0.0.0-20190920095804-3967ef162525,h1:Awv5A28rrxuHf1+9+N08cnBa6JuKbhHswmNdfj65Bzo=,b40886ad7129eff9e517187b527467330db3705207349cdaa8f35c0dc8445c08
+github.com/rs/cors,v1.7.0,h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=,67815316761fddc4acfaad852965cf04ec88674abe3a05c6c332519556c55855
+github.com/rs/xhandler,v0.0.0-20160618193221-ed27b6fd6521,h1:3hxavr+IHMsQBrYUPQM5v0CgENFktkkbg1sfpgM3h20=,665ae95533e1a046cf470c7341c59e64b3e2a795cdaaf307368f69a0ba547f2c
+github.com/rs/xid,v1.2.1,h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=,4abdedc4de69adcb9a4575f99c59d8ab542191e1800b6a91e12a4e9ea8da0026
+github.com/rs/zerolog,v1.16.0,h1:AaELmZdcJHT8m6oZ5py4213cdFK8XGXkB3dFdAQ+P7Q=,64e248c1fa3c62e2d904868b49acf906d0cb04a00a323d2562ea9ce7c6f154e1
+github.com/rubenv/sql-migrate,v0.0.0-20191025130928-9355dd04f4b3,h1:lwDYefgiwhjuAuVnMVUYknoF+Yg9CBUykYGvYoPCNnQ=,4d4e9e2c7387542b26a1cd9fbfcbdab7b75dce807877d5a0a501180b584c60f2
+github.com/rubyist/circuitbreaker,v2.2.1+incompatible,h1:KUKd/pV8Geg77+8LNDwdow6rVCAYOp8+kHUyFvL6Mhk=,fc1125d9260a471d349c94a251340c437f98743b42324706482596f303c28b11
+github.com/russross/blackfriday,v2.0.0+incompatible,h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=,836047aa9cbd223efba85b892e6897cf7a3b5ee3f2e6ad36b189d40842f703df
+github.com/russross/blackfriday/v2,v2.0.1,h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=,496079bbc8c4831cd0507213e059a925d2c22bd1ea9ada4dd85815d51b485228
+github.com/rwcarlsen/goexif,v0.0.0-20190401172101-9e8deecbddbd,h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=,98e8ce7bf484716bdf272f31ee01354599f4ec4b4ece7c04156c15b264d8f6ec
+github.com/ryanuber/columnize,v2.1.0+incompatible,h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=,ff687e133db2e470640e511c90cf474154941537a94cd97bb0cf7a28a7d00dc7
+github.com/ryanuber/go-glob,v1.0.0,h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=,2084f36ead38a505489fdb46329502fb627f568224dcc22ef11ec173b61fc2cf
+github.com/ryszard/goskiplist,v0.0.0-20150312221310-2dfbae5fcf46,h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=,12c65729fc31d5a9bf246eb387bd4c268d0d68bf33b913cccd81bebd47d6f80d
+github.com/sacloud/libsacloud,v1.26.1,h1:td3Kd7lvpSAxxHEVpnaZ9goHmmhi0D/RfP0Rqqf/kek=,4f0e24194ce3566707df5862177cb0f697debe3d5b799decb2685ee8d07dbe11
+github.com/saintfish/chardet,v0.0.0-20120816061221-3af4cd4741ca,h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI=,d9cb0e35c88fbf91a409db0626f2e8ae9db305cf95dc3469dc7d089a8432c9c3
+github.com/samuel/go-zookeeper,v0.0.0-20190923202752-2cc03de413da,h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU=,499f8144de8a6839b2d70c8869d88f294604188ec501e928ca17446043147d40
+github.com/sanity-io/litter,v1.1.0,h1:BllcKWa3VbZmOZbDCoszYLk7zCsKHz5Beossi8SUcTc=,c4bbddbf1bd7bb4ef74a3c2cac98f4a78a2a3a5a6b8dd140bd31a5d38c459217
+github.com/santhosh-tekuri/jsonschema,v1.2.4,h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis=,1c946415ee3395181090664a37779c296b540ca7eec58844ad0283fef11fec00
+github.com/sasha-s/go-deadlock,v0.2.0,h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y=,6c3f90c7947da1090f545438f4b3fd461cfeec79ee1c6e5e83a0eed7258622b1
+github.com/sassoftware/go-rpmutils,v0.0.0-20190420191620-a8f1baeba37b,h1:+gCnWOZV8Z/8jehJ2CdqB47Z3S+SREmQcuXkRFLNsiI=,88264dbd268c88bc8a57e4b4a261f22058fa6e03eb2883b0a82375f854e15188
+github.com/satori/go.uuid,v1.2.0,h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=,4f741306a0cbe97581e34a638531bcafe3c2848150539a2ec2ba12c5e3e6cbdd
+github.com/satori/uuid,v1.2.0,h1:6TFY4nxn5XwBx0gDfzbEMCNT6k4N/4FNIuN8RACZ0KI=,bfd4d3d619e3ad4dd915e05fec5bf10949d8af9bc5c19b840db35ec0f21172ad
+github.com/scaleway/scaleway-cli,v0.0.0-20180921094345-7b12c9699d70,h1:DaqC32ZwOuO4ctgg9qAdKnlQxwFPkKmCOEqwSNwYy7c=,05566d6711de08738803132b8522f7051fccd3b3bf2c739dde421fffdfa75eaf
+github.com/sclevine/agouti,v3.0.0+incompatible,h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGuOeFkFbUWfBgK4=,b20c8a6a2c1fda0ae6a9cd6d319e78a7a5afea4bc90810cd46b99246d8219d23
+github.com/sclevine/spec,v1.2.0,h1:1Jwdf9jSfDl9NVmt8ndHqbTZ7XCCPbh1jI3hkDBHVYA=,582017cd824cf3cdf6803ec7db2250304f66efea705feb69cbabab416928b8f4
+github.com/sean-/conswriter,v0.0.0-20180208195008-f5ae3917a627,h1:Tn2Iev07a4oOcAuFna8AJxDOF/M+6OkNbpEZLX30D6M=,0637d2fc0eb4627827e4b73dbe3a72479708641df8fc71a06e7bc481f6a7f39b
+github.com/sean-/pager,v0.0.0-20180208200047-666be9bf53b5,h1:D07EBYJLI26GmLRKNtrs47p8vs/5QqpUX3VcwsAPkEo=,a4288f9116ea01c34efd65b7dce4357ba6f9c02ad984ca758fea0d0aebb605c9
+github.com/sean-/seed,v0.0.0-20170313163322-e2103e2c3529,h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=,0bc8e6e0a07e554674b0bb92ef4eb7de1650056b50878eed8d5d631aec9b6362
+github.com/sebest/xff,v0.0.0-20150611211316-7a36e3a787b5,h1:MqIPVG2sHTgcQxFwZ+iHZSQ869PVP42SgEEeI1+X4Y8=,8cbe518a78ab7998550c509bd9fadc95a1aef8e86b1022cb3d265348ad370cde
+github.com/seccomp/libseccomp-golang,v0.9.1,h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=,5989692d87ef4c377fbc60d441795a90d9453b9e357d019e44d9033ab39ca888
+github.com/segmentio/go-loggly,v0.5.1-0.20171222203950-eb91657e62b2,h1:S4OC0+OBKz6mJnzuHioeEat74PuQ4Sgvbf8eus695sc=,5e071d0b6923a0fa78895bf7e673f5a4e482d39d4603b7dabd4056a506923ca7
+github.com/segmentio/go-prompt,v1.2.1-0.20161017233205-f0d19b6901ad,h1:EqOdoSJGI7CsBQczPcIgmpm3hJE7X8Hj3jrgI002whs=,b86fcda4b8afd5a3893ea333431368e60ea5ebee302a3014aee6d2020233bf31
+github.com/segmentio/kafka-go,v0.1.0,h1:IXCHG+sXPNiIR5pC/vTEItZduPKu4cnpr85YgxpxlW0=,e0b749b974d3277438d09dd6178928c3ad6c3760313f7ad45ec5cd88d8eb14b9
+github.com/serenize/snaker,v0.0.0-20171204205717-a683aaf2d516,h1:ofR1ZdrNSkiWcMsRrubK9tb2/SlZVWttAfqUjJi6QYc=,67272dde9cf92af80704869dea59346be1c37098373200dd8eea6e0e034079b4
+github.com/sergi/go-diff,v1.0.0,h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=,287218ffcd136dbb28ce99a2f162048d8dfa6f97b524c17797964aacde2f8f52
+github.com/serialx/hashring,v0.0.0-20180504054112-49a4782e9908,h1:RRpyb4kheanCQVyYfOhkZoD/cwClvn12RzHex2ZmHxw=,4184e14faf8e39222109eb2b7fa3aee2e0a544b66785ad0b7058318483ff76bb
+github.com/sethgrid/pester,v0.0.0-20190127155807-68a33a018ad0,h1:X9XMOYjxEfAYSy3xK1DzO5dMkkWhs9E9UCcS1IERx2k=,ddcaf31e63aaf1ac003af97e667bedaa0fc89956e19aeb032c5658629da29800
+github.com/shiena/ansicolor,v0.0.0-20151119151921-a422bbe96644,h1:X+yvsM2yrEktyI+b2qND5gpH8YhURn0k8OCaeRnkINo=,60da6dc53662eb72063784f3bf609edb7aa317c552f81651164bc657754902a6
+github.com/shirou/gopsutil,v2.19.10+incompatible,h1:lA4Pi29JEVIQIgATSeftHSY0rMGI9CLrl2ZvDLiahto=,e5afa6f0b690ecc3ff12458663c6337920a759f27c3d9692a0836644337e4e85
+github.com/shirou/w32,v0.0.0-20160930032740-bb4de0191aa4,h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U=,3ed6741a7e1470feffb50031ecf9919f30b5f573f993683b6574488756ef65c1
+github.com/shopspring/decimal,v0.0.0-20191009025716-f1972eb1d1f5,h1:Gojs/hac/DoYEM7WEICT45+hNWczIeuL5D21e5/HPAw=,91a0ee539fb6f3de1550cdf93c73434fc8a16bab37be693997b20317510331a9
+github.com/shurcooL/component,v0.0.0-20170202220835-f88ec8f54cc4,h1:Fth6mevc5rX7glNLpbAMJnqKlfIkcTjZCSHEeqvKbcI=,2dd1cfac518def9fc8c6ac69022a85b0413269caf93d9532f77dca7375e1d645
+github.com/shurcooL/events,v0.0.0-20181021180414-410e4ca65f48,h1:vabduItPAIz9px5iryD5peyx7O3Ya8TBThapgXim98o=,1dcade8d00ba3945f5d1bc56c09a84e2d51fa20d20ef4fa6f867e5e4cd918e9d
+github.com/shurcooL/github_flavored_markdown,v0.0.0-20181002035957-2122de532470,h1:qb9IthCFBmROJ6YBS31BEMeSYjOscSiG+EO+JVNTz64=,d984dc45e823f4c99e89841d675e34d2d35d3b334f1b3690fde05de30a66929f
+github.com/shurcooL/githubv4,v0.0.0-20191006152017-6d1ea27df521,h1:ARaYJO1zp2afVv0s28fq7uxgee4WLop35FWrOoSZyak=,7f5c88b38760c5090bffe582a40abe7dc17a789f9041549e5c17e3d71df2d75d
+github.com/shurcooL/go,v0.0.0-20180423040247-9e1955d9fb6e,h1:MZM7FHLqUHYI0Y/mQAt3d2aYa0SiNms/hFqC9qJYolM=,350e4c547dbeb657bb3b2eab428f1c29a80808e8096ff87324fd84744f914766
+github.com/shurcooL/go-goon,v0.0.0-20170922171312-37c2f522c041,h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc=,31cb3f736521597c56f962b9d7d21073620fbb1da845305aba743960f09e4115
+github.com/shurcooL/gofontwoff,v0.0.0-20180329035133-29b52fc0a18d,h1:Yoy/IzG4lULT6qZg62sVC+qyBL8DQkmD2zv6i7OImrc=,685dedb79602bb41403a7b5198f5c9d0ffbc99a68d7f99160ecf08a71475e5f4
+github.com/shurcooL/gopherjslib,v0.0.0-20160914041154-feb6d3990c2c,h1:UOk+nlt1BJtTcH15CT7iNO7YVWTfTv/DNwEAQHLIaDQ=,ea6c396c92724a8028793bde957dbe9a1c594b8af085035e652d4335e6aa30e1
+github.com/shurcooL/graphql,v0.0.0-20181231061246-d48a9a75455f,h1:tygelZueB1EtXkPI6mQ4o9DQ0+FKW41hTbunoXZCTqk=,eb1b45dc90aed0edcfc4cacffdc2645121dda8155702440eada1bcafefddcbba
+github.com/shurcooL/highlight_diff,v0.0.0-20170515013008-09bb4053de1b,h1:vYEG87HxbU6dXj5npkeulCS96Dtz5xg3jcfCgpcvbIw=,b4bcb7f3e50a99623d5f39c4e054964fc60d5e4b34543408582a0a984a67b630
+github.com/shurcooL/highlight_go,v0.0.0-20181028180052-98c3abbbae20,h1:7pDq9pAMCQgRohFmd25X8hIH8VxmT3TaDm+r9LHxgBk=,9f879b051c8eadb6dc063ca3ff6856d0e64cd30b5ad545e580b77b4f8ef9ddd7
+github.com/shurcooL/home,v0.0.0-20181020052607-80b7ffcb30f9,h1:MPblCbqA5+z6XARjScMfz1TqtJC7TuTRj0U9VqIBs6k=,0042d859afa3221fd4b4049b350a2d6ffcc674e4c4177bb0c232dc120b410ee6
+github.com/shurcooL/htmlg,v0.0.0-20190503024804-b6326af49ef6,h1:kXXs9Xnfv5gU7KLKiOE3AQgaRUUXchcXnO2rP3fZ5Ao=,52485f17bba8920b37a70124b90eea9d43037a9764a785c97a7e531ca09ed5a5
+github.com/shurcooL/httperror,v0.0.0-20190506043526-2e76094aa70e,h1:QTph/PpT1aDtFHk0sVJoVG/Vfox0YZkq70sW/tvXJM0=,7807129d1577611bdf803b7a4dd3253f45e4b63a77c1a73bed48a0c838c463c6
+github.com/shurcooL/httpfs,v0.0.0-20190707220628-8d4bc4ba7749,h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=,a2079dbd8c236262ecbb22312467265fbbddd9b5ee789531c5f7f24fbdda174b
+github.com/shurcooL/httpgzip,v0.0.0-20190720172056-320755c1c1b0,h1:mj/nMDAwTBiaCqMEs4cYCqF7pO6Np7vhy1D1wcQGz+E=,70ef73fce2f89d622f828cb439fd6c7b48a7fe63600410a8c0a936042c0e4631
+github.com/shurcooL/issues,v0.0.0-20190705005435-6a96395fbb66,h1:kls/E9JqtKEj8tWx2PwKCWqEWmwzsX7cnj9QkaEhUpM=,dd1ace2ad69b6c130a9294c3eb4032090e73c3b7dace098a5a7e1ad154f8e911
+github.com/shurcooL/issuesapp,v0.0.0-20180602232740-048589ce2241,h1:Y+TeIabU8sJD10Qwd/zMty2/LEaT9GNDaA6nyZf+jgo=,ac947684d3f13beef9433724deddc2c7ddb6d19921d6902f4789dd4ce1af5f3c
+github.com/shurcooL/notifications,v0.0.0-20181111060504-bcc2b3082a7a,h1:bQX0+HfDylIQCtf1tzyrxQ+BqIV08ZjkjgspFWiIYhc=,c1c77700f490d0211cec00fd5fd0ee80debf66e0e41de1dc68b24dc726db5409
+github.com/shurcooL/octicon,v0.0.0-20190930024621-43309dfb482e,h1:C2+alklsN4yRHXaOX3v9TuCGlTSwZQjSnN88nLGVhg8=,88953a9951a14e24afd2d1040e9de0b4fbe194805fdc7ec9d9d9bbcd8c2f3448
+github.com/shurcooL/reactions,v0.0.0-20181222204718-145cd5e7f3d1,h1:hHIhW4KrmPQ/hJ7AuKNNvVPVE2k/LVE5NTFsQ68taBw=,fd5f9a0c6e7e292bdfa81fcad767f61c95dc84f18bf4f9f02a4fe02f75327d37
+github.com/shurcooL/sanitized_anchor_name,v1.0.0,h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=,0af034323e0627a9e94367f87aa50ce29e5b165d54c8da2926cbaffd5834f757
+github.com/shurcooL/users,v0.0.0-20180125191416-49c67e49c537,h1:YGaxtkYjb8mnTvtufv2LKLwCQu2/C7qFB7UtrOlTWOY=,3f17089e996438a88a478d38807ce4f3c045a91114830946a1bdc760eb2b7c58
+github.com/shurcooL/vfsgen,v0.0.0-20181202132449-6a9ea43bcacd,h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=,8a093681b21159514a1742b1a49e88fa2cf562673a5a0055e9abeb7ff590ee19
+github.com/shurcooL/webdavfs,v0.0.0-20170829043945-18c3829fa133,h1:JtcyT0rk/9PKOdnKQzuDR+FSjh7SGtJwpgVpfZBRKlQ=,bb70104152800cbb490c480bead0d2ef24176be9e1304e6701ab161115484863
+github.com/siddontang/go,v0.0.0-20180604090527-bdc77568d726,h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=,ef97fabc8a96a758fac273b01dff6be7957ed44c4b6c6a8316f43741329a0049
+github.com/siddontang/go-snappy,v0.0.0-20140704025258-d8f7bb82a96d,h1:qQWKKOvHN7Q9c6GdmUteCef2F9ubxMpxY1IKwpIKz68=,faf83d6459d06f5f4a9acd09e23e284e11792d14de331bd7b87852b18f9cf5c3
+github.com/siddontang/ledisdb,v0.0.0-20190202134119-8ceb77e66a92,h1:qvsJwGToa8rxb42cDRhkbKeX2H5N8BH+s2aUikGt8mI=,dab81c0bdfc62063a340f61dfab19c065d2d10b1245cd56cc04832130a6bbea5
+github.com/siddontang/rdb,v0.0.0-20150307021120-fc89ed2e418d,h1:NVwnfyR3rENtlz62bcrkXME3INVUa4lcdGt+opvxExs=,93bf89960d84b8732e648cb413dced692c1d3d9000997e99826538a5f20b1d82
+github.com/sigurn/crc8,v0.0.0-20160107002456-e55481d6f45c,h1:hk0Jigjfq59yDMgd6bzi22Das5tyxU0CtOkh7a9io84=,12916a0da94e747b99653138a25112e24b082db53bc0d5cffe62214ce3fb884d
+github.com/sigurn/utils,v0.0.0-20190728110027-e1fefb11a144,h1:ccb8W1+mYuZvlpn/mJUMAbsFHTMCpcJBS78AsBQxNcY=,694bb4cbe9dd17447c1e0054ef327eebd9bed8682aa39f5f4d282fb9b1717299
+github.com/sirupsen/logrus,v1.4.2,h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=,9a8e55830261a4b1c9350d7c45db029c8586c0b2d934d1224cde469425031edd
+github.com/skratchdot/open-golang,v0.0.0-20190402232053-79abb63cd66e,h1:VAzdS5Nw68fbf5RZ8RDVlUvPXNU6Z3jtPCK/qvm4FoQ=,242db3338b172ecb58bdf3406b4cafecfa738cfb7b8cd71698d23831aedd94b0
+github.com/skyrings/skyring-common,v0.0.0-20160929130248-d1c0bb1cbd5e,h1:jrZSSgPUDtBeJbGXqgGUeupQH8I+ZvGXfhpIahye2Bc=,d5010d4900d7417c05d4863399e5509e82dfaca9c09c31ac9e5ebdcaf109e833
+github.com/smallnest/libkv-etcdv3-store,v0.0.0-20191101045330-f92940446965,h1:YQtdLz+7JQdKn7f5cG+xSrSbI7X4jObx0Jy6ZzffGew=,b9fb22d7d67e16cd3a1d7c7a5b2faf6c35c690ae1c3bcf70dbf77813db7dc563
+github.com/smallnest/rpcx,v0.0.0-20191101045608-2a801682117a,h1:Fzp1HLqyYg8koEELgwfSEUgkE6QPvrN9qCkHZ8tikFY=,0d2255c9ffc429e32936dbb9e51c79bbf2b76a7dec95c5d9dc1668053d5642bc
+github.com/smallnest/valkeyrie,v0.0.0-20191030064635-54a884e4b303,h1:NDOAHb1sE8pYWd0Dge8W6bGQ63FHfa0/QjClXG2hrgw=,b846d492aaf7053115b2e143b7c7696299b852ec670d261bd78b5cd996eacde3
+github.com/smartystreets/assertions,v1.0.1,h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=,2e3d9f61f68cdf7b48653582640ef88744c1a3bdd4257ac68f621579a2f807dd
+github.com/smartystreets/go-aws-auth,v0.0.0-20180515143844-0c1422d1fdb9,h1:hp2CYQUINdZMHdvTdXtPOY2ainKl4IoMcpAXEf2xj3Q=,d9441cfbef2c680269ced67f8e1d99af9cf649e11a7f133a5b0685be0277ca7d
+github.com/smartystreets/goconvey,v0.0.0-20190731233626-505e41936337,h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=,fd90be078397b45806e4dfaca367235aef6d6133871c8a6cc6d3d579280d8d03
+github.com/smartystreets/gunit,v1.0.0,h1:RyPDUFcJbvtXlhJPk7v+wnxZRY2EUokhEYl2EJOPToI=,36cf43529cfadeb297ce1537c7d0fca8373a95936806121ce7ce0bf653e959ee
+github.com/smola/gocompat,v0.2.0,h1:6b1oIMlUXIpz//VKEDzPVBK8KG7beVwmHIUEBIs/Pns=,7812934f407beeab20aa289b0056234ae6637b30b301ebf97a5d7a9fd8e665fc
+github.com/snikch/goodman,v0.0.0-20171125024755-10e37e294daa,h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY=,ab939c56cb7afcff213aef4568f40c9ddeae30166e34a2fa7f5718a47227c2e1
+github.com/softlayer/softlayer-go,v0.0.0-20180806151055-260589d94c7d,h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic=,63ad57bc2d4c27db3dcab7cf545a075bb4d7ea66aba57c284c07a2c938220f8c
+github.com/soheilhy/cmux,v0.1.4,h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=,6d6cadade0e186f84b5f8e7ddf8f4256601b21e49b0ca49fd003a7e570ae1885
+github.com/songtianyi/rrframework,v0.0.0-20180901111106-4caefe307b3f,h1:o3QHyJEW1U+8oyEZeaXFcYqdhhiZjrs25/8AZmsWjiU=,b1cf04474a48de1ed7ae535ae4a2d5b17a0df4ce0d3b953c5268f42ee34cb17d
+github.com/soniakeys/unit,v1.0.0,h1:UMIgu6dxDQaK6tYaQV6dJn5oovB6035KRxCS0O7Jiec=,565c64fe777e1140d82422e9b8d29ce8de82d7916e50dac2f7591d2c6f2d79e7
+github.com/sony/gobreaker,v0.4.1,h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ=,eab9bf8f98b16b051d7d13c4f5c70d6d1039347e380e0a12cb9ff6e33200d784
+github.com/sourcegraph/annotate,v0.0.0-20160123013949-f4cad6c6324d,h1:yKm7XZV6j9Ev6lojP2XaIshpT4ymkqhMeSghO5Ps00E=,2a58cbf2485b2e97e49d7c3e83e81385d1418bfbab2b846dabec041a3d402b3e
+github.com/sourcegraph/syntaxhighlight,v0.0.0-20170531221838-bd320f5d308e,h1:qpG93cPwA5f7s/ZPBJnGOYQNK/vKsaDaseuKT5Asee8=,c0e6323ed7a5dcddcdd7686f2d7c68dff44a8ecbfd6818db3bdb33a7af422792
+github.com/spacemonkeygo/errors,v0.0.0-20171212215202-9064522e9fd1,h1:xHQewZjohU9/wUsyC99navCjQDNHtTgUOM/J1jAbzfw=,b360a46f9534dd46d2b2c27c84ba8bbe3942832e74aa4ceb16acaa6ba30620be
+github.com/spacemonkeygo/monotime,v0.0.0-20180824235756-e3f48a95f98a,h1:8+cCjxhToanKmxLIbuyBNe2EnpgwhiivsIaRJstDRFA=,4a55e556811ab93b23b46907b354e53fc553eb93314cf0b524933f37ac1437f8
+github.com/spacemonkeygo/openssl,v0.0.0-20181017203307-c2dcc5cca94a,h1:/eS3yfGjQKG+9kayBkj0ip1BGhq6zJ3eaVksphxAaek=,23031c8d37bbaa5aace338eed65af68c7d72bf134d7d0e09c963ed4974c56e58
+github.com/spacemonkeygo/spacelog,v0.0.0-20180420211403-2296661a0572,h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=,91eb98e80c44d42e6f3ff7ddf84f825d20eb55669452d752fb8ed3adeb723be7
+github.com/spaolacci/murmur3,v1.1.0,h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=,60bd43ada88cc70823b31fd678a8b906d48631b47145300544d45219ee6a17bc
+github.com/spf13/afero,v1.2.2,h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=,81d51799397212c9adb2cea6cf3a96a2b50f1baff8aff7bd410128a84f2a9e73
+github.com/spf13/cast,v1.3.0,h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=,001ed519a3ec007e76e639f72bd9560be70497d499acbf1a32ccf32dc4647d91
+github.com/spf13/cobra,v0.0.5,h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=,6c6739f11d69fa1e5b60ba1e04529f355f8a30e1aa2b137ba26260de8fa7a647
+github.com/spf13/fsync,v0.9.0,h1:f9CEt3DOB2mnHxZaftmEOFWjABEvKM/xpf3cUwJrGOY=,d470c73c6e821d6c8f47ce05be3360f4d686d9079dd5af1585420c73e4725c56
+github.com/spf13/jwalterweatherman,v1.1.0,h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=,43cc5f056caf66dc8225dca36637bfc18509521b103a69ca76fbc2b6519194a3
+github.com/spf13/pflag,v1.0.5,h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=,fc6e704f2f6a84ddcdce6de0404e5340fa20c8676181bf5d381b17888107ba84
+github.com/spf13/viper,v1.5.0,h1:GpsTwfsQ27oS/Aha/6d1oD7tpKIqWnOA6tgOX9HHkt4=,7f3513d0a1186b765937c788f0ac751076067b7a0abc82420171b6f262787ac5
+github.com/src-d/envconfig,v1.0.0,h1:/AJi6DtjFhZKNx3OB2qMsq7y4yT5//AeSZIe7rk+PX8=,c694b1440b6969dfd4ebcba669faea8a05bdc7791ac78dcfbe29f153b0a8f0cd
+github.com/src-d/gcfg,v1.4.0,h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=,2aa52404cbeec89c0a976d333448d1a4a6e113f03e000a715ce9006c84eb2e32
+github.com/srwiley/oksvg,v0.0.0-20190829233741-58e08c8fe40e,h1:LJUrNHytcMXWKxnULIHPe5SCb1jDpO9o672VB1x2EuQ=,e29e85accb2169d2f0f4dc90c22c446c24d244d68e0bbe038ba9df63381916c5
+github.com/srwiley/rasterx,v0.0.0-20181219215540-696f7edb7a7e,h1:FFotfUvew9Eg02LYRl8YybAnm0HCwjjfY5JlOI1oB00=,8a4b0686258a3e1b4f8b3e5f25efbaaefe7919d4e47e89eb36a6779504f8b116
+github.com/ssdb/gossdb,v0.0.0-20180723034631-88f6b59b84ec,h1:q6XVwXmKvCRHRqesF3cSv6lNqqHi0QWOvgDlSohg8UA=,2c20531d93416fa34ee9039308166c869c72c16fff715c73c05a3977157fdc2d
+github.com/ssor/bom,v0.0.0-20170718123548-6386211fdfcf,h1:pvbZ0lM0XWPBqUKqFU8cmavspvIl9nulOYwdy6IFRRo=,7622ce25bbc5d5376ccb113f267f3d68bf2363963b02d04c053dfbc252f62c4a
+github.com/steakknife/bloomfilter,v0.0.0-20180922174646-6819c0d2a570,h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=,fb001f6df1197d462e7dfdbeded863aebd85bb904da5075117174a027a1b8cb1
+github.com/steakknife/hamming,v0.0.0-20180906055917-c99c65617cd3,h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=,e42bd1bc7073772613c2b4879110dd5330fded46a8cdf9269ff03cb6a82d1108
+github.com/stellar/go,v0.0.0-20191031165136-ed88b67b723d,h1:0pucQZ9fngYUl/tIGO/H96N3F5NL5ySjM3fuz+XEFSY=,d9d23bd5fc8cae6e65d4bb0d87e3cb582bc684eac1a519ca787b187a175999a5
+github.com/stellar/go-xdr,v0.0.0-20180917104419-0bc96f33a18e,h1:n/hfey8pO+RYMoGXyvyzuw5pdO8IFDoyAL/g5OiCesY=,5122e57a861bd0c38a3a3607f13576a150face8cacf9cafaf24e21e38a104b87
+github.com/stellar/throttled,v2.2.3-0.20190823235211-89d75816f59d+incompatible,h1:jMXXAcz6xTarGDQ4VtVbtERogcmDQw4RaE85Cr9CgoQ=,a89e929d8d8ba24e621c479708378263714861d8fce137085108da9f0cc8805a
+github.com/steveyen/gtreap,v0.0.0-20150807155958-0abe01ef9be2,h1:JNEGSiWg6D3lcBCMCBqN3ELniXujt+0QNHLhNnO0w3s=,64b6a1f094784f1a843a6787bd159a103b9bebd2e85cc09a7e8445cc9e3ffc03
+github.com/streadway/amqp,v0.0.0-20190827072141-edfb9018d271,h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw=,66bd109504bf565a4a777c20a8cf6a1c5d05cd87b59baa50da8b6f2b0da4c494
+github.com/stretchr/objx,v0.2.0,h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=,5517d43cfb7e628b9c2c64010b934e346cd24726e3d6eaf02b7f86e10752e968
+github.com/stretchr/testify,v1.4.0,h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=,0400c42ab95389bb4f4577bc09917a040a97f0f4251db2a54a7f6f5e65065b73
+github.com/stripe/stripe-go,v66.1.1+incompatible,h1:D8qUD1rxv+RdXi2qo+IdDELkDevxYUQDfje20bGQPiw=,471de64dbc99da2b83fc1822ff9b4627b1b0738a8e3ee9ffb038510ce84e4baf
+github.com/struCoder/pidusage,v0.1.2,h1:fFPTThlcWFQyizv3xKs5Lyq1lpG5lZ36arEGNhWz2Vs=,6ae03cd6cab9014ca7c0326fc233b27d942556c9753d2da87a93dd0fecbb9986
+github.com/stumble/gorocksdb,v0.0.3,h1:9UU+QA1pqFYJuf9+5p7z1IqdE5k0mma4UAeu2wmX8kA=,8bf18874189196133dabeb8fb7444633a0961e8983f8b2d8588d522d6aa679de
+github.com/subosito/gotenv,v1.2.0,h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=,21474df92536f36de6f91dfbf466995289445cc4e5a5900d9c40ae8776b8b0cf
+github.com/svanharmelen/jsonapi,v0.0.0-20180618144545-0c0828c3f16d,h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI=,482b13f426a15f3cb64ae5cb1a5fd2f27ca142465a174e24a2cc356812a3ed28
+github.com/swaggo/files,v0.0.0-20190704085106-630677cd5c14,h1:PyYN9JH5jY9j6av01SpfRMb+1DWg/i3MbGOKPxJ2wjM=,e1fe1ffca3a181bede3787e75797345bc69a583a67d8bb10b934f7a140516162
+github.com/swaggo/gin-swagger,v1.2.0,h1:YskZXEiv51fjOMTsXrOetAjrMDfFaXD79PEoQBOe2W0=,7ba6476ca79affa95429821a187b7cb3458305737ac2d1b86340814c3f276f71
+github.com/swaggo/swag,v1.6.3,h1:N+uVPGP4H2hXoss2pt5dctoSUPKKRInr6qcTMOm0usI=,1adbe98538a3f1b5e64fdf08f86cea4502a2c0d0cf1b047a27af6acf764f8c17
+github.com/syndtr/gocapability,v0.0.0-20180916011248-d98352740cb2,h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=,ece41bcca6ca06202649ccee0d2ab62667217ceb70f3a84794c3751c16b75cee
+github.com/syndtr/goleveldb,v1.0.1-0.20190318030020-c3a204f8e965,h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw=,b0dbd1bdec73ea70eb1db85322046d202bcbfe901bc821d6a50ffc182c276306
+github.com/tarm/serial,v0.0.0-20180830185346-98f6abe2eb07,h1:UyzmZLoiDWMRywV4DUYb9Fbt8uiOSooupjTq10vpvnU=,cd962b3b9ef46158abad455c95ed92f2632cdb9217df2b7690171cc3db507add
+github.com/tatsushid/go-fastping,v0.0.0-20160109021039-d7bb493dee3e,h1:nt2877sKfojlHCTOBXbpWjBkuWKritFaGIfgQwbQUls=,1c25333d4ca05ca13828835e07876c0efdd90a1c32a715527aa722b3c63c2d48
+github.com/tchap/go-patricia,v2.3.0+incompatible,h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=,19db63cf16ba944ea853c18397e4336342f1e95e4b2cb12127405bb64c67cf73
+github.com/tdewolff/minify,v2.3.6+incompatible,h1:2hw5/9ZvxhWLvBUnHE06gElGYz+Jv9R4Eys0XUzItYo=,8cabb8163bd65e43b42c5842b700d55e2daeae60c82b019007aceb1ae63638d5
+github.com/tdewolff/minify/v2,v2.5.2,h1:If/q1brvT+91oWiWnIMEGuFcwWtpB6AtLTxba78tvMs=,0af37ec252d094917a1ff4178659fe9f4539fdc3dca108bbeb9c0c2f86499eb9
+github.com/tdewolff/parse,v2.3.4+incompatible,h1:x05/cnGwIMf4ceLuDMBOdQ1qGniMoxpP46ghf0Qzh38=,f290dda8150ebdc2b9586f509770a6c82093ac9027329aeb9f3004a0b26de8e9
+github.com/tdewolff/parse/v2,v2.3.9,h1:d8/K6XOLy5JVpLTG9Kx+SxA72rlm5OowFmVSVgtOlmM=,5f517cbecd071b97ed822e8f88f96ba7d8b5a8accc49fc515298210ac088e7ef
+github.com/tdewolff/test,v1.0.4,h1:ih38SXuQJ32Hng5EtSW32xqEsVeMnPp6nNNRPhBBDE8=,807205136d8f39bb7533d10b72932a183f15b45c385cd5464ae9d06e4af43337
+github.com/tealeg/xlsx,v1.0.5,h1:+f8oFmvY8Gw1iUXzPk+kz+4GpbDZPK1FhPiQRd+ypgE=,ff32f4336aed03df7c9cb7a4df9f1f42a1c64fe5d17c34566159511943d24bde
+github.com/tecbot/gorocksdb,v0.0.0-20181010114359-8752a9433481,h1:HOxvxvnntLiPn123Fk+twfUhCQdMDaqmb0cclArW0T0=,26c0e94162340c7b4d1da3ee4c71ca03f9d6638711cf440d6835e1a8f07e4fb4
+github.com/technoweenie/multipartstreamer,v1.0.1,h1:XRztA5MXiR1TIRHxH2uNxXxaIkKQDeX7m2XsSOlQEnM=,5a9aff85522275b125767b746869d24f4e2f776d5031631bf6e29641d99344dc
+github.com/tedsuo/ifrit,v0.0.0-20191009134036-9a97d0632f00,h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg=,1c502a5584dfbce25ff99c1a5689e2d106a138989e4a03249221ca4818674098
+github.com/tedsuo/rata,v1.0.0,h1:Sf9aZrYy6ElSTncjnGkyC2yuVvz5YJetBIUKJ4CmeKE=,f6745fd8ef8ee098410b31b1219def2c4e86c337ba6ff1319f086419b928f134
+github.com/temoto/robotstxt,v1.1.1,h1:Gh8RCs8ouX3hRSxxK7B1mO5RFByQ4CmJZDwgom++JaA=,c37f16f826a27512b7ae683ed32be5124a0252d1c7a8c4a00fd4e27d01c563d4
+github.com/templexxx/cpufeat,v0.0.0-20180724012125-cef66df7f161,h1:89CEmDvlq/F7SJEOqkIdNDGJXrQIhuIx9D2DBXjavSU=,c29bd644943d69b238da1936593421373d2db675a0fce54090d1c8b7eab7397b
+github.com/templexxx/xor,v0.0.0-20181023030647-4e92f724b73b,h1:mnG1fcsIB1d/3vbkBak2MM0u+vhGhlQwpeimUi7QncM=,578ab42785a74d1a5dd3e65bf0979138b3a98bf877de4767b8eae5701a2342e1
+github.com/tencentcloud/tencentcloud-sdk-go,v3.0.71+incompatible,h1:9sIWfe6ZC7xoSlshYWNGicPqomK7N+CsHMa1YFWBCWU=,33a9526ee0244844270e532358a22616d821cc7f8f0638e33c60f722f84c5e42
+github.com/tendermint/btcd,v0.1.1,h1:0VcxPfflS2zZ3RiOAHkBiFUcPvbtRj5O7zHmcJWHV7s=,1967aa3cbabfb9e9780c0371a5359cc21ed77e5673b64e7dd5b234e838c82e62
+github.com/tendermint/crypto,v0.0.0-20180820045704-3764759f34a5,h1:u8i49c+BxloX3XQ55cvzFNXplizZP/q00i+IlttUjAU=,49ad334d452402d59757d3a415602f57bd7b66962d6115262f5c7413112d61bb
+github.com/tendermint/ed25519,v0.0.0-20171027050219-d8387025d2b9,h1:zccWau0P8FELSb4HTDJ88hRo+WVNMbIbg27rFqDrhCE=,7c4a6e57c787df7c6e990c35bb31df3f4a5aa89f45c3b3df4a25dfb70c01f7e3
+github.com/tendermint/go-amino,v0.15.1,h1:D2uk35eT4iTsvJd9jWIetzthE5C0/k2QmMFkCN+4JgQ=,e91cde0d10d5a8ea6ab726fbab02ef737ba52f47e207cf675440f625153d3205
+github.com/tendermint/iavl,v0.12.2,h1:Ls5p5VINCM1HRT9g5Vvs2zmDOCU/CCIvIHzd/pZ8P0E=,a56011434929c4003fd735cbef8147e8aca3d241983c5fa7a006f5753e123020
+github.com/tendermint/tendermint,v0.32.7,h1:Szu5Fm1L3pvn3t4uQxPAcP+7ndZEQKgLie/yokM56rU=,495a31dc762d79a689ce00cdd52f66b6b4071fc2b738ce4b3d1c2a9447389ecc
+github.com/tendermint/tm-db,v0.2.0,h1:rJxgdqn6fIiVJZy4zLpY1qVlyD0TU6vhkT4kEf71TQQ=,99b7c1a00ee483b97e73126a25327b75da9a5bc6e34bf9fb1ecd6b83832fe13e
+github.com/tent/http-link-go,v0.0.0-20130702225549-ac974c61c2f9,h1:/Bsw4C+DEdqPjt8vAqaC9LAqpAQnaCQQqmolqq3S1T4=,a4fe19fdbf8fbc30fe866e2cbb8761ee179f4a83bda63a0a6d30a651f3700ec2
+github.com/terraform-providers/terraform-provider-openstack,v1.15.0,h1:adpjqej+F8BAX9dHmuPF47sUIkgifeqBu6p7iCsyj0Y=,9c7419845747d0c4e3a9432f50788d8adec7ed6fca93ec9ffbf99e8c8b1cf0c3
+github.com/testcontainers/testcontainers-go,v0.0.8,h1:71E+jJpE9dSgydCfn5aWESVM7+l8giw/DBWaTy35TTU=,bceec8989a3beb9f14802c13c496c9158509f6b4cee6f855c0fb06b01e7da150
+github.com/tevino/abool,v0.0.0-20170917061928-9b9efcf221b5,h1:hNna6Fi0eP1f2sMBe/rJicDmaHmoXGe1Ta84FPYHLuE=,924168edd97fe37d4af80990d69c1d11d06b8e9236ebae65b9b68ba0261baaf1
+github.com/tgulacsi/picago,v0.0.0-20171229130838-9e1ac2306c70,h1:elvpffAnrLcWnsunBkvTwxr+Q79bPSNT1+2/pOFkCj0=,68e0cb434718215eae670723ce9327ec16d462a6403007ca22b6af71346445c5
+github.com/thanos-io/thanos,v0.3.2,h1:gNWga6sqv5kZp6ltaA7oUIFj+tTG2ohq4W9SQ4YU6ds=,99491658e5ed421ba1563818dd7c01034803fa1e0c4e5d7c28b06f3d3ed2a570
+github.com/theplant/cldr,v0.0.0-20190423050709-9f76f7ce4ee8,h1:di0cR5qqo2DllBMwmP75kZpUX6dAXhsn1O2dshQfMaA=,214ea2cc1e66f278928d0b5b1b40a3e12358b7a71e0fa6d6ea606c4d687e8eef
+github.com/theupdateframework/notary,v0.6.1,h1:7wshjstgS9x9F5LuB1L5mBI2xNMObWqjz+cjWoom6l0=,f921dfb3d54538118367d9018d9abacc3c0c026951442140d669443977180b66
+github.com/thoj/go-ircevent,v0.0.0-20180816043103-14f3614f28c3,h1:389FrrKIAlxqQMTscCQ7VH3JAVuxb/pe53v2LBiA7z8=,32edd7a9e219bdff36d2aac0c6c5f3ac982c2daf4869e6e0718e917efb23b3de
+github.com/tiancaiamao/appdash,v0.0.0-20181126055449-889f96f722a2,h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ=,a9961e6079339aec983f97fdb39d5d7258bf8d2031da68482e58e17b27a93a78
+github.com/tidwall/gjson,v1.3.3,h1:wM/XREVc9c0LbRLcNMgVcGpI16r0pbbTJpltR4jJjh0=,17da724ffc86cfb3132bd9c7ac3eb860ca43a2748be519a59aa50b436c147bc6
+github.com/tidwall/match,v1.0.1,h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=,a1b9d52b9a4c7574f46068665279522f2084be26bac71594630786f6ee9a70f2
+github.com/tidwall/pretty,v1.0.0,h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=,3b25a1a0fe7688989326aaa1ca1c74c972b30152ef2a756fbf2d217a827fc07d
+github.com/tidwall/sjson,v1.0.4,h1:UcdIRXff12Lpnu3OLtZvnc03g4vH2suXDXhBwBqmzYg=,cb47595016d45d72e6ee0f5585a86247aaeb93d9efa74e07676d32f60e8a7398
+github.com/tildeleb/cuckoo,v0.0.0-20190627040100-71059d5a2b62,h1:rXSNik45VDd1hfRLUAZwDLCY0FWvn2KlCeXjbd1yAI0=,f81f44544ec771ab630ddd5d65f4735ba2acc7619e41ccbc4bfad2473c21dc2f
+github.com/timewasted/linode,v0.0.0-20160829202747-37e84520dcf7,h1:CpHxIaZzVy26GqJn8ptRyto8fuoYOd1v0fXm9bG3wQ8=,9a3190b3751964a3d47449265d48e2d3a76b23c66a7cb402cc9bdf3d732d82b4
+github.com/tinylib/msgp,v1.1.0,h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=,61bd58489c555b30abffbe1175565b6f8460583349118e9ee12025fd17b67ea4
+github.com/tj/assert,v0.0.0-20171129193455-018094318fb0,h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA=,59a81d1883aac9635ac15d8a6a6e0630cf0a4122328116f921289dab840374b7
+github.com/tj/cobra,v0.0.0-20160702192511-5e2db986a612,h1:eiUtRvCN5HSnOg9AyX5z5od5VWy/ukyJ2oTboInm9MM=,493ac2ac61730652fcdd0b9b4c1e0c63855666df7fcaa02821b63982a5a7ccdf
+github.com/tj/go-elastic,v0.0.0-20171221160941-36157cbbebc2,h1:eGaGNxrtoZf/mBURsnNQKDR7u50Klgcf2eFDQEnc8Bc=,a0df933432e9c7ec276cbc0edbb941375726cf5a39c663aafe0e945f9ba3079f
+github.com/tj/go-kinesis,v0.0.0-20171128231115-08b17f58cb1b,h1:m74UWYy+HBs+jMFR9mdZU6shPewugMyH5+GV6LNgW8w=,0885f4631d33a20b5447ebbe12a0d23eb5ea3394de4bbc849cfe54ad19cadb2a
+github.com/tj/go-spin,v1.1.0,h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds=,060d09c35b1db5992747cde71ccbdaefe596ada06a6fe146e0ef10dc67d817dd
+github.com/tj/pflag,v0.0.0-20160702191705-e367e44eec04,h1:RAPJe7XUQhTjVUKvYegzhXnWkJd/1daXdoiXjvkSURU=,2156357bb17b30ccb893b8f7013168c85c1eb265b7156aca845d06fb35805257
+github.com/tjfoc/gmsm,v1.0.1,h1:R11HlqhXkDospckjZEihx9SW/2VW0RgdwrykyWMFOQU=,f8fe3c4d02f0dc90fd873278957d57c4c45f1c53b1fee3969216b67844efabb1
+github.com/tmc/grpc-websocket-proxy,v0.0.0-20190109142713-0ad062ec5ee5,h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=,dadf62266d259ffb6aa1d707892b97fa36c3f39df5cae99f54d3ef7682995376
+github.com/tomnomnom/linkheader,v0.0.0-20180905144013-02ca5825eb80,h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y=,558504ea96d4312be0fe5faa6de13fb6abd8f1b2ac154123c67b623a5f219cdb
+github.com/toqueteos/webbrowser,v1.2.0,h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ=,1227d3ebeab16d8232a304a10b087984a96ad30f7439b6687bab2f5747d308cf
+github.com/transip/gotransip,v0.0.0-20190812104329-6d8d9179b66f,h1:clyOmELPZd2LuFEyuo1mP6RXpbAW75PwD+RfDj4kBm0=,38b593cbdeb59e64d042533c1ce6196d89662de3282373de0d3c0749fe4c4856
+github.com/trivago/tgo,v1.0.5,h1:ihzy8zFF/LPsd8oxsjYOE8CmyOTNViyFCy0EaFreUIk=,06dc60662735374365cd525e2f4f4d1580f348125546e1f3e0d92d2deca4fa9a
+github.com/tstranex/u2f,v1.0.0,h1:HhJkSzDDlVSVIVt7pDJwCHQj67k7A5EeBgPmeD+pVsQ=,325e3db32035ce38a5981bfaa35fb6d9b5cb4b960cfa0285b92448d21d29f379
+github.com/tsuru/config,v0.0.0-20180418191556-87403ee7da02,h1:mHuZ6JOixltE9fJmS+W1xLi4t/uDuR6Nl7w/e4uj0+I=,0255268934770d67b9d101a030ed7ed578938e346a279a273ab3983b0eee53fb
+github.com/ttacon/chalk,v0.0.0-20160626202418-22c06c80ed31,h1:OXcKh35JaYsGMRzpvFkLv/MEyPuL49CThT1pZ8aSml4=,325521131515e4840e0083bc62cd9553da0b8d2480820f7e92ca89ae324f4c23
+github.com/tus/tusd,v1.0.1,h1:jb0SDf8zCUvlWv5SuHalOuRn684aW6WIvhfWRHC/XB8=,9a91d59123262b9bb1c43d39588a26d7560513b9e3c18254cd321890e8975083
+github.com/tv42/httpunix,v0.0.0-20150427012821-b75d8614f926,h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=,8246ebc82e0d9d3142f5aeb50d4fcd67f3f435fb5464120c356a4e5d57ef4aa0
+github.com/twinj/uuid,v1.0.0,h1:fzz7COZnDrXGTAOHGuUGYd6sG+JMq+AoE7+Jlu0przk=,842c314d6d2ef9cb95b0f3f1b4cf998715680e836cfab8c2a7f75e351765a345
+github.com/twitchtv/twirp,v5.8.0+incompatible,h1:DTfGS9u/jHbo34cBB+qhzVHRaAq+tRois71j8pvjQ5M=,a4137792083eedd9ac04e88918d8952a841120b11e71161d2d444065b8e65d79
+github.com/tyler-smith/go-bip39,v1.0.2,h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8=,6173ded455fa17cddd889bf3bc123be2343a09aeb60f83e2b63823dd9ce94e09
+github.com/tylerb/graceful,v1.2.15,h1:B0x01Y8fsJpogzZTkDg6BDi6eMf03s01lEKGdrv83oA=,770bd36defb9463ebe8b190f508e47c37bbb6bedf23a32c675066f8edbd7aa8d
+github.com/u-root/dhcp4,v0.0.0-20190206235119-03363dc71ec8,h1:F9cRXeXZ95CzG7352mm+yfgloHFrjpr1L+CQFiCH/iU=,1db09816d65071cfc5dbf25d5dbf11b2b48c3442495d30777cc0714bb4cf4163
+github.com/u-root/u-root,v6.0.0+incompatible,h1:YqPGmRoRyYmeg17KIWFRSyVq6LX5T6GSzawyA6wG6EE=,f3ec29d4b285e50d7b3116e121caca0d722535346a0ddf189d4c7d8e7e0a07d3
+github.com/uber-go/atomic,v1.4.0,h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=,f380292d46ebec89bf53939e4d7d19d617327cbcdf2978e30e6c39bc77df5e73
+github.com/uber/jaeger-client-go,v2.19.0+incompatible,h1:pbwbYfHUoaase0oPQOdZ1GcaUjImYGimUXSQ/+8+Z8Q=,d4928d51ce4440c825df67b4a54f851ead075701e67ece4b07fbc5c5857c091c
+github.com/uber/jaeger-lib,v2.2.0+incompatible,h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=,496f63f6df32c28ceb6574959c70969da2b609abc8f9f3b3a709466f862054bf
+github.com/uber/tchannel-go,v1.16.0,h1:B7dirDs15/vJJYDeoHpv3xaEUjuRZ38Rvt1qq9g7pSo=,64a37a5e89dd111ab943d94a1670f9addc0d2d41d34d630c95b0a756df916e01
+github.com/ucloud/ucloud-sdk-go,v0.8.7,h1:BmXOb5RivI0Uu4oZRpjI6SQ9/y7n/H9wxTGR1txIE8o=,d94766624c6f676880de354d4ed5c62c9ee7755c3d59cdf106ac0f5a070c0ece
+github.com/ugorji/go,v1.1.7,h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=,d02959e71c59b273d5b099697c058426941a862feef66c191c63e2934db7a2ff
+github.com/ugorji/go/codec,v1.1.7,h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=,8d482061c55b4c4fbf78de9fbf98a8d1b295f5904769679c73a2dc0b06a1a102
+github.com/ulikunitz/xz,v0.5.6,h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=,19ebb331e7ae7a542ed58597d13ab703fc423acf93a1e3c4db86476b0322049a
+github.com/unixpickle/anyvec,v0.0.0-20170908190750-59aa66ba0472,h1:eVBSKiY98Zth6cEYVzeu0CYagakYqbSWgpWqjZFiUvI=,4159f95762f7a99ee540397e78c7a60da788e6775ad9eca7fc1bd07d332a88f1
+github.com/unixpickle/autofunc,v0.0.0-20170112172612-f27a3f82164a,h1:ZUrHljv3rPkFyTYzUmBH8gBFjDwCIHc4a2DdPCWRjl0=,b39c092ab522c2ca3e8889dfbff281223628c08590b361242e72cc29015da9df
+github.com/unixpickle/essentials,v0.0.0-20180916162721-ae02bc395f1d,h1:mRwAxGRBEFcoKSWDoX5CROMJo6xmXBh4rNqOmyhpRi0=,7aa26b2cbcbac91669e88903f1e05b7696b32a6d8194d66c0fe7d93c613c2f5f
+github.com/unixpickle/num-analysis,v0.0.0-20161229165253-c45203c63047,h1:gipJz9DZGU3fgBjoaiNg+5CG9UdE7MmlBvSwNp1ulnY=,c1dac9bfeb72d39bb0b445f0f0b2af61753e5b11ff66e69bc196886189b7d50a
+github.com/unixpickle/serializer,v0.0.0-20170723202158-c6c092dc55bb,h1:kdurEYFZ2P58xnfWtmxKWkVtFPyK80BMIaJ2zW5uskY=,2cbf6cce1b2a57307c2c675a283ce9b46adcb9d18c3a9317d3ee20772175ae40
+github.com/unknwon/com,v1.0.1,h1:3d1LTxD+Lnf3soQiD4Cp/0BRB+Rsa/+RTvz8GMMzIXs=,f6264780f210f130a0edeafe4ffb0753c64b5168771f2d6cd1613999a7b79cd1
+github.com/unrolled/render,v1.0.1,h1:VDDnQQVfBMsOsp3VaCJszSO0nkBIVEYoPWeRThk9spY=,5b0ace5c3798f8989322a32b75c3eeabce7f6568533f808065cacf92425dd867
+github.com/unrolled/secure,v0.0.0-20190103195806-76e6d4e9b90c,h1:ZY4dowVsuIAQtXXwKJ9ezfonDQ2YT7pcXRpPF2iAy3Y=,1aba4f13fe4199198f9b59bbfd337773d049bad06f68360483a5f4c5431bdce4
+github.com/urfave/cli,v1.22.1,h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=,116fc1fba7db091617cd47c2b83c78d22489deeaf8390a6d3509da7fc9217d57
+github.com/urfave/cli/v2,v2.0.0-alpha.2,h1:2OVOKijPPhkA1cJA5SABACE8TT3Cwx9T0N6VtI8LJSI=,57250f97530fcb6fef7abc87cde3fbaf11ea45830adf98e3f1c986e2674e3b5f
+github.com/urfave/negroni,v1.0.0,h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc=,7b50615961d34d748866565b8885edd7013e33812acdbaed47502d7cc73a4bbd
+github.com/valyala/bytebufferpool,v1.0.0,h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=,7f59f32c568539afee9a21a665a4156962b019beaac8404e26ba37af056b4f1e
+github.com/valyala/fasthttp,v1.6.0,h1:uWF8lgKmeaIewWVPwi4GRq2P6+R46IgYZdxWtM+GtEY=,b15a953ed5395599871097c94977d21c026205e6ca7ad6e340cd595096d5840e
+github.com/valyala/fastrand,v1.0.0,h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI=,ed2166483141b4f3d59ee07975a5d91990e4c17f36c919565b8063c0cb02f7ed
+github.com/valyala/fasttemplate,v1.0.1,h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=,b4d9f77c6c15a0404952925ad59b759102c0ff48426b6fc88d6bfd347fe243b8
+github.com/valyala/tcplisten,v0.0.0-20161114210144-ceec8f93295a,h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=,07066d5b879a94d6bc1feed20ad4003c62865975dd1f4c062673178be406206a
+github.com/vbatts/tar-split,v0.11.1,h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=,73136db95ff35c2547c49be43727aa3f67da2d8837e1475954db910b41b1fa18
+github.com/veandco/go-sdl2,v0.3.3,h1:4/TirgB2MQ7oww3pM3Yfgf1YbChMlAQAmiCPe5koK0I=,d19e162daa2a6cc72569eb052adfd3d757fd069ee461a64803e9e8f2e9bb87a7
+github.com/vektah/dataloaden,v0.2.1-0.20190515034641-a19b9a6e7c9e,h1:+w0Zm/9gaWpEAyDlU1eKOuk5twTjAjuevXqcJJw8hrg=,92fe72fa4962bb2f375fae83f7a44a804e398ec08818f7d018724e0a23394ae3
+github.com/vektah/gqlparser,v1.1.2,h1:ZsyLGn7/7jDNI+y4SEhI4yAxRChlv15pUHMjijT+e68=,cdd0119855b98641e7af60dce5b2848b31f8ef03dfcf097c06912309b86fc97c
+github.com/viant/assertly,v0.4.8,h1:5x1GzBaRteIwTr5RAGFVG14uNeRFxVNbXPWrK2qAgpc=,253a5e53bb09bf94be7131d5034a6ba19c6eb1f9b8c7fa66182d577bd7b2d6cd
+github.com/viant/toolbox,v0.24.0,h1:6TteTDQ68CjgcCe8wH3D3ZhUQQOJXMTbj/D9rkk2a1k=,d6773a06b59de043eff2003bb97567056a1910eb0fd514f5503873b8f23309f4
+github.com/vimeo/go-util,v1.2.0,h1:YHzwOnM+V2tc6r67K9fXpYqUiRwXp0TgFKuyj+A5bsg=,85e52371bcf8299d47d8242546bc06e9e0c9c555b719008096889cd081a69173
+github.com/vincent-petithory/dataurl,v0.0.0-20160330182126-9a301d65acbb,h1:lyL3z7vYwTWXf4/bI+A01+cCSnfhKIBhy+SQ46Z/ml8=,5d5fa46ce0f88ba0734f52d0b0bcaa8a427770ef13cd1bfd7995e4d2a8439abb
+github.com/vishvananda/netlink,v1.0.0,h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM=,6fb7184280eb1321e1857171862bdb624eae29876496f1cb56932fbc0064020f
+github.com/vishvananda/netns,v0.0.0-20190625233234-7109fa855b0f,h1:nBX3nTcmxEtHSERBJaIo1Qa26VwRaopnZmfDQUXsF4I=,a99a67e03a35e1d02d1a17900185a1c38c513a79b2b325ad826553dc078a90de
+github.com/vivint/infectious,v0.0.0-20190108171102-2455b059135b,h1:dLkqBELopfQNhe8S9ucnSf+HhiUCgK/hPIjVG0f9GlY=,f5d948bf34ac58786ad20df4fd6e99f990f72458dd2825558bf2e3c871f3f37a
+github.com/vmihailenco/msgpack,v4.0.4+incompatible,h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=,918f7dd7883105b9c55728c704a3bc54c80568b2b09583890b51508c03391356
+github.com/vmware/govmomi,v0.21.0,h1:jc8uMuxpcV2xMAA/cnEDlnsIjvqcMra5Y8onh/U3VuY=,75ca40f34da851e95d7e63685adbaf1ec5c7f659fb0b47096c85da44f098c4a3
+github.com/vmware/vic,v1.5.4,h1:y546pkye0aes2j2h2n6fWz++v8WxMZTLFl1mLOMzqYQ=,2a6f0c20be8acb3b467c78d3de18009ccd0ab2429a997266089d14341e43115c
+github.com/vmware/vmw-guestinfo,v0.0.0-20170707015358-25eff159a728,h1:sH9mEk+flyDxiUa5BuPiuhDETMbzrt9A20I2wktMvRQ=,29c73ba44ac315461640797d6ebfda2d906c28dbe21c20656c6e5fa1f515f220
+github.com/vulcand/oxy,v1.0.0,h1:7vL5/pjDFzHGbtBEhmlHITUi6KLH4xXTDF33/wrdRKw=,148843b55ed01813f8920aab70a799aa10cfdccc0bbd55e270cde78e1ad23b88
+github.com/vulcand/predicate,v1.1.0,h1:Gq/uWopa4rx/tnZu2opOSBqHK63Yqlou/SzrbwdJiNg=,3dd716f2436651429ce7f5fdd59fa1a9944ab4d57fdbae5fef00ef01baf7c4be
+github.com/vultr/govultr,v0.1.4,h1:UnNMixYFVO0p80itc8PcweoVENyo1PasfvwKhoasR9U=,7281fa718c076b84610b155fb0dec34503ea1ae5f2930cc714ed7772e475bb08
+github.com/warpfork/go-wish,v0.0.0-20190328234359-8b3e70f8e830,h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0=,77a9eefa3edf38cb90eba443f282bd73ffcb6f1b87aebe8f891d8c8b38124d95
+github.com/weaveworks/common,v0.0.0-20190917143411-a2b2a6303c33,h1:UAh7j96ZXQID3shhQsrtfJsrQ2uO3tyRxCuXvh+kipw=,e1ceacd5b24c6414ae664f4b09e295dc25e48d3a1dcd5100d8c98dd405a0d162
+github.com/weaveworks/mesh,v0.0.0-20191031093817-8e3db2fe8f47,h1:RUdrWPah1Xu+efIGqN0YGTv7gQeyR5qwBq9uL4HloKw=,05f5d769f7ff6af1c098f0e42983227d6a86f5d8d1f8453cb0566450cad49358
+github.com/wellington/go-libsass,v0.9.3-0.20181113175235-c63644206701,h1:9vG9vvVNVupO4Y7uwFkRgIMNe9rdaJMCINDe8vhAhLo=,2ae95ed360950fab28eff3bedf1c1a6f5f81b73078000d3a0bd67443d38df87f
+github.com/wendal/errors,v0.0.0-20130201093226-f66c77a7882b,h1:0Ve0/CCjiAiyKddUMUn3RwIGlq2iTW4GuVzyoKBYO/8=,f7722558c5c450fa02e800ce7bf4d0bc1d2a0e1696d3fc50ff1489bcd02ff3b3
+github.com/weppos/publicsuffix-go,v0.5.0,h1:rutRtjBJViU/YjcI5d80t4JAVvDltS6bciJg2K1HrLU=,bd8365c8501b307a1fbd62501bc3332ff97721bef51921a99e67a3f8b96318fc
+github.com/whyrusleeping/cbor-gen,v0.0.0-20190910031516-c1cbffdb01bb,h1:8yBVx6dgk1GfkiWOQ+RbeDDBLCOZxOtmZ949O2uj5H4=,9d5ab8362eaffa07bc2700d9a9e967c1ecf394e3233a6e7141efb48970bfd4e5
+github.com/whyrusleeping/chunker,v0.0.0-20181014151217-fe64bd25879f,h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=,b28fdb03b69be216c423967e9dee2481aa10c3e39c71d3bfc8911940dadb26a9
+github.com/whyrusleeping/go-keyspace,v0.0.0-20160322163242-5b898ac5add1,h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=,9416f8227e6c516294b9b938fcf2347bebe2cdab4377454150ba60dcd86c2990
+github.com/whyrusleeping/go-logging,v0.0.0-20170515211332-0457bb6b88fc,h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo=,125b1a836936436354791583be42ae19f7c04a636b5c0c96135645d52aaa72ea
+github.com/whyrusleeping/go-notifier,v0.0.0-20170827234753-097c5d47330f,h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg=,08dddb594554c3b35791893207e66dd3c04e4da24d0e0df001bb185f97dec6cc
+github.com/whyrusleeping/go-smux-multiplex,v3.0.16+incompatible,h1:iqksILj8STw03EJQe7Laj4ubnw+ojOyik18cd5vPL1o=,e16e3da58e283e71955b21725c384d180a2999bc2a50cb0490b5e2f7a74b5fc6
+github.com/whyrusleeping/go-smux-multistream,v2.0.2+incompatible,h1:BdYHctE9HJZLquG9tpTdwWcbG4FaX6tVKPGjCGgiVxo=,9a783c4a1b69f6002ac4e0af684f4d5c4d360b7107fbbdde48faf38f7e23e998
+github.com/whyrusleeping/go-smux-yamux,v2.0.9+incompatible,h1:nVkExQ7pYlN9e45LcqTCOiDD0904fjtm0flnHZGbXkw=,3f44f41fc7b133085bba08d52e7615e9a8eb92f55fde6a07d3cd7804117e9985
+github.com/whyrusleeping/mafmt,v1.2.8,h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA=,e5d5783d2bc35f7c23f2034fd52c5750ad0590773115c10b4e15360575322c69
+github.com/whyrusleeping/mdns,v0.0.0-20180901202407-ef14215e6b30,h1:nMCC9Pwz1pxfC1Y6mYncdk+kq8d5aLx0Q+/gyZGE44M=,fc2e4d2365ba40d52d03126ea490e712762b4ad398c8d6adb2a1a08699a10eb1
+github.com/whyrusleeping/multiaddr-filter,v0.0.0-20160516205228-e903e4adabd7,h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds=,14e8963464dab0f6277f596985be5ea419bc3bae8bf4f4f139cce456e1815faf
+github.com/whyrusleeping/timecache,v0.0.0-20160911033111-cfcb2f1abfee,h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=,c33dfc5ac935582261bf5ddbab31bb07febc471a9c26eb3e1a895eddd574d3e8
+github.com/whyrusleeping/yamux,v1.1.5,h1:4CK3aUUJQu0qpKZv5gEWJjNOQtdbdDhVVS6PJ+HimdE=,658f9e704cbe1cac295ed34471bb096a4d2713f69ffbb8140fbf50b8ff6420e0
+github.com/willf/bitset,v1.1.9,h1:GBtFynGY9ZWZmEC9sWuu41/7VBXPFCOAbCbqTflOg9c=,ddd687772ccfd6774e55e7e9d9e71dab86d85a64b98ce1d864d9661f5b0767e4
+github.com/x-cray/logrus-prefixed-formatter,v0.5.2,h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg=,00719eeb4f9eadb9431dd9f763fa4013dc52b37a8803a973c6d0c1ce8281e14b
+github.com/xanzy/go-cloudstack,v0.0.0-20190526095453-42f262b63ed0,h1:NJrcIkdzq0C3I8ypAZwFE9RHtGbfp+mJvqIcoFATZuk=,34b46eae351e4916015ce2a43ed501403937e4079cf69dae98a9544bfeec8092
+github.com/xanzy/go-gitlab,v0.21.0,h1:Ru55sR4TBoDNsAKwCOpzeaGtbiWj7xTksVmzBJbLu6c=,12ae6fa35c19fffc31d1fa2891f386875caac8077d19f3f09f49b5e2e51b1755
+github.com/xanzy/ssh-agent,v0.2.1,h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=,7011c1771f8ad9b65795f8a85113e4518c9a2c7493029c4c988bc802b63d9e28
+github.com/xdg/scram,v0.0.0-20180814205039-7eeb5667e42c,h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=,33884d438b686676ceaa2a439634a108f7fe763ce974342d2aa811c22b34112c
+github.com/xdg/stringprep,v1.0.0,h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=,2b262e4e8e9655100c98e2b7e75b517e3e83e2155818174c63ea09d3cce22721
+github.com/xeipuuv/gojsonpointer,v0.0.0-20180127040702-4e3ac2762d5f,h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=,5b1a4bcc8e003f214c92b3fa52959d9eb0e3af1c0c529efa55815db951146e48
+github.com/xeipuuv/gojsonreference,v0.0.0-20180127040603-bd5ef7bd5415,h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=,7ec98f4df894413f4dc58c8df330ca8b24ff425b05a8e1074c3028c99f7e45e7
+github.com/xeipuuv/gojsonschema,v1.2.0,h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=,55c8ce068257aa0d263aad7470113dafcd50f955ee754fc853c2fdcd31ad096f
+github.com/xenolf/lego,v2.7.2+incompatible,h1:aGxxYqhnQLQ71HsvEAjJVw6ao14APwPpRk0mpFroPXk=,25c2495e4fc2f5fea8c70b442add86c049f2f8810235e1ee94f29d8e0267ad2c
+github.com/xeonx/timeago,v1.0.0-rc4,h1:9rRzv48GlJC0vm+iBpLcWAr8YbETyN9Vij+7h2ammz4=,b06f4ede554b35387394827ca0350b628a72228a8002653817826991867e1fdd
+github.com/xi2/xz,v0.0.0-20171230120015-48954b6210f8,h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=,1ffe8f24af5118966084d41eca2c9bee7a831a07deb4356e4d707d208da22e8e
+github.com/xiang90/probing,v0.0.0-20190116061207-43a291ad63a2,h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=,437bdc666239fda4581b592b068001f08269c68c70699a721bff9334412d4181
+github.com/xlab/treeprint,v0.0.0-20181112141820-a009c3971eca,h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI=,d14ebea967caa835f25e4c3980c60719e07f0e36375b74dc48928613fca5b2ff
+github.com/xo/dburl,v0.0.0-20191005012637-293c3298d6c0,h1:6DtWz8hNS4qbq0OCRPhdBMG9E2qKTSDKlwnP3dmZvuA=,1fb150cf2144a4b7a571360af52d9b22dfe53e2ba9ab3e56584fdb0eb282d315
+github.com/xordataexchange/crypt,v0.0.3-0.20170626215501-b2862e3d0a77,h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=,46dc29ef77d77a2bc3e7bd70c94dbaeec0062dd3bd6fcacbaab785c15dcd625b
+github.com/xtaci/kcp-go,v5.4.5+incompatible,h1:CdPonwNu3RKu7HcXSno5r0GXfTViDY2iFV2RDOao/4U=,98e77493d94b33bfec990bd5791d15a09add1a0ba2f3281f26bdc98c1815d9a7
+github.com/xtaci/lossyconn,v0.0.0-20190602105132-8df528c0c9ae,h1:J0GxkO96kL4WF+AIT3M4mfUVinOCPgf2uUWYFUzN0sM=,75cc8c3e14cf812dcc56a1e8cecafd8affd9b2843d39540ab67929f7ce3d1abc
+github.com/xtgo/set,v1.0.0,h1:6BCNBRv3ORNDQ7fyoJXRv+tstJz3m1JVFQErfeZz2pY=,6b70026a5ea66bc0be7efb2247afa53ae970b9535c7a8541795750ef9b640217
+github.com/yalp/jsonpath,v0.0.0-20150812003900-31a79c7593bb,h1:06WAhQa+mYv7BiOk13B/ywyTlkoE/S7uu6TBKU6FHnE=,d2041be5f19a3dbcd4b384dbbf5782cdb96d80ad9c60c8c9b887f2c5170cb25f
+github.com/yandex-cloud/go-genproto,v0.0.0-20190928220815-a36c849d0fc1,h1:GDyRNvsi/tOZj1ssPkk+kocO1djpbmLSpDKg4XeRPy4=,5502c680146902518514935af5ab5b554a80f5ebe2e79d491db3120911f5498d
+github.com/yandex-cloud/go-sdk,v0.0.0-20190916101744-c781afa45829,h1:2FGwbx03GpP1Ulzg/L46tSoKh9t4yg8BhMKQl/Ff1x8=,4b375b871ce7501943a26ba02c348ad4fdf2cb112520513628566a15a98a4796
+github.com/yohcop/openid-go,v0.0.0-20160914080427-2c050d2dae53,h1:HsIQ6yAjfjQ3IxPGrTusxp6Qxn92gNVq2x5CbvQvx3w=,8c4f676193e3aa5ec012e0661d0e552a3e5d5d96086a73901dcfbf0bd4a6d2e9
+github.com/yookoala/realpath,v1.0.0,h1:7OA9pj4FZd+oZDsyvXWQvjn5oBdcHRTV44PpdMSuImQ=,9fe8b06f8efabb7df08608f18edc77d284e04ad06d490af9f55196e4184c339f
+github.com/yosssi/ace,v0.0.5,h1:tUkIP/BLdKqrlrPwcmH0shwEEhTRHoGnc1wFIWmaBUA=,96157dbef72f2f69a900e09b3e58093ee24f7df341ac287bddfb15f8c3f530db
+github.com/yosssi/gmq,v0.0.1,h1:GhlDVaAQoi3Mvjul/qJXXGfL4JBeE0GQwbWp3eIsja8=,d06bbe96ba0e8c3c79bfb0b9191a02a19d8d3d3c181eba62df6d94c0602c784e
+github.com/youtube/vitess,v2.1.1+incompatible,h1:SE+P7DNX/jw5RHFs5CHRhZQjq402EJFCD33JhzQMdDw=,2eb3c516c8b24a72b8cb14f76f39562638acf0cd7fc3858002163d28047607f2
+github.com/yudai/gojsondiff,v0.0.0-20170107030110-7b1b7adf999d,h1:yJIizrfO599ot2kQ6Af1enICnwBD3XoxgX3MrMwot2M=,3f61230fe62a6fe2e93a75264d176bda3f62323063c1e9bfb87c0be31ac5d269
+github.com/yudai/golcs,v0.0.0-20150405163532-d1c525dea8ce,h1:888GrqRxabUce7lj4OaoShPxodm3kXOMpSa85wdYzfY=,ff1f3899e710574a08aaa51051a36c523ecf850180ad0564d55eec611c3cff72
+github.com/yudai/pp,v2.0.1+incompatible,h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=,ecfda4152182e295f2b21a7b2726e2865a9415fc135a955ce42e039db29e7a20
+github.com/yuin/gopher-lua,v0.0.0-20190514113301-1cd887cd7036,h1:1b6PAtenNyhsmo/NKXVe34h7JEZKva1YB/ne7K7mqKM=,fd157d5d26c336c44837eceef5c6fc4b442a56b25931d4afae3c4080932a7aa7
+github.com/zach-klippenstein/goregen,v0.0.0-20160303162051-795b5e3961ea,h1:CyhwejzVGvZ3Q2PSbQ4NRRYn+ZWv5eS1vlaEusT+bAI=,6f523a11fcb80dca31c3bae99c8c4a59b7e5a4176e36cad0e3f1e64e1b9a7b11
+github.com/zclconf/go-cty,v1.1.0,h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw=,024660decfe11e74a9fab80f1447b79c61e328baf6418629a15c74e183b95e95
+github.com/zclconf/go-cty-yaml,v1.0.1,h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8=,2502da37ac6d9105b07748c4252f970aa6a7ffc8929b92a0b85abb81b804e9b7
+github.com/zeebo/admission,v0.0.0-20180821192747-f24f2a94a40c,h1:WoYvMZp+keiJz+ZogLAhwsUZvWe81W+mCnpfdgEUOl4=,b62a80509cfa84e697b23dd6b1b314a264e6f68586661ecd84026625f7753cb1
+github.com/zeebo/assert,v1.0.0,h1:qw3LXzO7lbptWIQ6DsemJIUOoaqyKbgY3M8b8yvlaaY=,bb31d428cc59a322975ab6b5757832e62507655f3e2c467a88345b21d7431d98
+github.com/zeebo/errs,v1.2.2,h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g=,d2fa293e275c21bfb413e2968d79036931a55f503d8b62381563ed189b523cd2
+github.com/zeebo/float16,v0.1.0,h1:kRqxv5og6z1emEyz5FpW0/BVHe5VfxEAw6b1ljCZlUc=,ffc6b2a7bce5e37798bc3ac53448b6190039a77f2e7d589779680fbd3cb53a48
+github.com/zeebo/incenc,v0.0.0-20180505221441-0d92902eec54,h1:+cwNE5KJ3pika4HuzmDHkDlK5myo0G9Sv+eO7WWxnUQ=,141b997c5ece8f136f43644f5a2526305563128c4ecce280d9a54ce1ae506ba2
+github.com/zeebo/structs,v1.0.2,h1:kvcd7s2LqXuO9cdV5LqrGHCOAfCBXaZpKCA3jD9SJIc=,0495c69abfeb2ffa0911f4c44ba145d81b04ec76d2311e2eedfc2b3e2efd66c9
+github.com/zenazn/goji,v0.9.0,h1:RSQQAbXGArQ0dIDEq+PI6WqN6if+5KHu6x2Cx/GXLTQ=,0807a255d9d715d18427a6eedd8e4f5a22670b09e5f45fddd229c1ae38da25a9
+github.com/ziutek/mymysql,v1.5.4,h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=,1ea104186e0990a3d97a1e67fcd31177849c975de4abd9399270ab0a04c025de
+github.com/zkfy/cron,v0.0.0-20170309132418-df38d32658d8,h1:jxPemXnLeekMXItoaw4jZtDfe8HmvFmviUm2L5tEBhE=,81a903448f6bc140e07bc4ff70762f0a46e750388f4b92f700d358331b1ca8d5
+github.com/zkfy/go-metrics,v0.0.0-20161128210544-1f30fe9094a5,h1:Rb2qQMbEon+BI3IXGh4eW3u/iTLPA3+Y6kNK+gHO32w=,07f9078cbc233559128dc4ae80d69505dd1a07d47d33135fc8f4969829fd6ee8
+github.com/zkfy/jwt-go,v3.0.0+incompatible,h1:5hZNIkrRRa0mrkRiXoPFdLJWpMDByIZ6VIbX9aWhwmk=,8306a4a65059e17be035dd47f45d83aac503c50c954716c83e481d0b6530aed6
+github.com/zkfy/log,v0.0.0-20180312054228-b2704c3ef896,h1:nktyhX5ycnu+WA489Ei7SUi00bF+LW8TF2N7se5gQ/o=,dd0acb5ccceb2225c89f0f50dc8eea9f1cae0971b731750ea7a1b186c194d9bc
+github.com/zkfy/stompngo,v0.0.0-20170803022748-9378e70ca481,h1:dqbWcJVZJv06ZR7zK8yN9w8oNOHL23eylL4o9Xj9Zn0=,9e643fbfd166421cb186275742bafc663fc350da83e59e9d88c06feb12ec4462
+github.com/zmap/rc2,v0.0.0-20131011165748-24b9757f5521,h1:kKCF7VX/wTmdg2ZjEaqlq99Bjsoiz7vH6sFniF/vI4M=,fd70713ed40c95220e95c7c47f7e15051e8dc909d39253f403bb694f45fbe789
+github.com/zmap/zcertificate,v0.0.0-20180516150559-0e3d58b1bac4,h1:17HHAgFKlLcZsDOjBOUrd5hDihb1ggf+1a5dTbkgkIY=,7dc2c0bedccfdeb9c42ef41ef502f404befa9ef073c35db3b15c99cae6697b41
+github.com/zmap/zcrypto,v0.0.0-20190729165852-9051775e6a2e,h1:mvOa4+/DXStR4ZXOks/UsjeFdn5O5JpLUtzqk9U8xXw=,871979cf16453ddb4db7f153f449af4e346f68b51355c74b5eee832225618ff0
+github.com/zmap/zlint,v0.0.0-20190806154020-fd021b4cfbeb,h1:vxqkjztXSaPVDc8FQCdHTaejm2x747f6yPbnu1h2xkg=,e62f5cd5f434d84f53d336261e3a6e50c8902152ce8f2f5ce918270d6d201cab
+github.com/zondax/hid,v0.9.0,h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8=,9c72a6bdbf03d9465dfdf1ba876eabf5fa923d5bb9e726c9e4a994098dc9bd79
+github.com/zondax/ledger-go,v0.9.0,h1:oTrtFqPFA4VdCPRvqMaN45mQnJxkPc0JxoVZfCoUpjI=,6c6a7e036f9a621ce951939d7d13ae1f0c098f58829307c78f9312e02e78e438
+github.com/zquestz/grab,v0.0.0-20190224022517-abcee96e61b1,h1:1qKTeMTSIEvRIjvVYzgcRp0xVp0eoiRTTiHSncb5gD8=,4decd67f1252df4ee34968cb0cb4e7dc6010302b24ce8edd418f1c2520f1c351
+gitlab.com/NebulousLabs/errors,v0.0.0-20171229012116-7ead97ef90b8,h1:gZfMjx7Jr6N8b7iJO4eUjDsn6xJqoyXg8D+ogdoAfKY=,b355474f1a2ef2722ae450ef6df7209d223188ae413706be122b472fcc053c48
+gitlab.com/NebulousLabs/fastrand,v0.0.0-20181126182046-603482d69e40,h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs=,a56acdda993c7a4795028fe38844d54de9b1877d22e8ae09f205e488ce2284bc
+go.bug.st/serial.v1,v0.0.0-20180827123349-5f7892a7bb45,h1:mACY1anK6HNCZtm/DK2Rf2ZPHggVqeB0+7rY9Gl6wyI=,f0ea4cd4c51228f1a3cf14c6b92888169944f267e1ee778909512a4c8ac4762f
+go.cryptoscope.co/luigi,v0.3.4,h1:eDrtCoUL5Vl2Atr5ty2dq0uFbzFCc6Pz1HEqU1e7I1I=,949612e92dcb2fc919e506740f36d0cfe0797c1f85579a98763aad0135a4580a
+go.dedis.ch/fixbuf,v1.0.3,h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs=,dfa737543a5873b14cdfd0eec675c63044b16d3dbe481b2289c758ae4186ae95
+go.dedis.ch/kyber/v3,v3.0.8,h1:qnHzOBaxEO3+ZYuZAfwPTOPzX+F6QMmWGo8YJvENh68=,d69db17bd37bf14c4e508eb84974c3df9a82b8cb30b55ddc3ac0ee2784abcbac
+go.dedis.ch/kyber/v4,v4.0.0-pre1,h1:1f5OPESkyxK6kPaCSV3J9BlpnoysIpbGLNujX9Ov8m4=,d082a41e2178f7e18c088e414e020928794245a9dae41d07da842ebb667a337e
+go.dedis.ch/onet/v3,v3.0.26,h1:wQhVGB+SCdG7B0tbo6ZeZINQKWkU4u9TNMkGBH16EEM=,a41978897a3371f2eaaab5c84c354c95b4fdbd7b8207afa7c79f32b85f857d5d
+go.elastic.co/apm,v1.5.0,h1:arba7i+CVc36Jptww3R1ttW+O10ydvnBtidyd85DLpg=,447a5954db3f7fc61575c83782be0b6d69e453f1e667b0534d3bf5336039238a
+go.elastic.co/apm/module/apmhttp,v1.5.0,h1:sxntP97oENyWWi+6GAwXUo05oEpkwbiarZLqrzLRA4o=,1e6bc42b2e3ab10165036afd95a8a4d910acadce451c0b4e7c998cbb5c06da73
+go.elastic.co/apm/module/apmot,v1.5.0,h1:rPyHRI6Ooqjwny67au6e2eIxLZshqd7bJfAUpdgOw/4=,235fb0c1d0e107ffb7c5056e49226152063ac87ebc657428ea410d5170804d2e
+go.elastic.co/fastjson,v1.0.0,h1:ooXV/ABvf+tBul26jcVViPT3sBir0PvXgibYB1IQQzg=,451e29b2854f9e09c58e3fe4c1b3a72d9b2ee293628ab4c4323e8192af015c6c
+go.etcd.io/bbolt,v1.3.3,h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=,1ea076dbe18dabe78909e1fb7ec2954fc2d58cd72e7730ad69b35248a30049fd
+go.etcd.io/etcd,v3.3.17+incompatible,h1:g8iRku1SID8QAW8cDlV0L/PkZlw63LSiYEHYHoE6j/s=,7bd292878f70e154a061ed6b85fc70502aa270fcf0072340cbde1a0cb35b0d2d
+go.mongodb.org/mongo-driver,v1.1.2,h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA=,6b3141ced32d7a41ebd0539df957b76331fc3efdca22eae68da54d41aad23fed
+go.opencensus.io,v0.22.1,h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50=,b8d9a5fca5e714c4bf66f6497dd905992113cfd6aae948bb7fad5ce987a520ed
+go.starlark.net,v0.0.0-20191021185836-28350e608555,h1:FhmD1D59MmncMfRVTRa889iERZG3jdaKj/1FtOQB1G0=,add124cd355e714f076a385eb3f2ddcfb8ce0c7c8e6611e2e03acc427a4c32bf
+go.uber.org/atomic,v1.5.0,h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY=,7e32f8f75b2029aa53399c2cd6e581398ac4e971c17a763980377279ede95c77
+go.uber.org/automaxprocs,v1.2.0,h1:+RUihKM+nmYUoB9w0D0Ov5TJ2PpFO2FgenTxMJiZBZA=,4c7bf41eab5dd7781c69130aa37011427531dee231ffbdc3c9ed4267c06aa93c
+go.uber.org/multierr,v1.3.0,h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc=,29b25df332dea2dbfaaa308013fc6d3673ecd3d9ee09c666c69df504533d0714
+go.uber.org/ratelimit,v0.1.0,h1:U2AruXqeTb4Eh9sYQSTrMhH8Cb7M0Ian2ibBOnBcnAw=,78f82854809625c784088b9dec5dfb4810fbbd09c24891b8aaf2c2679212dfd8
+go.uber.org/thriftrw,v1.20.2,h1:0JlCE7dOyWHEQdfDm0MWIbgTn6vXkiMA6LNIe8FQXjw=,148b93f97a6ab865e2dbe0eb09b9f9504248808efc437e20efc1bf9b7896de9a
+go.uber.org/tools,v0.0.0-20190618225709-2cfd321de3ee,h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=,988dba9c5074080240d33d98e8ce511532f728698db7a9a4ac316c02c94030d6
+go.uber.org/zap,v1.12.0,h1:dySoUQPFBGj6xwjmBzageVL8jGi8uxc6bEmJQjA06bw=,d4b304046a3f9443e4abe217889b5b2a4ecef35d52f175bcacf2baff18646595
+go4.org,v0.0.0-20191010144846-132d2879e1e9,h1:zHLoVtbywceo2hE4Wqv8CmIufe7jDERQ2KJHZoSDfCU=,21811f50d48c55047df1d6bf68db778087afe9116f1f32faf79f8ca459d29d89
+gobot.io/x/gobot,v1.14.0,h1:IJv4A9f5/lUz4JQaS37UW8bRVl3lG+jCGUcNmJ2F0vE=,95ad64d1bf33ee46816b2c87edb10d7b3bfe118b6f7026bf4b5f762867d1e776
+gocloud.dev,v0.17.0,h1:UuDiCphYsiNhRNLtgHVL/eZheQeCt00hL3XjDfbt820=,0df8e26a2356735d596e8a3917ec4b69f61fb5e9f6f291b51f6145a51b646a9b
+gocv.io/x/gocv,v0.21.0,h1:dVjagrupZrfCRY0qPEaYWgoNMRpBel6GYDH4mvQOK8Y=,9e1a70258d72b873d9605a2939b38f9e560650472d70b97f5dd0fc2657eaf35f
+golang.org/x/arch,v0.0.0-20191101135251-a0d8588395bd,h1:e1iK2rWppIPlzzqtjXT/p6WR/+ritGZ8xkfL8uDZb0g=,daba41c9150ebf192ce54952d69ef12fe47c5c6250a33c01f0624befea35354e
+golang.org/x/build,v0.0.0-20191031202223-0706ea4fce0c,h1:jjNoDZTS0vmbqBhqD5MPXauZW+kcGyflfDDFBNCPSVI=,a675f674bcee677f1dc9a15ca4d84bb2e842c29d745b165ba3e5423c09367d29
+golang.org/x/crypto,v0.0.0-20191029031824-8986dd9e96cf,h1:fnPsqIDRbCSgumaMCRpoIoF2s4qxv0xSSS0BVZUE/ss=,0a303100f9afba8628988bef45404b23c2e0c6aa73b5ad4ac9259af14a0e53ae
+golang.org/x/exp,v0.0.0-20191030013958-a1ab85dbe136,h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw=,18ff05b39d29a3fd4c7f9071e7013264994ac18f7faa72f66b2f514fcdd141b0
+golang.org/x/image,v0.0.0-20191009234506-e7c1f5e7dbb8,h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U=,aebca4c096dac7c20d9024b73bd0b4a87a85f4c6b50aae7615dec504c5f478c8
+golang.org/x/lint,v0.0.0-20190930215403-16217165b5de,h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=,91323fe1a77f13de722a0ce8efc5c5f2da4f26216d858acec64cb23c956fa163
+golang.org/x/mobile,v0.0.0-20191031020345-0945064e013a,h1:CrJ8+QyIm2tcw/zt9Rp/vGFsey+jndL1y5EnFwzgGOg=,5ee0c7eed83b64cc851d6ddb76346413d7c43213ea1241385b588c66e2169854
+golang.org/x/mod,v0.1.0,h1:sfUMP1Gu8qASkorDVjnMuvgJzwFbTZSeXFiGBYAVdl4=,e0d9b32f6f66103f777e8357b5b60f94a486330d46c6c8ea87789dab1a14cefa
+golang.org/x/net,v0.0.0-20191101175033-0deb6923b6d9,h1:DPz9iiH3YoKiKhX/ijjoZvT0VFwK2c6CWYWQ7Zyr8TU=,b07094a5589a436fd98c6700cd5898f2094d9c02f8385f9331a7ace46305c7ae
+golang.org/x/oauth2,v0.0.0-20190604053449-0f29369cfe45,h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=,f72b6c3c2b734ad053fadf5fa2adb2ad23024cfeacd567fec31a751526d1dfe0
+golang.org/x/perf,v0.0.0-20180704124530-6e6d33e29852,h1:xYq6+9AtI+xP3M4r0N1hCkHrInHDBohhquRgx9Kk6gI=,a2c7d02cc94c4ba767b6322f70ddcba4941cb5f60fed1bada3aa7a4d3a8128f1
+golang.org/x/sync,v0.0.0-20190911185100-cd5d95a43a6e,h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=,9c63fe51b0c533b258d3acc30d9319fe78679ce1a051109c9dea3105b93e2eef
+golang.org/x/sys,v0.0.0-20191029155521-f43be2a4598c,h1:S/FtSvpNLtFBgjTqcKsRpsa6aVsI6iztaz1bQd9BJwE=,c5a8efb84e706e4ec1e1fa5cda44d1d571e8b3f46afe165d5e93b90e777a15fc
+golang.org/x/text,v0.3.2,h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=,f755c0e7f4693f170e2f03c161f500b33f82accb8184a38dcfda63fed883f13c
+golang.org/x/time,v0.0.0-20191024005414-555d28b269f0,h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=,e0ca5eceb4840bcc264237408ff8942044e19b503d6e8e5546ed9f7e1f4bf82e
+golang.org/x/tools,v0.0.0-20191101200257-8dbcdeb83d3f,h1:+QO45yvqhfD79HVNFPAgvstYLFye8zA+rd0mHFsGV9s=,c3beb2acb726571e4cca3e922dd1eb037dcb6ef66ca562e9544716a53b6a1026
+golang.org/x/xerrors,v0.0.0-20191011141410-1b5146add898,h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=,5059c7b7e95f139b8c42d9001972fa5fa688b3581ef946c912c1dbc52415ff16
+gomodules.xyz/envconfig,v1.3.0,h1:w1laMNVtP05uOKqmRAY6Vx7HvfPL9yc388gcVtUiI/M=,ae5b4ee26eeb143c16bfb5316eb97e8ff4418bce379ae74e2a0bba367706d69c
+gomodules.xyz/jsonpatch/v2,v2.0.1,h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0=,3c97ac5b7cfa3388f3dc157e20e6ad7b7a5789a4df1d5257a39589cf66edd462
+gonum.org/v1/gonum,v0.6.0,h1:DJy6UzXbahnGUf1ujUNkh/NEtK14qMo2nvlBPs4U5yw=,98857b431471c87facf3cd779eadc5d33760c9edee4b56a8228af4b383b90aa2
+gonum.org/v1/netlib,v0.0.0-20190331212654-76723241ea4e,h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts=,ed4dca5026c9ab5410d23bbe21c089433ca58a19bd2902311c6a91791142a687
+gonum.org/v1/plot,v0.0.0-20191004082913-159cd04f920c,h1:Ssc2Jy4xun3/JMt2asledr/xSPAvX7ZZ7HimX2Gwz1w=,9246b6f7a9299061b31d99e50b2ac2685853dc478a6c2c730fada016c7268ea1
+google.golang.org/api,v0.13.0,h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA=,4c853034281c673829b7a7f3e39c62640d01895d20a666f003f855ad5f55ec30
+google.golang.org/appengine,v1.6.5,h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=,24ddb4adf72189738dc8340b28f9493a385515e680eb0bfbffe08951412b6655
+google.golang.org/genproto,v0.0.0-20191028173616-919d9bdd9fe6,h1:UXl+Zk3jqqcbEVV7ace5lrt4YdA4tXiz3f/KbmD29Vo=,cb4eec9cf94aa450efbb0d131cf1484f6334f1e8c1e1475b76c3ab2dea76c72a
+google.golang.org/grpc,v1.24.0,h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=,eb4433685a85e20f934c2a98e35d104db2d77abe438a242d75d5aae9f78898fb
+google.golang.org/protobuf,v0.0.0-20191101204728-ef19a2a99470,h1:wSgCzfaFwg6Q4Eh+T7XknFfgswhFaeYkEs8t5endA/c=,73a49a6e5fd3330de7364564ab0954146e25ad8bbdff0ea6180f8ace153b0c1b
+gopkg.in/Acconut/lockfile.v1,v1.1.0,h1:c5AMZOxgM1y+Zl8eSbaCENzVYp/LCaWosbQSXzb3FVI=,66e89c98908e2b9295de1a32cdd90f626a2468c256ce6182d6339e6659548e71
+gopkg.in/AlecAivazis/survey.v1,v1.8.7,h1:oBJqtgsyBLg9K5FK9twNUbcPnbCPoh+R9a+7nag3qJM=,c924df9f9d79f015cc619b1ecede52c92618c0ab8d020cd63e2c783f46b3907d
+gopkg.in/DataDog/dd-trace-go.v1,v1.19.0,h1:aFSFd6oDMdvPYiToGqTv7/ERA6QrPhGaXSuueRCaM88=,f8eb14519d62c80eea88fca1daa69b274a0b492aa8b775890424b48d362c32b3
+gopkg.in/Shopify/sarama.v1,v1.18.0,h1:f9aTXuIEFEjVvLG9p+kMSk01dMfFumHsySRk1okTdqU=,beeb8546c4202289f282529630bc3db4452dc5f7eb69c3d8546196470c7d8be3
+gopkg.in/VividCortex/ewma.v1,v1.1.1,h1:tWHEKkKq802K/JT9RiqGCBU5fW3raAPnJGTE9ostZvg=,fe7800182ce944f2b28834d6cf60c620de0cbba1d691d9442f3473baf2a3d50d
+gopkg.in/airbrake/gobrake.v2,v2.0.9,h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=,2db903664908e5a9afafefba94821b9579bbf271e2929c1f0b7b1fdd23f7bbcf
+gopkg.in/alecthomas/gometalinter.v2,v2.0.12,h1:/xBWwtjmOmVxn8FXfIk9noV8m2E2Id9jFfUY/Mh9QAI=,7e6b56f4b985a08d11c1494f9dcc2b595676e787afe7a1caa9c522d41cab9487
+gopkg.in/alecthomas/kingpin.v2,v2.2.6,h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=,638080591aefe7d2642f2575b627d534c692606f02ea54ba89f42db112ba8839
+gopkg.in/alecthomas/kingpin.v3-unstable,v3.0.0-20180810215634-df19058c872c,h1:vTxShRUnK60yd8DZU+f95p1zSLj814+5CuEh7NjF2/Y=,0e35a5bb02770611e4c53c611529b95b96d0bc573f05d10bb43f7441abef2fde
+gopkg.in/alexcesaro/quotedprintable.v3,v3.0.0-20150716171945-2caba252f4dc,h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=,1a310c5e55038937be3e69765276449601ca582f681129f7d9d47e052846cafc
+gopkg.in/asn1-ber.v1,v1.0.0-20181015200546-f715ec2f112d,h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=,fee158570ba9cbfc11156afbe9b9ab0833ab00d0f1a2a2af29a6325984a79903
+gopkg.in/bblfsh/sdk.v1,v1.17.0,h1:Ez/4P0S0Zaq30iZKfiTlhOtqMx6dfQHMTYpqKFvnv4A=,172521b9f2bdd4180751ed5122971c9c37a8c0bca2e0710bc255bc0e5ff8c106
+gopkg.in/bblfsh/sdk.v2,v2.16.4,h1:Ta/kBVRGXf8UOBYDw/ih8mw13/8NND+AdR0JiXBQrOw=,eb7a8a7d08bd80cd0673a6b9c90fa524bda9db24242bd6ef82fb414941c4ef0f
+gopkg.in/bsm/ratelimit.v1,v1.0.0-20160220154919-db14e161995a,h1:stTHdEoWg1pQ8riaP5ROrjS6zy6wewH/Q2iwnLCQUXY=,fea8af18591a0ac50d29c8db124d13a43da6bee7a624c411b7449a99ee87b489
+gopkg.in/bufio.v1,v1.0.0-20140618132640-567b2bfa514e,h1:wGA78yza6bu/mWcc4QfBuIEHEtc06xdiU0X8sY36yUU=,9d63fe986f79edba7fca9bcd3bee0c7dcff7787cd30b43b5f2ae8a59feae512c
+gopkg.in/check.v1,v1.0.0-20190902080502-41f04d3bba15,h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=,004537cb19dbe45954ec1605f331705f6685ccc267eddd4289c1eb27513ab817
+gopkg.in/cheggaaa/pb.v1,v1.0.28,h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk=,39725f9f37aac44dd55bdc9ade65a2d066953a090456298d34203257fc7e8ee9
+gopkg.in/cheggaaa/pb.v2,v2.0.7,h1:beaAg8eacCdMQS9Y7obFEtkY7gQl0uZ6Zayb3ry41VY=,a6ba73f81893f0eca8c0a60c238a705a12bae499a44fe6217a4471687766ef02
+gopkg.in/clog.v1,v1.2.0,h1:BHfwHRNQy497iBNsRBassPixSAxRbn2z5KVkdBFbwxc=,51eb8901943d1cec850b55556a9989e21488a9636ac692d6f7575db057804f3d
+gopkg.in/editorconfig/editorconfig-core-go.v1,v1.3.0,h1:oxOEwvhxLMpWpN+0pb2r9TWrM0DCFBHxbuIlS27tmFg=,b5371885f56b40c03da4fd05006c717fabdfb8ee9ea1ceef4cc5b7caeda35041
+gopkg.in/errgo.v1,v1.0.1,h1:oQFRXzZ7CkBGdm1XZm/EbQYaYNNEElNBOd09M6cqNso=,32f45f7cfacfc04ae9e7e8c9fc55a53812554799da7c2bd17b043068b5fd5171
+gopkg.in/errgo.v2,v2.1.0,h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=,6b8954819a20ec52982a206fd3eb94629ff53c5790aa77534e6d8daf7de01bee
+gopkg.in/fatih/color.v1,v1.7.0,h1:bYGjb+HezBM6j/QmgBfgm1adxHpzzrss6bj4r9ROppk=,ed20c58de8c575144c2cc1c924121ee1a240e0621c77918231547b576d46d3ce
+gopkg.in/fatih/set.v0,v0.2.1,h1:Xvyyp7LXu34P0ROhCyfXkmQCAoOUKb1E2JS9I7SE5CY=,d743141e21d20f6d5ae8e784dd4644c0947948103b63404a878b0298f14a9e62
+gopkg.in/fsnotify.v1,v1.4.7,h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=,ce003d540f42b3c0a3dec385deb387b255b536b25ea4438baa65b89458b28f75
+gopkg.in/fsnotify/fsnotify.v1,v1.4.7,h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo=,6f74f844c970ff3059d1639c8a850d9ba7029dd059b5d9a305f87bd307c05491
+gopkg.in/gavv/httpexpect.v1,v1.0.0-20170111145843-40724cf1e4a0,h1:r5ptJ1tBxVAeqw4CrYWhXIMr0SybY3CDHuIbCg5CFVw=,4fe4a5e78a26ac5b60fc16405d3a5918d83cd645d36bd9dc0d558824136930b6
+gopkg.in/gcfg.v1,v1.2.3,h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs=,06cdad29610507bafb35e2e73d64fd7aa6c5c2ce1e5feff30a622af5475bca3b
+gopkg.in/gemnasium/logrus-airbrake-hook.v2,v2.1.2,h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=,ce35c69d2a1f49d8672447bced4833c02cc7af036aa9df94d5a6a0f5d871cccd
+gopkg.in/go-playground/assert.v1,v1.2.1,h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=,11da2f608d82304df2384a2301e0155fe72e8414e1a17776f1966c3a4c403bc4
+gopkg.in/go-playground/validator.v8,v8.18.2,h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ=,fea7482c7122c2573d964b7d294a78f2162fa206ccd4b808d0c82f3d87b4d159
+gopkg.in/go-playground/validator.v9,v9.30.0,h1:Wk0Z37oBmKj9/n+tPyBHZmeL19LaCoK3Qq48VwYENss=,f4769db84ddc2db880bc190a5420762ef45f80ebbce678b622c4fa82b422b890
+gopkg.in/gobwas/glob.v0,v0.2.3,h1:uLMy+ys6BqRCutdUNyWLlmEnd7VULqh1nsxxV1kj0qQ=,3a5fe045be1ff9b47c5e21a9f97bdefaada31463f365503d6b176b76e18a0257
+gopkg.in/gographics/imagick.v3,v3.2.0,h1:eUwlkCw2fa20OGu47G39Im8c50S9n/CVkh8PwtOKExA=,99695d22cf7d5609887609cc9dc63ca1031b5a3238c26f6b779f32e39d572a01
+gopkg.in/gomail.v2,v2.0.0-20160411212932-81ebce5c23df,h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE=,08b3372836aef3a403b0a01e6867a3a2252a07f65c28e0d33fe9c4b1b3ac517a
+gopkg.in/gorp.v1,v1.7.2,h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw=,eaad3325e8b5358d5d54a1ca8b1e6aa19d16968a1f11f3dc45671588d914ef25
+gopkg.in/guregu/null.v3,v3.4.0,h1:AOpMtZ85uElRhQjEDsFx21BkXqFPwA7uoJukd4KErIs=,b38d62a816c5905933396a02eb11e23cbe2c17f8837563cc10794274e5af7e6e
+gopkg.in/h2non/gentleman.v2,v2.0.3,h1:exsUPKJDFwNjJykboVj8+BKPWMNOxR/AmPL3f7Hutwo=,7a71dc2dd74e413832782e4478f85cc0617aed125e078e308b46207f34d6a500
+gopkg.in/h2non/gock.v1,v1.0.15,h1:SzLqcIlb/fDfg7UvukMpNcWsu7sI5tWwL+KCATZqks0=,c6a3d33e638b56ddd050c1dc6c1c6c8e9007c70cacfcc29e778fcf421f1fc029
+gopkg.in/httprequest.v1,v1.2.0,h1:YTGV1oXzaoKI6oPzQ0knoIPcrrVzeRG3amkoxoP7Xng=,3960019870090d0de3fca818633111186d46a908b4bcac6d87e5f08e7fb58770
+gopkg.in/inconshreveable/log15.v2,v2.0.0-20180818164646-67afb5ed74ec,h1:RlWgLqCMMIYYEVcAR5MDsuHlVkaIPDAF+5Dehzg8L5A=,799307ed46ca30ca0ac2dc0332f3673814b8ff6cc1ee905a462ccfd438e8e695
+gopkg.in/inf.v0,v0.9.1,h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=,08abac18c95cc43b725d4925f63309398d618beab68b4669659b61255e5374a0
+gopkg.in/ini.v1,v1.49.0,h1:MW0aLMiezbm/Ray0gJJ+nQFE2uOC9EpK2p5zPN3NqpM=,579074067ceacbf11e938940d65647094da4f23f627645b5c58218bf05c060f0
+gopkg.in/jarcoal/httpmock.v1,v1.0.0-20181117152235-275e9df93516,h1:H6trpavCIuipdInWrab8l34Mf+GGVfphniHostMdMaQ=,5b896c9e5e44146260a066533409c1b86268458301a7155624ef27f784e5d94a
+gopkg.in/jcmturner/aescts.v1,v1.0.1,h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=,8bfd83c7204032fb16946202d5d643bd9a7e618005bd39578f29030a7d51dcf9
+gopkg.in/jcmturner/dnsutils.v1,v1.0.1,h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=,4fb8b6a5471cb6dda1d0aabd1e01e4d54cb5ee83c395849916392b19153f5203
+gopkg.in/jcmturner/goidentity.v3,v3.0.0,h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=,1be44bee93d9080ce89f40827c57e8a396b7c801e2d19a1f5446a4325afa755e
+gopkg.in/jcmturner/gokrb5.v7,v7.2.3,h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=,3eec5b25adb89633174beb9798d8092e91ff4eed146a4b4cb950dd02414bd75e
+gopkg.in/jcmturner/rpc.v1,v1.1.0,h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=,83d897b60ecb5a66d25232b775ed04c182ca8e02431f351b3768d4d2876d07ae
+gopkg.in/jmcvetta/napping.v3,v3.2.0,h1:NpSZLAL6VgiyhdqaOkxwVtHXOLrQJZ6fFOMQgp7G8PQ=,887358529a8cd287b6a8232b43cc48636463fa266bac5ba48328cb0609d1dcb6
+gopkg.in/juju/charm.v6,v6.0.0-20191031115626-f595bfd8a049,h1:+isWLR3tDZyDacru13gHH0ooIuuDB28kuZJjSc8kOqU=,8d404b146f31d35148015de3f5bd4d25260f0a4b9f22a540a9167864d9e5d082
+gopkg.in/juju/charmrepo.v3,v3.0.1,h1:mm7/CwCczsO7JYHlYkw4iCUYR7X8upEOaY5bYj7eUkw=,8f673109a6d98e4abe4ef612f85dea26bdbd7de5c66b6722c546a08aefb548fc
+gopkg.in/juju/environschema.v1,v1.0.0,h1:51vT1bzbP9fntQ0I9ECSlku2p19Szj/N2beZFeIH2kM=,46ae8efc5a450745fea959dc8532d2a013aa741ab7193d3cea8b0735f09c6e8a
+gopkg.in/juju/names.v2,v2.0.0-20190813004204-e057c73bd1be,h1:xDxN+Fe8olIH8sTqvFJBMsuflBYzeHVeYC4Iz97+f5M=,72ac554c125260751aadf6d41eb82d85de22ef8bff1d59c6602e9e0f5b84a28c
+gopkg.in/juju/worker.v1,v1.0.0-20191018043616-19a698a7150f,h1:UAHa7z4EdrOcMN+9p5P+ojJshcIC34vwi0hCmEL6Qf8=,2e0da8053029ca9da961f8e6f1037a9d7ba12623e5c16fc5f88bf1a724c5dd23
+gopkg.in/karalabe/cookiejar.v2,v2.0.0-20150724131613-8dcd6a7f4951,h1:DMTcQRFbEH62YPRWwOI647s2e5mHda3oBPMHfrLs2bw=,07aae15601f54a5806705d218e313794118d54d9dda7addc1bf4bda4332dfc16
+gopkg.in/kothar/go-backblaze.v0,v0.0.0-20190520213052-702d4e7eb465,h1:DKgyTtKkmpZZesLue2fz/LxEhzBDUWg4N8u/BVRJqlA=,215300ce3726c40f51ee43c41a27c204441e756c8cb4f4b76b1a4dd08f509eef
+gopkg.in/ldap.v2,v2.5.1,h1:wiu0okdNfjlBzg6UWvd1Hn8Y+Ux17/u/4nlk4CQr6tU=,4fd426691e674164a701ef3ec3548596574f95447cde1fa331018f7d73f8399b
+gopkg.in/ldap.v3,v3.0.2,h1:R6RBtabK6e1GO0eQKtkyOFbAHO73QesLzI2w2DZ6b9w=,f79d1cb87a0a6d571e671c2028409056d65e6bfa7d3d0563ded0edbe8ff0998e
+gopkg.in/macaron.v1,v1.3.4,h1:HvIscOwxhFhx3swWM/979wh2QMYyuXrNmrF9l+j3HZs=,f9aca15b099dada4382e47898516d500876aae45d36895314cde86700636c05c
+gopkg.in/macaroon-bakery.v2,v2.1.0,h1:9Jw/+9XHBSutkaeVpWhDx38IcSNLJwWUICkOK98DHls=,0a12f46df7290b131ee74ec6a4d4760170192920a091939aa2d7a39a4d0fb310
+gopkg.in/macaroon-bakery.v2-unstable,v2.0.0-20171026135619-38b77b89a624,h1:FIOL4YpoNbXH6K+LnOoAEMa/1ebliK7B9mj5NuJHmiA=,51476e40e03bd1f64fd3cdf936d1cde4b8c1395884af9376ff65755041c247aa
+gopkg.in/macaroon.v2,v2.1.0,h1:HZcsjBCzq9t0eBPMKqTN/uSN6JOm78ZJ2INbqcBQOUI=,ae47a93d20ce5c053eafc9d6a76c01b2b06784f9886137dc73a99302928046eb
+gopkg.in/macaroon.v2-unstable,v2.0.0-20180319203259-5c9beabe0e9e,h1:yPxshueS06kvTVlsymSbHvk6VQ1WhX1Ou3hCqqWBp/s=,e09a1f8268d65e3dc28da85c75e78f15f1f742d1dcd31cce427fd885b1962bc4
+gopkg.in/mail.v2,v2.0.0-20180731213649-a0242b2233b4,h1:a3llQg4+Czqaf+QH4diHuHiKv4j1abMwuRXwaRNHTPU=,d7d60701b95fd7f62d3f83bc026f42c0fa69c3f16cc445d2b20497c9dd182ff6
+gopkg.in/mattes/migrate.v1,v1.3.2,h1:tWus4MPMhDY/htX+NCvASiQVRU2pj4Jyj4T8AIv6vUw=,c50f590108871c25d55631addd6bc267f311830d4306ff4d36a6feaad0b23255
+gopkg.in/mattn/go-colorable.v0,v0.1.0,h1:WYuADWvfvYC07fm8ygYB3LMcsc5CunpxfMGKawHkAos=,337a25f7f87a87097e5fb853313c1fac3d3126ed0eb9bb88511d52ba9a0eb4e0
+gopkg.in/mattn/go-isatty.v0,v0.0.4,h1:NtS1rQGQr4IaFWBGz4Cz4BhB///gyys4gDVtKA7hIsc=,18500935e08e5b74487537b8b78a30778a5b2304a138f53aa8758b86266773ff
+gopkg.in/mattn/go-runewidth.v0,v0.0.4,h1:r0P71TnzQDlNIcizCqvPSSANoFa3WVGtcNJf3TWurcY=,e0307a435e39658f761b7526dda9149e7664b7250958494c1a4eebd14884b82d
+gopkg.in/mcuadros/go-syslog.v2,v2.2.1,h1:60g8zx1BijSVSgLTzLCW9UC4/+i1Ih9jJ1DR5Tgp9vE=,1f444e24504b6a21c0d204441a84336ab1240f77a1280b60e48f68ea1b99da7b
+gopkg.in/mgo.v2,v2.0.0-20190816093944-a6b53ec6cb22,h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=,14edbec0d97107b0e0980b66166400f8a4c3844b03bd3240fc57be2b82734b16
+gopkg.in/natefinch/lumberjack.v2,v2.0.0,h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=,8c268e36660d6ce36af808d74b9be80207c05463679703e93d857e954c637aaa
+gopkg.in/neurosnap/sentences.v1,v1.0.6,h1:v7ElyP020iEZQONyLld3fHILHWOPs+ntzuQTNPkul8E=,e3df38d6fc6097f9d1d76ee13e24fec69103c43248ca6a7f3ade2afec5e85bdd
+gopkg.in/ns1/ns1-go.v2,v2.0.0-20190730140822-b51389932cbc,h1:GAcf+t0o8gdJAdSFYdE9wChu4bIyguMVqz0RHiFL5VY=,c51d0889ff5eb72df2f9e4adc28e9f3602e6eb567c3824bebb3c7d315a60710a
+gopkg.in/olivere/elastic.v2,v2.0.61,h1:7cpl3MW8ysa4GYFBXklpo5mspe4NK0rpZTdyZ+QcD4U=,0a20d84f6003850343937ef79179cabe99feef9b038c281fd65ec32ec6c7e85c
+gopkg.in/olivere/elastic.v5,v5.0.82,h1:QH7ere4lvOAWnnOd0VLJ54W8LzExZszoGIRijnb1h2Y=,3c66a7606b226d19f61651b3ad58aecda3155edc802029bd21cd4b8724bd0c9f
+gopkg.in/ory-am/dockertest.v3,v3.3.4,h1:oen8RiwxVNxtQ1pRoV4e4jqh6UjNsOuIZ1NXns6jdcw=,73b01a1d025d30c8f11def182179b873410eae72f7b2fd9f9394b0fcf4683c93
+gopkg.in/redis.v2,v2.3.2,h1:GPVIIB/JnL1wvfULefy3qXmPu1nfNu2d0yA09FHgwfs=,abe2fa39afa36f8186ee287bcf82f9f4bc083aa35d17dd82a2ccbf5850ecdde8
+gopkg.in/redis.v3,v3.6.4,h1:u7XgPH1rWwsdZnR+azldXC6x9qDU2luydOIeU/l52fE=,749ef3e08eb4eda43969f88135040ae4517b450b27dbd48aefb9bf5e72465621
+gopkg.in/redis.v4,v4.2.4,h1:y3XbwQAiHwgNLUng56mgWYK39vsPqo8sT84XTEcxjr0=,6403d2b45edf2804bfd07b6d697184fc97377168589ad43ad19b2433e1dcee34
+gopkg.in/redis.v5,v5.2.9,h1:MNZYOLPomQzZMfpN3ZtD1uyJ2IDonTTlxYiV/pEApiw=,3c30e42670d1ef5f0b33876928b3bd5693ef3b5be1df6b2710d48c2667ca7133
+gopkg.in/resty.v1,v1.12.0,h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=,43487bb0bb40626d16502b1fe9e719cf751e7a5b4e4233276971873e7863d3cf
+gopkg.in/robfig/cron.v2,v2.0.0-20150107220207-be2e0b0deed5,h1:E846t8CnR+lv5nE+VuiKTDG/v1U2stad0QzddfJC7kY=,b25da9b8747e664334044e581d1a8fb700237239e7f182fd226d6296e6180bc0
+gopkg.in/satori/go.uuid.v1,v1.2.0,h1:AH9uksa7bGe9rluapecRKBCpZvxaBEyu0RepitcD0Hw=,794cefc3062e09b17f4300eb6b02622ac348af9d368341ff71a655a15884547f
+gopkg.in/sourcemap.v1,v1.0.5,h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI=,05b5f382bfa60212f444c7207168e9eb0c722e26b57a688123cb8bbf234de692
+gopkg.in/spacemonkeygo/monkit.v2,v2.0.0-20190623001553-09813957f0a8,h1:nyw4hxw2zz4S0EHqr5nQfA3zGbMFJDRJlQPM4PCb7O4=,4a8e607c4f16b32bb9ee380627716979b19ac3df74ca2a4f80aefbaf0b411784
+gopkg.in/square/go-jose.v2,v2.4.0,h1:0kXPskUMGAXXWJlP05ktEMOV0vmzFQUWw6d+aZJQU8A=,d00c4af5a633ab9cf7645b68f6fa389c8f0d9ffebc486742c7a5292280cae84b
+gopkg.in/src-d/go-billy.v4,v4.3.2,h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=,c49871e1d08bba07b2261626b929096b6dc5c839e781adfc24fcc410067cc2bf
+gopkg.in/src-d/go-cli.v0,v0.0.0-20181105080154-d492247bbc0d,h1:mXa4inJUuWOoA4uEROxtJ3VMELMlVkIxIfcR0HBekAM=,86042ffc0c8492845917453682c5bdba46beb2f0c067b61e495a92b9a8621076
+gopkg.in/src-d/go-errors.v1,v1.0.0,h1:cooGdZnCjYbeS1zb1s6pVAAimTdKceRrpn7aKOnNIfc=,f7d9f00c057d4b49bc6e57167561a7fb508ebb113a1946cb2b6f71dac5b14cfb
+gopkg.in/src-d/go-git-fixtures.v3,v3.5.0,h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=,282dc6474c5ecf62c1169d04ad1f6d75e6058922897b4709a16a1007a5f22eb7
+gopkg.in/src-d/go-git.v4,v4.13.1,h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE=,13364fe60f2316a179e912fb4eb6c576e2aabd67e8d390651a155e85c69146d2
+gopkg.in/src-d/go-log.v1,v1.0.1,h1:heWvX7J6qbGWbeFS/aRmiy1eYaT+QMV6wNvHDyMjQV4=,48f6c8a7bdc5436d296f388cd5d40ffb9c749e1e4ab1e455984efc61008fd5d7
+gopkg.in/stack.v0,v0.0.0-20141108040640-9b43fcefddd0,h1:lMH45EKqD8Nf6LwoF+43YOKjOAEEHQRVgDyG8RCV4MU=,a88c4cb4af34bb5c4dd69d0c771829331be7416d2f18d58ff599126f7b291984
+gopkg.in/stretchr/testify.v1,v1.2.2,h1:yhQC6Uy5CqibAIlk1wlusa/MJ3iAN49/BsR/dCCKz3M=,0126e73e5f2ce5687dec597bb276e11dc4031dbdf199e68de735bc67bf808149
+gopkg.in/telegram-bot-api.v3,v3.0.0,h1:Y6QmqOMwRKv5NUdlvzEBtEZChjsrqdTS6O858cvuCww=,03c58e32567a5cc4ec631cc226ecc99dd1113a7a98bab4778b02cde073ab5ed4
+gopkg.in/telegram-bot-api.v4,v4.6.4,h1:hpHWhzn4jTCsAJZZ2loNKfy2QWyPDRJVl3aTFXeMW8g=,01a91b240fb416bf83bcaaa07133cafac28fd8eb8f0f251f6a616beec88c92ac
+gopkg.in/testfixtures.v2,v2.5.0,h1:N08B7l2GzFQenyYbzqthDnKAA+cmb17iAZhhFxr7JHw=,05baac4af6e2855d296a5c045b27deb1b33d0a04cd0df96f029927f0742765a3
+gopkg.in/tomb.v1,v1.0.0-20141024135613-dd632973f1e7,h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=,34898dc0e38ba7a792ab74a3e0fa113116313fd9142ffb444b011fd392762186
+gopkg.in/tomb.v2,v2.0.0-20161208151619-d5d1b5820637,h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs=,15d93d96e1e8b2d8daf7b9e57a2a9193c0e676a2c6b63d9325bf34b53e93db00
+gopkg.in/tylerb/graceful.v1,v1.2.15,h1:1JmOyhKqAyX3BgTXMI84LwT6FOJ4tP2N9e2kwTCM0nQ=,0a8639cfe62508438ebf2cae721468b64d8cd2992fc0f80439c83c718f4608e0
+gopkg.in/urfave/cli.v1,v1.20.0,h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=,413704688402027dc0f51666bac42152eb1668a73fa0e33858c3d2123c0592e5
+gopkg.in/warnings.v0,v0.1.2,h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=,c412b1f704c1e8ba59b6cfdb1072f8be847c03f77d6507c692913d6d9454e51c
+gopkg.in/yaml.v1,v1.0.0-20140924161607-9f9df34309c0,h1:POO/ycCATvegFmVuPpQzZFJ+pGZeX22Ufu6fibxDVjU=,7abff7973fdab7386de5a1e9e197d8dc50d41ded9d24ff914685900caa0eb742
+gopkg.in/yaml.v2,v2.2.4,h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=,815be785649ae218b51efd8e40b3b75de8f9b57dd43162386ffe3e76709f2a5d
+gorgonia.org/tensor,v0.9.2,h1:bVTWB68apbLfdrAlz5Ev3daGhfOhKuPkVFacMSNzpHs=,17562e7c1c6477b8b530d6236ab9a61228edbabe01c1cfb9ba23286c2394ba4c
+gorgonia.org/vecf32,v0.9.0,h1:PClazic1r+JVJ1dEzRXgeiVl4g1/Hf/w+wUSqnco1Xg=,618df2e604236a2d143958a3571f9939c8264ab2aaae7d8c71b897b728240a23
+gorgonia.org/vecf64,v0.9.0,h1:bgZDP5x0OzBF64PjMGC3EvTdOoMEcmfAh1VCUnZFm1A=,f57695832a12a6f1fbcc04cdaa267ed01fb6b8105f518590d64b2c63b9ac4c61
+gotest.tools,v2.2.0+incompatible,h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=,55fab831b2660201183b54d742602563d4e17e7125ee75788a309a4f6cb7285e
+grpc.go4.org,v0.0.0-20170609214715-11d0a25b4919,h1:tmXTu+dfa+d9Evp8NpJdgOy6+rt8/x4yG7qPBrtNfLY=,58b5c3cccf3e765d0f42918d458cddcd03fc28ff5d701790783677513a8446e3
+h12.io/socks,v1.0.0,h1:oiFI7YXv4h/0kBNcmAb5EkkoFJgYsOF88EQjMBxjitc=,3bf83125284ccabf811aa238954b442e39f53e3e068d4ddb6bf679ba2be28bbe
+honnef.co/go/js/dom,v0.0.0-20190526011328-ebc4cf92d81f,h1:b3Q9PqH+5NYHfIjNUEN+f8lYvBh9A25AX+kPh8dpYmc=,a65720d9c0339450c8818226693a85986549fb156ee4df65913682c350bd4d60
+honnef.co/go/js/util,v0.0.0-20150216223935-96b8dd9d1621,h1:QBApQyt1KyR3SvDWU8sHcIXeWTSCUamO7xQopvwuLWI=,db5638addc7638cc5cf2245cb9bcb19cf04a5912120330560149b54b4575ae50
+honnef.co/go/js/xhr,v0.0.0-20150307031022-00e3346113ae,h1:2dIKMawnBWvHzZrS8STyu/KdhYIOpnKQpp1WZm+K7TE=,d2a4a85c43fb4ccd9b5be6521450d272406a1722f7547f188f4a1d0cc65c4e13
+honnef.co/go/tools,v0.0.1-2019.2.3,h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=,539825114c487680f99df80f6107410e1e53bbfd5deb931b84d1faf2d221638e
+howett.net/plist,v0.0.0-20181124034731-591f970eefbb,h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=,58c94cd949be714c0ee320d1be0cff3116fc829c412b9e7b816b03fb3c85f463
+istio.io/api,v0.0.0-20191029012234-9fe6a7da3673,h1:wxFykuQoScKAnEtKujAPqjwR8Aqo2LNtkoIvodxyCSs=,9ba545b9c5411725b709287b590082d140d3b29d924be351e38942f46c33ff55
+istio.io/gogo-genproto,v0.0.0-20190930162913-45029607206a,h1:w7zILua2dnYo9CxImhpNW4NE/8ZxEoc/wfBfHrhUhrE=,3b5a81f1807f48117d6691c8d007402a94b648f45f4446841a3f56229aa94aba
+istio.io/pkg,v0.0.0-20191029184635-5c2f5ef63692,h1:MT7e5hpQ8cGtKCeWIjtdluEVkIhkN2tw4iVkAzhWHYA=,887882f7e721e6d00dee301f0b029792bd04bd38c455ab7e5cf4f2bc5bf309df
+k8s.io/api,v0.0.0-20191031065753-b19d8caf39be,h1:X0MqzqUHuZj50SrMQFExejJfy67RKPf30Vt2nnpa4AA=,00a67ed9b84be18f621701796b42cee630c770c858582753fe0eb9c146ef93ff
+k8s.io/apiextensions-apiserver,v0.0.0-20191028232452-c47e10e6d5a3,h1:XxkWdWvPKTParJ1sXpUIvHJsJ2iIIj5Ebjxxy5YU1Zo=,2cb12eb8b2b0f95fb5d69b1f80b754b32ae46ef1f9636333fe27c6b17b1a6e19
+k8s.io/apimachinery,v0.0.0-20191030190112-bb31b70367b7,h1:81UYA9Qq3JXPpZMmRBnq6T3qU+b71Dvnm6sV3NSQTVk=,4c16a440acf7559b0974d99650c876969ad4811ddc76f9f5b7aa43afc34f66ec
+k8s.io/apiserver,v0.0.0-20191031110436-8cb875160ee0,h1:BGkQMPpKpx07hvq9AW64gifbf+zbAh/xUbB5OYXPvQ0=,baefad9177a1f8077c94a2d88b7e85deb7df79317d3d6d6afe8ca0be8261b1ae
+k8s.io/cli-runtime,v0.0.0-20191025231729-08207da42a69,h1:05z+vSvn9yPr7GTAt3MXpVc9VeU4D80HHwvJU6jC3D4=,46264219a6e1c8263acd610841a156086bbbdd43436a836effcd7285c37b0e8a
+k8s.io/client-go,v11.0.0+incompatible,h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=,70925f536d409accf4f6ae3f20dafd81370ac096f848e99009141bea971103c2
+k8s.io/cloud-provider,v0.0.0-20191025232453-66dd06a864dd,h1:CxSfhPPmLwYFZquskmKvODMeEm82ZLc4eph47AdUp+o=,55623701c005f824ae847ecad409c6e61ef5fcfc6588b8dd2995a4a0991eeae8
+k8s.io/cluster-bootstrap,v0.0.0-20191025232351-410fafc3baf5,h1:P1mMVQngKW9pj1haVjyAtqViIBqkJmsITXsfuaHGRko=,ff50816340cdb73313e7233c3ff6df383c350102762b88ea0cc744910053c7bd
+k8s.io/code-generator,v0.0.0-20191029223907-9f431a56fdbc,h1:klQ4aWfZ3uk4UiSLkZZt5qQDI+7DwSdvbvyL5QUBHsQ=,1f63d3191c255d8fcc47ad24b3bd979865a3c12eca678b39f278c194c2ae560a
+k8s.io/component-base,v0.0.0-20191029070825-5e0e35147053,h1:W9/+uFw7olz+qQOCmSOG92c6j2YgIwagxqR9RWai/cE=,45bd9877048a57c3dfe7eb9c98bc1939775c73fdd6451afd055d7e4f7b9659bc
+k8s.io/cri-api,v0.0.0-20191025232916-446748cffdda,h1:HVTA1bXCQek+NF0xTZkryScnkGYWHkoeYAQVEVs73r8=,7aea277309740df8d1fad1c620901d60633569d755d2d0715d97bc553988d7be
+k8s.io/csi-api,v0.0.0-20190313123203-94ac839bf26c,h1:m3xih+9aI7l7Z/PvwzizV1J4vBvaUpkHrmagnGa5UNg=,0579fba2111dfd5b3cb62d7d234e52c54051176d9564ae3f0f2fdc69b31872b0
+k8s.io/gengo,v0.0.0-20191010091904-7fa3014cb28f,h1:eW/6wVuHNZgQJmFesyAxu0cvj0WAHHUuGaLbPcmNY3Q=,7fe69109e947204ee0b95705626e3c3b540faefb947d3426260f2991d1e4c036
+k8s.io/helm,v2.15.2+incompatible,h1:UjEb+c5BUZDGR9zU3dWG3OXASLIeqLeY0FCIx6ZyfTY=,377860d9db9fb1d45ffa90fe6ee79d7cfc4e91e5bc04183921480a823cf79ede
+k8s.io/klog,v1.0.0,h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=,a564b06078ddf014c5b793a7d36643d6fda31fc131e36b95cdea94ff838b99be
+k8s.io/kube-aggregator,v0.0.0-20191025230902-aa872b06629d,h1:ls0BmbSFkF5BhZN7grE+W5/X49QMU42RH6J9DWdP7UQ=,9a51f29a98f603acde33e3a10625e9ab603d7db29b3c2b2256d145adf7396393
+k8s.io/kube-controller-manager,v0.0.0-20191025232249-6de08162fd59,h1:bTAKwqwK2HvJVmpowb/ccyeV3wsxQZUtFQE1AqhMZ6I=,a1183bb172f19f6ae319da09c111a3f5dded663a9be85e18b9bceb131f03a342
+k8s.io/kube-openapi,v0.0.0-20190918143330-0270cf2f1c1d,h1:Xpe6sK+RY4ZgCTyZ3y273UmFmURhjtoJiwOMbQsXitY=,fb1dcd1508144991be0a243cea9a066944775ba4e9fa23c7ea038822e4e8e232
+k8s.io/kubelet,v0.0.0-20191025232041-e9c3b1e8a9ed,h1:ISiRMWhiLjmSsx24QQ6NJSgW1oKmAN59LCFzB7llrSk=,8c409ac5922dfb4cb0b8e6bb52823b2132e82faa3756dc3a94cdeabeaa3ff51e
+k8s.io/kubernetes,v1.11.10,h1:wCo67+wmguioiYv0ipIiTaXbVPfFBBjOTgIngeGGG+A=,7c8ca4ca473e9f2b5c6586a714209e98d99f193af84cdb3e8536a3d1e26be4bf
+k8s.io/metrics,v0.0.0-20191026071343-a166cc0bce8f,h1:D4AcfwGLY2gFDQaeK2QVyb8g4fy4Xzs0GopdwAgfSGc=,f4243455881a38d4962483ba5f2888220743a6ddece179c2b8f706815b75778f
+k8s.io/node-api,v0.0.0-20191025232816-761e5a80fde0,h1:V3FaBxwSQWPjPScXd5ioFx9+aREXGU24yFl8Gm7ib8w=,cb8718b6e148a66097f8cae4e4544dc55c674748202f9c21d07a2139b6e83fd1
+k8s.io/sample-apiserver,v0.0.0-20191030110742-cbfc6c263d7e,h1:9bsKcUCncu1Qg3A4pB5ZySTM0JMEZW4qgybjVhmaS4A=,91d701af12da2ff6cde6f07d53547885faafa32c2eadfa8d4614b4d814a854b9
+k8s.io/sample-controller,v0.0.0-20191025231305-d7b8b8302943,h1:ZYb6if7+Qa5kXFidUsQRLFDyZjCjRyG1sFf6GpZaA70=,07c3e3a95d0fac07a247a98c38448a8fc4ab0069ad599ec06ac9405df88b470b
+k8s.io/utils,v0.0.0-20191030222137-2b95a09bc58d,h1:1P0iBJsBzxRmR+dIFnM+Iu4aLxnoa7lBqozW/0uHbT8=,e21be6d971127d4650bd13525a2d2627b2a98dbb8589f168b734a45d50f3ea22
+launchpad.net/gocheck,v0.0.0-20140225173054-000000000087,h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54=,1a1d9b10f2c69564e69993e4340d5299392a518d895ec06502e842e6c69f4857
+layeh.com/radius,v0.0.0-20190322222518-890bc1058917,h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU=,5eb6b6a05a5f89bc114f37085deda268f895a46621aee2e36649b8d80061357e
+leb.io/aeshash,v0.0.0-20190627052759-9e6b40329b3b,h1:MG17Tc0pA3XmFTsPwklMMEfcos3pTFnVYM4A0YfVSbU=,a78b48ac18e98ea68dacce16cd94c9074688a0b125f824f047313a33b264ea88
+leb.io/hashland,v0.0.0-20171003003232-07375b562dea,h1:s9IkzZTqYqw77voO6taUZHc0C1B096h4T/kQtujGApE=,0698177f24cbde0a7b45495e7fe976fe7623f2b9205995b7d91fd2e7b0f0e243
+leb.io/hrff,v0.0.0-20170927164517-757f8bd43e20,h1:9CHS8LIq9MDwUsAaCHUsbUq7zb5lSjLQYWlJ/AbMZKg=,538008712599401a903a7982714c0a9ae745221042d3dfb1437bc508d8fb9e96
+modernc.org/cc,v1.0.0,h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ=,24711e9b28b0d79dd32438eeb7debd86b850350f5f7749b7af640422ecf6b93b
+modernc.org/golex,v1.0.0,h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE=,335133038991d7feaba5349ac2385db7b49601bba0904abf680803ee2d3c99df
+modernc.org/mathutil,v1.0.0,h1:93vKjrJopTPrtTNpZ8XIovER7iCIH1QU7wNbOQXC60I=,766ad95195543fe1ac217ce9f54e1fb43119c25db2b89013b9ef5477ad2dd9d1
+modernc.org/strutil,v1.0.0,h1:XVFtQwFVwc02Wk+0L/Z/zDDXO81r5Lhe6iMKmGX3KhE=,4bbca362df97450c6f24b90b7dc80b97ecf19e5f0f5954655b26f335a0b8f378
+modernc.org/xc,v1.0.0,h1:7ccXrupWZIS3twbUGrtKmHS2DXY6xegFua+6O3xgAFU=,ef80e60acacc023cd294eef2555bd348f74c1bcd22c8cfbbd2472cb91e35900d
+moul.io/http2curl,v1.0.0,h1:6XwpyZOYsgZJrU8exnG87ncVkU1FVCcTRpwzOkTDUi8=,422e2b8833089b001da02c6d7235ecb4c0591bb585fee125cbd0d72b1371dba5
+mvdan.cc/sh,v2.6.0+incompatible,h1:BLDuJ+D75OCaBF7W70+2oALi8aKAjcAiDBNmmwR8BQA=,c5c335f4ae8f1c4228a01710b84ba8f847709b1920d2beeddc4648e62cdd25f7
+mvdan.cc/sh/v3,v3.0.0-alpha1,h1:ao/4li6H9nZe5HDXA14cynXoq90+DLZz0HmjZE/qjhA=,5da16556569786a039c24229b55eb0f76049c2293ac96a9b978cede87676962e
+mvdan.cc/xurls/v2,v2.0.0,h1:r1zSOSNS/kqtpmATyMMMvaZ4/djsesbYz5kr0+qMRWc=,67e609a744e93b7ba05adee985d7e3471e6d414cea611ac73206e007a5e03082
+myitcv.io,v0.0.0-20190927111909-7837eed0ff8e,h1:aTqeLMcNZAhWxtvBgs0fbjTxg5BuNvHYnLo1lhSq9hE=,0d734b4e576c5c34dd9788481761864faef6cacdd735296d22f885b211fe9c70
+pack.ag/amqp,v0.11.2,h1:cuNDWLUTbKRtEZwhB0WQBXf9pGbm87pUBXQhvcFxBWg=,7cdc81d1aeef4ad24c4a49f6227aac060ee193587c95d48bfe4437beaf08310a
+periph.io/x/periph,v3.6.2+incompatible,h1:B9vqhYVuhKtr6bXua8N9GeBEvD7yanczCvE0wU2LEqw=,aeb77a51a9e20e0414e7ea7c9a3a30302fcb5ffc5cf4dd41c3455ec0c3d7b1bc
+perkeep.org,v0.0.0-20190926184543-d342b0e26632,h1:6ZKRr0VZtsfdHyYDJ/G9rCy7z8jGfrpmYANf0BR+vJM=,fd9e06dfc30d3bcb49399fd062094dfdf364a8344d409541896cb96d36465ade
+rsc.io/binaryregexp,v0.2.0,h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=,b3e706aa278fa7f880d32fa1cc40ef8282d1fc7d6e00356579ed0db88f3b0047
+rsc.io/goversion,v1.2.0,h1:SPn+NLTiAG7w30IRK/DKp1BjvpWabYgxlLp/+kx5J8w=,f8426f6078b1d1b4e29a8c6223603680169c7c0a8789d2aee7e401a46ff6343f
+rsc.io/pdf,v0.1.1,h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=,79bf310e399cf0e2d8aa61536750d2a6999c5ca884e7a27faf88d3701cd5ba8f
+rsc.io/qr,v0.1.0,h1:M/sAxsU2J5mlQ4W84Bxga2EgdQqOaAliipcjPmMUM5Q=,fd09c124eb71d01dab3a0116eac47a6fce78f34bbdd84620b2dc01b90582b11c
+sigs.k8s.io/cluster-api,v0.2.7,h1:WjhtuvyjnMgo62kKlVizhI/nYs4DJxHNf+ZMSk/uUsM=,1e3767e7d0f655b72a52eab40e122779ccd1f734c06b9c6488ea9615a3db7b24
+sigs.k8s.io/controller-runtime,v0.3.0,h1:ZtdgqJXVHsIytjdmDuk0QjagnzyLq9FjojXRqIp+dU4=,f37a21668e57315e7248169bec6d4a71f86bcf53d7528c9752e7b459ee74efe0
+sigs.k8s.io/kustomize,v2.0.3+incompatible,h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=,e0f6ad3aaaf7160abcb5e7b16f711c13aebe876833ae9e6ad6f858f31641bf62
+sigs.k8s.io/structured-merge-diff,v0.0.0-20191023203907-336d3378ca53,h1:WCMuuk4OLJ1WdEK3fx+hroiutCODdAGwDlL2Dj4mpa0=,b389a2eafcce0dcef4ca1052942980f26b62030da007b3a84a653de5c0f91668
+sigs.k8s.io/testing_frameworks,v0.1.2-0.20190130140139-57f07443c2d4,h1:GtDhkj3cF4A4IW+A9LScsuxvJqA9DE7G7PGH1f8B07U=,bfb65beb3dda386efc0c0ff9237a07877cec71922f4d3dc1f4a40d5fcaa090a9
+sigs.k8s.io/yaml,v1.1.0,h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=,a0d39252e8665a428a8cb9d4dfc9cbea07b7ae90ae62e7cf3651be719adf515a
+sofastack.io/sofa-mosn,v0.0.0-20191101130505-becc7a6dc50c,h1:8IAozA6SkwfqCAF7fVyy8gu4FdyJvH5iBC12WhiocB8=,0ed9cc5b20e6233051bb4de2ffee5c7f3365704fe01d28e87237d9e8041a786d
+sourcegraph.com/sourcegraph/appdash,v0.0.0-20190107175209-d9ea5c54f7dc,h1:lmf242UNy8ucQUSUse9oXtyxHb6kaF82XRLqeVDXXhA=,49e3fd73d6218c97f49266f0e32bbdab1b6352f2f40da8d1aa98ee8dfdeec072
+sourcegraph.com/sourcegraph/appdash-data,v0.0.0-20151005221446-73f23eafcf67,h1:e1sMhtVq9AfcEy8AXNb8eSg6gbzfdpYhoNqnPJa+GzI=,382adefecd62bb79172e2552bcfb7d45f47122f9bd22259b0566b26fb2627b87
+sourcegraph.com/sourcegraph/go-diff,v0.5.0,h1:eTiIR0CoWjGzJcnQ3OkhIl/b9GJovq4lSAVRt0ZFEG8=,2c5eaad1d3743b3d4bd6de70459a413e62d1753673d5b96402dda27508454b3b
+sourcegraph.com/sqs/pbtypes,v0.0.0-20180604144634-d3ebe8f20ae4,h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c=,6750f8618ecbde1668de332800ec01d713debb145dee395c23fc9a373c207fe3
+storj.io/drpc,v0.0.7-0.20191021224058-08e7133752cd,h1:Oh7Nww1cgFA3fhrCOheDwQ0VcUKFcO1LsBSJEgiGgUQ=,51befd9e6e3aa6cfb9f5b56e47b3cd59715dbe656d0a12cfbb0282609b5456dd
+storj.io/storj,v0.24.5,h1:dWqApMsdhPoUufrljPQC1gZWkYcSTjRr5AoZ7mrSjCw=,ce0628bdcce2b8f0241d27993431d343d212b2e55323510bf657928001c2fb26
+strk.kbt.io/projects/go/libravatar,v0.0.0-20160628055650-5eed7bff870a,h1:8q33ShxKXRwQ7JVd1ZnhIU3hZhwwn0Le+4fTeAackuM=,be48b3949775d6ba0dd3105d7d31d338fede9fbd1471b41fe861f1cfcabbf85c
+v.io/x/lib,v0.1.4,h1:PCDfluqBeRbA7OgDIs9tIpT+z6ZNZ5VMeR+t7h/K2ig=,411c5ded56ba1b69269c37748d184954089c320f43ee76beb0c53f7c598baeaf
+vbom.ml/util,v0.0.0-20180919145318-efcd4e0f9787,h1:O69FD9pJA4WUZlEwYatBEEkRWKQ5cKodWpdKTrCS/iQ=,abbc7a9ac1d820f336ccbe247404800d0f79859b4e4412f0d107aebbb564f920
+vitess.io/vitess,v2.1.1+incompatible,h1:nuuGHiWYWpudD3gOCLeGzol2EJ25e/u5Wer2wV1O130=,8f823ede6775b4f5b3f6cd4c04b3b6be453416e124362a8d68fa2e829429fa68
+xorm.io/builder,v0.3.6,h1:ha28mQ2M+TFx96Hxo+iq6tQgnkC9IZkM6D8w9sKHHF8=,8f16bb96bf2f75b4813be77072a966d1f2248a38f2c7afff4132b666876310a7
+xorm.io/core,v0.7.2,h1:mEO22A2Z7a3fPaZMk6gKL/jMD80iiyNwRrX5HOv3XLw=,24c93962a78b2a177ff5c66cd43921eb1e8b13290d0e8a4d87c6f075a81c4531
diff --git a/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go b/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go
new file mode 100644
index 0000000..16cc145
--- /dev/null
+++ b/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go
@@ -0,0 +1,232 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package zip_sum_test tests that the module zip files produced by modfetch
+// have consistent content sums. Ideally the zip files themselves are also
+// stable over time, though this is not strictly necessary.
+//
+// This test loads a table from testdata/zip_sums.csv. The table has columns
+// for module path, version, content sum, and zip file hash. The table
+// includes a large number of real modules. The test downloads these modules
+// in direct mode and verifies the zip files.
+//
+// This test is very slow, and it depends on outside modules that change
+// frequently, so this is a manual test. To enable it, pass the -zipsum flag.
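+//
+// Each row of the table has the form (the columns correspond to the fields
+// read by readZipSumTests below):
+//
+//	<module path>,<version>,<h1: content sum>,<hex-encoded SHA-256 of the zip file>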
+package zip_sum_test
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/csv"
+ "encoding/hex"
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modload"
+
+ "golang.org/x/mod/module"
+)
+
+var (
+ updateTestData = flag.Bool("u", false, "when set, tests may update files in testdata instead of failing")
+ enableZipSum = flag.Bool("zipsum", false, "enable TestZipSums")
+ debugZipSum = flag.Bool("testwork", false, "when set, TestZipSums will preserve its test directory")
+ modCacheDir = flag.String("zipsumcache", "", "module cache to use instead of temp directory")
+ shardCount = flag.Int("zipsumshardcount", 1, "number of shards to divide TestZipSums into")
+ shardIndex = flag.Int("zipsumshard", 0, "index of TestZipSums shard to test (0 <= zipsumshard < zipsumshardcount)")
+)
+
+const zipSumsPath = "testdata/zip_sums.csv"
+
+type zipSumTest struct {
+ m module.Version
+ wantSum, wantFileHash string
+}
+
+func TestZipSums(t *testing.T) {
+ if !*enableZipSum {
+ // This test is very slow and heavily dependent on external repositories.
+ // Only run it explicitly.
+		t.Skip("TestZipSums not enabled with -zipsum")
+ }
+ if *shardCount < 1 {
+ t.Fatal("-zipsumshardcount must be a positive integer")
+ }
+ if *shardIndex < 0 || *shardCount <= *shardIndex {
+ t.Fatal("-zipsumshard must be between 0 and -zipsumshardcount")
+ }
+
+ testenv.MustHaveGoBuild(t)
+ testenv.MustHaveExternalNetwork(t)
+ testenv.MustHaveExecPath(t, "bzr")
+ testenv.MustHaveExecPath(t, "git")
+ // TODO(jayconrod): add hg, svn, and fossil modules to testdata.
+ // Could not find any for now.
+
+ tests, err := readZipSumTests()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if *modCacheDir != "" {
+ cfg.BuildContext.GOPATH = *modCacheDir
+ } else {
+ tmpDir, err := os.MkdirTemp("", "TestZipSums")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if *debugZipSum {
+ fmt.Fprintf(os.Stderr, "TestZipSums: modCacheDir: %s\n", tmpDir)
+ } else {
+ defer os.RemoveAll(tmpDir)
+ }
+ cfg.BuildContext.GOPATH = tmpDir
+ }
+
+ cfg.GOPROXY = "direct"
+ cfg.GOSUMDB = "off"
+ modload.Init()
+
+ // Shard tests by downloading only every nth module when shard flags are set.
+ // This makes it easier to test small groups of modules quickly. We avoid
+ // testing similarly named modules together (the list is sorted by module
+ // path and version).
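+	// For example (hypothetical flag values), -zipsumshardcount=4 -zipsumshard=1
+	// would keep tests[1], tests[5], tests[9], and so on.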
+ if *shardCount > 1 {
+ r := *shardIndex
+ w := 0
+ for r < len(tests) {
+ tests[w] = tests[r]
+ w++
+ r += *shardCount
+ }
+ tests = tests[:w]
+ }
+
+ // Download modules with a rate limit. We may run out of file descriptors
+ // or cause timeouts without a limit.
+ needUpdate := false
+ for i := range tests {
+ test := &tests[i]
+ name := fmt.Sprintf("%s@%s", strings.ReplaceAll(test.m.Path, "/", "_"), test.m.Version)
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+
+ zipPath, err := modfetch.DownloadZip(ctx, test.m)
+ if err != nil {
+ if *updateTestData {
+ t.Logf("%s: could not download module: %s (will remove from testdata)", test.m, err)
+ test.m.Path = "" // mark for deletion
+ needUpdate = true
+ } else {
+ t.Errorf("%s: could not download module: %s", test.m, err)
+ }
+ return
+ }
+
+ sum := modfetch.Sum(ctx, test.m)
+ if sum != test.wantSum {
+ if *updateTestData {
+ t.Logf("%s: updating content sum to %s", test.m, sum)
+ test.wantSum = sum
+ needUpdate = true
+ } else {
+ t.Errorf("%s: got content sum %s; want sum %s", test.m, sum, test.wantSum)
+ return
+ }
+ }
+
+ h := sha256.New()
+ f, err := os.Open(zipPath)
+ if err != nil {
+ t.Errorf("%s: %v", test.m, err)
+ }
+ defer f.Close()
+ if _, err := io.Copy(h, f); err != nil {
+ t.Errorf("%s: %v", test.m, err)
+ }
+ zipHash := hex.EncodeToString(h.Sum(nil))
+ if zipHash != test.wantFileHash {
+ if *updateTestData {
+ t.Logf("%s: updating zip file hash to %s", test.m, zipHash)
+ test.wantFileHash = zipHash
+ needUpdate = true
+ } else {
+ t.Errorf("%s: got zip file hash %s; want hash %s (but content sum matches)", test.m, zipHash, test.wantFileHash)
+ }
+ }
+ })
+ }
+
+ if needUpdate {
+ // Remove tests marked for deletion
+ r, w := 0, 0
+ for r < len(tests) {
+ if tests[r].m.Path != "" {
+ tests[w] = tests[r]
+ w++
+ }
+ r++
+ }
+ tests = tests[:w]
+
+ if err := writeZipSumTests(tests); err != nil {
+ t.Error(err)
+ }
+ }
+}
+
+func readZipSumTests() ([]zipSumTest, error) {
+ f, err := os.Open(filepath.FromSlash(zipSumsPath))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ r := csv.NewReader(f)
+
+ var tests []zipSumTest
+ for {
+ line, err := r.Read()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return nil, err
+ } else if len(line) != 4 {
+ return nil, fmt.Errorf("%s:%d: malformed line", f.Name(), len(tests)+1)
+ }
+ test := zipSumTest{m: module.Version{Path: line[0], Version: line[1]}, wantSum: line[2], wantFileHash: line[3]}
+ tests = append(tests, test)
+ }
+ return tests, nil
+}
+
+func writeZipSumTests(tests []zipSumTest) (err error) {
+ f, err := os.Create(filepath.FromSlash(zipSumsPath))
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := f.Close(); err == nil && cerr != nil {
+ err = cerr
+ }
+ }()
+ w := csv.NewWriter(f)
+ line := make([]string, 0, 4)
+ for _, test := range tests {
+ line = append(line[:0], test.m.Path, test.m.Version, test.wantSum, test.wantFileHash)
+ if err := w.Write(line); err != nil {
+ return err
+ }
+ }
+ w.Flush()
+ return nil
+}
diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go
new file mode 100644
index 0000000..37befa3
--- /dev/null
+++ b/src/cmd/go/internal/modget/get.go
@@ -0,0 +1,1946 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package modget implements the module-aware “go get” command.
+package modget
+
+// The arguments to 'go get' are patterns with optional version queries, with
+// the version queries defaulting to "upgrade".
+//
+// The patterns are normally interpreted as package patterns. However, if a
+// pattern cannot match a package, it is instead interpreted as a *module*
+// pattern. For version queries such as "upgrade" and "patch" that depend on the
+// selected version of a module (or of the module containing a package),
+// whether a pattern denotes a package or module may change as updates are
+// applied (see the example in mod_get_patchmod.txt).
+//
+// There are a few other ambiguous cases to resolve, too. A package can exist in
+// two different modules at the same version: for example, the package
+// example.com/foo might be found in module example.com and also in module
+// example.com/foo, and those modules may have independent v0.1.0 tags — so the
+// input 'example.com/foo@v0.1.0' could syntactically refer to the variant of
+// the package loaded from either module! (See mod_get_ambiguous_pkg.txt.)
+// If the argument is ambiguous, the user can often disambiguate by specifying
+// explicit versions for *all* of the potential module paths involved.
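+//
+// For instance, with the hypothetical example.com modules above, running
+//
+//	go get example.com@v0.1.0 example.com/foo@v0.1.0
+//
+// pins both candidate module paths explicitly instead of leaving
+// 'example.com/foo@v0.1.0' on its own to be resolved ambiguously.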
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/par"
+ "cmd/go/internal/search"
+ "cmd/go/internal/toolchain"
+ "cmd/go/internal/work"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+var CmdGet = &base.Command{
+ // Note: flags below are listed explicitly because they're the most common.
+ // Do not send CLs removing them because they're covered by [get flags].
+ UsageLine: "go get [-t] [-u] [-v] [build flags] [packages]",
+ Short: "add dependencies to current module and install them",
+ Long: `
+Get resolves its command-line arguments to packages at specific module versions,
+updates go.mod to require those versions, and downloads source code into the
+module cache.
+
+To add a dependency for a package or upgrade it to its latest version:
+
+ go get example.com/pkg
+
+To upgrade or downgrade a package to a specific version:
+
+ go get example.com/pkg@v1.2.3
+
+To remove a dependency on a module and downgrade modules that require it:
+
+ go get example.com/mod@none
+
+To upgrade the minimum required Go version to the latest released Go version:
+
+ go get go@latest
+
+To upgrade the Go toolchain to the latest patch release of the current Go toolchain:
+
+ go get toolchain@patch
+
+See https://golang.org/ref/mod#go-get for details.
+
+In earlier versions of Go, 'go get' was used to build and install packages.
+Now, 'go get' is dedicated to adjusting dependencies in go.mod. 'go install'
+may be used to build and install commands instead. When a version is specified,
+'go install' runs in module-aware mode and ignores the go.mod file in the
+current directory. For example:
+
+ go install example.com/pkg@v1.2.3
+ go install example.com/pkg@latest
+
+See 'go help install' or https://golang.org/ref/mod#go-install for details.
+
+'go get' accepts the following flags.
+
+The -t flag instructs get to consider modules needed to build tests of
+packages specified on the command line.
+
+The -u flag instructs get to update modules providing dependencies
+of packages named on the command line to use newer minor or patch
+releases when available.
+
+The -u=patch flag (not -u patch) also instructs get to update dependencies,
+but changes the default to select patch releases.
+
+When the -t and -u flags are used together, get will update
+test dependencies as well.
+
+The -x flag prints commands as they are executed. This is useful for
+debugging version control commands when a module is downloaded directly
+from a repository.
+
+For more about modules, see https://golang.org/ref/mod.
+
+For more about using 'go get' to update the minimum Go version and
+suggested Go toolchain, see https://go.dev/doc/toolchain.
+
+For more about specifying packages, see 'go help packages'.
+
+This text describes the behavior of get using modules to manage source
+code and dependencies. If instead the go command is running in GOPATH
+mode, the details of get's flags and effects change, as does 'go help get'.
+See 'go help gopath-get'.
+
+See also: go build, go install, go clean, go mod.
+ `,
+}
+
+// Note that this help text is a stopgap to make the module-aware get help text
+// available even in non-module settings. It should be deleted when the old get
+// is deleted. It should NOT be considered to set a precedent of having hierarchical
+// help names with dashes.
+var HelpModuleGet = &base.Command{
+ UsageLine: "module-get",
+ Short: "module-aware go get",
+ Long: `
+The 'go get' command changes behavior depending on whether the
+go command is running in module-aware mode or legacy GOPATH mode.
+This help text, accessible as 'go help module-get' even in legacy GOPATH mode,
+describes 'go get' as it operates in module-aware mode.
+
+Usage: ` + CmdGet.UsageLine + `
+` + CmdGet.Long,
+}
+
+var HelpVCS = &base.Command{
+ UsageLine: "vcs",
+ Short: "controlling version control with GOVCS",
+ Long: `
+The 'go get' command can run version control commands like git
+to download imported code. This functionality is critical to the decentralized
+Go package ecosystem, in which code can be imported from any server,
+but it is also a potential security problem, if a malicious server finds a
+way to cause the invoked version control command to run unintended code.
+
+To balance the functionality and security concerns, the 'go get' command
+by default will only use git and hg to download code from public servers.
+But it will use any known version control system (bzr, fossil, git, hg, svn)
+to download code from private servers, defined as those hosting packages
+matching the GOPRIVATE variable (see 'go help private'). The rationale behind
+allowing only Git and Mercurial is that these two systems have had the most
+attention to issues of being run as clients of untrusted servers. In contrast,
+Bazaar, Fossil, and Subversion have primarily been used in trusted,
+authenticated environments and are not as well scrutinized as attack surfaces.
+
+The version control command restrictions only apply when using direct version
+control access to download code. When downloading modules from a proxy,
+'go get' uses the proxy protocol instead, which is always permitted.
+By default, the 'go get' command uses the Go module mirror (proxy.golang.org)
+for public packages and only falls back to version control for private
+packages or when the mirror refuses to serve a public package (typically for
+legal reasons). Therefore, clients can still access public code served from
+Bazaar, Fossil, or Subversion repositories by default, because those downloads
+use the Go module mirror, which takes on the security risk of running the
+version control commands using a custom sandbox.
+
+The GOVCS variable can be used to change the allowed version control systems
+for specific packages (identified by a module or import path).
+The GOVCS variable applies when building packages in both module-aware mode
+and GOPATH mode. When using modules, the patterns match against the module path.
+When using GOPATH, the patterns match against the import path corresponding to
+the root of the version control repository.
+
+The general form of the GOVCS setting is a comma-separated list of
+pattern:vcslist rules. The pattern is a glob pattern that must match
+one or more leading elements of the module or import path. The vcslist
+is a pipe-separated list of allowed version control commands, or "all"
+to allow use of any known command, or "off" to disallow all commands.
+Note that if a module matches a pattern with vcslist "off", it may still be
+downloaded if the origin server uses the "mod" scheme, which instructs the
+go command to download the module using the GOPROXY protocol.
+The earliest matching pattern in the list applies, even if later patterns
+might also match.
+
+For example, consider:
+
+ GOVCS=github.com:git,evil.com:off,*:git|hg
+
+With this setting, code with a module or import path beginning with
+github.com/ can only use git; paths on evil.com cannot use any version
+control command, and all other paths (* matches everything) can use
+only git or hg.
+
+The special patterns "public" and "private" match public and private
+module or import paths. A path is private if it matches the GOPRIVATE
+variable; otherwise it is public.
+
+If no rules in the GOVCS variable match a particular module or import path,
+the 'go get' command applies its default rule, which can now be summarized
+in GOVCS notation as 'public:git|hg,private:all'.
+
+To allow unfettered use of any version control system for any package, use:
+
+ GOVCS=*:all
+
+To disable all use of version control, use:
+
+ GOVCS=*:off
+
+The 'go env -w' command (see 'go help env') can be used to set the GOVCS
+variable for future go command invocations.
+`,
+}
+
+var (
+ getD = CmdGet.Flag.Bool("d", true, "")
+ getF = CmdGet.Flag.Bool("f", false, "")
+ getFix = CmdGet.Flag.Bool("fix", false, "")
+ getM = CmdGet.Flag.Bool("m", false, "")
+ getT = CmdGet.Flag.Bool("t", false, "")
+ getU upgradeFlag
+ getInsecure = CmdGet.Flag.Bool("insecure", false, "")
+ // -v is cfg.BuildV
+)
+
+// upgradeFlag is a custom flag.Value for -u.
+type upgradeFlag struct {
+ rawVersion string
+ version string
+}
+
+func (*upgradeFlag) IsBoolFlag() bool { return true } // allow -u
+
+func (v *upgradeFlag) Set(s string) error {
+ if s == "false" {
+ v.version = ""
+ v.rawVersion = ""
+ } else if s == "true" {
+ v.version = "upgrade"
+ v.rawVersion = ""
+ } else {
+ v.version = s
+ v.rawVersion = s
+ }
+ return nil
+}
+
+func (v *upgradeFlag) String() string { return "" }
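+
+// As a rough sketch, the mapping implemented by Set above is:
+//
+//	-u         sets getU.version to "upgrade"
+//	-u=patch   sets getU.version to "patch"
+//	-u=false   sets getU.version to "" (no transitive upgrades)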
+
+func init() {
+ work.AddBuildFlags(CmdGet, work.OmitModFlag)
+ CmdGet.Run = runGet // break init loop
+ CmdGet.Flag.Var(&getU, "u", "")
+}
+
+func runGet(ctx context.Context, cmd *base.Command, args []string) {
+ switch getU.version {
+ case "", "upgrade", "patch":
+ // ok
+ default:
+ base.Fatalf("go: unknown upgrade flag -u=%s", getU.rawVersion)
+ }
+ // TODO(#43684): in the future (Go 1.20), warn that -d is a no-op.
+ if !*getD {
+ base.Fatalf("go: -d flag may not be disabled")
+ }
+ if *getF {
+ fmt.Fprintf(os.Stderr, "go: -f flag is a no-op when using modules\n")
+ }
+ if *getFix {
+ fmt.Fprintf(os.Stderr, "go: -fix flag is a no-op when using modules\n")
+ }
+ if *getM {
+ base.Fatalf("go: -m flag is no longer supported")
+ }
+ if *getInsecure {
+ base.Fatalf("go: -insecure flag is no longer supported; use GOINSECURE instead")
+ }
+
+ modload.ForceUseModules = true
+
+ // Do not allow any updating of go.mod until we've applied
+ // all the requested changes and checked that the result matches
+ // what was requested.
+ modload.ExplicitWriteGoMod = true
+
+ // Allow looking up modules for import paths when outside of a module.
+ // 'go get' is expected to do this, unlike other commands.
+ modload.AllowMissingModuleImports()
+
+ // 'go get' no longer builds or installs packages, so there's nothing to do
+ // if there's no go.mod file.
+ // TODO(#40775): make modload.Init return ErrNoModRoot instead of exiting.
+ // We could handle that here by printing a different message.
+ modload.Init()
+ if !modload.HasModRoot() {
+ base.Fatalf("go: go.mod file not found in current directory or any parent directory.\n" +
+ "\t'go get' is no longer supported outside a module.\n" +
+ "\tTo build and install a command, use 'go install' with a version,\n" +
+ "\tlike 'go install example.com/cmd@latest'\n" +
+ "\tFor more information, see https://golang.org/doc/go-get-install-deprecation\n" +
+ "\tor run 'go help get' or 'go help install'.")
+ }
+
+ dropToolchain, queries := parseArgs(ctx, args)
+ opts := modload.WriteOpts{
+ DropToolchain: dropToolchain,
+ }
+ for _, q := range queries {
+ if q.pattern == "toolchain" {
+ opts.ExplicitToolchain = true
+ }
+ }
+
+ r := newResolver(ctx, queries)
+ r.performLocalQueries(ctx)
+ r.performPathQueries(ctx)
+
+ for {
+ r.performWildcardQueries(ctx)
+ r.performPatternAllQueries(ctx)
+
+ if changed := r.resolveQueries(ctx, queries); changed {
+ // 'go get' arguments can be (and often are) package patterns rather than
+ // (just) modules. A package can be provided by any module with a prefix
+ // of its import path, and a wildcard can even match packages in modules
+ // with totally different paths. Because of these effects, and because any
+ // change to the selected version of a module can bring in entirely new
+ // module paths as dependencies, we need to reissue queries whenever we
+ // change the build list.
+ //
+ // The result of any version query for a given module — even "upgrade" or
+ // "patch" — is always relative to the build list at the start of
+ // the 'go get' command, not an intermediate state, and is therefore
+			// deterministic (and therefore cacheable), and the constraints on the
+ // selected version of each module can only narrow as we iterate.
+ //
+ // "all" is functionally very similar to a wildcard pattern. The set of
+ // packages imported by the main module does not change, and the query
+ // result for the module containing each such package also does not change
+ // (it is always relative to the initial build list, before applying
+ // queries). So the only way that the result of an "all" query can change
+ // is if some matching package moves from one module in the build list
+ // to another, which should not happen very often.
+ continue
+ }
+
+ // When we load imports, we detect the following conditions:
+ //
+ // - missing transitive dependencies that need to be resolved from outside the
+ // current build list (note that these may add new matches for existing
+ // pattern queries!)
+ //
+ // - transitive dependencies that didn't match any other query,
+ // but need to be upgraded due to the -u flag
+ //
+ // - ambiguous import errors.
+ // TODO(#27899): Try to resolve ambiguous import errors automatically.
+ upgrades := r.findAndUpgradeImports(ctx, queries)
+ if changed := r.applyUpgrades(ctx, upgrades); changed {
+ continue
+ }
+
+ r.findMissingWildcards(ctx)
+ if changed := r.resolveQueries(ctx, r.wildcardQueries); changed {
+ continue
+ }
+
+ break
+ }
+
+ r.checkWildcardVersions(ctx)
+
+ var pkgPatterns []string
+ for _, q := range queries {
+ if q.matchesPackages {
+ pkgPatterns = append(pkgPatterns, q.pattern)
+ }
+ }
+ r.checkPackageProblems(ctx, pkgPatterns)
+
+ // Everything succeeded. Update go.mod.
+ oldReqs := reqsFromGoMod(modload.ModFile())
+
+ if err := modload.WriteGoMod(ctx, opts); err != nil {
+ // A TooNewError can happen for 'go get go@newversion'
+ // when all the required modules are old enough
+ // but the command line is not.
+ // TODO(bcmills): modload.EditBuildList should catch this instead,
+ // and then this can be changed to base.Fatal(err).
+ toolchain.SwitchOrFatal(ctx, err)
+ }
+
+ newReqs := reqsFromGoMod(modload.ModFile())
+ r.reportChanges(oldReqs, newReqs)
+
+ if gowork := modload.FindGoWork(base.Cwd()); gowork != "" {
+ wf, err := modload.ReadWorkFile(gowork)
+ if err == nil && modload.UpdateWorkGoVersion(wf, modload.MainModules.GoVersion()) {
+ modload.WriteWorkFile(gowork, wf)
+ }
+ }
+}
+
+// parseArgs parses command-line arguments and reports errors.
+//
+// The command-line arguments are of the form path@version or simply path, with
+// implicit @upgrade. path@none is "downgrade away".
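+//
+// For example, 'go get example.com/pkg' is equivalent to
+// 'go get example.com/pkg@upgrade'.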
+func parseArgs(ctx context.Context, rawArgs []string) (dropToolchain bool, queries []*query) {
+ defer base.ExitIfErrors()
+
+ for _, arg := range search.CleanPatterns(rawArgs) {
+ q, err := newQuery(arg)
+ if err != nil {
+ base.Error(err)
+ continue
+ }
+
+ if q.version == "none" {
+ switch q.pattern {
+ case "go":
+ base.Errorf("go: cannot use go@none")
+ continue
+ case "toolchain":
+ dropToolchain = true
+ continue
+ }
+ }
+
+ // If there were no arguments, CleanPatterns returns ".". Set the raw
+ // string back to "" for better errors.
+ if len(rawArgs) == 0 {
+ q.raw = ""
+ }
+
+ // Guard against 'go get x.go', a common mistake.
+ // Note that package and module paths may end with '.go', so only print an error
+ // if the argument has no version and either has no slash or refers to an existing file.
+ if strings.HasSuffix(q.raw, ".go") && q.rawVersion == "" {
+ if !strings.Contains(q.raw, "/") {
+ base.Errorf("go: %s: arguments must be package or module paths", q.raw)
+ continue
+ }
+ if fi, err := os.Stat(q.raw); err == nil && !fi.IsDir() {
+ base.Errorf("go: %s exists as a file, but 'go get' requires package arguments", q.raw)
+ continue
+ }
+ }
+
+ queries = append(queries, q)
+ }
+
+ return dropToolchain, queries
+}
+
+type resolver struct {
+ localQueries []*query // queries for absolute or relative paths
+ pathQueries []*query // package path literal queries in original order
+ wildcardQueries []*query // path wildcard queries in original order
+ patternAllQueries []*query // queries with the pattern "all"
+
+ // Indexed "none" queries. These are also included in the slices above;
+ // they are indexed here to speed up noneForPath.
+ nonesByPath map[string]*query // path-literal "@none" queries indexed by path
+ wildcardNones []*query // wildcard "@none" queries
+
+ // resolvedVersion maps each module path to the version of that module that
+ // must be selected in the final build list, along with the first query
+ // that resolved the module to that version (the “reason”).
+ resolvedVersion map[string]versionReason
+
+ buildList []module.Version
+ buildListVersion map[string]string // index of buildList (module path → version)
+
+ initialVersion map[string]string // index of the initial build list at the start of 'go get'
+
+ missing []pathSet // candidates for missing transitive dependencies
+
+ work *par.Queue
+
+ matchInModuleCache par.ErrCache[matchInModuleKey, []string]
+}
+
+type versionReason struct {
+ version string
+ reason *query
+}
+
+type matchInModuleKey struct {
+ pattern string
+ m module.Version
+}
+
+func newResolver(ctx context.Context, queries []*query) *resolver {
+ // LoadModGraph also sets modload.Target, which is needed by various resolver
+ // methods.
+ mg, err := modload.LoadModGraph(ctx, "")
+ if err != nil {
+ toolchain.SwitchOrFatal(ctx, err)
+ }
+
+ buildList := mg.BuildList()
+ initialVersion := make(map[string]string, len(buildList))
+ for _, m := range buildList {
+ initialVersion[m.Path] = m.Version
+ }
+
+ r := &resolver{
+ work: par.NewQueue(runtime.GOMAXPROCS(0)),
+ resolvedVersion: map[string]versionReason{},
+ buildList: buildList,
+ buildListVersion: initialVersion,
+ initialVersion: initialVersion,
+ nonesByPath: map[string]*query{},
+ }
+
+ for _, q := range queries {
+ if q.pattern == "all" {
+ r.patternAllQueries = append(r.patternAllQueries, q)
+ } else if q.patternIsLocal {
+ r.localQueries = append(r.localQueries, q)
+ } else if q.isWildcard() {
+ r.wildcardQueries = append(r.wildcardQueries, q)
+ } else {
+ r.pathQueries = append(r.pathQueries, q)
+ }
+
+ if q.version == "none" {
+ // Index "none" queries to make noneForPath more efficient.
+ if q.isWildcard() {
+ r.wildcardNones = append(r.wildcardNones, q)
+ } else {
+ // All "<path>@none" queries for the same path are identical; we only
+ // need to index one copy.
+ r.nonesByPath[q.pattern] = q
+ }
+ }
+ }
+
+ return r
+}
+
+// initialSelected returns the version of the module with the given path that
+// was selected at the start of this 'go get' invocation.
+func (r *resolver) initialSelected(mPath string) (version string) {
+ v, ok := r.initialVersion[mPath]
+ if !ok {
+ return "none"
+ }
+ return v
+}
+
+// selected returns the version of the module with the given path that is
+// selected in the resolver's current build list.
+func (r *resolver) selected(mPath string) (version string) {
+ v, ok := r.buildListVersion[mPath]
+ if !ok {
+ return "none"
+ }
+ return v
+}
+
+// noneForPath returns a "none" query matching the given module path,
+// or found == false if no such query exists.
+func (r *resolver) noneForPath(mPath string) (nq *query, found bool) {
+ if nq = r.nonesByPath[mPath]; nq != nil {
+ return nq, true
+ }
+ for _, nq := range r.wildcardNones {
+ if nq.matchesPath(mPath) {
+ return nq, true
+ }
+ }
+ return nil, false
+}
+
+// queryModule wraps modload.Query, substituting r.checkAllowedOr to decide
+// allowed versions.
+func (r *resolver) queryModule(ctx context.Context, mPath, query string, selected func(string) string) (module.Version, error) {
+ current := r.initialSelected(mPath)
+ rev, err := modload.Query(ctx, mPath, query, current, r.checkAllowedOr(query, selected))
+ if err != nil {
+ return module.Version{}, err
+ }
+ return module.Version{Path: mPath, Version: rev.Version}, nil
+}
+
+// queryPackages wraps modload.QueryPackage, substituting r.checkAllowedOr to
+// decide allowed versions.
+func (r *resolver) queryPackages(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, err error) {
+ results, err := modload.QueryPackages(ctx, pattern, query, selected, r.checkAllowedOr(query, selected))
+ if len(results) > 0 {
+ pkgMods = make([]module.Version, 0, len(results))
+ for _, qr := range results {
+ pkgMods = append(pkgMods, qr.Mod)
+ }
+ }
+ return pkgMods, err
+}
+
+// queryPattern wraps modload.QueryPattern, substituting r.checkAllowedOr to
+// decide allowed versions.
+func (r *resolver) queryPattern(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, mod module.Version, err error) {
+ results, modOnly, err := modload.QueryPattern(ctx, pattern, query, selected, r.checkAllowedOr(query, selected))
+ if len(results) > 0 {
+ pkgMods = make([]module.Version, 0, len(results))
+ for _, qr := range results {
+ pkgMods = append(pkgMods, qr.Mod)
+ }
+ }
+ if modOnly != nil {
+ mod = modOnly.Mod
+ }
+ return pkgMods, mod, err
+}
+
+// checkAllowedOr is like modload.CheckAllowed, but it always allows the requested
+// and current versions (even if they are retracted or otherwise excluded).
+func (r *resolver) checkAllowedOr(requested string, selected func(string) string) modload.AllowedFunc {
+ return func(ctx context.Context, m module.Version) error {
+ if m.Version == requested {
+ return modload.CheckExclusions(ctx, m)
+ }
+ if (requested == "upgrade" || requested == "patch") && m.Version == selected(m.Path) {
+ return nil
+ }
+ return modload.CheckAllowed(ctx, m)
+ }
+}
+
+// matchInModule is a caching wrapper around modload.MatchInModule.
+func (r *resolver) matchInModule(ctx context.Context, pattern string, m module.Version) (packages []string, err error) {
+ return r.matchInModuleCache.Do(matchInModuleKey{pattern, m}, func() ([]string, error) {
+ match := modload.MatchInModule(ctx, pattern, m, imports.AnyTags())
+ if len(match.Errs) > 0 {
+ return match.Pkgs, match.Errs[0]
+ }
+ return match.Pkgs, nil
+ })
+}
+
+// queryNone adds a candidate set to q for each module matching q.pattern.
+// Each candidate set has only one possible module version: the matched
+// module at version "none".
+//
+// We interpret arguments to 'go get' as packages first, and fall back to
+// modules second. However, no module exists at version "none", and therefore no
+// package exists at that version either: we know that the argument cannot match
+// any packages, and thus it must match modules instead.
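+//
+// For example, 'go get example.com/mod@none' (as in the command help above)
+// necessarily names the module example.com/mod rather than a package.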
+func (r *resolver) queryNone(ctx context.Context, q *query) {
+ if search.IsMetaPackage(q.pattern) {
+ panic(fmt.Sprintf("internal error: queryNone called with pattern %q", q.pattern))
+ }
+
+ if !q.isWildcard() {
+ q.pathOnce(q.pattern, func() pathSet {
+ hasModRoot := modload.HasModRoot()
+ if hasModRoot && modload.MainModules.Contains(q.pattern) {
+ v := module.Version{Path: q.pattern}
+ // The user has explicitly requested to downgrade their own module to
+ // version "none". This is not an entirely unreasonable request: it
+ // could plausibly mean “downgrade away everything that depends on any
+ // explicit version of the main module”, or “downgrade away the
+ // package with the same path as the main module, found in a module
+ // with a prefix of the main module's path”.
+ //
+ // However, neither of those behaviors would be consistent with the
+ // plain meaning of the query. To try to reduce confusion, reject the
+ // query explicitly.
+ return errSet(&modload.QueryMatchesMainModulesError{MainModules: []module.Version{v}, Pattern: q.pattern, Query: q.version})
+ }
+
+ return pathSet{mod: module.Version{Path: q.pattern, Version: "none"}}
+ })
+ }
+
+ for _, curM := range r.buildList {
+ if !q.matchesPath(curM.Path) {
+ continue
+ }
+ q.pathOnce(curM.Path, func() pathSet {
+ if modload.HasModRoot() && curM.Version == "" && modload.MainModules.Contains(curM.Path) {
+ return errSet(&modload.QueryMatchesMainModulesError{MainModules: []module.Version{curM}, Pattern: q.pattern, Query: q.version})
+ }
+ return pathSet{mod: module.Version{Path: curM.Path, Version: "none"}}
+ })
+ }
+}
+
+func (r *resolver) performLocalQueries(ctx context.Context) {
+ for _, q := range r.localQueries {
+ q.pathOnce(q.pattern, func() pathSet {
+ absDetail := ""
+ if !filepath.IsAbs(q.pattern) {
+ if absPath, err := filepath.Abs(q.pattern); err == nil {
+ absDetail = fmt.Sprintf(" (%s)", absPath)
+ }
+ }
+
+ // Absolute paths like C:\foo and relative paths like ../foo... are
+ // restricted to matching packages in the main module.
+ pkgPattern, mainModule := modload.MainModules.DirImportPath(ctx, q.pattern)
+ if pkgPattern == "." {
+ modload.MustHaveModRoot()
+ var modRoots []string
+ for _, m := range modload.MainModules.Versions() {
+ modRoots = append(modRoots, modload.MainModules.ModRoot(m))
+ }
+ var plural string
+ if len(modRoots) != 1 {
+ plural = "s"
+ }
+ return errSet(fmt.Errorf("%s%s is not within module%s rooted at %s", q.pattern, absDetail, plural, strings.Join(modRoots, ", ")))
+ }
+
+ match := modload.MatchInModule(ctx, pkgPattern, mainModule, imports.AnyTags())
+ if len(match.Errs) > 0 {
+ return pathSet{err: match.Errs[0]}
+ }
+
+ if len(match.Pkgs) == 0 {
+ if q.raw == "" || q.raw == "." {
+ return errSet(fmt.Errorf("no package to get in current directory"))
+ }
+ if !q.isWildcard() {
+ modload.MustHaveModRoot()
+ return errSet(fmt.Errorf("%s%s is not a package in module rooted at %s", q.pattern, absDetail, modload.MainModules.ModRoot(mainModule)))
+ }
+ search.WarnUnmatched([]*search.Match{match})
+ return pathSet{}
+ }
+
+ return pathSet{pkgMods: []module.Version{mainModule}}
+ })
+ }
+}
+
+// performWildcardQueries populates the candidates for each query whose pattern
+// is a wildcard.
+//
+// The candidates for a given module path matching (or containing a package
+// matching) a wildcard query depend only on the initial build list, but the set
+// of modules may be expanded by other queries, so wildcard queries need to be
+// re-evaluated whenever a potentially-matching module path is added to the
+// build list.
+func (r *resolver) performWildcardQueries(ctx context.Context) {
+ for _, q := range r.wildcardQueries {
+ q := q
+ r.work.Add(func() {
+ if q.version == "none" {
+ r.queryNone(ctx, q)
+ } else {
+ r.queryWildcard(ctx, q)
+ }
+ })
+ }
+ <-r.work.Idle()
+}
+
+// queryWildcard adds a candidate set to q for each module for which:
+// - some version of the module is already in the build list, and
+// - that module exists at some version matching q.version, and
+// - either the module path itself matches q.pattern, or some package within
+// the module at q.version matches q.pattern.
+func (r *resolver) queryWildcard(ctx context.Context, q *query) {
+ // For wildcard patterns, modload.QueryPattern only identifies modules
+ // matching the prefix of the path before the wildcard. However, the build
+ // list may already contain other modules with matching packages, and we
+ // should consider those modules to satisfy the query too.
+ // We want to match any packages in existing dependencies, but we only want to
+ // resolve new dependencies if nothing else turns up.
+ for _, curM := range r.buildList {
+ if !q.canMatchInModule(curM.Path) {
+ continue
+ }
+ q.pathOnce(curM.Path, func() pathSet {
+ if _, hit := r.noneForPath(curM.Path); hit {
+ // This module is being removed, so it will no longer be in the build list
+ // (and thus will no longer match the pattern).
+ return pathSet{}
+ }
+
+ if modload.MainModules.Contains(curM.Path) && !versionOkForMainModule(q.version) {
+ if q.matchesPath(curM.Path) {
+ return errSet(&modload.QueryMatchesMainModulesError{
+ MainModules: []module.Version{curM},
+ Pattern: q.pattern,
+ Query: q.version,
+ })
+ }
+
+ packages, err := r.matchInModule(ctx, q.pattern, curM)
+ if err != nil {
+ return errSet(err)
+ }
+ if len(packages) > 0 {
+ return errSet(&modload.QueryMatchesPackagesInMainModuleError{
+ Pattern: q.pattern,
+ Query: q.version,
+ Packages: packages,
+ })
+ }
+
+ return r.tryWildcard(ctx, q, curM)
+ }
+
+ m, err := r.queryModule(ctx, curM.Path, q.version, r.initialSelected)
+ if err != nil {
+ if !isNoSuchModuleVersion(err) {
+ // We can't tell whether a matching version exists.
+ return errSet(err)
+ }
+ // There is no version of curM.Path matching the query.
+
+ // We haven't checked whether curM contains any matching packages at its
+ // currently-selected version, or whether curM.Path itself matches q. If
+ // either of those conditions holds, *and* no other query changes the
+ // selected version of curM, then we will fail in checkWildcardVersions.
+ // (This could be an error, but it's too soon to tell.)
+ //
+ // However, even then the transitive requirements of some other query
+ // may downgrade this module out of the build list entirely, in which
+ // case the pattern will no longer include it and it won't be an error.
+ //
+ // Either way, punt on the query rather than erroring out just yet.
+ return pathSet{}
+ }
+
+ return r.tryWildcard(ctx, q, m)
+ })
+ }
+
+ // Even if no modules matched, we shouldn't query for a new module to provide
+ // the pattern yet: some other query may yet induce a new requirement that
+ // will match the wildcard. Instead, we'll check in findMissingWildcards.
+}
+
+// tryWildcard returns a pathSet for module m matching query q.
+// If m does not actually match q, tryWildcard returns an empty pathSet.
+func (r *resolver) tryWildcard(ctx context.Context, q *query, m module.Version) pathSet {
+ mMatches := q.matchesPath(m.Path)
+ packages, err := r.matchInModule(ctx, q.pattern, m)
+ if err != nil {
+ return errSet(err)
+ }
+ if len(packages) > 0 {
+ return pathSet{pkgMods: []module.Version{m}}
+ }
+ if mMatches {
+ return pathSet{mod: m}
+ }
+ return pathSet{}
+}
+
+// findMissingWildcards adds a candidate set for each query in r.wildcardQueries
+// that has not yet resolved to any version containing packages.
+func (r *resolver) findMissingWildcards(ctx context.Context) {
+ for _, q := range r.wildcardQueries {
+ if q.version == "none" || q.matchesPackages {
+ continue // q is not “missing”
+ }
+ r.work.Add(func() {
+ q.pathOnce(q.pattern, func() pathSet {
+ pkgMods, mod, err := r.queryPattern(ctx, q.pattern, q.version, r.initialSelected)
+ if err != nil {
+ if isNoSuchPackageVersion(err) && len(q.resolved) > 0 {
+ // q already resolved one or more modules but matches no packages.
+ // That's ok: this pattern is just a module pattern, and we don't
+ // need to add any more modules to satisfy it.
+ return pathSet{}
+ }
+ return errSet(err)
+ }
+
+ return pathSet{pkgMods: pkgMods, mod: mod}
+ })
+ })
+ }
+ <-r.work.Idle()
+}
+
+// checkWildcardVersions reports an error if any module in the build list has a
+// path (or contains a package) matching a query with a wildcard pattern, but
+// has a selected version that does *not* match the query.
+func (r *resolver) checkWildcardVersions(ctx context.Context) {
+ defer base.ExitIfErrors()
+
+ for _, q := range r.wildcardQueries {
+ for _, curM := range r.buildList {
+ if !q.canMatchInModule(curM.Path) {
+ continue
+ }
+ if !q.matchesPath(curM.Path) {
+ packages, err := r.matchInModule(ctx, q.pattern, curM)
+ if len(packages) == 0 {
+ if err != nil {
+ reportError(q, err)
+ }
+ continue // curM is not relevant to q.
+ }
+ }
+
+ rev, err := r.queryModule(ctx, curM.Path, q.version, r.initialSelected)
+ if err != nil {
+ reportError(q, err)
+ continue
+ }
+ if rev.Version == curM.Version {
+ continue // curM already matches q.
+ }
+
+ if !q.matchesPath(curM.Path) {
+ m := module.Version{Path: curM.Path, Version: rev.Version}
+ packages, err := r.matchInModule(ctx, q.pattern, m)
+ if err != nil {
+ reportError(q, err)
+ continue
+ }
+ if len(packages) == 0 {
+ // curM at its original version contains a path matching q.pattern,
+ // but at rev.Version it does not, so (somewhat paradoxically) if
+ // we changed the version of curM it would no longer match the query.
+ var version any = m
+ if rev.Version != q.version {
+ version = fmt.Sprintf("%s@%s (%s)", m.Path, q.version, m.Version)
+ }
+ reportError(q, fmt.Errorf("%v matches packages in %v but not %v: specify a different version for module %s", q, curM, version, m.Path))
+ continue
+ }
+ }
+
+ // Since queryModule succeeded and either curM or one of the packages it
+ // contains matches q.pattern, we should have either selected the version
+ // of curM matching q, or reported a conflict error (and exited).
+ // If we're still here and the version doesn't match,
+ // something has gone very wrong.
+ reportError(q, fmt.Errorf("internal error: selected %v instead of %v", curM, rev.Version))
+ }
+ }
+}
+
+// performPathQueries populates the candidates for each query whose pattern is
+// a path literal.
+//
+// The candidate packages and modules for path literals depend only on the
+// initial build list, not the current build list, so we only need to query path
+// literals once.
+func (r *resolver) performPathQueries(ctx context.Context) {
+ for _, q := range r.pathQueries {
+ q := q
+ r.work.Add(func() {
+ if q.version == "none" {
+ r.queryNone(ctx, q)
+ } else {
+ r.queryPath(ctx, q)
+ }
+ })
+ }
+ <-r.work.Idle()
+}
+
+// queryPath adds a candidate set to q for the package with path q.pattern.
+// The candidate set consists of all modules that could provide q.pattern
+// and have a version matching q, plus (if it exists) the module whose path
+// is itself q.pattern (at a matching version).
+func (r *resolver) queryPath(ctx context.Context, q *query) {
+ q.pathOnce(q.pattern, func() pathSet {
+ if search.IsMetaPackage(q.pattern) || q.isWildcard() {
+ panic(fmt.Sprintf("internal error: queryPath called with pattern %q", q.pattern))
+ }
+ if q.version == "none" {
+ panic(`internal error: queryPath called with version "none"`)
+ }
+
+ if search.IsStandardImportPath(q.pattern) {
+ stdOnly := module.Version{}
+ packages, _ := r.matchInModule(ctx, q.pattern, stdOnly)
+ if len(packages) > 0 {
+ if q.rawVersion != "" {
+ return errSet(fmt.Errorf("can't request explicit version %q of standard library package %s", q.version, q.pattern))
+ }
+
+ q.matchesPackages = true
+ return pathSet{} // No module needed for standard library.
+ }
+ }
+
+ pkgMods, mod, err := r.queryPattern(ctx, q.pattern, q.version, r.initialSelected)
+ if err != nil {
+ return errSet(err)
+ }
+ return pathSet{pkgMods: pkgMods, mod: mod}
+ })
+}
+
+// performPatternAllQueries populates the candidates for each query whose
+// pattern is "all".
+//
+// The candidate modules for a given package in "all" depend only on the initial
+// build list, but we cannot follow the dependencies of a given package until we
+// know which candidate is selected — and that selection may depend on the
+// results of other queries. We need to re-evaluate the "all" queries whenever
+// the module for one or more packages in "all" is resolved.
+func (r *resolver) performPatternAllQueries(ctx context.Context) {
+ if len(r.patternAllQueries) == 0 {
+ return
+ }
+
+ findPackage := func(ctx context.Context, path string, m module.Version) (versionOk bool) {
+ versionOk = true
+ for _, q := range r.patternAllQueries {
+ q.pathOnce(path, func() pathSet {
+ pkgMods, err := r.queryPackages(ctx, path, q.version, r.initialSelected)
+ if len(pkgMods) != 1 || pkgMods[0] != m {
+ // There are candidates other than m for the given path, so we can't
+ // be certain that m will actually be the module selected to provide
+ // the package. Don't load its dependencies just yet, because they
+ // might no longer be dependencies after we resolve the correct
+ // version.
+ versionOk = false
+ }
+ return pathSet{pkgMods: pkgMods, err: err}
+ })
+ }
+ return versionOk
+ }
+
+ r.loadPackages(ctx, []string{"all"}, findPackage)
+
+ // Since we built up the candidate lists concurrently, they may be in a
+ // nondeterministic order. We want 'go get' to be fully deterministic,
+ // including in which errors it chooses to report, so sort the candidates
+ // into a deterministic-but-arbitrary order.
+ for _, q := range r.patternAllQueries {
+ sort.Slice(q.candidates, func(i, j int) bool {
+ return q.candidates[i].path < q.candidates[j].path
+ })
+ }
+}
+
+// findAndUpgradeImports returns a pathSet for each package that is not yet
+// in the build list but is transitively imported by the packages matching the
+// given queries (which must already have been resolved).
+//
+// If the getU flag ("-u") is set, findAndUpgradeImports also returns a
+// pathSet for each module that is not constrained by any other
+// command-line argument and has an available matching upgrade.
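+//
+// For instance (hypothetical paths and versions), when "-u" is in effect and a
+// matched package imports example.com/dep/pkg, currently provided by
+// example.com/dep v1.1.0 with v1.2.0 available, the returned upgrades might
+// include
+//
+//	pathSet{
+//		path:    "example.com/dep/pkg",
+//		pkgMods: []module.Version{{Path: "example.com/dep", Version: "v1.2.0"}},
+//	}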
+func (r *resolver) findAndUpgradeImports(ctx context.Context, queries []*query) (upgrades []pathSet) {
+ patterns := make([]string, 0, len(queries))
+ for _, q := range queries {
+ if q.matchesPackages {
+ patterns = append(patterns, q.pattern)
+ }
+ }
+ if len(patterns) == 0 {
+ return nil
+ }
+
+ // mu guards concurrent writes to upgrades, which will be sorted
+ // (to restore determinism) after loading.
+ var mu sync.Mutex
+
+ findPackage := func(ctx context.Context, path string, m module.Version) (versionOk bool) {
+ version := "latest"
+ if m.Path != "" {
+ if getU.version == "" {
+ // The user did not request that we upgrade transitive dependencies.
+ return true
+ }
+ if _, ok := r.resolvedVersion[m.Path]; ok {
+ // We cannot upgrade m implicitly because its version is determined by
+ // an explicit pattern argument.
+ return true
+ }
+ version = getU.version
+ }
+
+ // Unlike other queries, the "-u" flag upgrades relative to the build list
+ // after applying changes so far, not the initial build list.
+ // This is for two reasons:
+ //
+ // - The "-u" flag intentionally applies to transitive dependencies,
+ // which may not be known or even resolved in advance of applying
+ // other version changes.
+ //
+ // - The "-u" flag, unlike other arguments, does not cause version
+ // conflicts with other queries. (The other query always wins.)
+
+ pkgMods, err := r.queryPackages(ctx, path, version, r.selected)
+ for _, u := range pkgMods {
+ if u == m {
+ // The selected package version is already upgraded appropriately; there
+ // is no need to change it.
+ return true
+ }
+ }
+
+ if err != nil {
+ if isNoSuchPackageVersion(err) || (m.Path == "" && module.CheckPath(path) != nil) {
+ // We can't find the package because it doesn't — or can't — even exist
+ // in any module at the latest version. (Note that invalid module paths
+ // could in general exist due to replacements, so we at least need to
+ // run the query to check those.)
+ //
+ // There is no version change we can make to fix the package, so leave
+ // it unresolved. Either some other query (perhaps a wildcard matching a
+ // newly-added dependency for some other missing package) will fill in
+ // the gaps, or we will report an error (with a better import stack) in
+ // the final LoadPackages call.
+ return true
+ }
+ }
+
+ mu.Lock()
+ upgrades = append(upgrades, pathSet{path: path, pkgMods: pkgMods, err: err})
+ mu.Unlock()
+ return false
+ }
+
+ r.loadPackages(ctx, patterns, findPackage)
+
+ // Since we built up the candidate lists concurrently, they may be in a
+ // nondeterministic order. We want 'go get' to be fully deterministic,
+ // including in which errors it chooses to report, so sort the candidates
+ // into a deterministic-but-arbitrary order.
+ sort.Slice(upgrades, func(i, j int) bool {
+ return upgrades[i].path < upgrades[j].path
+ })
+ return upgrades
+}
+
+// loadPackages loads the packages matching the given patterns, invoking the
+// findPackage function for each package that may require a change to the
+// build list.
+//
+// loadPackages invokes the findPackage function for each package loaded from a
+// module outside the main module. If the module or version that supplies that
+// package needs to be changed due to a query, findPackage may return false
+// and the imports of that package will not be loaded.
+//
+// loadPackages also invokes the findPackage function for each imported package
+// that is neither present in the standard library nor in any module in the
+// build list.
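+//
+// A minimal sketch of the findPackage contract (the pattern and map below are
+// purely illustrative):
+//
+//	seen := make(map[string]module.Version)
+//	r.loadPackages(ctx, []string{"example.com/app/..."},
+//		func(ctx context.Context, path string, m module.Version) bool {
+//			seen[path] = m // m is the zero module.Version for unresolved imports
+//			return true    // true: version is acceptable; load this package's imports
+//		})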
+func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPackage func(ctx context.Context, path string, m module.Version) (versionOk bool)) {
+ opts := modload.PackageOpts{
+ Tags: imports.AnyTags(),
+ VendorModulesInGOROOTSrc: true,
+ LoadTests: *getT,
+ AssumeRootsImported: true, // After 'go get foo', imports of foo should build.
+ SilencePackageErrors: true, // May be fixed by subsequent upgrades or downgrades.
+ Switcher: new(toolchain.Switcher),
+ }
+
+ opts.AllowPackage = func(ctx context.Context, path string, m module.Version) error {
+ if m.Path == "" || m.Version == "" {
+ // Packages in the standard library and main modules are already at their
+ // latest (and only) available versions.
+ return nil
+ }
+ if ok := findPackage(ctx, path, m); !ok {
+ return errVersionChange
+ }
+ return nil
+ }
+
+ _, pkgs := modload.LoadPackages(ctx, opts, patterns...)
+ for _, path := range pkgs {
+ const (
+ parentPath = ""
+ parentIsStd = false
+ )
+ _, _, err := modload.Lookup(parentPath, parentIsStd, path)
+ if err == nil {
+ continue
+ }
+ if errors.Is(err, errVersionChange) {
+ // We already added candidates during loading.
+ continue
+ }
+
+ var (
+ importMissing *modload.ImportMissingError
+ ambiguous *modload.AmbiguousImportError
+ )
+ if !errors.As(err, &importMissing) && !errors.As(err, &ambiguous) {
+ // The package, which is a dependency of something we care about, has some
+ // problem that we can't resolve with a version change.
+ // Leave the error for the final LoadPackages call.
+ continue
+ }
+
+ path := path
+ r.work.Add(func() {
+ findPackage(ctx, path, module.Version{})
+ })
+ }
+ <-r.work.Idle()
+}
+
+// errVersionChange is a sentinel error indicating that a module's version needs
+// to be updated before its dependencies can be loaded.
+var errVersionChange = errors.New("version change needed")
+
+// resolveQueries resolves candidate sets that are attached to the given
+// queries and/or needed to provide the given missing-package dependencies.
+//
+// resolveQueries starts by resolving one module version from each
+// unambiguous pathSet attached to the given queries.
+//
+// If no unambiguous query results in a change to the build list,
+// resolveQueries revisits the ambiguous query candidates and resolves them
+// arbitrarily in order to guarantee forward progress.
+//
+// If all pathSets are resolved without any changes to the build list,
+// resolveQueries returns with changed=false.
+func (r *resolver) resolveQueries(ctx context.Context, queries []*query) (changed bool) {
+ defer base.ExitIfErrors()
+
+ // Note: this is O(N²) with the number of pathSets in the worst case.
+ //
+ // We could perhaps get it down to O(N) if we were to index the pathSets
+ // by module path, so that we only revisit a given pathSet when the
+ // version of some module in its containingPackage list has been determined.
+ //
+ // However, N tends to be small, and most candidate sets will include only one
+ // candidate module (so they will be resolved in the first iteration), so for
+ // now we'll stick to the simple O(N²) approach.
+
+ resolved := 0
+ for {
+ prevResolved := resolved
+
+ // If we found modules that were too new, find the max of the required versions
+ // and then try to switch to a newer toolchain.
+ var sw toolchain.Switcher
+ for _, q := range queries {
+ for _, cs := range q.candidates {
+ sw.Error(cs.err)
+ }
+ }
+ // Only switch if we need a newer toolchain.
+ // Otherwise leave the cs.err for reporting later.
+ if sw.NeedSwitch() {
+ sw.Switch(ctx)
+ // If NeedSwitch is true and Switch returns, Switch has failed to locate a newer toolchain.
+ // It printed the errors along with one more about not finding a good toolchain.
+ base.Exit()
+ }
+
+ for _, q := range queries {
+ unresolved := q.candidates[:0]
+
+ for _, cs := range q.candidates {
+ if cs.err != nil {
+ reportError(q, cs.err)
+ resolved++
+ continue
+ }
+
+ filtered, isPackage, m, unique := r.disambiguate(cs)
+ if !unique {
+ unresolved = append(unresolved, filtered)
+ continue
+ }
+
+ if m.Path == "" {
+ // The query is not viable. Choose an arbitrary candidate from
+ // before filtering and “resolve” it to report a conflict.
+ isPackage, m = r.chooseArbitrarily(cs)
+ }
+ if isPackage {
+ q.matchesPackages = true
+ }
+ r.resolve(q, m)
+ resolved++
+ }
+
+ q.candidates = unresolved
+ }
+
+ base.ExitIfErrors()
+ if resolved == prevResolved {
+ break // No unambiguous candidate remains.
+ }
+ }
+
+ if resolved > 0 {
+ if changed = r.updateBuildList(ctx, nil); changed {
+ // The build list has changed, so disregard any remaining ambiguous queries:
+ // they might now be determined by requirements in the build list, which we
+ // would prefer to use instead of arbitrary versions.
+ return true
+ }
+ }
+
+ // The build list will be the same on the next iteration as it was on this
+ // iteration, so any ambiguous queries will remain so. In order to make
+ // progress, resolve them arbitrarily but deterministically.
+ //
+ // If that results in conflicting versions, the user can re-run 'go get'
+ // with additional explicit versions for the conflicting packages or
+ // modules.
+ resolvedArbitrarily := 0
+ for _, q := range queries {
+ for _, cs := range q.candidates {
+ isPackage, m := r.chooseArbitrarily(cs)
+ if isPackage {
+ q.matchesPackages = true
+ }
+ r.resolve(q, m)
+ resolvedArbitrarily++
+ }
+ }
+ if resolvedArbitrarily > 0 {
+ changed = r.updateBuildList(ctx, nil)
+ }
+ return changed
+}
+
+// applyUpgrades disambiguates candidate sets that are needed to upgrade (or
+// provide) transitive dependencies imported by previously-resolved packages.
+//
+// applyUpgrades modifies the build list by adding one module version from each
+// pathSet in upgrades, then downgrading (or further upgrading) those modules as
+// needed to maintain any already-resolved versions of other modules.
+// applyUpgrades does not mark the new versions as resolved, so they can still
+// be further modified by other queries (such as wildcards).
+//
+// If all pathSets are resolved without any changes to the build list,
+// applyUpgrades returns with changed=false.
+func (r *resolver) applyUpgrades(ctx context.Context, upgrades []pathSet) (changed bool) {
+ defer base.ExitIfErrors()
+
+ // Arbitrarily add a "latest" version that provides each missing package, but
+ // do not mark the version as resolved: we still want to allow the explicit
+ // queries to modify the resulting versions.
+ var tentative []module.Version
+ for _, cs := range upgrades {
+ if cs.err != nil {
+ base.Error(cs.err)
+ continue
+ }
+
+ filtered, _, m, unique := r.disambiguate(cs)
+ if !unique {
+ _, m = r.chooseArbitrarily(filtered)
+ }
+ if m.Path == "" {
+ // There is no viable candidate for the missing package.
+ // Leave it unresolved.
+ continue
+ }
+ tentative = append(tentative, m)
+ }
+ base.ExitIfErrors()
+
+ changed = r.updateBuildList(ctx, tentative)
+ return changed
+}
+
+// disambiguate eliminates candidates from cs that conflict with other module
+// versions that have already been resolved. If there is only one (unique)
+// remaining candidate, disambiguate returns that candidate, along with
+// an indication of whether that candidate resolves cs.path as a package.
+//
+// Note: we're only doing very simple disambiguation here. The goal is to
+// reproduce the user's intent, not to find a solution that a human couldn't.
+// In the vast majority of cases, we expect only one module per pathSet,
+// but we want to give some minimal additional tools so that users can add an
+// extra argument or two on the command line to resolve simple ambiguities.
+func (r *resolver) disambiguate(cs pathSet) (filtered pathSet, isPackage bool, m module.Version, unique bool) {
+ if len(cs.pkgMods) == 0 && cs.mod.Path == "" {
+ panic("internal error: disambiguate called with empty pathSet")
+ }
+
+ for _, m := range cs.pkgMods {
+ if _, ok := r.noneForPath(m.Path); ok {
+ // A query with version "none" forces the candidate module to version
+ // "none", so we cannot use any other version for that module.
+ continue
+ }
+
+ if modload.MainModules.Contains(m.Path) {
+ if m.Version == "" {
+ return pathSet{}, true, m, true
+ }
+ // A main module can only be set to its own version.
+ continue
+ }
+
+ vr, ok := r.resolvedVersion[m.Path]
+ if !ok {
+ // m is a viable answer to the query, but other answers may also
+ // still be viable.
+ filtered.pkgMods = append(filtered.pkgMods, m)
+ continue
+ }
+
+ if vr.version != m.Version {
+ // Some query forces the candidate module to a version other than this
+ // one.
+ //
+ // The command could be something like
+ //
+ // go get example.com/foo/bar@none example.com/foo/bar/baz@latest
+ //
+ // in which case we *cannot* resolve the package from
+ // example.com/foo/bar (because it is constrained to version
+ // "none") and must fall through to module example.com/foo@latest.
+ continue
+ }
+
+ // Some query forces the candidate module *to* the candidate version.
+ // As a result, this candidate is the only viable choice to provide
+ // its package(s): any other choice would result in an ambiguous import
+ // for this path.
+ //
+ // For example, consider the command
+ //
+ // go get example.com/foo@latest example.com/foo/bar/baz@latest
+ //
+ // If modules example.com/foo and example.com/foo/bar both provide
+ // package example.com/foo/bar/baz, then we *must* resolve the package
+ // from example.com/foo: if we instead resolved it from
+ // example.com/foo/bar, we would have two copies of the package.
+ return pathSet{}, true, m, true
+ }
+
+ if cs.mod.Path != "" {
+ vr, ok := r.resolvedVersion[cs.mod.Path]
+ if !ok || vr.version == cs.mod.Version {
+ filtered.mod = cs.mod
+ }
+ }
+
+ if len(filtered.pkgMods) == 1 &&
+ (filtered.mod.Path == "" || filtered.mod == filtered.pkgMods[0]) {
+ // Exactly one viable module contains the package with the given path
+ // (by far the common case), so we can resolve it unambiguously.
+ return pathSet{}, true, filtered.pkgMods[0], true
+ }
+
+ if len(filtered.pkgMods) == 0 {
+ // All modules that could provide the path as a package conflict with other
+ // resolved arguments. If it can refer to a module instead, return that;
+ // otherwise, this pathSet cannot be resolved (and we will return the
+ // zero module.Version).
+ return pathSet{}, false, filtered.mod, true
+ }
+
+ // The query remains ambiguous: there are at least two different modules
+ // to which cs.path could refer.
+ return filtered, false, module.Version{}, false
+}
+
+// chooseArbitrarily returns an arbitrary (but deterministic) module version
+// from among those in the given set.
+//
+// chooseArbitrarily prefers module paths that were already in the build list at
+// the start of 'go get', prefers modules that provide packages over those that
+// do not, and chooses the first module meeting those criteria (so biases toward
+// longer paths).
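+//
+// For example (hypothetical modules), given
+//
+//	cs := pathSet{pkgMods: []module.Version{
+//		{Path: "example.com/a/b", Version: "v1.0.0"},
+//		{Path: "example.com/a", Version: "v1.3.0"},
+//	}}
+//
+// chooseArbitrarily returns (true, example.com/a@v1.3.0) if only example.com/a
+// was in the initial build list, (true, example.com/a/b@v1.0.0) if neither was,
+// and (false, cs.mod) if cs.pkgMods were empty.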
+func (r *resolver) chooseArbitrarily(cs pathSet) (isPackage bool, m module.Version) {
+ // Prefer to upgrade some module that was already in the build list.
+ for _, m := range cs.pkgMods {
+ if r.initialSelected(m.Path) != "none" {
+ return true, m
+ }
+ }
+
+ // Otherwise, arbitrarily choose the first module that provides the package.
+ if len(cs.pkgMods) > 0 {
+ return true, cs.pkgMods[0]
+ }
+
+ return false, cs.mod
+}
+
+// checkPackageProblems reloads packages for the given patterns and reports
+// missing and ambiguous package errors. It also reports retractions and
+// deprecations for resolved modules and modules needed to build named packages.
+// It also adds a sum for each updated module in the build list if we had one
+// before and didn't get one while loading packages.
+//
+// We skip missing-package errors earlier in the process, since we want to
+// resolve pathSets ourselves, but at that point, we don't have enough context
+// to log the package-import chains leading to each error.
+func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []string) {
+ defer base.ExitIfErrors()
+
+ // Gather information about modules we might want to load retractions and
+ // deprecations for. Loading this metadata requires at least one version
+ // lookup per module, and we don't want to load information that's neither
+ // relevant nor actionable.
+ type modFlags int
+ const (
+ resolved modFlags = 1 << iota // version resolved by 'go get'
+ named // explicitly named on command line or provides a named package
+ hasPkg // needed to build named packages
+ direct // provides a direct dependency of the main module
+ )
+ relevantMods := make(map[module.Version]modFlags)
+ for path, reason := range r.resolvedVersion {
+ m := module.Version{Path: path, Version: reason.version}
+ relevantMods[m] |= resolved
+ }
+
+ // Reload packages, reporting errors for missing and ambiguous imports.
+ if len(pkgPatterns) > 0 {
+ // LoadPackages will print errors (since it has more context) but will not
+ // exit, since we need to load retractions later.
+ pkgOpts := modload.PackageOpts{
+ VendorModulesInGOROOTSrc: true,
+ LoadTests: *getT,
+ ResolveMissingImports: false,
+ AllowErrors: true,
+ SilenceNoGoErrors: true,
+ }
+ matches, pkgs := modload.LoadPackages(ctx, pkgOpts, pkgPatterns...)
+ for _, m := range matches {
+ if len(m.Errs) > 0 {
+ base.SetExitStatus(1)
+ break
+ }
+ }
+ for _, pkg := range pkgs {
+ if dir, _, err := modload.Lookup("", false, pkg); err != nil {
+ if dir != "" && errors.Is(err, imports.ErrNoGo) {
+ // Since dir is non-empty, we must have located source files
+ // associated with either the package or its test — ErrNoGo must
+ // indicate that none of those source files happen to apply in this
+ // configuration. If we are actually building the package (no -d
+ // flag), we will report the problem then; otherwise, assume that the
+ // user is going to build or test this package in some other
+ // configuration and suppress the error.
+ continue
+ }
+
+ base.SetExitStatus(1)
+ if ambiguousErr := (*modload.AmbiguousImportError)(nil); errors.As(err, &ambiguousErr) {
+ for _, m := range ambiguousErr.Modules {
+ relevantMods[m] |= hasPkg
+ }
+ }
+ }
+ if m := modload.PackageModule(pkg); m.Path != "" {
+ relevantMods[m] |= hasPkg
+ }
+ }
+ for _, match := range matches {
+ for _, pkg := range match.Pkgs {
+ m := modload.PackageModule(pkg)
+ relevantMods[m] |= named
+ }
+ }
+ }
+
+ reqs := modload.LoadModFile(ctx)
+ for m := range relevantMods {
+ if reqs.IsDirect(m.Path) {
+ relevantMods[m] |= direct
+ }
+ }
+
+ // Load retractions for modules mentioned on the command line and modules
+ // needed to build named packages. We care about retractions of indirect
+ // dependencies, since we might be able to upgrade away from them.
+ type modMessage struct {
+ m module.Version
+ message string
+ }
+ retractions := make([]modMessage, 0, len(relevantMods))
+ for m, flags := range relevantMods {
+ if flags&(resolved|named|hasPkg) != 0 {
+ retractions = append(retractions, modMessage{m: m})
+ }
+ }
+ sort.Slice(retractions, func(i, j int) bool { return retractions[i].m.Path < retractions[j].m.Path })
+ for i := range retractions {
+ i := i
+ r.work.Add(func() {
+ err := modload.CheckRetractions(ctx, retractions[i].m)
+ if retractErr := (*modload.ModuleRetractedError)(nil); errors.As(err, &retractErr) {
+ retractions[i].message = err.Error()
+ }
+ })
+ }
+
+ // Load deprecations for modules mentioned on the command line. Only load
+ // deprecations for indirect dependencies if they're also direct dependencies
+ // of the main module. Deprecations of purely indirect dependencies are
+ // not actionable.
+ deprecations := make([]modMessage, 0, len(relevantMods))
+ for m, flags := range relevantMods {
+ if flags&(resolved|named) != 0 || flags&(hasPkg|direct) == hasPkg|direct {
+ deprecations = append(deprecations, modMessage{m: m})
+ }
+ }
+ sort.Slice(deprecations, func(i, j int) bool { return deprecations[i].m.Path < deprecations[j].m.Path })
+ for i := range deprecations {
+ i := i
+ r.work.Add(func() {
+ deprecation, err := modload.CheckDeprecation(ctx, deprecations[i].m)
+ if err != nil || deprecation == "" {
+ return
+ }
+ deprecations[i].message = modload.ShortMessage(deprecation, "")
+ })
+ }
+
+ // Load sums for updated modules that had sums before. When we update a
+ // module, we may update another module in the build list that provides a
+ // package in 'all' that wasn't loaded as part of this 'go get' command.
+ // If we don't add a sum for that module, builds may fail later.
+ // Note that an incidentally updated package could still import packages
+ // from unknown modules or from modules in the build list that we didn't
+ // need previously. We can't handle that case without loading 'all'.
+ sumErrs := make([]error, len(r.buildList))
+ for i := range r.buildList {
+ i := i
+ m := r.buildList[i]
+ mActual := m
+ if mRepl := modload.Replacement(m); mRepl.Path != "" {
+ mActual = mRepl
+ }
+ old := module.Version{Path: m.Path, Version: r.initialVersion[m.Path]}
+ if old.Version == "" {
+ continue
+ }
+ oldActual := old
+ if oldRepl := modload.Replacement(old); oldRepl.Path != "" {
+ oldActual = oldRepl
+ }
+ if mActual == oldActual || mActual.Version == "" || !modfetch.HaveSum(oldActual) {
+ continue
+ }
+ r.work.Add(func() {
+ if _, err := modfetch.DownloadZip(ctx, mActual); err != nil {
+ verb := "upgraded"
+ if gover.ModCompare(m.Path, m.Version, old.Version) < 0 {
+ verb = "downgraded"
+ }
+ replaced := ""
+ if mActual != m {
+ replaced = fmt.Sprintf(" (replaced by %s)", mActual)
+ }
+ err = fmt.Errorf("%s %s %s => %s%s: error finding sum for %s: %v", verb, m.Path, old.Version, m.Version, replaced, mActual, err)
+ sumErrs[i] = err
+ }
+ })
+ }
+
+ <-r.work.Idle()
+
+ // Report deprecations, then retractions, then errors fetching sums.
+ // Only errors fetching sums are hard errors.
+ for _, mm := range deprecations {
+ if mm.message != "" {
+ fmt.Fprintf(os.Stderr, "go: module %s is deprecated: %s\n", mm.m.Path, mm.message)
+ }
+ }
+ var retractPath string
+ for _, mm := range retractions {
+ if mm.message != "" {
+ fmt.Fprintf(os.Stderr, "go: warning: %v\n", mm.message)
+ if retractPath == "" {
+ retractPath = mm.m.Path
+ } else {
+ retractPath = "<module>"
+ }
+ }
+ }
+ if retractPath != "" {
+ fmt.Fprintf(os.Stderr, "go: to switch to the latest unretracted version, run:\n\tgo get %s@latest\n", retractPath)
+ }
+ for _, err := range sumErrs {
+ if err != nil {
+ base.Error(err)
+ }
+ }
+ base.ExitIfErrors()
+}
+
+// reportChanges logs version changes to os.Stderr.
+//
+// reportChanges only logs changes to modules named on the command line and to
+// explicitly required modules in go.mod. Most changes to indirect requirements
+// are not relevant to the user and are not logged.
+//
+// reportChanges should be called after WriteGoMod.
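+//
+// The lines written to os.Stderr take forms like the following (module path
+// and versions are hypothetical):
+//
+//	go: added example.com/m v1.2.0
+//	go: upgraded example.com/m v1.0.0 => v1.2.0
+//	go: downgraded example.com/m v1.2.0 => v1.0.0
+//	go: removed example.com/m v1.0.0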
+func (r *resolver) reportChanges(oldReqs, newReqs []module.Version) {
+ type change struct {
+ path, old, new string
+ }
+ changes := make(map[string]change)
+
+ // Collect changes in modules matched by command line arguments.
+ for path, reason := range r.resolvedVersion {
+ if gover.IsToolchain(path) {
+ continue
+ }
+ old := r.initialVersion[path]
+ new := reason.version
+ if old != new && (old != "" || new != "none") {
+ changes[path] = change{path, old, new}
+ }
+ }
+
+ // Collect changes to explicit requirements in go.mod.
+ for _, req := range oldReqs {
+ if gover.IsToolchain(req.Path) {
+ continue
+ }
+ path := req.Path
+ old := req.Version
+ new := r.buildListVersion[path]
+ if old != new {
+ changes[path] = change{path, old, new}
+ }
+ }
+ for _, req := range newReqs {
+ if gover.IsToolchain(req.Path) {
+ continue
+ }
+ path := req.Path
+ old := r.initialVersion[path]
+ new := req.Version
+ if old != new {
+ changes[path] = change{path, old, new}
+ }
+ }
+
+ // Toolchain diffs are easier than requirements: diff old and new directly.
+ toolchainVersions := func(reqs []module.Version) (goV, toolchain string) {
+ for _, req := range reqs {
+ if req.Path == "go" {
+ goV = req.Version
+ }
+ if req.Path == "toolchain" {
+ toolchain = req.Version
+ }
+ }
+ return
+ }
+ oldGo, oldToolchain := toolchainVersions(oldReqs)
+ newGo, newToolchain := toolchainVersions(newReqs)
+ if oldGo != newGo {
+ changes["go"] = change{"go", oldGo, newGo}
+ }
+ if oldToolchain != newToolchain {
+ changes["toolchain"] = change{"toolchain", oldToolchain, newToolchain}
+ }
+
+ sortedChanges := make([]change, 0, len(changes))
+ for _, c := range changes {
+ sortedChanges = append(sortedChanges, c)
+ }
+ sort.Slice(sortedChanges, func(i, j int) bool {
+ pi := sortedChanges[i].path
+ pj := sortedChanges[j].path
+ if pi == pj {
+ return false
+ }
+ // go first; toolchain second
+ switch {
+ case pi == "go":
+ return true
+ case pj == "go":
+ return false
+ case pi == "toolchain":
+ return true
+ case pj == "toolchain":
+ return false
+ }
+ return pi < pj
+ })
+
+ for _, c := range sortedChanges {
+ if c.old == "" {
+ fmt.Fprintf(os.Stderr, "go: added %s %s\n", c.path, c.new)
+ } else if c.new == "none" || c.new == "" {
+ fmt.Fprintf(os.Stderr, "go: removed %s %s\n", c.path, c.old)
+ } else if gover.ModCompare(c.path, c.new, c.old) > 0 {
+ fmt.Fprintf(os.Stderr, "go: upgraded %s %s => %s\n", c.path, c.old, c.new)
+ if c.path == "go" && gover.Compare(c.old, gover.ExplicitIndirectVersion) < 0 && gover.Compare(c.new, gover.ExplicitIndirectVersion) >= 0 {
+ fmt.Fprintf(os.Stderr, "\tnote: expanded dependencies to upgrade to go %s or higher; run 'go mod tidy' to clean up\n", gover.ExplicitIndirectVersion)
+ }
+
+ } else {
+ fmt.Fprintf(os.Stderr, "go: downgraded %s %s => %s\n", c.path, c.old, c.new)
+ }
+ }
+
+ // TODO(golang.org/issue/33284): attribute changes to command line arguments.
+ // For modules matched by command line arguments, this probably isn't
+ // necessary, but it would be useful for unmatched direct dependencies of
+ // the main module.
+}
+
+// resolve records that module m must be at its indicated version (which may be
+// "none") due to query q. If some other query forces module m to be at a
+// different version, resolve reports a conflict error.
+func (r *resolver) resolve(q *query, m module.Version) {
+ if m.Path == "" {
+ panic("internal error: resolving a module.Version with an empty path")
+ }
+
+ if modload.MainModules.Contains(m.Path) && m.Version != "" {
+ reportError(q, &modload.QueryMatchesMainModulesError{
+ MainModules: []module.Version{{Path: m.Path}},
+ Pattern: q.pattern,
+ Query: q.version,
+ })
+ return
+ }
+
+ vr, ok := r.resolvedVersion[m.Path]
+ if ok && vr.version != m.Version {
+ reportConflict(q, m, vr)
+ return
+ }
+ r.resolvedVersion[m.Path] = versionReason{m.Version, q}
+ q.resolved = append(q.resolved, m)
+}
+
+// updateBuildList updates the module loader's global build list to be
+// consistent with r.resolvedVersion, and to include additional modules
+// provided that they do not conflict with the resolved versions.
+//
+// If the additional modules conflict with the resolved versions, they will be
+// downgraded to a non-conflicting version (possibly "none").
+//
+// If the resulting build list is the same as the one resulting from the last
+// call to updateBuildList, updateBuildList returns with changed=false.
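+//
+// A minimal usage sketch (newMod stands for a hypothetical module.Version
+// chosen by the caller; compare the calls in resolveQueries and applyUpgrades
+// above):
+//
+//	changed := r.updateBuildList(ctx, nil)                     // re-sync with r.resolvedVersion only
+//	changed = r.updateBuildList(ctx, []module.Version{newMod}) // additionally try to add newMod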
+func (r *resolver) updateBuildList(ctx context.Context, additions []module.Version) (changed bool) {
+ defer base.ExitIfErrors()
+
+ resolved := make([]module.Version, 0, len(r.resolvedVersion))
+ for mPath, rv := range r.resolvedVersion {
+ if !modload.MainModules.Contains(mPath) {
+ resolved = append(resolved, module.Version{Path: mPath, Version: rv.version})
+ }
+ }
+
+ changed, err := modload.EditBuildList(ctx, additions, resolved)
+ if err != nil {
+ if errors.Is(err, gover.ErrTooNew) {
+ toolchain.SwitchOrFatal(ctx, err)
+ }
+
+ var constraint *modload.ConstraintError
+ if !errors.As(err, &constraint) {
+ base.Fatal(err)
+ }
+
+ if cfg.BuildV {
+ // Log complete paths for the conflicts before we summarize them.
+ for _, c := range constraint.Conflicts {
+ fmt.Fprintf(os.Stderr, "go: %v\n", c.String())
+ }
+ }
+
+ // modload.EditBuildList reports constraint errors at
+ // the module level, but 'go get' operates on packages.
+ // Rewrite the errors to explain them in terms of packages.
+ reason := func(m module.Version) string {
+ rv, ok := r.resolvedVersion[m.Path]
+ if !ok {
+ return fmt.Sprintf("(INTERNAL ERROR: no reason found for %v)", m)
+ }
+ return rv.reason.ResolvedString(module.Version{Path: m.Path, Version: rv.version})
+ }
+ for _, c := range constraint.Conflicts {
+ adverb := ""
+ if len(c.Path) > 2 {
+ adverb = "indirectly "
+ }
+ firstReason := reason(c.Path[0])
+ last := c.Path[len(c.Path)-1]
+ if c.Err != nil {
+ base.Errorf("go: %v %srequires %v: %v", firstReason, adverb, last, c.UnwrapModuleError())
+ } else {
+ base.Errorf("go: %v %srequires %v, not %v", firstReason, adverb, last, reason(c.Constraint))
+ }
+ }
+ return false
+ }
+ if !changed {
+ return false
+ }
+
+ mg, err := modload.LoadModGraph(ctx, "")
+ if err != nil {
+ toolchain.SwitchOrFatal(ctx, err)
+ }
+
+ r.buildList = mg.BuildList()
+ r.buildListVersion = make(map[string]string, len(r.buildList))
+ for _, m := range r.buildList {
+ r.buildListVersion[m.Path] = m.Version
+ }
+ return true
+}
+
+func reqsFromGoMod(f *modfile.File) []module.Version {
+ reqs := make([]module.Version, len(f.Require), 2+len(f.Require))
+ for i, r := range f.Require {
+ reqs[i] = r.Mod
+ }
+ if f.Go != nil {
+ reqs = append(reqs, module.Version{Path: "go", Version: f.Go.Version})
+ }
+ if f.Toolchain != nil {
+ reqs = append(reqs, module.Version{Path: "toolchain", Version: f.Toolchain.Name})
+ }
+ return reqs
+}
+
+// isNoSuchModuleVersion reports whether err indicates that the requested module
+// does not exist at the requested version, either because the module does not
+// exist at all or because it does not include that specific version.
+func isNoSuchModuleVersion(err error) bool {
+ var noMatch *modload.NoMatchingVersionError
+ return errors.Is(err, os.ErrNotExist) || errors.As(err, &noMatch)
+}
+
+// isNoSuchPackageVersion reports whether err indicates that the requested
+// package does not exist at the requested version, either because no module
+// that could contain it exists at that version, or because every such module
+// that does exist does not actually contain the package.
+func isNoSuchPackageVersion(err error) bool {
+ var noPackage *modload.PackageNotInModuleError
+ return isNoSuchModuleVersion(err) || errors.As(err, &noPackage)
+}
diff --git a/src/cmd/go/internal/modget/query.go b/src/cmd/go/internal/modget/query.go
new file mode 100644
index 0000000..b78c1c4
--- /dev/null
+++ b/src/cmd/go/internal/modget/query.go
@@ -0,0 +1,358 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modget
+
+import (
+ "fmt"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "sync"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/search"
+ "cmd/go/internal/str"
+ "cmd/internal/pkgpattern"
+
+ "golang.org/x/mod/module"
+)
+
+// A query describes a command-line argument and the modules and/or packages
+// to which that argument may resolve.
+type query struct {
+ // raw is the original argument, to be printed in error messages.
+ raw string
+
+ // rawVersion is the portion of raw corresponding to version, if any
+ rawVersion string
+
+ // pattern is the part of the argument before "@" (or the whole argument
+ // if there is no "@"), which may match either packages (preferred) or
+ // modules (if no matching packages).
+ //
+ // The pattern may also be "-u", for the synthetic query representing the -u
+ // (“upgrade”) flag.
+ pattern string
+
+ // patternIsLocal indicates whether pattern is restricted to match only paths
+ // local to the main module, such as absolute filesystem paths or paths
+ // beginning with './'.
+ //
+ // A local pattern must resolve to one or more packages in the main module.
+ patternIsLocal bool
+
+ // version is the part of the argument after "@", or an implied
+ // "upgrade" or "patch" if there is no "@". version specifies the
+ // module version to get.
+ version string
+
+ // matchWildcard, if non-nil, reports whether pattern, which must be a
+ // wildcard (with the substring "..."), matches the given package or module
+ // path.
+ matchWildcard func(path string) bool
+
+ // canMatchWildcardInModule, if non-nil, reports whether the module with the
+ // given path could lexically contain a package matching pattern, which must
+ // be a wildcard.
+ canMatchWildcardInModule func(mPath string) bool
+
+ // conflict is the first query identified as incompatible with this one.
+ // conflict forces one or more of the modules matching this query to a
+ // version that does not match version.
+ conflict *query
+
+ // candidates is a list of sets of alternatives for a path that matches (or
+ // contains packages that match) the pattern. The query can be resolved by
+ // choosing exactly one alternative from each set in the list.
+ //
+ // A path-literal query results in only one set: the path itself, which
+ // may resolve to either a package path or a module path.
+ //
+ // A wildcard query results in one set for each matching module path, each
+ // module for which the matching version contains at least one matching
+ // package, and (if no other modules match) one candidate set for the pattern
+ // overall if no existing match is identified in the build list.
+ //
+ // A query for pattern "all" results in one set for each package transitively
+ // imported by the main module.
+ //
+ // The special query for the "-u" flag results in one set for each
+ // otherwise-unconstrained package that has available upgrades.
+ candidates []pathSet
+ candidatesMu sync.Mutex
+
+ // pathSeen ensures that only one pathSet is added to the query per
+ // unique path.
+ pathSeen sync.Map
+
+ // resolved contains the set of modules whose versions have been determined by
+ // this query, in the order in which they were determined.
+ //
+ // The resolver examines the candidate sets for each query, resolving one
+ // module per candidate set in a way that attempts to avoid obvious conflicts
+ // between the versions resolved by different queries.
+ resolved []module.Version
+
+ // matchesPackages is true if the resolved modules provide at least one
+ // package matching q.pattern.
+ matchesPackages bool
+}
+
+// A pathSet describes the possible options for resolving a specific path
+// to a package and/or module.
+type pathSet struct {
+ // path is a package (if "all" or "-u" or a non-wildcard) or module (if
+ // wildcard) path that could be resolved by adding any of the modules in this
+ // set. For a wildcard pattern that so far matches no packages, the path is
+ // the wildcard pattern itself.
+ //
+ // Each path must occur only once in a query's candidate sets, and the path is
+ // added implicitly to each pathSet returned to pathOnce.
+ path string
+
+ // pkgMods is a set of zero or more modules, each of which contains the
+ // package with the indicated path. Due to the requirement that imports be
+ // unambiguous, only one such module can be in the build list, and all others
+ // must be excluded.
+ pkgMods []module.Version
+
+ // mod is either the zero Version, or a module that does not contain any
+ // packages matching the query but for which the module path itself
+ // matches the query pattern.
+ //
+ // We track this module separately from pkgMods because, all else equal, we
+ // prefer to match a query to a package rather than just a module. Also,
+ // unlike the modules in pkgMods, this module does not inherently exclude
+ // any other module in pkgMods.
+ mod module.Version
+
+ err error
+}
+
+// errSet returns a pathSet containing the given error.
+func errSet(err error) pathSet { return pathSet{err: err} }
+
+// newQuery returns a new query parsed from the raw argument,
+// which must be either path or path@version.
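+//
+// For example (hypothetical paths; assuming the -u flag was not given, so
+// getU.version is empty):
+//
+//	q, _ := newQuery("example.com/m@v1.2.3") // pattern "example.com/m", version "v1.2.3", rawVersion "v1.2.3"
+//	q, _ = newQuery("example.com/m")         // pattern "example.com/m", version "upgrade", rawVersion ""
+//	q, _ = newQuery("example.com/m/...")     // wildcard: matchWildcard and canMatchWildcardInModule are set
+//	_, err := newQuery("example.com/m@")     // error: invalid module version syntax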
+func newQuery(raw string) (*query, error) {
+ pattern, rawVers, found := strings.Cut(raw, "@")
+ if found && (strings.Contains(rawVers, "@") || rawVers == "") {
+ return nil, fmt.Errorf("invalid module version syntax %q", raw)
+ }
+
+ // If no version suffix is specified, assume @upgrade.
+ // If -u=patch was specified, assume @patch instead.
+ version := rawVers
+ if version == "" {
+ if getU.version == "" {
+ version = "upgrade"
+ } else {
+ version = getU.version
+ }
+ }
+
+ q := &query{
+ raw: raw,
+ rawVersion: rawVers,
+ pattern: pattern,
+ patternIsLocal: filepath.IsAbs(pattern) || search.IsRelativePath(pattern),
+ version: version,
+ }
+ if strings.Contains(q.pattern, "...") {
+ q.matchWildcard = pkgpattern.MatchPattern(q.pattern)
+ q.canMatchWildcardInModule = pkgpattern.TreeCanMatchPattern(q.pattern)
+ }
+ if err := q.validate(); err != nil {
+ return q, err
+ }
+ return q, nil
+}
+
+// validate reports a non-nil error if q is not sensible and well-formed.
+func (q *query) validate() error {
+ if q.patternIsLocal {
+ if q.rawVersion != "" {
+ return fmt.Errorf("can't request explicit version %q of path %q in main module", q.rawVersion, q.pattern)
+ }
+ return nil
+ }
+
+ if q.pattern == "all" {
+ // If there is no main module, "all" is not meaningful.
+ if !modload.HasModRoot() {
+ return fmt.Errorf(`cannot match "all": %v`, modload.ErrNoModRoot)
+ }
+ if !versionOkForMainModule(q.version) {
+ // TODO(bcmills): "all@none" seems like a totally reasonable way to
+ // request that we remove all module requirements, leaving only the main
+ // module and standard library. Perhaps we should implement that someday.
+ return &modload.QueryUpgradesAllError{
+ MainModules: modload.MainModules.Versions(),
+ Query: q.version,
+ }
+ }
+ }
+
+ if search.IsMetaPackage(q.pattern) && q.pattern != "all" {
+ if q.pattern != q.raw {
+ return fmt.Errorf("can't request explicit version of standard-library pattern %q", q.pattern)
+ }
+ }
+
+ return nil
+}
+
+// String returns the original argument from which q was parsed.
+func (q *query) String() string { return q.raw }
+
+// ResolvedString returns a string describing m as a resolved match for q.
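+//
+// For example (hypothetical module and versions), for a query parsed from
+// "example.com/m@latest" that resolved to v1.4.0,
+//
+//	q.ResolvedString(module.Version{Path: "example.com/m", Version: "v1.4.0"})
+//
+// returns "example.com/m@latest (v1.4.0)".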
+func (q *query) ResolvedString(m module.Version) string {
+ if m.Path != q.pattern {
+ if m.Version != q.version {
+ return fmt.Sprintf("%v (matching %s@%s)", m, q.pattern, q.version)
+ }
+ return fmt.Sprintf("%v (matching %v)", m, q)
+ }
+ if m.Version != q.version {
+ return fmt.Sprintf("%s@%s (%s)", q.pattern, q.version, m.Version)
+ }
+ return q.String()
+}
+
+// isWildcard reports whether q is a pattern that can match multiple paths.
+func (q *query) isWildcard() bool {
+ return q.matchWildcard != nil || (q.patternIsLocal && strings.Contains(q.pattern, "..."))
+}
+
+// matchesPath reports whether the given path matches q.pattern.
+func (q *query) matchesPath(path string) bool {
+ if q.matchWildcard != nil && !gover.IsToolchain(path) {
+ return q.matchWildcard(path)
+ }
+ return path == q.pattern
+}
+
+// canMatchInModule reports whether the given module path can potentially
+// contain q.pattern.
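+//
+// For example (hypothetical paths), with q.pattern set to "example.com/m/sub/pkg":
+//
+//	q.canMatchInModule("example.com/m")     // true: the module path is a path prefix of the pattern
+//	q.canMatchInModule("example.com/other") // false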
+func (q *query) canMatchInModule(mPath string) bool {
+ if gover.IsToolchain(mPath) {
+ return false
+ }
+ if q.canMatchWildcardInModule != nil {
+ return q.canMatchWildcardInModule(mPath)
+ }
+ return str.HasPathPrefix(q.pattern, mPath)
+}
+
+// pathOnce invokes f to generate the pathSet for the given path,
+// if one is still needed.
+//
+// Note that, unlike sync.Once, pathOnce does not guarantee that a concurrent
+// call to f for the given path has completed on return.
+//
+// pathOnce is safe for concurrent use by multiple goroutines, but note that
+// multiple concurrent calls will result in the sets being added in
+// nondeterministic order.
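+//
+// A minimal sketch (hypothetical path; err1 and err2 stand for arbitrary
+// errors):
+//
+//	q.pathOnce("example.com/foo", func() pathSet { return errSet(err1) }) // adds one candidate set
+//	q.pathOnce("example.com/foo", func() pathSet { return errSet(err2) }) // no-op: path already seen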
+func (q *query) pathOnce(path string, f func() pathSet) {
+ if _, dup := q.pathSeen.LoadOrStore(path, nil); dup {
+ return
+ }
+
+ cs := f()
+
+ if len(cs.pkgMods) > 0 || cs.mod != (module.Version{}) || cs.err != nil {
+ cs.path = path
+ q.candidatesMu.Lock()
+ q.candidates = append(q.candidates, cs)
+ q.candidatesMu.Unlock()
+ }
+}
+
+// reportError logs err concisely using base.Errorf.
+func reportError(q *query, err error) {
+ errStr := err.Error()
+
+ // If err already mentions all of the relevant parts of q, just log err to
+ // reduce stutter. Otherwise, log both q and err.
+ //
+ // TODO(bcmills): Use errors.As to unpack these errors instead of parsing
+ // strings with regular expressions.
+
+ patternRE := regexp.MustCompile("(?m)(?:[ \t(\"`]|^)" + regexp.QuoteMeta(q.pattern) + "(?:[ @:;)\"`]|$)")
+ if patternRE.MatchString(errStr) {
+ if q.rawVersion == "" {
+ base.Errorf("go: %s", errStr)
+ return
+ }
+
+ versionRE := regexp.MustCompile("(?m)(?:[ @(\"`]|^)" + regexp.QuoteMeta(q.version) + "(?:[ :;)\"`]|$)")
+ if versionRE.MatchString(errStr) {
+ base.Errorf("go: %s", errStr)
+ return
+ }
+ }
+
+ if qs := q.String(); qs != "" {
+ base.Errorf("go: %s: %s", qs, errStr)
+ } else {
+ base.Errorf("go: %s", errStr)
+ }
+}
+
+func reportConflict(pq *query, m module.Version, conflict versionReason) {
+ if pq.conflict != nil {
+ // We've already reported a conflict for the proposed query.
+ // Don't report it again, even if it has other conflicts.
+ return
+ }
+ pq.conflict = conflict.reason
+
+ proposed := versionReason{
+ version: m.Version,
+ reason: pq,
+ }
+ if pq.isWildcard() && !conflict.reason.isWildcard() {
+ // Prefer to report the specific path first and the wildcard second.
+ proposed, conflict = conflict, proposed
+ }
+ reportError(pq, &conflictError{
+ mPath: m.Path,
+ proposed: proposed,
+ conflict: conflict,
+ })
+}
+
+type conflictError struct {
+ mPath string
+ proposed versionReason
+ conflict versionReason
+}
+
+func (e *conflictError) Error() string {
+ argStr := func(q *query, v string) string {
+ if v != q.version {
+ return fmt.Sprintf("%s@%s (%s)", q.pattern, q.version, v)
+ }
+ return q.String()
+ }
+
+ pq := e.proposed.reason
+ rq := e.conflict.reason
+ modDetail := ""
+ if e.mPath != pq.pattern {
+ modDetail = fmt.Sprintf("for module %s, ", e.mPath)
+ }
+
+ return fmt.Sprintf("%s%s conflicts with %s",
+ modDetail,
+ argStr(pq, e.proposed.version),
+ argStr(rq, e.conflict.version))
+}
+
+func versionOkForMainModule(version string) bool {
+ return version == "upgrade" || version == "patch"
+}
diff --git a/src/cmd/go/internal/modindex/build.go b/src/cmd/go/internal/modindex/build.go
new file mode 100644
index 0000000..b57f2f6
--- /dev/null
+++ b/src/cmd/go/internal/modindex/build.go
@@ -0,0 +1,950 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a lightly modified copy of go/build/build.go with unused parts
+// removed.
+
+package modindex
+
+import (
+ "bytes"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/str"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/build/constraint"
+ "go/token"
+ "io"
+ "io/fs"
+ "path/filepath"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A Context specifies the supporting context for a build.
+type Context struct {
+ GOARCH string // target architecture
+ GOOS string // target operating system
+ GOROOT string // Go root
+ GOPATH string // Go paths
+
+ // Dir is the caller's working directory, or the empty string to use
+ // the current directory of the running process. In module mode, this is used
+ // to locate the main module.
+ //
+ // If Dir is non-empty, directories passed to Import and ImportDir must
+ // be absolute.
+ Dir string
+
+ CgoEnabled bool // whether cgo files are included
+ UseAllFiles bool // use files regardless of //go:build lines, file names
+ Compiler string // compiler to assume when computing target paths
+
+ // The build, tool, and release tags specify build constraints
+ // that should be considered satisfied when processing +build lines.
+ // Clients creating a new context may customize BuildTags, which
+ // defaults to empty, but it is usually an error to customize ToolTags or ReleaseTags.
+ // ToolTags defaults to build tags appropriate to the current Go toolchain configuration.
+ // ReleaseTags defaults to the list of Go releases the current release is compatible with.
+ // BuildTags is not set for the Default build Context.
+ // In addition to the BuildTags, ToolTags, and ReleaseTags, build constraints
+ // consider the values of GOARCH and GOOS as satisfied tags.
+ // The last element in ReleaseTags is assumed to be the current release.
+ BuildTags []string
+ ToolTags []string
+ ReleaseTags []string
+
+ // The install suffix specifies a suffix to use in the name of the installation
+ // directory. By default it is empty, but custom builds that need to keep
+ // their outputs separate can set InstallSuffix to do so. For example, when
+ // using the race detector, the go command uses InstallSuffix = "race", so
+ // that on a Linux/386 system, packages are written to a directory named
+ // "linux_386_race" instead of the usual "linux_386".
+ InstallSuffix string
+
+ // By default, Import uses the operating system's file system calls
+ // to read directories and files. To read from other sources,
+ // callers can set the following functions. They all have default
+ // behaviors that use the local file system, so clients need only set
+ // the functions whose behaviors they wish to change.
+
+ // JoinPath joins the sequence of path fragments into a single path.
+ // If JoinPath is nil, Import uses filepath.Join.
+ JoinPath func(elem ...string) string
+
+ // SplitPathList splits the path list into a slice of individual paths.
+ // If SplitPathList is nil, Import uses filepath.SplitList.
+ SplitPathList func(list string) []string
+
+ // IsAbsPath reports whether path is an absolute path.
+ // If IsAbsPath is nil, Import uses filepath.IsAbs.
+ IsAbsPath func(path string) bool
+
+ // IsDir reports whether the path names a directory.
+ // If IsDir is nil, Import calls os.Stat and uses the result's IsDir method.
+ IsDir func(path string) bool
+
+ // HasSubdir reports whether dir is lexically a subdirectory of
+ // root, perhaps multiple levels below. It does not try to check
+ // whether dir exists.
+ // If so, HasSubdir sets rel to a slash-separated path that
+ // can be joined to root to produce a path equivalent to dir.
+ // If HasSubdir is nil, Import uses an implementation built on
+ // filepath.EvalSymlinks.
+ HasSubdir func(root, dir string) (rel string, ok bool)
+
+ // ReadDir returns a slice of fs.FileInfo, sorted by Name,
+ // describing the content of the named directory.
+ // If ReadDir is nil, Import uses ioutil.ReadDir.
+ ReadDir func(dir string) ([]fs.FileInfo, error)
+
+ // OpenFile opens a file (not a directory) for reading.
+ // If OpenFile is nil, Import uses os.Open.
+ OpenFile func(path string) (io.ReadCloser, error)
+}
+
+// joinPath calls ctxt.JoinPath (if not nil) or else filepath.Join.
+func (ctxt *Context) joinPath(elem ...string) string {
+ if f := ctxt.JoinPath; f != nil {
+ return f(elem...)
+ }
+ return filepath.Join(elem...)
+}
+
+// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList.
+func (ctxt *Context) splitPathList(s string) []string {
+ if f := ctxt.SplitPathList; f != nil {
+ return f(s)
+ }
+ return filepath.SplitList(s)
+}
+
+// isAbsPath calls ctxt.IsAbsPath (if not nil) or else filepath.IsAbs.
+func (ctxt *Context) isAbsPath(path string) bool {
+ if f := ctxt.IsAbsPath; f != nil {
+ return f(path)
+ }
+ return filepath.IsAbs(path)
+}
+
+// isDir reports whether path names an existing directory, using fsys.Stat.
+func isDir(path string) bool {
+ fi, err := fsys.Stat(path)
+ return err == nil && fi.IsDir()
+}
+
+// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
+// the local file system to answer the question.
+func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
+ if f := ctxt.HasSubdir; f != nil {
+ return f(root, dir)
+ }
+
+ // Try using paths we received.
+ if rel, ok = hasSubdir(root, dir); ok {
+ return
+ }
+
+ // Try expanding symlinks and comparing
+ // expanded against unexpanded and
+ // expanded against expanded.
+ rootSym, _ := filepath.EvalSymlinks(root)
+ dirSym, _ := filepath.EvalSymlinks(dir)
+
+ if rel, ok = hasSubdir(rootSym, dir); ok {
+ return
+ }
+ if rel, ok = hasSubdir(root, dirSym); ok {
+ return
+ }
+ return hasSubdir(rootSym, dirSym)
+}
+
+// hasSubdir reports whether dir is within root, using lexical analysis only.
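+//
+// For example, on a Unix-like system:
+//
+//	hasSubdir("/home/user/go", "/home/user/go/src/foo") // "src/foo", true
+//	hasSubdir("/home/user/go", "/home/user/other")      // "", false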
+func hasSubdir(root, dir string) (rel string, ok bool) {
+ root = str.WithFilePathSeparator(filepath.Clean(root))
+ dir = filepath.Clean(dir)
+ if !strings.HasPrefix(dir, root) {
+ return "", false
+ }
+ return filepath.ToSlash(dir[len(root):]), true
+}
+
+// gopath returns the list of Go path directories.
+func (ctxt *Context) gopath() []string {
+ var all []string
+ for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
+ if p == "" || p == ctxt.GOROOT {
+ // Empty paths are uninteresting.
+ // If the path is the GOROOT, ignore it.
+ // People sometimes set GOPATH=$GOROOT.
+ // Do not get confused by this common mistake.
+ continue
+ }
+ if strings.HasPrefix(p, "~") {
+ // Path segments starting with ~ on Unix are almost always
+ // users who have incorrectly quoted ~ while setting GOPATH,
+ // preventing it from expanding to $HOME.
+ // The situation is made more confusing by the fact that
+ // bash allows quoted ~ in $PATH (most shells do not).
+ // Do not get confused by this, and do not try to use the path.
+ // It does not exist, and printing errors about it confuses
+ // those users even more, because they think "sure ~ exists!".
+ // The go command diagnoses this situation and prints a
+ // useful error.
+ // On Windows, ~ is used in short names, such as c:\progra~1
+ // for c:\program files.
+ continue
+ }
+ all = append(all, p)
+ }
+ return all
+}
+
+var defaultToolTags, defaultReleaseTags []string
+
+// NoGoError is the error used by Import to describe a directory
+// containing no buildable Go source files. (It may still contain
+// test files, files hidden by build tags, and so on.)
+type NoGoError struct {
+ Dir string
+}
+
+func (e *NoGoError) Error() string {
+ return "no buildable Go source files in " + e.Dir
+}
+
+// MultiplePackageError describes a directory containing
+// multiple buildable Go source files for multiple packages.
+type MultiplePackageError struct {
+ Dir string // directory containing files
+ Packages []string // package names found
+ Files []string // corresponding files: Files[i] declares package Packages[i]
+}
+
+func (e *MultiplePackageError) Error() string {
+ // Error string limited to two entries for compatibility.
+ return fmt.Sprintf("found packages %s (%s) and %s (%s) in %s", e.Packages[0], e.Files[0], e.Packages[1], e.Files[1], e.Dir)
+}
+
+func nameExt(name string) string {
+ i := strings.LastIndex(name, ".")
+ if i < 0 {
+ return ""
+ }
+ return name[i:]
+}
+
+func fileListForExt(p *build.Package, ext string) *[]string {
+ switch ext {
+ case ".c":
+ return &p.CFiles
+ case ".cc", ".cpp", ".cxx":
+ return &p.CXXFiles
+ case ".m":
+ return &p.MFiles
+ case ".h", ".hh", ".hpp", ".hxx":
+ return &p.HFiles
+ case ".f", ".F", ".for", ".f90":
+ return &p.FFiles
+ case ".s", ".S", ".sx":
+ return &p.SFiles
+ case ".swig":
+ return &p.SwigFiles
+ case ".swigcxx":
+ return &p.SwigCXXFiles
+ case ".syso":
+ return &p.SysoFiles
+ }
+ return nil
+}
+
+var errNoModules = errors.New("not using modules")
+
+func findImportComment(data []byte) (s string, line int) {
+ // expect keyword package
+ word, data := parseWord(data)
+ if string(word) != "package" {
+ return "", 0
+ }
+
+ // expect package name
+ _, data = parseWord(data)
+
+ // now ready for import comment, a // or /* */ comment
+ // beginning and ending on the current line.
+ for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') {
+ data = data[1:]
+ }
+
+ var comment []byte
+ switch {
+ case bytes.HasPrefix(data, slashSlash):
+ comment, _, _ = bytes.Cut(data[2:], newline)
+ case bytes.HasPrefix(data, slashStar):
+ var ok bool
+ comment, _, ok = bytes.Cut(data[2:], starSlash)
+ if !ok {
+ // malformed comment
+ return "", 0
+ }
+ if bytes.Contains(comment, newline) {
+ return "", 0
+ }
+ }
+ comment = bytes.TrimSpace(comment)
+
+ // split comment into `import`, `"pkg"`
+ word, arg := parseWord(comment)
+ if string(word) != "import" {
+ return "", 0
+ }
+
+ line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline)
+ return strings.TrimSpace(string(arg)), line
+}
+
+var (
+ slashSlash = []byte("//")
+ slashStar = []byte("/*")
+ starSlash = []byte("*/")
+ newline = []byte("\n")
+)
+
+// skipSpaceOrComment returns data with any leading spaces or comments removed.
+func skipSpaceOrComment(data []byte) []byte {
+ for len(data) > 0 {
+ switch data[0] {
+ case ' ', '\t', '\r', '\n':
+ data = data[1:]
+ continue
+ case '/':
+ if bytes.HasPrefix(data, slashSlash) {
+ i := bytes.Index(data, newline)
+ if i < 0 {
+ return nil
+ }
+ data = data[i+1:]
+ continue
+ }
+ if bytes.HasPrefix(data, slashStar) {
+ data = data[2:]
+ i := bytes.Index(data, starSlash)
+ if i < 0 {
+ return nil
+ }
+ data = data[i+2:]
+ continue
+ }
+ }
+ break
+ }
+ return data
+}
+
+// parseWord skips any leading spaces or comments in data
+// and then parses the beginning of data as an identifier or keyword,
+// returning that word and what remains after the word.
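+//
+// For example:
+//
+//	parseWord([]byte("package main"))    // word "package", rest " main"
+//	parseWord([]byte("  // c\n\tfoo()")) // word "foo", rest "()"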
+func parseWord(data []byte) (word, rest []byte) {
+ data = skipSpaceOrComment(data)
+
+ // Parse past leading word characters.
+ rest = data
+ for {
+ r, size := utf8.DecodeRune(rest)
+ if unicode.IsLetter(r) || '0' <= r && r <= '9' || r == '_' {
+ rest = rest[size:]
+ continue
+ }
+ break
+ }
+
+ word = data[:len(data)-len(rest)]
+ if len(word) == 0 {
+ return nil, nil
+ }
+
+ return word, rest
+}
+
+var dummyPkg build.Package
+
+// fileInfo records information learned about a file included in a build.
+type fileInfo struct {
+ name string // full name including dir
+ header []byte
+ fset *token.FileSet
+ parsed *ast.File
+ parseErr error
+ imports []fileImport
+ embeds []fileEmbed
+ directives []build.Directive
+
+ // Additional fields added to go/build's fileInfo for the purposes of the modindex package.
+ binaryOnly bool
+ goBuildConstraint string
+ plusBuildConstraints []string
+}
+
+type fileImport struct {
+ path string
+ pos token.Pos
+ doc *ast.CommentGroup
+}
+
+type fileEmbed struct {
+ pattern string
+ pos token.Position
+}
+
+var errNonSource = errors.New("non source file")
+
+// getFileInfo extracts the information needed from each go file for the module
+// index.
+//
+// If name denotes a Go source file, getFileInfo reads until the end of the
+// imports and returns that section of the file in the fileInfo's header field,
+// even though it only considers text up to the first non-comment when looking
+// for +build lines.
+//
+// getFileInfo will return errNonSource if the file is not a source or object
+// file and shouldn't even be added to IgnoredFiles.
+func getFileInfo(dir, name string, fset *token.FileSet) (*fileInfo, error) {
+ if strings.HasPrefix(name, "_") ||
+ strings.HasPrefix(name, ".") {
+ return nil, nil
+ }
+
+ i := strings.LastIndex(name, ".")
+ if i < 0 {
+ i = len(name)
+ }
+ ext := name[i:]
+
+ if ext != ".go" && fileListForExt(&dummyPkg, ext) == nil {
+ // skip
+ return nil, errNonSource
+ }
+
+ info := &fileInfo{name: filepath.Join(dir, name), fset: fset}
+ if ext == ".syso" {
+ // binary, no reading
+ return info, nil
+ }
+
+ f, err := fsys.Open(info.name)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(matloob) should we decide whether to ignore binary only here or earlier
+ // when we create the index file?
+ var ignoreBinaryOnly bool
+ if strings.HasSuffix(name, ".go") {
+ err = readGoInfo(f, info)
+ if strings.HasSuffix(name, "_test.go") {
+ ignoreBinaryOnly = true // ignore //go:binary-only-package comments in _test.go files
+ }
+ } else {
+ info.header, err = readComments(f)
+ }
+ f.Close()
+ if err != nil {
+ return nil, fmt.Errorf("read %s: %v", info.name, err)
+ }
+
+ // Look for +build comments to accept or reject the file.
+ info.goBuildConstraint, info.plusBuildConstraints, info.binaryOnly, err = getConstraints(info.header)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %v", name, err)
+ }
+
+ if ignoreBinaryOnly && info.binaryOnly {
+ info.binaryOnly = false // override info.binaryOnly
+ }
+
+ return info, nil
+}
+
+func cleanDecls(m map[string][]token.Position) ([]string, map[string][]token.Position) {
+ all := make([]string, 0, len(m))
+ for path := range m {
+ all = append(all, path)
+ }
+ sort.Strings(all)
+ return all, m
+}
+
+var (
+ bSlashSlash = []byte(slashSlash)
+ bStarSlash = []byte(starSlash)
+ bSlashStar = []byte(slashStar)
+ bPlusBuild = []byte("+build")
+
+ goBuildComment = []byte("//go:build")
+
+ errMultipleGoBuild = errors.New("multiple //go:build comments")
+)
+
+func isGoBuildComment(line []byte) bool {
+ if !bytes.HasPrefix(line, goBuildComment) {
+ return false
+ }
+ line = bytes.TrimSpace(line)
+ rest := line[len(goBuildComment):]
+ return len(rest) == 0 || len(bytes.TrimSpace(rest)) < len(rest)
+}
+
+// Special comment denoting a binary-only package.
+// See https://golang.org/design/2775-binary-only-packages
+// for more about the design of binary-only packages.
+var binaryOnlyComment = []byte("//go:binary-only-package")
+
+func getConstraints(content []byte) (goBuild string, plusBuild []string, binaryOnly bool, err error) {
+ // Identify leading run of // comments and blank lines,
+ // which must be followed by a blank line.
+ // Also identify any //go:build comments.
+ content, goBuildBytes, sawBinaryOnly, err := parseFileHeader(content)
+ if err != nil {
+ return "", nil, false, err
+ }
+
+ // If a //go:build line is present, it controls, so there is no need to look for +build lines.
+ // Otherwise, get plusBuild constraints.
+ if goBuildBytes == nil {
+ p := content
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if !bytes.HasPrefix(line, bSlashSlash) || !bytes.Contains(line, bPlusBuild) {
+ continue
+ }
+ text := string(line)
+ if !constraint.IsPlusBuild(text) {
+ continue
+ }
+ plusBuild = append(plusBuild, text)
+ }
+ }
+
+ return string(goBuildBytes), plusBuild, sawBinaryOnly, nil
+}
+
+func parseFileHeader(content []byte) (trimmed, goBuild []byte, sawBinaryOnly bool, err error) {
+ end := 0
+ p := content
+ ended := false // found non-blank, non-// line, so stopped accepting // +build lines
+ inSlashStar := false // in /* */ comment
+
+Lines:
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if len(line) == 0 && !ended { // Blank line
+ // Remember position of most recent blank line.
+ // When we find the first non-blank, non-// line,
+ // this "end" position marks the latest file position
+ // where a // +build line can appear.
+ // (It must appear _before_ a blank line before the non-blank, non-// line.
+ // Yes, that's confusing, which is part of why we moved to //go:build lines.)
+ // Note that ended==false here means that inSlashStar==false,
+ // since seeing a /* would have set ended==true.
+ end = len(content) - len(p)
+ continue Lines
+ }
+ if !bytes.HasPrefix(line, slashSlash) { // Not comment line
+ ended = true
+ }
+
+ if !inSlashStar && isGoBuildComment(line) {
+ if goBuild != nil {
+ return nil, nil, false, errMultipleGoBuild
+ }
+ goBuild = line
+ }
+ if !inSlashStar && bytes.Equal(line, binaryOnlyComment) {
+ sawBinaryOnly = true
+ }
+
+ Comments:
+ for len(line) > 0 {
+ if inSlashStar {
+ if i := bytes.Index(line, starSlash); i >= 0 {
+ inSlashStar = false
+ line = bytes.TrimSpace(line[i+len(starSlash):])
+ continue Comments
+ }
+ continue Lines
+ }
+ if bytes.HasPrefix(line, bSlashSlash) {
+ continue Lines
+ }
+ if bytes.HasPrefix(line, bSlashStar) {
+ inSlashStar = true
+ line = bytes.TrimSpace(line[len(bSlashStar):])
+ continue Comments
+ }
+ // Found non-comment text.
+ break Lines
+ }
+ }
+
+ return content[:end], goBuild, sawBinaryOnly, nil
+}
+
+// saveCgo saves the information from the #cgo lines in the import "C" comment.
+// These lines set CFLAGS, CPPFLAGS, CXXFLAGS, FFLAGS and LDFLAGS and pkg-config directives
+// that affect the way cgo's C code is built.
+func (ctxt *Context) saveCgo(filename string, di *build.Package, text string) error {
+ for _, line := range strings.Split(text, "\n") {
+ orig := line
+
+ // Line is
+ // #cgo [GOOS/GOARCH...] LDFLAGS: stuff
+ //
+ line = strings.TrimSpace(line)
+ if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
+ continue
+ }
+
+ // Split at colon.
+ line, argstr, ok := strings.Cut(strings.TrimSpace(line[4:]), ":")
+ if !ok {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+
+ // Parse GOOS/GOARCH stuff.
+ f := strings.Fields(line)
+ if len(f) < 1 {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+
+ cond, verb := f[:len(f)-1], f[len(f)-1]
+ if len(cond) > 0 {
+ ok := false
+ for _, c := range cond {
+ if ctxt.matchAuto(c, nil) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ continue
+ }
+ }
+
+ args, err := splitQuoted(argstr)
+ if err != nil {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+ for i, arg := range args {
+ if arg, ok = expandSrcDir(arg, di.Dir); !ok {
+ return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg)
+ }
+ args[i] = arg
+ }
+
+ switch verb {
+ case "CFLAGS", "CPPFLAGS", "CXXFLAGS", "FFLAGS", "LDFLAGS":
+ // Change relative paths to absolute.
+ ctxt.makePathsAbsolute(args, di.Dir)
+ }
+
+ switch verb {
+ case "CFLAGS":
+ di.CgoCFLAGS = append(di.CgoCFLAGS, args...)
+ case "CPPFLAGS":
+ di.CgoCPPFLAGS = append(di.CgoCPPFLAGS, args...)
+ case "CXXFLAGS":
+ di.CgoCXXFLAGS = append(di.CgoCXXFLAGS, args...)
+ case "FFLAGS":
+ di.CgoFFLAGS = append(di.CgoFFLAGS, args...)
+ case "LDFLAGS":
+ di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...)
+ case "pkg-config":
+ di.CgoPkgConfig = append(di.CgoPkgConfig, args...)
+ default:
+ return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig)
+ }
+ }
+ return nil
+}
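+
+// saveCgoExample is an illustrative sketch (not part of the index code) of how
+// a #cgo block from an import "C" comment is folded into a build.Package.
+// The file name, directory, and flags are made up for illustration.
+func saveCgoExample(ctxt *Context) (*build.Package, error) {
+ p := &build.Package{Dir: "/src/demo"}
+ text := "#cgo linux LDFLAGS: -lm\n#cgo pkg-config: zlib\n"
+ // On a linux build context, p.CgoLDFLAGS gains "-lm" and p.CgoPkgConfig gains "zlib".
+ err := ctxt.saveCgo("demo.go", p, text)
+ return p, err
+}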
+
+// expandSrcDir expands any occurrence of ${SRCDIR}, making sure
+// the result is safe for the shell.
+func expandSrcDir(str string, srcdir string) (string, bool) {
+ // "\" delimited paths cause safeCgoName to fail
+ // so convert native paths with a different delimiter
+ // to "/" before starting (eg: on windows).
+ srcdir = filepath.ToSlash(srcdir)
+
+ chunks := strings.Split(str, "${SRCDIR}")
+ if len(chunks) < 2 {
+ return str, safeCgoName(str)
+ }
+ ok := true
+ for _, chunk := range chunks {
+ ok = ok && (chunk == "" || safeCgoName(chunk))
+ }
+ ok = ok && (srcdir == "" || safeCgoName(srcdir))
+ res := strings.Join(chunks, srcdir)
+ return res, ok && res != ""
+}
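+
+// expandSrcDirExample is an illustrative sketch: ${SRCDIR} is replaced with the
+// (slash-converted) source directory, and the result is reported as safe only
+// if every chunk passes safeCgoName.
+func expandSrcDirExample() (string, bool) {
+ // Returns "-I/tmp/pkg/include", true.
+ return expandSrcDir("-I${SRCDIR}/include", "/tmp/pkg")
+}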
+
+// makePathsAbsolute looks for compiler options that take paths and
+// makes them absolute. We do this because through the 1.8 release we
+// ran the compiler in the package directory, so any relative -I or -L
+// options would be relative to that directory. In 1.9 we changed to
+// running the compiler in the build directory, to get consistent
+// build results (issue #19964). To keep builds working, we change any
+// relative -I or -L options to be absolute.
+//
+// Using filepath.IsAbs and filepath.Join here means the results will be
+// different on different systems, but that's OK: -I and -L options are
+// inherently system-dependent.
+func (ctxt *Context) makePathsAbsolute(args []string, srcDir string) {
+ nextPath := false
+ for i, arg := range args {
+ if nextPath {
+ if !filepath.IsAbs(arg) {
+ args[i] = filepath.Join(srcDir, arg)
+ }
+ nextPath = false
+ } else if strings.HasPrefix(arg, "-I") || strings.HasPrefix(arg, "-L") {
+ if len(arg) == 2 {
+ nextPath = true
+ } else {
+ if !filepath.IsAbs(arg[2:]) {
+ args[i] = arg[:2] + filepath.Join(srcDir, arg[2:])
+ }
+ }
+ }
+ }
+}
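+
+// makePathsAbsoluteExample is an illustrative sketch: relative paths in -I and
+// -L options (attached or passed as the next argument) are joined with the
+// source directory, while other flags are left alone.
+func makePathsAbsoluteExample(ctxt *Context) []string {
+ args := []string{"-I", "include", "-Llib", "-O2"}
+ ctxt.makePathsAbsolute(args, "/src/demo")
+ // On a Unix-like system args is now {"-I", "/src/demo/include", "-L/src/demo/lib", "-O2"}.
+ return args
+}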
+
+// NOTE: $ is not safe for the shell, but it is allowed here because of linker options like -Wl,$ORIGIN.
+// We never pass these arguments to a shell (just to programs we construct argv for), so this should be okay.
+// See golang.org/issue/6038.
+// The @ is for OS X. See golang.org/issue/13720.
+// The % is for Jenkins. See golang.org/issue/16959.
+// The ! is because module paths may use it. See golang.org/issue/26716.
+// The ~ and ^ are for sr.ht. See golang.org/issue/32260.
+const safeString = "+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:$@%! ~^"
+
+func safeCgoName(s string) bool {
+ if s == "" {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; c < utf8.RuneSelf && strings.IndexByte(safeString, c) < 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// splitQuoted splits the string s around each instance of one or more consecutive
+// white space characters while taking into account quotes and escaping, and
+// returns an array of substrings of s or an empty list if s contains only white space.
+// Single quotes and double quotes are recognized to prevent splitting within the
+// quoted region, and are removed from the resulting substrings. If a quote in s
+// isn't closed err will be set and r will have the unclosed argument as the
+// last element. The backslash is used for escaping.
+//
+// For example, the following string:
+//
+// a b:"c d" 'e''f' "g\""
+//
+// Would be parsed as:
+//
+// []string{"a", "b:c d", "ef", `g"`}
+func splitQuoted(s string) (r []string, err error) {
+ var args []string
+ arg := make([]rune, len(s))
+ escaped := false
+ quoted := false
+ quote := '\x00'
+ i := 0
+ for _, rune := range s {
+ switch {
+ case escaped:
+ escaped = false
+ case rune == '\\':
+ escaped = true
+ continue
+ case quote != '\x00':
+ if rune == quote {
+ quote = '\x00'
+ continue
+ }
+ case rune == '"' || rune == '\'':
+ quoted = true
+ quote = rune
+ continue
+ case unicode.IsSpace(rune):
+ if quoted || i > 0 {
+ quoted = false
+ args = append(args, string(arg[:i]))
+ i = 0
+ }
+ continue
+ }
+ arg[i] = rune
+ i++
+ }
+ if quoted || i > 0 {
+ args = append(args, string(arg[:i]))
+ }
+ if quote != 0 {
+ err = errors.New("unclosed quote")
+ } else if escaped {
+ err = errors.New("unfinished escaping")
+ }
+ return args, err
+}
+
+// matchAuto interprets text as either a +build or //go:build expression (whichever works),
+// reporting whether the expression matches the build context.
+//
+// matchAuto is only used for testing of tag evaluation
+// and in #cgo lines, which accept either syntax.
+func (ctxt *Context) matchAuto(text string, allTags map[string]bool) bool {
+ if strings.ContainsAny(text, "&|()") {
+ text = "//go:build " + text
+ } else {
+ text = "// +build " + text
+ }
+ x, err := constraint.Parse(text)
+ if err != nil {
+ return false
+ }
+ return ctxt.eval(x, allTags)
+}
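+
+// matchAutoExample is an illustrative sketch of the two accepted syntaxes:
+// text containing &&, ||, or parentheses is parsed as a //go:build expression,
+// while anything else is parsed as a legacy // +build constraint.
+func matchAutoExample(ctxt *Context) (plusBuildStyle, goBuildStyle bool) {
+ plusBuildStyle = ctxt.matchAuto("linux,amd64", nil)  // treated as "// +build linux,amd64"
+ goBuildStyle = ctxt.matchAuto("linux && amd64", nil) // treated as "//go:build linux && amd64"
+ return plusBuildStyle, goBuildStyle
+}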
+
+func (ctxt *Context) eval(x constraint.Expr, allTags map[string]bool) bool {
+ return x.Eval(func(tag string) bool { return ctxt.matchTag(tag, allTags) })
+}
+
+// matchTag reports whether the name is one of:
+//
+// cgo (if cgo is enabled)
+// $GOOS
+// $GOARCH
+// boringcrypto
+// ctxt.Compiler
+// linux (if GOOS == android)
+// solaris (if GOOS == illumos)
+// tag (if tag is listed in ctxt.BuildTags, ctxt.ToolTags, or ctxt.ReleaseTags)
+//
+// It records all consulted tags in allTags.
+func (ctxt *Context) matchTag(name string, allTags map[string]bool) bool {
+ if allTags != nil {
+ allTags[name] = true
+ }
+
+ // special tags
+ if ctxt.CgoEnabled && name == "cgo" {
+ return true
+ }
+ if name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler {
+ return true
+ }
+ if ctxt.GOOS == "android" && name == "linux" {
+ return true
+ }
+ if ctxt.GOOS == "illumos" && name == "solaris" {
+ return true
+ }
+ if ctxt.GOOS == "ios" && name == "darwin" {
+ return true
+ }
+ if name == "unix" && unixOS[ctxt.GOOS] {
+ return true
+ }
+ if name == "boringcrypto" {
+ name = "goexperiment.boringcrypto" // boringcrypto is an old name for goexperiment.boringcrypto
+ }
+
+ // other tags
+ for _, tag := range ctxt.BuildTags {
+ if tag == name {
+ return true
+ }
+ }
+ for _, tag := range ctxt.ToolTags {
+ if tag == name {
+ return true
+ }
+ }
+ for _, tag := range ctxt.ReleaseTags {
+ if tag == name {
+ return true
+ }
+ }
+
+ return false
+}
+
+// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH
+// suffix which does not match the current system.
+// The recognized name formats are:
+//
+// name_$(GOOS).*
+// name_$(GOARCH).*
+// name_$(GOOS)_$(GOARCH).*
+// name_$(GOOS)_test.*
+// name_$(GOARCH)_test.*
+// name_$(GOOS)_$(GOARCH)_test.*
+//
+// Exceptions:
+// if GOOS=android, then files with GOOS=linux are also matched.
+// if GOOS=illumos, then files with GOOS=solaris are also matched.
+// if GOOS=ios, then files with GOOS=darwin are also matched.
+func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool {
+ name, _, _ = strings.Cut(name, ".")
+
+ // Before Go 1.4, a file called "linux.go" would be equivalent to having a
+ // build tag "linux" in that file. For Go 1.4 and beyond, we require this
+ // auto-tagging to apply only to files with a non-empty prefix, so
+ // "foo_linux.go" is tagged but "linux.go" is not. This allows new operating
+ // systems, such as android, to arrive without breaking existing code with
+ // innocuous source code in "android.go". The easiest fix: cut everything
+ // in the name before the initial _.
+ i := strings.Index(name, "_")
+ if i < 0 {
+ return true
+ }
+ name = name[i:] // ignore everything before first _
+
+ l := strings.Split(name, "_")
+ if n := len(l); n > 0 && l[n-1] == "test" {
+ l = l[:n-1]
+ }
+ n := len(l)
+ if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] {
+ if allTags != nil {
+ // In case we short-circuit on l[n-1].
+ allTags[l[n-2]] = true
+ }
+ return ctxt.matchTag(l[n-1], allTags) && ctxt.matchTag(l[n-2], allTags)
+ }
+ if n >= 1 && (knownOS[l[n-1]] || knownArch[l[n-1]]) {
+ return ctxt.matchTag(l[n-1], allTags)
+ }
+ return true
+}
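+
+// goodOSArchFileExample is an illustrative sketch of the filename-based
+// filtering, assuming a build context with GOOS=linux and GOARCH=amd64.
+func goodOSArchFileExample(ctxt *Context) []bool {
+ return []bool{
+ ctxt.goodOSArchFile("a_linux.go", nil),       // true
+ ctxt.goodOSArchFile("a_windows.go", nil),     // false
+ ctxt.goodOSArchFile("a_linux_arm64.go", nil), // false: GOARCH does not match
+ ctxt.goodOSArchFile("linux.go", nil),         // true: no "_" in the name, so not auto-tagged
+ }
+}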
diff --git a/src/cmd/go/internal/modindex/build_read.go b/src/cmd/go/internal/modindex/build_read.go
new file mode 100644
index 0000000..9137200
--- /dev/null
+++ b/src/cmd/go/internal/modindex/build_read.go
@@ -0,0 +1,594 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a lightly modified copy of go/build/read.go with unused parts
+// removed.
+
+package modindex
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type importReader struct {
+ b *bufio.Reader
+ buf []byte
+ peek byte
+ err error
+ eof bool
+ nerr int
+ pos token.Position
+}
+
+var bom = []byte{0xef, 0xbb, 0xbf}
+
+func newImportReader(name string, r io.Reader) *importReader {
+ b := bufio.NewReader(r)
+ // Remove leading UTF-8 BOM.
+ // Per https://golang.org/ref/spec#Source_code_representation:
+ // a compiler may ignore a UTF-8-encoded byte order mark (U+FEFF)
+ // if it is the first Unicode code point in the source text.
+ if leadingBytes, err := b.Peek(3); err == nil && bytes.Equal(leadingBytes, bom) {
+ b.Discard(3)
+ }
+ return &importReader{
+ b: b,
+ pos: token.Position{
+ Filename: name,
+ Line: 1,
+ Column: 1,
+ },
+ }
+}
+
+func isIdent(c byte) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf
+}
+
+var (
+ errSyntax = errors.New("syntax error")
+ errNUL = errors.New("unexpected NUL in input")
+)
+
+// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
+func (r *importReader) syntaxError() {
+ if r.err == nil {
+ r.err = errSyntax
+ }
+}
+
+// readByte reads the next byte from the input, saves it in buf, and returns it.
+// If an error occurs, readByte records the error in r.err and returns 0.
+func (r *importReader) readByte() byte {
+ c, err := r.b.ReadByte()
+ if err == nil {
+ r.buf = append(r.buf, c)
+ if c == 0 {
+ err = errNUL
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ r.eof = true
+ } else if r.err == nil {
+ r.err = err
+ }
+ c = 0
+ }
+ return c
+}
+
+// readByteNoBuf is like readByte but doesn't buffer the byte.
+// It exhausts r.buf before reading from r.b.
+func (r *importReader) readByteNoBuf() byte {
+ var c byte
+ var err error
+ if len(r.buf) > 0 {
+ c = r.buf[0]
+ r.buf = r.buf[1:]
+ } else {
+ c, err = r.b.ReadByte()
+ if err == nil && c == 0 {
+ err = errNUL
+ }
+ }
+
+ if err != nil {
+ if err == io.EOF {
+ r.eof = true
+ } else if r.err == nil {
+ r.err = err
+ }
+ return 0
+ }
+ r.pos.Offset++
+ if c == '\n' {
+ r.pos.Line++
+ r.pos.Column = 1
+ } else {
+ r.pos.Column++
+ }
+ return c
+}
+
+// peekByte returns the next byte from the input reader but does not advance beyond it.
+// If skipSpace is set, peekByte skips leading spaces and comments.
+func (r *importReader) peekByte(skipSpace bool) byte {
+ if r.err != nil {
+ if r.nerr++; r.nerr > 10000 {
+ panic("go/build: import reader looping")
+ }
+ return 0
+ }
+
+ // Use r.peek as first input byte.
+ // Don't just return r.peek here: it might have been left by peekByte(false)
+ // and this might be peekByte(true).
+ c := r.peek
+ if c == 0 {
+ c = r.readByte()
+ }
+ for r.err == nil && !r.eof {
+ if skipSpace {
+ // For the purposes of this reader, semicolons are never necessary to
+ // understand the input and are treated as spaces.
+ switch c {
+ case ' ', '\f', '\t', '\r', '\n', ';':
+ c = r.readByte()
+ continue
+
+ case '/':
+ c = r.readByte()
+ if c == '/' {
+ for c != '\n' && r.err == nil && !r.eof {
+ c = r.readByte()
+ }
+ } else if c == '*' {
+ var c1 byte
+ for (c != '*' || c1 != '/') && r.err == nil {
+ if r.eof {
+ r.syntaxError()
+ }
+ c, c1 = c1, r.readByte()
+ }
+ } else {
+ r.syntaxError()
+ }
+ c = r.readByte()
+ continue
+ }
+ }
+ break
+ }
+ r.peek = c
+ return r.peek
+}
+
+// nextByte is like peekByte but advances beyond the returned byte.
+func (r *importReader) nextByte(skipSpace bool) byte {
+ c := r.peekByte(skipSpace)
+ r.peek = 0
+ return c
+}
+
+var goEmbed = []byte("go:embed")
+
+// findEmbed advances the input reader to the next //go:embed comment.
+// It reports whether it found a comment.
+// (Otherwise it found an error or EOF.)
+func (r *importReader) findEmbed(first bool) bool {
+ // The import block scan stopped after a non-space character,
+ // so the reader is not at the start of a line on the first call.
+ // After that, each //go:embed extraction leaves the reader
+ // at the end of a line.
+ startLine := !first
+ var c byte
+ for r.err == nil && !r.eof {
+ c = r.readByteNoBuf()
+ Reswitch:
+ switch c {
+ default:
+ startLine = false
+
+ case '\n':
+ startLine = true
+
+ case ' ', '\t':
+ // leave startLine alone
+
+ case '"':
+ startLine = false
+ for r.err == nil {
+ if r.eof {
+ r.syntaxError()
+ }
+ c = r.readByteNoBuf()
+ if c == '\\' {
+ r.readByteNoBuf()
+ if r.err != nil {
+ r.syntaxError()
+ return false
+ }
+ continue
+ }
+ if c == '"' {
+ c = r.readByteNoBuf()
+ goto Reswitch
+ }
+ }
+ goto Reswitch
+
+ case '`':
+ startLine = false
+ for r.err == nil {
+ if r.eof {
+ r.syntaxError()
+ }
+ c = r.readByteNoBuf()
+ if c == '`' {
+ c = r.readByteNoBuf()
+ goto Reswitch
+ }
+ }
+
+ case '\'':
+ startLine = false
+ for r.err == nil {
+ if r.eof {
+ r.syntaxError()
+ }
+ c = r.readByteNoBuf()
+ if c == '\\' {
+ r.readByteNoBuf()
+ if r.err != nil {
+ r.syntaxError()
+ return false
+ }
+ continue
+ }
+ if c == '\'' {
+ c = r.readByteNoBuf()
+ goto Reswitch
+ }
+ }
+
+ case '/':
+ c = r.readByteNoBuf()
+ switch c {
+ default:
+ startLine = false
+ goto Reswitch
+
+ case '*':
+ var c1 byte
+ for (c != '*' || c1 != '/') && r.err == nil {
+ if r.eof {
+ r.syntaxError()
+ }
+ c, c1 = c1, r.readByteNoBuf()
+ }
+ startLine = false
+
+ case '/':
+ if startLine {
+ // Try to read this as a //go:embed comment.
+ for i := range goEmbed {
+ c = r.readByteNoBuf()
+ if c != goEmbed[i] {
+ goto SkipSlashSlash
+ }
+ }
+ c = r.readByteNoBuf()
+ if c == ' ' || c == '\t' {
+ // Found one!
+ return true
+ }
+ }
+ SkipSlashSlash:
+ for c != '\n' && r.err == nil && !r.eof {
+ c = r.readByteNoBuf()
+ }
+ startLine = true
+ }
+ }
+ }
+ return false
+}
+
+// readKeyword reads the given keyword from the input.
+// If the keyword is not present, readKeyword records a syntax error.
+func (r *importReader) readKeyword(kw string) {
+ r.peekByte(true)
+ for i := 0; i < len(kw); i++ {
+ if r.nextByte(false) != kw[i] {
+ r.syntaxError()
+ return
+ }
+ }
+ if isIdent(r.peekByte(false)) {
+ r.syntaxError()
+ }
+}
+
+// readIdent reads an identifier from the input.
+// If an identifier is not present, readIdent records a syntax error.
+func (r *importReader) readIdent() {
+ c := r.peekByte(true)
+ if !isIdent(c) {
+ r.syntaxError()
+ return
+ }
+ for isIdent(r.peekByte(false)) {
+ r.peek = 0
+ }
+}
+
+// readString reads a quoted string literal from the input.
+// If a string is not present, readString records a syntax error.
+func (r *importReader) readString() {
+ switch r.nextByte(true) {
+ case '`':
+ for r.err == nil {
+ if r.nextByte(false) == '`' {
+ break
+ }
+ if r.eof {
+ r.syntaxError()
+ }
+ }
+ case '"':
+ for r.err == nil {
+ c := r.nextByte(false)
+ if c == '"' {
+ break
+ }
+ if r.eof || c == '\n' {
+ r.syntaxError()
+ }
+ if c == '\\' {
+ r.nextByte(false)
+ }
+ }
+ default:
+ r.syntaxError()
+ }
+}
+
+// readImport reads an import clause - optional identifier followed by quoted string -
+// from the input.
+func (r *importReader) readImport() {
+ c := r.peekByte(true)
+ if c == '.' {
+ r.peek = 0
+ } else if isIdent(c) {
+ r.readIdent()
+ }
+ r.readString()
+}
+
+// readComments is like io.ReadAll, except that it only reads the leading
+// block of comments in the file.
+func readComments(f io.Reader) ([]byte, error) {
+ r := newImportReader("", f)
+ r.peekByte(true)
+ if r.err == nil && !r.eof {
+ // Didn't reach EOF, so must have found a non-space byte. Remove it.
+ r.buf = r.buf[:len(r.buf)-1]
+ }
+ return r.buf, r.err
+}
+
+// readGoInfo expects a Go file as input and reads the file up to and including the import section.
+// It records what it learned in *info.
+// If info.fset is non-nil, readGoInfo parses the file and sets info.parsed, info.parseErr,
+// info.imports and info.embeds.
+//
+// It only returns an error if there are problems reading the file,
+// not for syntax errors in the file itself.
+func readGoInfo(f io.Reader, info *fileInfo) error {
+ r := newImportReader(info.name, f)
+
+ r.readKeyword("package")
+ r.readIdent()
+ for r.peekByte(true) == 'i' {
+ r.readKeyword("import")
+ if r.peekByte(true) == '(' {
+ r.nextByte(false)
+ for r.peekByte(true) != ')' && r.err == nil {
+ r.readImport()
+ }
+ r.nextByte(false)
+ } else {
+ r.readImport()
+ }
+ }
+
+ info.header = r.buf
+
+ // If we stopped successfully before EOF, we read a byte that told us we were done.
+ // Return all but that last byte, which would cause a syntax error if we let it through.
+ if r.err == nil && !r.eof {
+ info.header = r.buf[:len(r.buf)-1]
+ }
+
+ // If we stopped for a syntax error, consume the whole file so that
+ // we are sure we don't change the errors that go/parser returns.
+ if r.err == errSyntax {
+ r.err = nil
+ for r.err == nil && !r.eof {
+ r.readByte()
+ }
+ info.header = r.buf
+ }
+ if r.err != nil {
+ return r.err
+ }
+
+ if info.fset == nil {
+ return nil
+ }
+
+ // Parse file header & record imports.
+ info.parsed, info.parseErr = parser.ParseFile(info.fset, info.name, info.header, parser.ImportsOnly|parser.ParseComments)
+ if info.parseErr != nil {
+ return nil
+ }
+
+ hasEmbed := false
+ for _, decl := range info.parsed.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ for _, dspec := range d.Specs {
+ spec, ok := dspec.(*ast.ImportSpec)
+ if !ok {
+ continue
+ }
+ quoted := spec.Path.Value
+ path, err := strconv.Unquote(quoted)
+ if err != nil {
+ return fmt.Errorf("parser returned invalid quoted string: <%s>", quoted)
+ }
+ if path == "embed" {
+ hasEmbed = true
+ }
+
+ doc := spec.Doc
+ if doc == nil && len(d.Specs) == 1 {
+ doc = d.Doc
+ }
+ info.imports = append(info.imports, fileImport{path, spec.Pos(), doc})
+ }
+ }
+
+ // Extract directives.
+ for _, group := range info.parsed.Comments {
+ if group.Pos() >= info.parsed.Package {
+ break
+ }
+ for _, c := range group.List {
+ if strings.HasPrefix(c.Text, "//go:") {
+ info.directives = append(info.directives, build.Directive{Text: c.Text, Pos: info.fset.Position(c.Slash)})
+ }
+ }
+ }
+
+ // If the file imports "embed",
+ // we have to look for //go:embed comments
+ // in the remainder of the file.
+ // The compiler will enforce the mapping of comments to
+ // declared variables. We just need to know the patterns.
+ // If there were //go:embed comments earlier in the file
+ // (near the package statement or imports), the compiler
+ // will reject them. They can be (and have already been) ignored.
+ if hasEmbed {
+ var line []byte
+ for first := true; r.findEmbed(first); first = false {
+ line = line[:0]
+ pos := r.pos
+ for {
+ c := r.readByteNoBuf()
+ if c == '\n' || r.err != nil || r.eof {
+ break
+ }
+ line = append(line, c)
+ }
+ // Add args if line is well-formed.
+ // Ignore badly-formed lines - the compiler will report them when it finds them,
+ // and we can pretend they are not there to help go list succeed with what it knows.
+ embs, err := parseGoEmbed(string(line), pos)
+ if err == nil {
+ info.embeds = append(info.embeds, embs...)
+ }
+ }
+ }
+
+ return nil
+}
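+
+// readGoInfoExample is an illustrative sketch: readGoInfo reads only through
+// the import section, so info.header ends after the import block, and, because
+// info.fset is non-nil, info.imports records the two import paths.
+func readGoInfoExample() (*fileInfo, error) {
+ src := "package demo\n\nimport (\n\t\"embed\"\n\t\"fmt\"\n)\n\nvar x = fmt.Sprint(1)\n"
+ info := &fileInfo{name: "demo.go", fset: token.NewFileSet()}
+ err := readGoInfo(strings.NewReader(src), info)
+ return info, err
+}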
+
+// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
+// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
+// This is based on a similar function in cmd/compile/internal/gc/noder.go;
+// this version calculates position information as well.
+func parseGoEmbed(args string, pos token.Position) ([]fileEmbed, error) {
+ trimBytes := func(n int) {
+ pos.Offset += n
+ pos.Column += utf8.RuneCountInString(args[:n])
+ args = args[n:]
+ }
+ trimSpace := func() {
+ trim := strings.TrimLeftFunc(args, unicode.IsSpace)
+ trimBytes(len(args) - len(trim))
+ }
+
+ var list []fileEmbed
+ for trimSpace(); args != ""; trimSpace() {
+ var path string
+ pathPos := pos
+ Switch:
+ switch args[0] {
+ default:
+ i := len(args)
+ for j, c := range args {
+ if unicode.IsSpace(c) {
+ i = j
+ break
+ }
+ }
+ path = args[:i]
+ trimBytes(i)
+
+ case '`':
+ var ok bool
+ path, _, ok = strings.Cut(args[1:], "`")
+ if !ok {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ trimBytes(1 + len(path) + 1)
+
+ case '"':
+ i := 1
+ for ; i < len(args); i++ {
+ if args[i] == '\\' {
+ i++
+ continue
+ }
+ if args[i] == '"' {
+ q, err := strconv.Unquote(args[:i+1])
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
+ }
+ path = q
+ trimBytes(i + 1)
+ break Switch
+ }
+ }
+ if i >= len(args) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+
+ if args != "" {
+ r, _ := utf8.DecodeRuneInString(args)
+ if !unicode.IsSpace(r) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+ list = append(list, fileEmbed{path, pathPos})
+ }
+ return list, nil
+}
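+
+// parseGoEmbedExample is an illustrative sketch of the three pattern forms
+// accepted after //go:embed: bare, back-quoted, and double-quoted.
+func parseGoEmbedExample() ([]fileEmbed, error) {
+ pos := token.Position{Filename: "demo.go", Line: 10, Column: 12}
+ // Returns the patterns "static/*", "all files", and "a b.txt".
+ return parseGoEmbed("static/* `all files` \"a b.txt\"", pos)
+}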
diff --git a/src/cmd/go/internal/modindex/index_format.txt b/src/cmd/go/internal/modindex/index_format.txt
new file mode 100644
index 0000000..8b1d2c6
--- /dev/null
+++ b/src/cmd/go/internal/modindex/index_format.txt
@@ -0,0 +1,63 @@
+This file documents the index format that is read and written by this package.
+The index format is an encoding of a series of RawPackage structs.
+
+Field names refer to fields on RawPackage and rawFile.
+The file uses little endian encoding for the uint32s.
+Strings are written into the string table at the end of the file.
+Each string is prefixed with a uvarint-encoded length.
+Bools are written as uint32s: 0 for false and 1 for true.
+
+The following is the format for a full module:
+
+“go index v2\n”
+str uint32 - offset of string table
+n uint32 - number of packages
+for each rawPackage:
+ dirname - string offset
+ package - offset where package begins
+for each rawPackage:
+ error uint32 - string offset // error is produced by fsys.ReadDir or fmt.Errorf
+ dir uint32 - string offset (directory path relative to module root)
+ len(sourceFiles) uint32
+ sourceFiles [n]uint32 - offset to source file (relative to start of index file)
+ for each sourceFile:
+ error - string offset // error is either produced by fmt.Errorf or errors.New, or is io.EOF
+ parseError - string offset // if non-empty, a json-encoded parseError struct (see below). It is either produced by io.ReadAll, os.ReadFile, or errors.New, or is a scanner.Error or scanner.ErrorList
+ synopsis - string offset
+ name - string offset
+ pkgName - string offset
+ ignoreFile - uint32 bool // report the file in Ignored(Go|Other)Files because there was an error reading it or parsing its build constraints.
+ binaryOnly uint32 bool
+ cgoDirectives string offset // the #cgo directive lines in the comment on import "C"
+ goBuildConstraint - string offset
+ len(plusBuildConstraints) - uint32
+ plusBuildConstraints - [n]uint32 (string offsets)
+ len(imports) uint32
+ for each rawImport:
+ path - string offset
+ position - file, offset, line, column - uint32
+ len(embeds) uint32
+ for each embed:
+ pattern - string offset
+ position - file, offset, line, column - uint32
+ len(directives) uint32
+ for each directive:
+ text - string offset
+ position - file, offset, line, column - uint32
+[string table]
+0xFF (marker)
+
+The following is the format for a single indexed package:
+
+“go index v0\n”
+str uint32 - offset of string table
+for the single RawPackage:
+ [same RawPackage format as above]
+[string table]
+
+The following is the definition of the json-serialized parseError struct:
+
+type parseError struct {
+ ErrorList *scanner.ErrorList // non-nil if the error was an ErrorList, nil otherwise
+ ErrorString string // non-empty for all other cases
+}
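+
+As an illustrative example of the string encoding: a seven-byte string such as
+"runtime" occupies eight bytes in the string table, a one-byte uvarint length
+(7) followed by the seven bytes of the string itself.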
diff --git a/src/cmd/go/internal/modindex/index_test.go b/src/cmd/go/internal/modindex/index_test.go
new file mode 100644
index 0000000..6bc62f3
--- /dev/null
+++ b/src/cmd/go/internal/modindex/index_test.go
@@ -0,0 +1,104 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "go/build"
+ "internal/diff"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "testing"
+)
+
+func init() {
+ isTest = true
+ enabled = true // to allow GODEBUG=goindex=0 go test, when things are very broken
+}
+
+func TestIndex(t *testing.T) {
+ src := filepath.Join(runtime.GOROOT(), "src")
+ checkPkg := func(t *testing.T, m *Module, pkg string, data []byte) {
+ p := m.Package(pkg)
+ bp, err := p.Import(build.Default, build.ImportComment)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bp1, err := build.Default.Import(".", filepath.Join(src, pkg), build.ImportComment)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(bp, bp1) {
+ t.Errorf("mismatch")
+ t.Logf("index:\n%s", hex.Dump(data))
+
+ js, err := json.MarshalIndent(bp, "", "\t")
+ if err != nil {
+ t.Fatal(err)
+ }
+ js1, err := json.MarshalIndent(bp1, "", "\t")
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("diff:\n%s", diff.Diff("index", js, "correct", js1))
+ t.FailNow()
+ }
+ }
+
+ // Check packages in increasing complexity, one at a time.
+ pkgs := []string{
+ "crypto",
+ "encoding",
+ "unsafe",
+ "encoding/json",
+ "runtime",
+ "net",
+ }
+ var raws []*rawPackage
+ for _, pkg := range pkgs {
+ raw := importRaw(src, pkg)
+ raws = append(raws, raw)
+ t.Run(pkg, func(t *testing.T) {
+ data := encodeModuleBytes([]*rawPackage{raw})
+ m, err := fromBytes(src, data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkPkg(t, m, pkg, data)
+ })
+ }
+
+ // Check that a multi-package index works too.
+ t.Run("all", func(t *testing.T) {
+ data := encodeModuleBytes(raws)
+ m, err := fromBytes(src, data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, pkg := range pkgs {
+ checkPkg(t, m, pkg, data)
+ }
+ })
+}
+
+func TestImportRaw_IgnoreNonGo(t *testing.T) {
+ path := filepath.Join("testdata", "ignore_non_source")
+ p := importRaw(path, ".")
+
+ wantFiles := []string{"a.syso", "b.go", "c.c"}
+
+ var gotFiles []string
+ for i := range p.sourceFiles {
+ gotFiles = append(gotFiles, p.sourceFiles[i].name)
+ }
+
+ if !reflect.DeepEqual(gotFiles, wantFiles) {
+ t.Errorf("names of files in importRaw(testdata/ignore_non_source): got %v; want %v",
+ gotFiles, wantFiles)
+ }
+}
diff --git a/src/cmd/go/internal/modindex/read.go b/src/cmd/go/internal/modindex/read.go
new file mode 100644
index 0000000..83d5faf
--- /dev/null
+++ b/src/cmd/go/internal/modindex/read.go
@@ -0,0 +1,1037 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "go/build"
+ "go/build/constraint"
+ "go/token"
+ "internal/godebug"
+ "internal/goroot"
+ "path"
+ "path/filepath"
+ "runtime"
+ "runtime/debug"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+ "unsafe"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cache"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/par"
+ "cmd/go/internal/str"
+)
+
+// enabled allows the module index to be turned off on tip.
+// It will be removed before the release.
+// TODO(matloob): Remove enabled once we have more confidence in the
+// module index.
+var enabled = godebug.New("#goindex").Value() != "0"
+
+// Module represents an encoded module index file. It is used to
+// do the equivalent of build.Import of packages in the module and answer other
+// questions based on the index file's data.
+type Module struct {
+ modroot string
+ d *decoder
+ n int // number of packages
+}
+
+// moduleHash returns an ActionID corresponding to the state of the module
+// located at filesystem path modroot.
+func moduleHash(modroot string, ismodcache bool) (cache.ActionID, error) {
+ // We expect modules stored within the module cache to be checksummed and
+ // immutable, and we expect released modules within GOROOT to change only
+ // infrequently (when the Go version changes).
+ if !ismodcache {
+ // The contents of this module may change over time. We don't want to pay
+ // the cost to detect changes and re-index whenever they occur, so just
+ // don't index it at all.
+ //
+ // Note that this is true even for modules in GOROOT/src: non-release builds
+ // of the Go toolchain may have arbitrary development changes on top of the
+ // commit reported by runtime.Version, or could be completely artificial due
+ // to lacking a `git` binary (like "devel gomote.XXXXX", as synthesized by
+ // "gomote push" as of 2022-06-15). (Release builds shouldn't have
+ // modifications, but we don't want to use a behavior for releases that we
+ // haven't tested during development.)
+ return cache.ActionID{}, ErrNotIndexed
+ }
+
+ h := cache.NewHash("moduleIndex")
+ // TODO(bcmills): Since modules in the index are checksummed, we could
+ // probably improve the cache hit rate by keying off of the module
+ // path@version (perhaps including the checksum?) instead of the module root
+ // directory.
+ fmt.Fprintf(h, "module index %s %s %v\n", runtime.Version(), indexVersion, modroot)
+ return h.Sum(), nil
+}
+
+const modTimeCutoff = 2 * time.Second
+
+// dirHash returns an ActionID corresponding to the state of the package
+// located at filesystem path pkgdir.
+func dirHash(modroot, pkgdir string) (cache.ActionID, error) {
+ h := cache.NewHash("moduleIndex")
+ fmt.Fprintf(h, "modroot %s\n", modroot)
+ fmt.Fprintf(h, "package %s %s %v\n", runtime.Version(), indexVersion, pkgdir)
+ entries, err := fsys.ReadDir(pkgdir)
+ if err != nil {
+ // pkgdir might not be a directory. give up on hashing.
+ return cache.ActionID{}, ErrNotIndexed
+ }
+ cutoff := time.Now().Add(-modTimeCutoff)
+ for _, info := range entries {
+ if info.IsDir() {
+ continue
+ }
+
+ if !info.Mode().IsRegular() {
+ return cache.ActionID{}, ErrNotIndexed
+ }
+ // To avoid problems for very recent files where a new
+ // write might not change the mtime due to file system
+ // mtime precision, reject caching if a file was read that
+ // is less than modTimeCutoff old.
+ //
+ // This is the same strategy used for hashing test inputs.
+ // See hashOpen in cmd/go/internal/test/test.go for the
+ // corresponding code.
+ if info.ModTime().After(cutoff) {
+ return cache.ActionID{}, ErrNotIndexed
+ }
+
+ fmt.Fprintf(h, "file %v %v %v\n", info.Name(), info.ModTime(), info.Size())
+ }
+ return h.Sum(), nil
+}
+
+var ErrNotIndexed = errors.New("not in module index")
+
+var (
+ errDisabled = fmt.Errorf("%w: module indexing disabled", ErrNotIndexed)
+ errNotFromModuleCache = fmt.Errorf("%w: not from module cache", ErrNotIndexed)
+)
+
+// GetPackage returns the IndexPackage for the package at the given path.
+// It will return ErrNotIndexed if the directory should be read without
+// using the index, for instance because the index is disabled, or the package
+// is not in a module.
+func GetPackage(modroot, pkgdir string) (*IndexPackage, error) {
+ mi, err := GetModule(modroot)
+ if err == nil {
+ return mi.Package(relPath(pkgdir, modroot)), nil
+ }
+ if !errors.Is(err, errNotFromModuleCache) {
+ return nil, err
+ }
+ if cfg.BuildContext.Compiler == "gccgo" && str.HasPathPrefix(modroot, cfg.GOROOTsrc) {
+ return nil, err // gccgo has no sources for GOROOT packages.
+ }
+ return openIndexPackage(modroot, pkgdir)
+}
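+
+// getPackageExample is an illustrative sketch of the intended calling pattern:
+// callers treat ErrNotIndexed as a signal to read the directory directly
+// instead of through the index (the fallback itself is elided here).
+func getPackageExample(modroot, pkgdir string) (*IndexPackage, error) {
+ pkg, err := GetPackage(modroot, pkgdir)
+ if errors.Is(err, ErrNotIndexed) {
+ // Fall back to scanning pkgdir without the index.
+ return nil, err
+ }
+ return pkg, err
+}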
+
+// GetModule returns the Module for the given modroot.
+// It will return ErrNotIndexed if the directory should be read without
+// using the index, for instance because the index is disabled or the
+// module is not in the module cache.
+func GetModule(modroot string) (*Module, error) {
+ if !enabled || cache.DefaultDir() == "off" {
+ return nil, errDisabled
+ }
+ if modroot == "" {
+ panic("modindex.GetPackage called with empty modroot")
+ }
+ if cfg.BuildMod == "vendor" {
+ // Even if the main module is in the module cache,
+ // its vendored dependencies are not loaded from their
+ // usual cached locations.
+ return nil, errNotFromModuleCache
+ }
+ modroot = filepath.Clean(modroot)
+ if str.HasFilePathPrefix(modroot, cfg.GOROOTsrc) || !str.HasFilePathPrefix(modroot, cfg.GOMODCACHE) {
+ return nil, errNotFromModuleCache
+ }
+ return openIndexModule(modroot, true)
+}
+
+var mcache par.ErrCache[string, *Module]
+
+// openIndexModule returns the module index for modPath.
+// It will return ErrNotIndexed if the module can not be read
+// using the index because it contains symlinks.
+func openIndexModule(modroot string, ismodcache bool) (*Module, error) {
+ return mcache.Do(modroot, func() (*Module, error) {
+ fsys.Trace("openIndexModule", modroot)
+ id, err := moduleHash(modroot, ismodcache)
+ if err != nil {
+ return nil, err
+ }
+ data, _, err := cache.GetMmap(cache.Default(), id)
+ if err != nil {
+ // Couldn't read from modindex. Assume we couldn't read from
+ // the index because the module hasn't been indexed yet.
+ data, err = indexModule(modroot)
+ if err != nil {
+ return nil, err
+ }
+ if err = cache.PutBytes(cache.Default(), id, data); err != nil {
+ return nil, err
+ }
+ }
+ mi, err := fromBytes(modroot, data)
+ if err != nil {
+ return nil, err
+ }
+ return mi, nil
+ })
+}
+
+var pcache par.ErrCache[[2]string, *IndexPackage]
+
+func openIndexPackage(modroot, pkgdir string) (*IndexPackage, error) {
+ return pcache.Do([2]string{modroot, pkgdir}, func() (*IndexPackage, error) {
+ fsys.Trace("openIndexPackage", pkgdir)
+ id, err := dirHash(modroot, pkgdir)
+ if err != nil {
+ return nil, err
+ }
+ data, _, err := cache.GetMmap(cache.Default(), id)
+ if err != nil {
+ // Couldn't read from index. Assume we couldn't read from
+ // the index because the package hasn't been indexed yet.
+ data = indexPackage(modroot, pkgdir)
+ if err = cache.PutBytes(cache.Default(), id, data); err != nil {
+ return nil, err
+ }
+ }
+ pkg, err := packageFromBytes(modroot, data)
+ if err != nil {
+ return nil, err
+ }
+ return pkg, nil
+ })
+}
+
+var errCorrupt = errors.New("corrupt index")
+
+// protect marks the start of a large section of code that accesses the index.
+// It should be used as:
+//
+// defer unprotect(protect(), &err)
+//
+// It should not be used for trivial accesses which would be
+// dwarfed by the overhead of the defer.
+func protect() bool {
+ return debug.SetPanicOnFault(true)
+}
+
+var isTest = false
+
+// unprotect marks the end of a large section of code that accesses the index.
+// It should be used as:
+//
+// defer unprotect(protect(), &err)
+//
+// unprotect looks for panics due to errCorrupt or bad mmap accesses.
+// When it finds them, it adds explanatory text, consumes the panic, and sets *errp instead.
+// If errp is nil, unprotect adds the explanatory text but then calls base.Fatalf.
+func unprotect(old bool, errp *error) {
+ // SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
+ // that all its errors satisfy this interface, we'll only check for these errors so that
+ // we don't suppress panics that could have been produced from other sources.
+ type addrer interface {
+ Addr() uintptr
+ }
+
+ debug.SetPanicOnFault(old)
+
+ if e := recover(); e != nil {
+ if _, ok := e.(addrer); ok || e == errCorrupt {
+ // This panic was almost certainly caused by SetPanicOnFault or our panic(errCorrupt).
+ err := fmt.Errorf("error reading module index: %v", e)
+ if errp != nil {
+ *errp = err
+ return
+ }
+ if isTest {
+ panic(err)
+ }
+ base.Fatalf("%v", err)
+ }
+ // The panic was likely not caused by SetPanicOnFault.
+ panic(e)
+ }
+}
+
+// fromBytes returns a *Module given the encoded representation.
+func fromBytes(moddir string, data []byte) (m *Module, err error) {
+ if !enabled {
+ panic("use of index")
+ }
+
+ defer unprotect(protect(), &err)
+
+ if !bytes.HasPrefix(data, []byte(indexVersion+"\n")) {
+ return nil, errCorrupt
+ }
+
+ const hdr = len(indexVersion + "\n")
+ d := &decoder{data: data}
+ str := d.intAt(hdr)
+ if str < hdr+8 || len(d.data) < str {
+ return nil, errCorrupt
+ }
+ d.data, d.str = data[:str], d.data[str:]
+ // Check that string table looks valid.
+ // First string is empty string (length 0),
+ // and we leave a marker byte 0xFF at the end
+ // just to make sure that the file is not truncated.
+ if len(d.str) == 0 || d.str[0] != 0 || d.str[len(d.str)-1] != 0xFF {
+ return nil, errCorrupt
+ }
+
+ n := d.intAt(hdr + 4)
+ if n < 0 || n > (len(d.data)-8)/8 {
+ return nil, errCorrupt
+ }
+
+ m = &Module{
+ moddir,
+ d,
+ n,
+ }
+ return m, nil
+}
+
+// packageFromBytes returns a *IndexPackage given the encoded representation.
+func packageFromBytes(modroot string, data []byte) (p *IndexPackage, err error) {
+ m, err := fromBytes(modroot, data)
+ if err != nil {
+ return nil, err
+ }
+ if m.n != 1 {
+ return nil, fmt.Errorf("corrupt single-package index")
+ }
+ return m.pkg(0), nil
+}
+
+// pkgDir returns the dir string of the i'th package in the index.
+func (m *Module) pkgDir(i int) string {
+ if i < 0 || i >= m.n {
+ panic(errCorrupt)
+ }
+ return m.d.stringAt(12 + 8 + 8*i)
+}
+
+// pkgOff returns the offset of the data for the i'th package in the index.
+func (m *Module) pkgOff(i int) int {
+ if i < 0 || i >= m.n {
+ panic(errCorrupt)
+ }
+ return m.d.intAt(12 + 8 + 8*i + 4)
+}
+
+// Walk calls f for each package in the index, passing the path to that package relative to the module root.
+func (m *Module) Walk(f func(path string)) {
+ defer unprotect(protect(), nil)
+ for i := 0; i < m.n; i++ {
+ f(m.pkgDir(i))
+ }
+}
+
+// relPath returns the path relative to the module's root.
+func relPath(path, modroot string) string {
+ return str.TrimFilePathPrefix(filepath.Clean(path), filepath.Clean(modroot))
+}
+
+var installgorootAll = godebug.New("installgoroot").Value() == "all"
+
+// Import is the equivalent of build.Import given the information in Module.
+func (rp *IndexPackage) Import(bctxt build.Context, mode build.ImportMode) (p *build.Package, err error) {
+ defer unprotect(protect(), &err)
+
+ ctxt := (*Context)(&bctxt)
+
+ p = &build.Package{}
+
+ p.ImportPath = "."
+ p.Dir = filepath.Join(rp.modroot, rp.dir)
+
+ var pkgerr error
+ switch ctxt.Compiler {
+ case "gccgo", "gc":
+ default:
+ // Save error for end of function.
+ pkgerr = fmt.Errorf("import %q: unknown compiler %q", p.Dir, ctxt.Compiler)
+ }
+
+ if p.Dir == "" {
+ return p, fmt.Errorf("import %q: import of unknown directory", p.Dir)
+ }
+
+ // goroot and gopath
+ inTestdata := func(sub string) bool {
+ return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || str.HasPathPrefix(sub, "testdata")
+ }
+ var pkga string
+ if !inTestdata(rp.dir) {
+ // In build.go, p.Root should only be set in the non-local-import case, or in
+ // GOROOT or GOPATH. Since module mode only calls Import with path set to "."
+ // and the module index doesn't apply outside modules, the GOROOT case is
+ // the only case where p.Root needs to be set.
+ if ctxt.GOROOT != "" && str.HasFilePathPrefix(p.Dir, cfg.GOROOTsrc) && p.Dir != cfg.GOROOTsrc {
+ p.Root = ctxt.GOROOT
+ p.Goroot = true
+ modprefix := str.TrimFilePathPrefix(rp.modroot, cfg.GOROOTsrc)
+ p.ImportPath = rp.dir
+ if modprefix != "" {
+ p.ImportPath = filepath.Join(modprefix, p.ImportPath)
+ }
+
+ // Set GOROOT-specific fields (sometimes for modules in a GOPATH directory).
+ // The fields set below (SrcRoot, PkgRoot, BinDir, PkgTargetRoot, and PkgObj)
+ // are only set in build.Import if p.Root != "".
+ var pkgtargetroot string
+ suffix := ""
+ if ctxt.InstallSuffix != "" {
+ suffix = "_" + ctxt.InstallSuffix
+ }
+ switch ctxt.Compiler {
+ case "gccgo":
+ pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
+ dir, elem := path.Split(p.ImportPath)
+ pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a"
+ case "gc":
+ pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
+ pkga = pkgtargetroot + "/" + p.ImportPath + ".a"
+ }
+ p.SrcRoot = ctxt.joinPath(p.Root, "src")
+ p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
+ p.BinDir = ctxt.joinPath(p.Root, "bin")
+ if pkga != "" {
+ // Always set PkgTargetRoot. It might be used when building in shared
+ // mode.
+ p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot)
+
+ // Set the install target if applicable.
+ if !p.Goroot || (installgorootAll && p.ImportPath != "unsafe" && p.ImportPath != "builtin") {
+ p.PkgObj = ctxt.joinPath(p.Root, pkga)
+ }
+ }
+ }
+ }
+
+ if rp.error != nil {
+ if errors.Is(rp.error, errCannotFindPackage) && ctxt.Compiler == "gccgo" && p.Goroot {
+ return p, nil
+ }
+ return p, rp.error
+ }
+
+ if mode&build.FindOnly != 0 {
+ return p, pkgerr
+ }
+
+ // We need to do a second round of bad file processing.
+ var badGoError error
+ badGoFiles := make(map[string]bool)
+ badGoFile := func(name string, err error) {
+ if badGoError == nil {
+ badGoError = err
+ }
+ if !badGoFiles[name] {
+ p.InvalidGoFiles = append(p.InvalidGoFiles, name)
+ badGoFiles[name] = true
+ }
+ }
+
+ var Sfiles []string // files with ".S" (capital S) or ".sx" (the ".S" equivalent for case-insensitive filesystems)
+ var firstFile string
+ embedPos := make(map[string][]token.Position)
+ testEmbedPos := make(map[string][]token.Position)
+ xTestEmbedPos := make(map[string][]token.Position)
+ importPos := make(map[string][]token.Position)
+ testImportPos := make(map[string][]token.Position)
+ xTestImportPos := make(map[string][]token.Position)
+ allTags := make(map[string]bool)
+ for _, tf := range rp.sourceFiles {
+ name := tf.name()
+ // Check errors for Go files and call badGoFile to put them in
+ // InvalidGoFiles if they do have an error.
+ if strings.HasSuffix(name, ".go") {
+ if error := tf.error(); error != "" {
+ badGoFile(name, errors.New(tf.error()))
+ continue
+ } else if parseError := tf.parseError(); parseError != "" {
+ badGoFile(name, parseErrorFromString(tf.parseError()))
+ // Fall through: we still want to list files with parse errors.
+ }
+ }
+
+ var shouldBuild = true
+ if !ctxt.goodOSArchFile(name, allTags) && !ctxt.UseAllFiles {
+ shouldBuild = false
+ } else if goBuildConstraint := tf.goBuildConstraint(); goBuildConstraint != "" {
+ x, err := constraint.Parse(goBuildConstraint)
+ if err != nil {
+ return p, fmt.Errorf("%s: parsing //go:build line: %v", name, err)
+ }
+ shouldBuild = ctxt.eval(x, allTags)
+ } else if plusBuildConstraints := tf.plusBuildConstraints(); len(plusBuildConstraints) > 0 {
+ for _, text := range plusBuildConstraints {
+ if x, err := constraint.Parse(text); err == nil {
+ if !ctxt.eval(x, allTags) {
+ shouldBuild = false
+ }
+ }
+ }
+ }
+
+ ext := nameExt(name)
+ if !shouldBuild || tf.ignoreFile() {
+ if ext == ".go" {
+ p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+ } else if fileListForExt(p, ext) != nil {
+ p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, name)
+ }
+ continue
+ }
+
+ // Going to save the file. For non-Go files, can stop here.
+ switch ext {
+ case ".go":
+ // keep going
+ case ".S", ".sx":
+ // special case for cgo, handled at end
+ Sfiles = append(Sfiles, name)
+ continue
+ default:
+ if list := fileListForExt(p, ext); list != nil {
+ *list = append(*list, name)
+ }
+ continue
+ }
+
+ pkg := tf.pkgName()
+ if pkg == "documentation" {
+ p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+ continue
+ }
+ isTest := strings.HasSuffix(name, "_test.go")
+ isXTest := false
+ if isTest && strings.HasSuffix(tf.pkgName(), "_test") && p.Name != tf.pkgName() {
+ isXTest = true
+ pkg = pkg[:len(pkg)-len("_test")]
+ }
+
+ if !isTest && tf.binaryOnly() {
+ p.BinaryOnly = true
+ }
+
+ if p.Name == "" {
+ p.Name = pkg
+ firstFile = name
+ } else if pkg != p.Name {
+ // TODO(#45999): The choice of p.Name is arbitrary based on file iteration
+ // order. Instead of resolving p.Name arbitrarily, we should clear out the
+ // existing Name and mark the existing files as also invalid.
+ badGoFile(name, &MultiplePackageError{
+ Dir: p.Dir,
+ Packages: []string{p.Name, pkg},
+ Files: []string{firstFile, name},
+ })
+ }
+ // Grab the first package comment as docs, provided it is not from a test file.
+ if p.Doc == "" && !isTest && !isXTest {
+ if synopsis := tf.synopsis(); synopsis != "" {
+ p.Doc = synopsis
+ }
+ }
+
+ // Record Imports and information about cgo.
+ isCgo := false
+ imports := tf.imports()
+ for _, imp := range imports {
+ if imp.path == "C" {
+ if isTest {
+ badGoFile(name, fmt.Errorf("use of cgo in test %s not supported", name))
+ continue
+ }
+ isCgo = true
+ }
+ }
+ if directives := tf.cgoDirectives(); directives != "" {
+ if err := ctxt.saveCgo(name, p, directives); err != nil {
+ badGoFile(name, err)
+ }
+ }
+
+ var fileList *[]string
+ var importMap, embedMap map[string][]token.Position
+ var directives *[]build.Directive
+ switch {
+ case isCgo:
+ allTags["cgo"] = true
+ if ctxt.CgoEnabled {
+ fileList = &p.CgoFiles
+ importMap = importPos
+ embedMap = embedPos
+ directives = &p.Directives
+ } else {
+ // Ignore Imports and Embeds from cgo files if cgo is disabled.
+ fileList = &p.IgnoredGoFiles
+ }
+ case isXTest:
+ fileList = &p.XTestGoFiles
+ importMap = xTestImportPos
+ embedMap = xTestEmbedPos
+ directives = &p.XTestDirectives
+ case isTest:
+ fileList = &p.TestGoFiles
+ importMap = testImportPos
+ embedMap = testEmbedPos
+ directives = &p.TestDirectives
+ default:
+ fileList = &p.GoFiles
+ importMap = importPos
+ embedMap = embedPos
+ directives = &p.Directives
+ }
+ *fileList = append(*fileList, name)
+ if importMap != nil {
+ for _, imp := range imports {
+ importMap[imp.path] = append(importMap[imp.path], imp.position)
+ }
+ }
+ if embedMap != nil {
+ for _, e := range tf.embeds() {
+ embedMap[e.pattern] = append(embedMap[e.pattern], e.position)
+ }
+ }
+ if directives != nil {
+ *directives = append(*directives, tf.directives()...)
+ }
+ }
+
+ p.EmbedPatterns, p.EmbedPatternPos = cleanDecls(embedPos)
+ p.TestEmbedPatterns, p.TestEmbedPatternPos = cleanDecls(testEmbedPos)
+ p.XTestEmbedPatterns, p.XTestEmbedPatternPos = cleanDecls(xTestEmbedPos)
+
+ p.Imports, p.ImportPos = cleanDecls(importPos)
+ p.TestImports, p.TestImportPos = cleanDecls(testImportPos)
+ p.XTestImports, p.XTestImportPos = cleanDecls(xTestImportPos)
+
+ for tag := range allTags {
+ p.AllTags = append(p.AllTags, tag)
+ }
+ sort.Strings(p.AllTags)
+
+ if len(p.CgoFiles) > 0 {
+ p.SFiles = append(p.SFiles, Sfiles...)
+ sort.Strings(p.SFiles)
+ } else {
+ p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, Sfiles...)
+ sort.Strings(p.IgnoredOtherFiles)
+ }
+
+ if badGoError != nil {
+ return p, badGoError
+ }
+ if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
+ return p, &build.NoGoError{Dir: p.Dir}
+ }
+ return p, pkgerr
+}
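+
+// importExample is an illustrative sketch: for an indexed directory, Import
+// plays the role of build.ImportDir, filling in a *build.Package from the
+// index data. The package path used here is hypothetical.
+func importExample(m *Module) (*build.Package, error) {
+ p := m.Package("internal/demo")
+ return p.Import(build.Default, build.ImportComment)
+}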
+
+// IsStandardPackage reports whether path is a standard package
+// for the given goroot and compiler, using the module index if possible
+// and otherwise falling back to internal/goroot.IsStandardPackage.
+func IsStandardPackage(goroot_, compiler, path string) bool {
+ if !enabled || compiler != "gc" {
+ return goroot.IsStandardPackage(goroot_, compiler, path)
+ }
+
+ reldir := filepath.FromSlash(path) // relative dir path in module index for package
+ modroot := filepath.Join(goroot_, "src")
+ if str.HasFilePathPrefix(reldir, "cmd") {
+ reldir = str.TrimFilePathPrefix(reldir, "cmd")
+ modroot = filepath.Join(modroot, "cmd")
+ }
+ if _, err := GetPackage(modroot, filepath.Join(modroot, reldir)); err == nil {
+ // Note that goroot.IsStandardPackage doesn't check that the directory
+ // actually contains any Go files, merely that it exists. GetPackage
+ // returning a nil error is enough for us to know the directory exists.
+ return true
+ } else if errors.Is(err, ErrNotIndexed) {
+ // Fall back because package isn't indexable. (Probably because
+ // a file was modified recently)
+ return goroot.IsStandardPackage(goroot_, compiler, path)
+ }
+ return false
+}
+
+// IsDirWithGoFiles is the equivalent of fsys.IsDirWithGoFiles using the information in the index.
+func (rp *IndexPackage) IsDirWithGoFiles() (_ bool, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("error reading module index: %v", e)
+ }
+ }()
+ for _, sf := range rp.sourceFiles {
+ if strings.HasSuffix(sf.name(), ".go") {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// ScanDir implements imports.ScanDir using the information in the index.
+func (rp *IndexPackage) ScanDir(tags map[string]bool) (sortedImports []string, sortedTestImports []string, err error) {
+ // TODO(matloob) dir should eventually be relative to indexed directory
+ // TODO(matloob): skip reading raw package and jump straight to data we need?
+
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("error reading module index: %v", e)
+ }
+ }()
+
+ imports_ := make(map[string]bool)
+ testImports := make(map[string]bool)
+ numFiles := 0
+
+Files:
+ for _, sf := range rp.sourceFiles {
+ name := sf.name()
+ if strings.HasPrefix(name, "_") || strings.HasPrefix(name, ".") || !strings.HasSuffix(name, ".go") || !imports.MatchFile(name, tags) {
+ continue
+ }
+
+ // The following section exists for backwards compatibility reasons:
+ // scanDir ignores files with import "C" when collecting the list
+ // of imports unless the "cgo" tag is provided. The following comment
+ // is copied from the original.
+ //
+ // import "C" is implicit requirement of cgo tag.
+ // When listing files on the command line (explicitFiles=true)
+ // we do not apply build tag filtering but we still do apply
+ // cgo filtering, so no explicitFiles check here.
+ // Why? Because we always have, and it's not worth breaking
+ // that behavior now.
+ imps := sf.imports() // TODO(matloob): directly read import paths to avoid the extra strings?
+ for _, imp := range imps {
+ if imp.path == "C" && !tags["cgo"] && !tags["*"] {
+ continue Files
+ }
+ }
+
+ if !shouldBuild(sf, tags) {
+ continue
+ }
+ numFiles++
+ m := imports_
+ if strings.HasSuffix(name, "_test.go") {
+ m = testImports
+ }
+ for _, p := range imps {
+ m[p.path] = true
+ }
+ }
+ if numFiles == 0 {
+ return nil, nil, imports.ErrNoGo
+ }
+ return keys(imports_), keys(testImports), nil
+}
+
+func keys(m map[string]bool) []string {
+ list := make([]string, 0, len(m))
+ for k := range m {
+ list = append(list, k)
+ }
+ sort.Strings(list)
+ return list
+}
+
+// shouldBuild implements imports.ShouldBuild in terms of an index sourceFile.
+func shouldBuild(sf *sourceFile, tags map[string]bool) bool {
+ if goBuildConstraint := sf.goBuildConstraint(); goBuildConstraint != "" {
+ x, err := constraint.Parse(goBuildConstraint)
+ if err != nil {
+ return false
+ }
+ return imports.Eval(x, tags, true)
+ }
+
+ plusBuildConstraints := sf.plusBuildConstraints()
+ for _, text := range plusBuildConstraints {
+ if x, err := constraint.Parse(text); err == nil {
+ if !imports.Eval(x, tags, true) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// IndexPackage holds the information from the module index that is needed to
+// load a package in a specific directory.
+type IndexPackage struct {
+ error error
+ dir string // directory of the package relative to the modroot
+
+ modroot string
+
+ // Source files
+ sourceFiles []*sourceFile
+}
+
+var errCannotFindPackage = errors.New("cannot find package")
+
+// Package returns the IndexPackage for the package at the given path (relative to the module root).
+// If the package does not exist, Package returns an IndexPackage that will return an
+// appropriate error from its methods.
+func (m *Module) Package(path string) *IndexPackage {
+ defer unprotect(protect(), nil)
+
+ i, ok := sort.Find(m.n, func(i int) int {
+ return strings.Compare(path, m.pkgDir(i))
+ })
+ if !ok {
+ return &IndexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(m.modroot, path))}
+ }
+ return m.pkg(i)
+}
+
+// pkg returns the i'th IndexPackage in m.
+func (m *Module) pkg(i int) *IndexPackage {
+ r := m.d.readAt(m.pkgOff(i))
+ p := new(IndexPackage)
+ if errstr := r.string(); errstr != "" {
+ p.error = errors.New(errstr)
+ }
+ p.dir = r.string()
+ p.sourceFiles = make([]*sourceFile, r.int())
+ for i := range p.sourceFiles {
+ p.sourceFiles[i] = &sourceFile{
+ d: m.d,
+ pos: r.int(),
+ }
+ }
+ p.modroot = m.modroot
+ return p
+}
+
+// sourceFile represents the information about a single source file in the module index.
+type sourceFile struct {
+ d *decoder // encoding of this source file
+ pos int // start of sourceFile encoding in d
+ onceReadImports sync.Once
+ savedImports []rawImport // saved imports so that they're only read once
+}
+
+// Offsets for fields in the sourceFile.
+const (
+ sourceFileError = 4 * iota
+ sourceFileParseError
+ sourceFileSynopsis
+ sourceFileName
+ sourceFilePkgName
+ sourceFileIgnoreFile
+ sourceFileBinaryOnly
+ sourceFileCgoDirectives
+ sourceFileGoBuildConstraint
+ sourceFileNumPlusBuildConstraints
+)
+
+func (sf *sourceFile) error() string {
+ return sf.d.stringAt(sf.pos + sourceFileError)
+}
+func (sf *sourceFile) parseError() string {
+ return sf.d.stringAt(sf.pos + sourceFileParseError)
+}
+func (sf *sourceFile) synopsis() string {
+ return sf.d.stringAt(sf.pos + sourceFileSynopsis)
+}
+func (sf *sourceFile) name() string {
+ return sf.d.stringAt(sf.pos + sourceFileName)
+}
+func (sf *sourceFile) pkgName() string {
+ return sf.d.stringAt(sf.pos + sourceFilePkgName)
+}
+func (sf *sourceFile) ignoreFile() bool {
+ return sf.d.boolAt(sf.pos + sourceFileIgnoreFile)
+}
+func (sf *sourceFile) binaryOnly() bool {
+ return sf.d.boolAt(sf.pos + sourceFileBinaryOnly)
+}
+func (sf *sourceFile) cgoDirectives() string {
+ return sf.d.stringAt(sf.pos + sourceFileCgoDirectives)
+}
+func (sf *sourceFile) goBuildConstraint() string {
+ return sf.d.stringAt(sf.pos + sourceFileGoBuildConstraint)
+}
+
+func (sf *sourceFile) plusBuildConstraints() []string {
+ pos := sf.pos + sourceFileNumPlusBuildConstraints
+ n := sf.d.intAt(pos)
+ pos += 4
+ ret := make([]string, n)
+ for i := 0; i < n; i++ {
+ ret[i] = sf.d.stringAt(pos)
+ pos += 4
+ }
+ return ret
+}
+
+func (sf *sourceFile) importsOffset() int {
+ pos := sf.pos + sourceFileNumPlusBuildConstraints
+ n := sf.d.intAt(pos)
+ // each build constraint is 1 uint32
+ return pos + 4 + n*4
+}
+
+func (sf *sourceFile) embedsOffset() int {
+ pos := sf.importsOffset()
+ n := sf.d.intAt(pos)
+ // each import is 5 uint32s (string + tokpos)
+ return pos + 4 + n*(4*5)
+}
+
+func (sf *sourceFile) directivesOffset() int {
+ pos := sf.embedsOffset()
+ n := sf.d.intAt(pos)
+ // each embed is 5 uint32s (string + tokpos)
+ return pos + 4 + n*(4*5)
+}
+
+func (sf *sourceFile) imports() []rawImport {
+ sf.onceReadImports.Do(func() {
+ importsOffset := sf.importsOffset()
+ r := sf.d.readAt(importsOffset)
+ numImports := r.int()
+ ret := make([]rawImport, numImports)
+ for i := 0; i < numImports; i++ {
+ ret[i] = rawImport{r.string(), r.tokpos()}
+ }
+ sf.savedImports = ret
+ })
+ return sf.savedImports
+}
+
+func (sf *sourceFile) embeds() []embed {
+ embedsOffset := sf.embedsOffset()
+ r := sf.d.readAt(embedsOffset)
+ numEmbeds := r.int()
+ ret := make([]embed, numEmbeds)
+ for i := range ret {
+ ret[i] = embed{r.string(), r.tokpos()}
+ }
+ return ret
+}
+
+func (sf *sourceFile) directives() []build.Directive {
+ directivesOffset := sf.directivesOffset()
+ r := sf.d.readAt(directivesOffset)
+ numDirectives := r.int()
+ ret := make([]build.Directive, numDirectives)
+ for i := range ret {
+ ret[i] = build.Directive{Text: r.string(), Pos: r.tokpos()}
+ }
+ return ret
+}
+
+func asString(b []byte) string {
+ return unsafe.String(unsafe.SliceData(b), len(b))
+}
+
+// A decoder helps decode the index format.
+type decoder struct {
+ data []byte // data after header
+ str []byte // string table
+}
+
+// intAt returns the int at the given offset in d.data.
+func (d *decoder) intAt(off int) int {
+ if off < 0 || len(d.data)-off < 4 {
+ panic(errCorrupt)
+ }
+ i := binary.LittleEndian.Uint32(d.data[off : off+4])
+ if int32(i)>>31 != 0 {
+ panic(errCorrupt)
+ }
+ return int(i)
+}
+
+// boolAt returns the bool at the given offset in d.data.
+func (d *decoder) boolAt(off int) bool {
+ return d.intAt(off) != 0
+}
+
+// stringAt returns the string pointed at by the int at the given offset in d.data.
+func (d *decoder) stringAt(off int) string {
+ return d.stringTableAt(d.intAt(off))
+}
+
+// stringTableAt returns the string at the given offset in the string table d.str.
+func (d *decoder) stringTableAt(off int) string {
+ if off < 0 || off >= len(d.str) {
+ panic(errCorrupt)
+ }
+ s := d.str[off:]
+ v, n := binary.Uvarint(s)
+ if n <= 0 || v > uint64(len(s[n:])) {
+ panic(errCorrupt)
+ }
+ return asString(s[n : n+int(v)])
+}
+
+// A reader reads sequential fields from a section of the index format.
+type reader struct {
+ d *decoder
+ pos int
+}
+
+// readAt returns a reader starting at the given position in d.
+func (d *decoder) readAt(pos int) *reader {
+ return &reader{d, pos}
+}
+
+// int reads the next int.
+func (r *reader) int() int {
+ i := r.d.intAt(r.pos)
+ r.pos += 4
+ return i
+}
+
+// string reads the next string.
+func (r *reader) string() string {
+ return r.d.stringTableAt(r.int())
+}
+
+// bool reads the next bool.
+func (r *reader) bool() bool {
+ return r.int() != 0
+}
+
+// tokpos reads the next token.Position.
+func (r *reader) tokpos() token.Position {
+ return token.Position{
+ Filename: r.string(),
+ Offset: r.int(),
+ Line: r.int(),
+ Column: r.int(),
+ }
+}
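
The decoder/reader pair above is easiest to follow with concrete bytes: ints are
little-endian uint32s in d.data, and each string is stored in d.str as a uvarint
length followed by the bytes, with d.data holding only offsets into that table.
A minimal sketch (hypothetical helper, assumed to live in package modindex since
decoder is unexported, with encoding/binary and fmt imported):

	func exampleDecode() {
		// String table: "" at offset 0 (as newEncoder writes it), "hello" at offset 1.
		str := []byte{0}
		str = binary.AppendUvarint(str, uint64(len("hello")))
		str = append(str, "hello"...)

		// Data section: one int field (42) followed by one string field,
		// stored as offset 1 into the string table.
		var data []byte
		data = binary.LittleEndian.AppendUint32(data, 42)
		data = binary.LittleEndian.AppendUint32(data, 1)

		d := &decoder{data: data, str: str}
		r := d.readAt(0)
		fmt.Println(r.int(), r.string()) // 42 hello
	}
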
diff --git a/src/cmd/go/internal/modindex/scan.go b/src/cmd/go/internal/modindex/scan.go
new file mode 100644
index 0000000..6ca73e2
--- /dev/null
+++ b/src/cmd/go/internal/modindex/scan.go
@@ -0,0 +1,290 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/str"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "go/build"
+ "go/doc"
+ "go/scanner"
+ "go/token"
+ "io/fs"
+ "path/filepath"
+ "strings"
+)
+
+// moduleWalkErr returns filepath.SkipDir if the directory isn't relevant
+// when indexing a module or generating a file hash, ErrNotIndexed if the
+// module shouldn't be indexed, and nil otherwise.
+func moduleWalkErr(root string, path string, info fs.FileInfo, err error) error {
+ if err != nil {
+ return ErrNotIndexed
+ }
+ // stop at module boundaries
+ if info.IsDir() && path != root {
+ if fi, err := fsys.Stat(filepath.Join(path, "go.mod")); err == nil && !fi.IsDir() {
+ return filepath.SkipDir
+ }
+ }
+ if info.Mode()&fs.ModeSymlink != 0 {
+ if target, err := fsys.Stat(path); err == nil && target.IsDir() {
+ // return an error to make the module hash invalid.
+ // Symlink directories in modules are tricky, so we won't index
+ // modules that contain them.
+ // TODO(matloob): perhaps don't return this error if the symlink leads to
+ // a directory with a go.mod file.
+ return ErrNotIndexed
+ }
+ }
+ return nil
+}
+
+// indexModule indexes the module at the given directory and returns its
+// encoded representation. It returns ErrNotIndexed if the module can't
+// be indexed because it contains symlinks.
+func indexModule(modroot string) ([]byte, error) {
+ fsys.Trace("indexModule", modroot)
+ var packages []*rawPackage
+
+ // If the root itself is a symlink to a directory,
+ // we want to follow it (see https://go.dev/issue/50807).
+ // Add a trailing separator to force that to happen.
+ root := str.WithFilePathSeparator(modroot)
+ err := fsys.Walk(root, func(path string, info fs.FileInfo, err error) error {
+ if err := moduleWalkErr(root, path, info, err); err != nil {
+ return err
+ }
+
+ if !info.IsDir() {
+ return nil
+ }
+ if !strings.HasPrefix(path, root) {
+ panic(fmt.Errorf("path %v in walk doesn't have modroot %v as prefix", path, modroot))
+ }
+ rel := path[len(root):]
+ packages = append(packages, importRaw(modroot, rel))
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return encodeModuleBytes(packages), nil
+}
+
+// indexPackage indexes the package at the given directory and returns its
+// encoded representation. Errors encountered while scanning the package are
+// recorded in the encoded package itself rather than returned.
+func indexPackage(modroot, pkgdir string) []byte {
+ fsys.Trace("indexPackage", pkgdir)
+ p := importRaw(modroot, relPath(pkgdir, modroot))
+ return encodePackageBytes(p)
+}
+
+// rawPackage holds the information from each package that's needed to
+// fill a build.Package once the context is available.
+type rawPackage struct {
+ error string
+ dir string // directory containing package sources, relative to the module root
+
+ // Source files
+ sourceFiles []*rawFile
+}
+
+type parseError struct {
+ ErrorList *scanner.ErrorList
+ ErrorString string
+}
+
+// parseErrorToString converts the error from parsing the file into a string
+// representation. A nil error is converted to an empty string, and all other
+// errors are converted to a JSON-marshalled parseError struct, with ErrorList
+// set for errors of type scanner.ErrorList, and ErrorString set to the error's
+// string representation for all other errors.
+func parseErrorToString(err error) string {
+ if err == nil {
+ return ""
+ }
+ var p parseError
+ if e, ok := err.(scanner.ErrorList); ok {
+ p.ErrorList = &e
+ } else {
+ p.ErrorString = e.Error()
+ }
+ s, err := json.Marshal(p)
+ if err != nil {
+ panic(err) // This should be impossible because scanner.Error contains only strings and ints.
+ }
+ return string(s)
+}
+
+// parseErrorFromString converts a string produced by parseErrorToString back
+// to an error. An empty string is converted to a nil error, and all
+// other strings are expected to be JSON-marshalled parseError structs.
+// The two functions are meant to preserve the structure of an
+// error of type scanner.ErrorList in a round trip, but may not preserve the
+// structure of other errors.
+func parseErrorFromString(s string) error {
+ if s == "" {
+ return nil
+ }
+ var p parseError
+ if err := json.Unmarshal([]byte(s), &p); err != nil {
+ base.Fatalf(`go: invalid parse error value in index: %q. This indicates a corrupted index. Run "go clean -cache" to reset the module cache.`, s)
+ }
+ if p.ErrorList != nil {
+ return *p.ErrorList
+ }
+ return errors.New(p.ErrorString)
+}
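
A quick sketch of the round trip these two functions implement (hypothetical
example in the same package, fmt assumed imported): a scanner.ErrorList keeps
its structured form, while other errors come back as plain errors built from
their message.

	func exampleParseErrorRoundTrip() {
		var list scanner.ErrorList
		list.Add(token.Position{Filename: "a.go", Line: 3}, "expected ';'")

		s := parseErrorToString(list)  // JSON with the ErrorList field set
		err := parseErrorFromString(s) // decoded back into an error value

		_, isList := err.(scanner.ErrorList)
		fmt.Println(isList) // true: the structured form survives the round trip
	}
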
+
+// rawFile is the struct representation of the file holding all
+// information in its fields.
+type rawFile struct {
+ error string
+ parseError string
+
+ name string
+ synopsis string // doc.Synopsis of package comment... Compute synopsis on all of these?
+ pkgName string
+ ignoreFile bool // starts with _ or . or should otherwise always be ignored
+ binaryOnly bool // cannot be rebuilt from source (has //go:binary-only-package comment)
+ cgoDirectives string // the #cgo directive lines in the comment on import "C"
+ goBuildConstraint string
+ plusBuildConstraints []string
+ imports []rawImport
+ embeds []embed
+ directives []build.Directive
+}
+
+type rawImport struct {
+ path string
+ position token.Position
+}
+
+type embed struct {
+ pattern string
+ position token.Position
+}
+
+// importRaw fills a rawPackage from the package files in the directory
+// reldir, given as a path relative to modroot.
+func importRaw(modroot, reldir string) *rawPackage {
+ p := &rawPackage{
+ dir: reldir,
+ }
+
+ absdir := filepath.Join(modroot, reldir)
+
+ // We haven't yet checked that the p.dir directory exists. This is the right
+ // time to do that check. We can't do it earlier, because we want to gather
+ // partial information for the non-nil *build.Package returned when an error
+ // occurs. We need to do this before we return early on the FindOnly flag.
+ if !isDir(absdir) {
+ // package was not found
+ p.error = fmt.Errorf("cannot find package in:\n\t%s", absdir).Error()
+ return p
+ }
+
+ entries, err := fsys.ReadDir(absdir)
+ if err != nil {
+ p.error = err.Error()
+ return p
+ }
+
+ fset := token.NewFileSet()
+ for _, d := range entries {
+ if d.IsDir() {
+ continue
+ }
+ if d.Mode()&fs.ModeSymlink != 0 {
+ if isDir(filepath.Join(absdir, d.Name())) {
+ // Symlinks to directories are not source files.
+ continue
+ }
+ }
+
+ name := d.Name()
+ ext := nameExt(name)
+
+ if strings.HasPrefix(name, "_") || strings.HasPrefix(name, ".") {
+ continue
+ }
+ info, err := getFileInfo(absdir, name, fset)
+ if err == errNonSource {
+ // Not a source or object file; ignore it completely in the index.
+ continue
+ } else if err != nil {
+ p.sourceFiles = append(p.sourceFiles, &rawFile{name: name, error: err.Error()})
+ continue
+ } else if info == nil {
+ p.sourceFiles = append(p.sourceFiles, &rawFile{name: name, ignoreFile: true})
+ continue
+ }
+ rf := &rawFile{
+ name: name,
+ goBuildConstraint: info.goBuildConstraint,
+ plusBuildConstraints: info.plusBuildConstraints,
+ binaryOnly: info.binaryOnly,
+ directives: info.directives,
+ }
+ if info.parsed != nil {
+ rf.pkgName = info.parsed.Name.Name
+ }
+
+ // Going to save the file. For non-Go files, can stop here.
+ p.sourceFiles = append(p.sourceFiles, rf)
+ if ext != ".go" {
+ continue
+ }
+
+ if info.parseErr != nil {
+ rf.parseError = parseErrorToString(info.parseErr)
+ // Fall through: we might still have a partial AST in info.parsed,
+ // and we want to list files with parse errors anyway.
+ }
+
+ if info.parsed != nil && info.parsed.Doc != nil {
+ rf.synopsis = doc.Synopsis(info.parsed.Doc.Text())
+ }
+
+ var cgoDirectives []string
+ for _, imp := range info.imports {
+ if imp.path == "C" {
+ cgoDirectives = append(cgoDirectives, extractCgoDirectives(imp.doc.Text())...)
+ }
+ rf.imports = append(rf.imports, rawImport{path: imp.path, position: fset.Position(imp.pos)})
+ }
+ rf.cgoDirectives = strings.Join(cgoDirectives, "\n")
+ for _, emb := range info.embeds {
+ rf.embeds = append(rf.embeds, embed{emb.pattern, emb.pos})
+ }
+
+ }
+ return p
+}
+
+// extractCgoDirectives filters only the lines containing #cgo directives from the input,
+// which is the comment on import "C".
+func extractCgoDirectives(doc string) []string {
+ var out []string
+ for _, line := range strings.Split(doc, "\n") {
+ // Line is
+ // #cgo [GOOS/GOARCH...] LDFLAGS: stuff
+ //
+ line = strings.TrimSpace(line)
+ if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
+ continue
+ }
+
+ out = append(out, line)
+ }
+ return out
+}
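
A short sketch of extractCgoDirectives on a typical import "C" doc comment
(hypothetical example, same package, fmt assumed imported); only the #cgo lines
are kept, the #include is dropped:

	func exampleCgoDirectives() {
		doc := `
	#cgo CFLAGS: -DPNG_DEBUG=1
	#cgo linux LDFLAGS: -lpng
	#include <png.h>
	`
		for _, line := range extractCgoDirectives(doc) {
			fmt.Println(line)
		}
		// Output:
		// #cgo CFLAGS: -DPNG_DEBUG=1
		// #cgo linux LDFLAGS: -lpng
	}
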
diff --git a/src/cmd/go/internal/modindex/syslist.go b/src/cmd/go/internal/modindex/syslist.go
new file mode 100644
index 0000000..41adcc5
--- /dev/null
+++ b/src/cmd/go/internal/modindex/syslist.go
@@ -0,0 +1,78 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a lightly modified copy of go/build/syslist.go.
+
+package modindex
+
+// knownOS is the list of past, present, and future known GOOS values.
+// Do not remove from this list, as it is used for filename matching.
+// If you add an entry to this list, look at unixOS, below.
+var knownOS = map[string]bool{
+ "aix": true,
+ "android": true,
+ "darwin": true,
+ "dragonfly": true,
+ "freebsd": true,
+ "hurd": true,
+ "illumos": true,
+ "ios": true,
+ "js": true,
+ "linux": true,
+ "nacl": true,
+ "netbsd": true,
+ "openbsd": true,
+ "plan9": true,
+ "solaris": true,
+ "wasip1": true,
+ "windows": true,
+ "zos": true,
+}
+
+// unixOS is the set of GOOS values matched by the "unix" build tag.
+// This is not used for filename matching.
+// This list also appears in cmd/dist/build.go.
+var unixOS = map[string]bool{
+ "aix": true,
+ "android": true,
+ "darwin": true,
+ "dragonfly": true,
+ "freebsd": true,
+ "hurd": true,
+ "illumos": true,
+ "ios": true,
+ "linux": true,
+ "netbsd": true,
+ "openbsd": true,
+ "solaris": true,
+}
+
+// knownArch is the list of past, present, and future known GOARCH values.
+// Do not remove from this list, as it is used for filename matching.
+var knownArch = map[string]bool{
+ "386": true,
+ "amd64": true,
+ "amd64p32": true,
+ "arm": true,
+ "armbe": true,
+ "arm64": true,
+ "arm64be": true,
+ "loong64": true,
+ "mips": true,
+ "mipsle": true,
+ "mips64": true,
+ "mips64le": true,
+ "mips64p32": true,
+ "mips64p32le": true,
+ "ppc": true,
+ "ppc64": true,
+ "ppc64le": true,
+ "riscv": true,
+ "riscv64": true,
+ "s390": true,
+ "s390x": true,
+ "sparc": true,
+ "sparc64": true,
+ "wasm": true,
+}
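
These tables are what file-name GOOS/GOARCH suffixes are checked against. A
rough sketch of that lookup (simplified and hypothetical; the real matching
logic lives in this package's build-context code, not shown here; strings and
fmt assumed imported):

	func exampleSuffixLookup() {
		// "x_linux_amd64.go" with the extension stripped.
		parts := strings.Split("x_linux_amd64", "_")
		goos, goarch := parts[len(parts)-2], parts[len(parts)-1]
		fmt.Println(knownOS[goos], knownArch[goarch]) // true true
	}
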
diff --git a/src/cmd/go/internal/modindex/syslist_test.go b/src/cmd/go/internal/modindex/syslist_test.go
new file mode 100644
index 0000000..1a61562
--- /dev/null
+++ b/src/cmd/go/internal/modindex/syslist_test.go
@@ -0,0 +1,65 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a lightly modified copy of go/build/syslist_test.go.
+
+package modindex
+
+import (
+ "go/build"
+ "runtime"
+ "testing"
+)
+
+var (
+ thisOS = runtime.GOOS
+ thisArch = runtime.GOARCH
+ otherOS = anotherOS()
+ otherArch = anotherArch()
+)
+
+func anotherOS() string {
+ if thisOS != "darwin" && thisOS != "ios" {
+ return "darwin"
+ }
+ return "linux"
+}
+
+func anotherArch() string {
+ if thisArch != "amd64" {
+ return "amd64"
+ }
+ return "386"
+}
+
+type GoodFileTest struct {
+ name string
+ result bool
+}
+
+var tests = []GoodFileTest{
+ {"file.go", true},
+ {"file.c", true},
+ {"file_foo.go", true},
+ {"file_" + thisArch + ".go", true},
+ {"file_" + otherArch + ".go", false},
+ {"file_" + thisOS + ".go", true},
+ {"file_" + otherOS + ".go", false},
+ {"file_" + thisOS + "_" + thisArch + ".go", true},
+ {"file_" + otherOS + "_" + thisArch + ".go", false},
+ {"file_" + thisOS + "_" + otherArch + ".go", false},
+ {"file_" + otherOS + "_" + otherArch + ".go", false},
+ {"file_foo_" + thisArch + ".go", true},
+ {"file_foo_" + otherArch + ".go", false},
+ {"file_" + thisOS + ".c", true},
+ {"file_" + otherOS + ".c", false},
+}
+
+func TestGoodOSArch(t *testing.T) {
+ for _, test := range tests {
+ if (*Context)(&build.Default).goodOSArchFile(test.name, make(map[string]bool)) != test.result {
+ t.Fatalf("goodOSArchFile(%q) != %v", test.name, test.result)
+ }
+ }
+}
diff --git a/src/cmd/go/internal/modindex/testdata/ignore_non_source/a.syso b/src/cmd/go/internal/modindex/testdata/ignore_non_source/a.syso
new file mode 100644
index 0000000..9527d05
--- /dev/null
+++ b/src/cmd/go/internal/modindex/testdata/ignore_non_source/a.syso
@@ -0,0 +1 @@
+package ignore_non_source
diff --git a/src/cmd/go/internal/modindex/testdata/ignore_non_source/b.go b/src/cmd/go/internal/modindex/testdata/ignore_non_source/b.go
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/cmd/go/internal/modindex/testdata/ignore_non_source/b.go
diff --git a/src/cmd/go/internal/modindex/testdata/ignore_non_source/bar.json b/src/cmd/go/internal/modindex/testdata/ignore_non_source/bar.json
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/cmd/go/internal/modindex/testdata/ignore_non_source/bar.json
diff --git a/src/cmd/go/internal/modindex/testdata/ignore_non_source/baz.log b/src/cmd/go/internal/modindex/testdata/ignore_non_source/baz.log
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/cmd/go/internal/modindex/testdata/ignore_non_source/baz.log
diff --git a/src/cmd/go/internal/modindex/testdata/ignore_non_source/c.c b/src/cmd/go/internal/modindex/testdata/ignore_non_source/c.c
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/cmd/go/internal/modindex/testdata/ignore_non_source/c.c
diff --git a/src/cmd/go/internal/modindex/write.go b/src/cmd/go/internal/modindex/write.go
new file mode 100644
index 0000000..cd18ad9
--- /dev/null
+++ b/src/cmd/go/internal/modindex/write.go
@@ -0,0 +1,164 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "cmd/go/internal/base"
+ "encoding/binary"
+ "go/token"
+ "sort"
+)
+
+const indexVersion = "go index v2" // 11 bytes (plus \n), to align uint32s in index
+
+// encodeModuleBytes produces the encoded representation of the module index.
+// encodeModuleBytes may modify the packages slice.
+func encodeModuleBytes(packages []*rawPackage) []byte {
+ e := newEncoder()
+ e.Bytes([]byte(indexVersion + "\n"))
+ stringTableOffsetPos := e.Pos() // fill this at the end
+ e.Uint32(0) // string table offset
+ sort.Slice(packages, func(i, j int) bool {
+ return packages[i].dir < packages[j].dir
+ })
+ e.Int(len(packages))
+ packagesPos := e.Pos()
+ for _, p := range packages {
+ e.String(p.dir)
+ e.Int(0)
+ }
+ for i, p := range packages {
+ e.IntAt(e.Pos(), packagesPos+8*i+4)
+ encodePackage(e, p)
+ }
+ e.IntAt(e.Pos(), stringTableOffsetPos)
+ e.Bytes(e.stringTable)
+ e.Bytes([]byte{0xFF}) // end of string table marker
+ return e.b
+}
+
+func encodePackageBytes(p *rawPackage) []byte {
+ return encodeModuleBytes([]*rawPackage{p})
+}
+
+func encodePackage(e *encoder, p *rawPackage) {
+ e.String(p.error)
+ e.String(p.dir)
+ e.Int(len(p.sourceFiles)) // number of source files
+ sourceFileOffsetPos := e.Pos() // the pos of the start of the source file offsets
+ for range p.sourceFiles {
+ e.Int(0)
+ }
+ for i, f := range p.sourceFiles {
+ e.IntAt(e.Pos(), sourceFileOffsetPos+4*i)
+ encodeFile(e, f)
+ }
+}
+
+func encodeFile(e *encoder, f *rawFile) {
+ e.String(f.error)
+ e.String(f.parseError)
+ e.String(f.synopsis)
+ e.String(f.name)
+ e.String(f.pkgName)
+ e.Bool(f.ignoreFile)
+ e.Bool(f.binaryOnly)
+ e.String(f.cgoDirectives)
+ e.String(f.goBuildConstraint)
+
+ e.Int(len(f.plusBuildConstraints))
+ for _, s := range f.plusBuildConstraints {
+ e.String(s)
+ }
+
+ e.Int(len(f.imports))
+ for _, m := range f.imports {
+ e.String(m.path)
+ e.Position(m.position)
+ }
+
+ e.Int(len(f.embeds))
+ for _, embed := range f.embeds {
+ e.String(embed.pattern)
+ e.Position(embed.position)
+ }
+
+ e.Int(len(f.directives))
+ for _, d := range f.directives {
+ e.String(d.Text)
+ e.Position(d.Pos)
+ }
+}
+
+func newEncoder() *encoder {
+ e := &encoder{strings: make(map[string]int)}
+
+ // place the empty string at position 0 in the string table
+ e.stringTable = append(e.stringTable, 0)
+ e.strings[""] = 0
+
+ return e
+}
+
+func (e *encoder) Position(position token.Position) {
+ e.String(position.Filename)
+ e.Int(position.Offset)
+ e.Int(position.Line)
+ e.Int(position.Column)
+}
+
+type encoder struct {
+ b []byte
+ stringTable []byte
+ strings map[string]int
+}
+
+func (e *encoder) Pos() int {
+ return len(e.b)
+}
+
+func (e *encoder) Bytes(b []byte) {
+ e.b = append(e.b, b...)
+}
+
+func (e *encoder) String(s string) {
+ if n, ok := e.strings[s]; ok {
+ e.Int(n)
+ return
+ }
+ pos := len(e.stringTable)
+ e.strings[s] = pos
+ e.Int(pos)
+ e.stringTable = binary.AppendUvarint(e.stringTable, uint64(len(s)))
+ e.stringTable = append(e.stringTable, s...)
+}
+
+func (e *encoder) Bool(b bool) {
+ if b {
+ e.Uint32(1)
+ } else {
+ e.Uint32(0)
+ }
+}
+
+func (e *encoder) Uint32(n uint32) {
+ e.b = binary.LittleEndian.AppendUint32(e.b, n)
+}
+
+// Int encodes n. Note that all ints are written to the index as uint32s,
+// and to avoid problems on 32-bit systems we require that n fit in an int32.
+func (e *encoder) Int(n int) {
+ if n < 0 || int(int32(n)) != n {
+ base.Fatalf("go: attempting to write an int to the index that overflows int32")
+ }
+ e.Uint32(uint32(n))
+}
+
+func (e *encoder) IntAt(n int, at int) {
+ if n < 0 || int(int32(n)) != n {
+ base.Fatalf("go: attempting to write an int to the index that overflows int32")
+ }
+ binary.LittleEndian.PutUint32(e.b[at:], uint32(n))
+}
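
The encoder interns strings: each distinct string goes into the string table
once (a uvarint length followed by the bytes) and the data stream carries only
4-byte offsets, so a repeated string costs 4 bytes after the first use. A
minimal sketch (hypothetical, same package, fmt assumed imported):

	func exampleStringInterning() {
		e := newEncoder()
		e.String("go.mod")
		e.String("go.mod") // repeated string: same table offset, no new table bytes

		// 8 bytes of data (two 4-byte offsets) and 8 bytes of string table
		// (1 byte for the empty string, 1 length byte, 6 bytes of "go.mod").
		fmt.Println(len(e.b), len(e.stringTable)) // 8 8
	}
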
diff --git a/src/cmd/go/internal/modinfo/info.go b/src/cmd/go/internal/modinfo/info.go
new file mode 100644
index 0000000..b0adcbc
--- /dev/null
+++ b/src/cmd/go/internal/modinfo/info.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modinfo
+
+import (
+ "cmd/go/internal/modfetch/codehost"
+ "encoding/json"
+ "time"
+)
+
+// Note that these structs are publicly visible (part of go list's API)
+// and the fields are documented in the help text in ../list/list.go
+
+type ModulePublic struct {
+ Path string `json:",omitempty"` // module path
+ Version string `json:",omitempty"` // module version
+ Query string `json:",omitempty"` // version query corresponding to this version
+ Versions []string `json:",omitempty"` // available module versions
+ Replace *ModulePublic `json:",omitempty"` // replaced by this module
+ Time *time.Time `json:",omitempty"` // time version was created
+ Update *ModulePublic `json:",omitempty"` // available update (with -u)
+ Main bool `json:",omitempty"` // is this the main module?
+ Indirect bool `json:",omitempty"` // module is only indirectly needed by main module
+ Dir string `json:",omitempty"` // directory holding local copy of files, if any
+ GoMod string `json:",omitempty"` // path to go.mod file describing module, if any
+ GoVersion string `json:",omitempty"` // go version used in module
+ Retracted []string `json:",omitempty"` // retraction information, if any (with -retracted or -u)
+ Deprecated string `json:",omitempty"` // deprecation message, if any (with -u)
+ Error *ModuleError `json:",omitempty"` // error loading module
+
+ Origin *codehost.Origin `json:",omitempty"` // provenance of module
+ Reuse bool `json:",omitempty"` // reuse of old module info is safe
+}
+
+type ModuleError struct {
+ Err string // error text
+}
+
+type moduleErrorNoMethods ModuleError
+
+// UnmarshalJSON accepts both {"Err":"text"} and "text",
+// so that the output of go mod download -json can still
+// be unmarshalled into a ModulePublic during -reuse processing.
+func (e *ModuleError) UnmarshalJSON(data []byte) error {
+ if len(data) > 0 && data[0] == '"' {
+ return json.Unmarshal(data, &e.Err)
+ }
+ return json.Unmarshal(data, (*moduleErrorNoMethods)(e))
+}
+
+func (m *ModulePublic) String() string {
+ s := m.Path
+ versionString := func(mm *ModulePublic) string {
+ v := mm.Version
+ if len(mm.Retracted) == 0 {
+ return v
+ }
+ return v + " (retracted)"
+ }
+
+ if m.Version != "" {
+ s += " " + versionString(m)
+ if m.Update != nil {
+ s += " [" + versionString(m.Update) + "]"
+ }
+ }
+ if m.Deprecated != "" {
+ s += " (deprecated)"
+ }
+ if m.Replace != nil {
+ s += " => " + m.Replace.Path
+ if m.Replace.Version != "" {
+ s += " " + versionString(m.Replace)
+ if m.Replace.Update != nil {
+ s += " [" + versionString(m.Replace.Update) + "]"
+ }
+ }
+ if m.Replace.Deprecated != "" {
+ s += " (deprecated)"
+ }
+ }
+ return s
+}
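
A tiny sketch of the two encodings ModuleError.UnmarshalJSON accepts
(hypothetical example in package modinfo, with encoding/json and fmt imported;
error returns ignored for brevity):

	func exampleModuleErrorJSON() {
		var a, b ModuleError
		_ = json.Unmarshal([]byte(`"module not found"`), &a)         // bare string form
		_ = json.Unmarshal([]byte(`{"Err":"module not found"}`), &b) // struct form
		fmt.Println(a.Err == b.Err) // true
	}
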
diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go
new file mode 100644
index 0000000..bb513ea
--- /dev/null
+++ b/src/cmd/go/internal/modload/build.go
@@ -0,0 +1,449 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modfetch/codehost"
+ "cmd/go/internal/modindex"
+ "cmd/go/internal/modinfo"
+ "cmd/go/internal/search"
+
+ "golang.org/x/mod/module"
+)
+
+var (
+ infoStart, _ = hex.DecodeString("3077af0c9274080241e1c107e6d618e6")
+ infoEnd, _ = hex.DecodeString("f932433186182072008242104116d8f2")
+)
+
+func isStandardImportPath(path string) bool {
+ return findStandardImportPath(path) != ""
+}
+
+func findStandardImportPath(path string) string {
+ if path == "" {
+ panic("findStandardImportPath called with empty path")
+ }
+ if search.IsStandardImportPath(path) {
+ if modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
+ return filepath.Join(cfg.GOROOT, "src", path)
+ }
+ }
+ return ""
+}
+
+// PackageModuleInfo returns information about the module that provides
+// a given package. If modules are not enabled or if the package is in the
+// standard library or if the package was not successfully loaded with
+// LoadPackages or ImportFromFiles, nil is returned.
+func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePublic {
+ if isStandardImportPath(pkgpath) || !Enabled() {
+ return nil
+ }
+ m, ok := findModule(loaded, pkgpath)
+ if !ok {
+ return nil
+ }
+
+ rs := LoadModFile(ctx)
+ return moduleInfo(ctx, rs, m, 0, nil)
+}
+
+// PackageModRoot returns the module root directory for the module that provides
+// a given package. If modules are not enabled or if the package is in the
+// standard library or if the package was not successfully loaded with
+// LoadPackages or ImportFromFiles, the empty string is returned.
+func PackageModRoot(ctx context.Context, pkgpath string) string {
+ if isStandardImportPath(pkgpath) || !Enabled() || cfg.BuildMod == "vendor" {
+ return ""
+ }
+ m, ok := findModule(loaded, pkgpath)
+ if !ok {
+ return ""
+ }
+ root, _, err := fetch(ctx, m)
+ if err != nil {
+ return ""
+ }
+ return root
+}
+
+func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic {
+ if !Enabled() {
+ return nil
+ }
+
+ if path, vers, found := strings.Cut(path, "@"); found {
+ m := module.Version{Path: path, Version: vers}
+ return moduleInfo(ctx, nil, m, 0, nil)
+ }
+
+ rs := LoadModFile(ctx)
+
+ var (
+ v string
+ ok bool
+ )
+ if rs.pruning == pruned {
+ v, ok = rs.rootSelected(path)
+ }
+ if !ok {
+ mg, err := rs.Graph(ctx)
+ if err != nil {
+ base.Fatal(err)
+ }
+ v = mg.Selected(path)
+ }
+
+ if v == "none" {
+ return &modinfo.ModulePublic{
+ Path: path,
+ Error: &modinfo.ModuleError{
+ Err: "module not in current build",
+ },
+ }
+ }
+
+ return moduleInfo(ctx, rs, module.Version{Path: path, Version: v}, 0, nil)
+}
+
+// addUpdate fills in m.Update if an updated version is available.
+func addUpdate(ctx context.Context, m *modinfo.ModulePublic) {
+ if m.Version == "" {
+ return
+ }
+
+ info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed)
+ var noVersionErr *NoMatchingVersionError
+ if errors.Is(err, ErrDisallowed) ||
+ errors.Is(err, fs.ErrNotExist) ||
+ errors.As(err, &noVersionErr) {
+ // Ignore "not found" and "no matching version" errors.
+ // This means the proxy has no matching version or no versions at all.
+ //
+ // Ignore "disallowed" errors. This means the current version is
+ // excluded or retracted and there are no higher allowed versions.
+ //
+ // We should report other errors though. An attacker that controls the
+ // network shouldn't be able to hide versions by interfering with
+ // the HTTPS connection. An attacker that controls the proxy may still
+ // hide versions, since the "list" and "latest" endpoints are not
+ // authenticated.
+ return
+ } else if err != nil {
+ if m.Error == nil {
+ m.Error = &modinfo.ModuleError{Err: err.Error()}
+ }
+ return
+ }
+
+ if gover.ModCompare(m.Path, info.Version, m.Version) > 0 {
+ m.Update = &modinfo.ModulePublic{
+ Path: m.Path,
+ Version: info.Version,
+ Time: &info.Time,
+ }
+ }
+}
+
+// mergeOrigin merges two origins, returning either one of its arguments
+// unmodified or a newly allocated Origin; it does not modify its arguments.
+// If the two origins conflict, mergeOrigin returns a non-specific one
+// that will not pass CheckReuse.
+// If m1 or m2 is nil, the other is returned unmodified.
+// But if m1 or m2 is non-nil and uncheckable, the result is also uncheckable,
+// to preserve uncheckability.
+func mergeOrigin(m1, m2 *codehost.Origin) *codehost.Origin {
+ if m1 == nil {
+ return m2
+ }
+ if m2 == nil {
+ return m1
+ }
+ if !m1.Checkable() {
+ return m1
+ }
+ if !m2.Checkable() {
+ return m2
+ }
+
+ merged := new(codehost.Origin)
+ *merged = *m1 // Clone to avoid overwriting fields in cached results.
+
+ if m2.TagSum != "" {
+ if m1.TagSum != "" && (m1.TagSum != m2.TagSum || m1.TagPrefix != m2.TagPrefix) {
+ merged.ClearCheckable()
+ return merged
+ }
+ merged.TagSum = m2.TagSum
+ merged.TagPrefix = m2.TagPrefix
+ }
+ if m2.Hash != "" {
+ if m1.Hash != "" && (m1.Hash != m2.Hash || m1.Ref != m2.Ref) {
+ merged.ClearCheckable()
+ return merged
+ }
+ merged.Hash = m2.Hash
+ merged.Ref = m2.Ref
+ }
+ return merged
+}
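
A sketch of the conflict case (hypothetical; it assumes, as the code above
implies, that an Origin with only tag information set reports Checkable() ==
true and that ClearCheckable makes Checkable() report false; fmt assumed
imported):

	func exampleMergeOrigin() {
		o1 := &codehost.Origin{TagPrefix: "v", TagSum: "h1:aaa"}
		o2 := &codehost.Origin{TagPrefix: "v", TagSum: "h1:bbb"}

		merged := mergeOrigin(o1, o2)
		// Conflicting tag sums: the merged origin is made uncheckable,
		// so cached results based on it will not pass CheckReuse.
		fmt.Println(merged.Checkable()) // false
	}
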
+
+// addVersions fills in m.Versions with the list of known versions.
+// Excluded versions will be omitted. If listRetracted is false, retracted
+// versions will also be omitted.
+func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted bool) {
+ allowed := CheckAllowed
+ if listRetracted {
+ allowed = CheckExclusions
+ }
+ v, origin, err := versions(ctx, m.Path, allowed)
+ if err != nil && m.Error == nil {
+ m.Error = &modinfo.ModuleError{Err: err.Error()}
+ }
+ m.Versions = v
+ m.Origin = mergeOrigin(m.Origin, origin)
+}
+
+// addRetraction fills in m.Retracted if the module was retracted by its author.
+// m.Error is set if there's an error loading retraction information.
+func addRetraction(ctx context.Context, m *modinfo.ModulePublic) {
+ if m.Version == "" {
+ return
+ }
+
+ err := CheckRetractions(ctx, module.Version{Path: m.Path, Version: m.Version})
+ var noVersionErr *NoMatchingVersionError
+ var retractErr *ModuleRetractedError
+ if err == nil || errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) {
+ // Ignore "not found" and "no matching version" errors.
+ // This means the proxy has no matching version or no versions at all.
+ //
+ // We should report other errors though. An attacker that controls the
+ // network shouldn't be able to hide versions by interfering with
+ // the HTTPS connection. An attacker that controls the proxy may still
+ // hide versions, since the "list" and "latest" endpoints are not
+ // authenticated.
+ return
+ } else if errors.As(err, &retractErr) {
+ if len(retractErr.Rationale) == 0 {
+ m.Retracted = []string{"retracted by module author"}
+ } else {
+ m.Retracted = retractErr.Rationale
+ }
+ } else if m.Error == nil {
+ m.Error = &modinfo.ModuleError{Err: err.Error()}
+ }
+}
+
+// addDeprecation fills in m.Deprecated if the module was deprecated by its
+// author. m.Error is set if there's an error loading deprecation information.
+func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) {
+ deprecation, err := CheckDeprecation(ctx, module.Version{Path: m.Path, Version: m.Version})
+ var noVersionErr *NoMatchingVersionError
+ if errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) {
+ // Ignore "not found" and "no matching version" errors.
+ // This means the proxy has no matching version or no versions at all.
+ //
+ // We should report other errors though. An attacker that controls the
+ // network shouldn't be able to hide versions by interfering with
+ // the HTTPS connection. An attacker that controls the proxy may still
+ // hide versions, since the "list" and "latest" endpoints are not
+ // authenticated.
+ return
+ }
+ if err != nil {
+ if m.Error == nil {
+ m.Error = &modinfo.ModuleError{Err: err.Error()}
+ }
+ return
+ }
+ m.Deprecated = deprecation
+}
+
+// moduleInfo returns information about module m, loaded from the requirements
+// in rs (which may be nil to indicate that m was not loaded from a requirement
+// graph).
+func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic {
+ if m.Version == "" && MainModules.Contains(m.Path) {
+ info := &modinfo.ModulePublic{
+ Path: m.Path,
+ Version: m.Version,
+ Main: true,
+ }
+ if v, ok := rawGoVersion.Load(m); ok {
+ info.GoVersion = v.(string)
+ } else {
+ panic("internal error: GoVersion not set for main module")
+ }
+ if modRoot := MainModules.ModRoot(m); modRoot != "" {
+ info.Dir = modRoot
+ info.GoMod = modFilePath(modRoot)
+ }
+ return info
+ }
+
+ info := &modinfo.ModulePublic{
+ Path: m.Path,
+ Version: m.Version,
+ Indirect: rs != nil && !rs.direct[m.Path],
+ }
+ if v, ok := rawGoVersion.Load(m); ok {
+ info.GoVersion = v.(string)
+ }
+
+ // completeFromModCache fills in the extra fields in m using the module cache.
+ completeFromModCache := func(m *modinfo.ModulePublic) {
+ if gover.IsToolchain(m.Path) {
+ return
+ }
+
+ if old := reuse[module.Version{Path: m.Path, Version: m.Version}]; old != nil {
+ if err := checkReuse(ctx, m.Path, old.Origin); err == nil {
+ *m = *old
+ m.Query = ""
+ m.Dir = ""
+ return
+ }
+ }
+
+ checksumOk := func(suffix string) bool {
+ return rs == nil || m.Version == "" || !mustHaveSums() ||
+ modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix})
+ }
+
+ if m.Version != "" {
+ if q, err := Query(ctx, m.Path, m.Version, "", nil); err != nil {
+ m.Error = &modinfo.ModuleError{Err: err.Error()}
+ } else {
+ m.Version = q.Version
+ m.Time = &q.Time
+ }
+ }
+ mod := module.Version{Path: m.Path, Version: m.Version}
+
+ if m.GoVersion == "" && checksumOk("/go.mod") {
+ // Load the go.mod file to determine the Go version, since it hasn't
+ // already been populated from rawGoVersion.
+ if summary, err := rawGoModSummary(mod); err == nil && summary.goVersion != "" {
+ m.GoVersion = summary.goVersion
+ }
+ }
+
+ if m.Version != "" {
+ if checksumOk("/go.mod") {
+ gomod, err := modfetch.CachePath(ctx, mod, "mod")
+ if err == nil {
+ if info, err := os.Stat(gomod); err == nil && info.Mode().IsRegular() {
+ m.GoMod = gomod
+ }
+ }
+ }
+ if checksumOk("") {
+ dir, err := modfetch.DownloadDir(ctx, mod)
+ if err == nil {
+ m.Dir = dir
+ }
+ }
+
+ if mode&ListRetracted != 0 {
+ addRetraction(ctx, m)
+ }
+ }
+ }
+
+ if rs == nil {
+ // If this was an explicitly-versioned argument to 'go mod download' or
+ // 'go list -m', report the actual requested version, not its replacement.
+ completeFromModCache(info) // Will set m.Error in vendor mode.
+ return info
+ }
+
+ r := Replacement(m)
+ if r.Path == "" {
+ if cfg.BuildMod == "vendor" {
+ // It's tempting to fill in the "Dir" field to point within the vendor
+ // directory, but that would be misleading: the vendor directory contains
+ // a flattened package tree, not complete modules, and it can even
+ // interleave packages from different modules if one module path is a
+ // prefix of the other.
+ } else {
+ completeFromModCache(info)
+ }
+ return info
+ }
+
+ // Don't hit the network to fill in extra data for replaced modules.
+ // The original resolved Version and Time don't matter enough to be
+ // worth the cost, and we're going to overwrite the GoMod and Dir from the
+ // replacement anyway. See https://golang.org/issue/27859.
+ info.Replace = &modinfo.ModulePublic{
+ Path: r.Path,
+ Version: r.Version,
+ }
+ if v, ok := rawGoVersion.Load(m); ok {
+ info.Replace.GoVersion = v.(string)
+ }
+ if r.Version == "" {
+ if filepath.IsAbs(r.Path) {
+ info.Replace.Dir = r.Path
+ } else {
+ info.Replace.Dir = filepath.Join(replaceRelativeTo(), r.Path)
+ }
+ info.Replace.GoMod = filepath.Join(info.Replace.Dir, "go.mod")
+ }
+ if cfg.BuildMod != "vendor" {
+ completeFromModCache(info.Replace)
+ info.Dir = info.Replace.Dir
+ info.GoMod = info.Replace.GoMod
+ info.Retracted = info.Replace.Retracted
+ }
+ info.GoVersion = info.Replace.GoVersion
+ return info
+}
+
+// findModule searches for the module that contains the package at path.
+// If the package was loaded, its containing module and true are returned.
+// Otherwise, module.Version{} and false are returned.
+func findModule(ld *loader, path string) (module.Version, bool) {
+ if pkg, ok := ld.pkgCache.Get(path); ok {
+ return pkg.mod, pkg.mod != module.Version{}
+ }
+ return module.Version{}, false
+}
+
+func ModInfoProg(info string, isgccgo bool) []byte {
+ // Inject an init function to set runtime.modinfo.
+ // This is only used for gccgo - with gc we hand the info directly to the linker.
+ // The init function has the drawback that packages may want to
+ // look at the module info in their init functions (see issue 29628),
+ // which won't work. See also issue 30344.
+ if isgccgo {
+ return fmt.Appendf(nil, `package main
+import _ "unsafe"
+//go:linkname __set_debug_modinfo__ runtime.setmodinfo
+func __set_debug_modinfo__(string)
+func init() { __set_debug_modinfo__(%q) }
+`, ModInfoData(info))
+ }
+ return nil
+}
+
+func ModInfoData(info string) []byte {
+ return []byte(string(infoStart) + info + string(infoEnd))
+}
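
ModInfoData brackets the module info string between the 16-byte infoStart and
infoEnd markers. A minimal sketch of pulling the payload back out of such a
blob (hypothetical helper, same package, fmt assumed imported):

	func exampleModInfoData() {
		blob := ModInfoData("path\tcmd/example\n")
		payload := blob[len(infoStart) : len(blob)-len(infoEnd)]
		fmt.Printf("%q\n", payload) // "path\tcmd/example\n"
	}
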
diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go
new file mode 100644
index 0000000..8d3af08
--- /dev/null
+++ b/src/cmd/go/internal/modload/buildlist.go
@@ -0,0 +1,1497 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "runtime"
+ "runtime/debug"
+ "slices"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/mvs"
+ "cmd/go/internal/par"
+
+ "golang.org/x/mod/module"
+)
+
+// A Requirements represents a logically-immutable set of root module requirements.
+type Requirements struct {
+ // pruning is the pruning at which the requirement graph is computed.
+ //
+ // If unpruned, the graph includes all transitive requirements regardless
+ // of whether the requiring module supports pruning.
+ //
+ // If pruned, the graph includes only the root modules, the explicit
+ // requirements of those root modules, and the transitive requirements of only
+ // the root modules that do not support pruning.
+ //
+ // If workspace, the graph includes only the workspace modules, the explicit
+ // requirements of the workspace modules, and the transitive requirements of
+ // the workspace modules that do not support pruning.
+ pruning modPruning
+
+ // rootModules is the set of root modules of the graph, sorted and capped to
+ // length. It may contain duplicates, and may contain multiple versions for a
+ // given module path. The root modules of the graph are the set of main
+ // modules in workspace mode, and the main module's direct requirements
+ // outside workspace mode.
+ //
+ // The roots are always expected to contain an entry for the "go" module,
+ // indicating the Go language version in use.
+ rootModules []module.Version
+ maxRootVersion map[string]string
+
+ // direct is the set of module paths for which we believe the module provides
+ // a package directly imported by a package or test in the main module.
+ //
+ // The "direct" map controls which modules are annotated with "// indirect"
+ // comments in the go.mod file, and may impact which modules are listed as
+ // explicit roots (vs. indirect-only dependencies). However, it should not
+ // have a semantic effect on the build list overall.
+ //
+ // The initial direct map is populated from the existing "// indirect"
+ // comments (or lack thereof) in the go.mod file. It is updated by the
+ // package loader: dependencies may be promoted to direct if new
+ // direct imports are observed, and may be demoted to indirect during
+ // 'go mod tidy' or 'go mod vendor'.
+ //
+ // The direct map is keyed by module paths, not module versions. When a
+ // module's selected version changes, we assume that it remains direct if the
+ // previous version was a direct dependency. That assumption might not hold in
+ // rare cases (such as if a dependency splits out a nested module, or merges a
+ // nested module back into a parent module).
+ direct map[string]bool
+
+ graphOnce sync.Once // guards writes to (but not reads from) graph
+ graph atomic.Pointer[cachedGraph]
+}
+
+// A cachedGraph is a non-nil *ModuleGraph, together with any error discovered
+// while loading that graph.
+type cachedGraph struct {
+ mg *ModuleGraph
+ err error // If err is non-nil, mg may be incomplete (but must still be non-nil).
+}
+
+// requirements is the requirement graph for the main module.
+//
+// It is always non-nil if the main module's go.mod file has been loaded.
+//
+// This variable should only be read from the loadModFile function, and should
+// only be written in the loadModFile and commitRequirements functions.
+// All other functions that need or produce a *Requirements should
+// accept and/or return an explicit parameter.
+var requirements *Requirements
+
+func mustHaveGoRoot(roots []module.Version) {
+ for _, m := range roots {
+ if m.Path == "go" {
+ return
+ }
+ }
+ panic("go: internal error: missing go root module")
+}
+
+// newRequirements returns a new requirement set with the given root modules.
+// The dependencies of the roots will be loaded lazily at the first call to the
+// Graph method.
+//
+// The rootModules slice must be sorted according to gover.ModSort.
+// The caller must not modify the rootModules slice or direct map after passing
+// them to newRequirements.
+//
+// If vendoring is in effect, the caller must invoke initVendor on the returned
+// *Requirements before any other method.
+func newRequirements(pruning modPruning, rootModules []module.Version, direct map[string]bool) *Requirements {
+ mustHaveGoRoot(rootModules)
+
+ if pruning != workspace {
+ if workFilePath != "" {
+ panic("in workspace mode, but pruning is not workspace in newRequirements")
+ }
+ for i, m := range rootModules {
+ if m.Version == "" && MainModules.Contains(m.Path) {
+ panic(fmt.Sprintf("newRequirements called with untrimmed build list: rootModules[%v] is a main module", i))
+ }
+ if m.Path == "" || m.Version == "" {
+ panic(fmt.Sprintf("bad requirement: rootModules[%v] = %v", i, m))
+ }
+ }
+ }
+
+ rs := &Requirements{
+ pruning: pruning,
+ rootModules: rootModules,
+ maxRootVersion: make(map[string]string, len(rootModules)),
+ direct: direct,
+ }
+
+ for i, m := range rootModules {
+ if i > 0 {
+ prev := rootModules[i-1]
+ if prev.Path > m.Path || (prev.Path == m.Path && gover.ModCompare(m.Path, prev.Version, m.Version) > 0) {
+ panic(fmt.Sprintf("newRequirements called with unsorted roots: %v", rootModules))
+ }
+ }
+
+ if v, ok := rs.maxRootVersion[m.Path]; ok && gover.ModCompare(m.Path, v, m.Version) >= 0 {
+ continue
+ }
+ rs.maxRootVersion[m.Path] = m.Version
+ }
+
+ if rs.maxRootVersion["go"] == "" {
+ panic(`newRequirements called without a "go" version`)
+ }
+ return rs
+}
+
+// String returns a string describing the Requirements for debugging.
+func (rs *Requirements) String() string {
+ return fmt.Sprintf("{%v %v}", rs.pruning, rs.rootModules)
+}
+
+// initVendor initializes rs.graph from the given list of vendored module
+// dependencies, overriding the graph that would normally be loaded from module
+// requirements.
+func (rs *Requirements) initVendor(vendorList []module.Version) {
+ rs.graphOnce.Do(func() {
+ mg := &ModuleGraph{
+ g: mvs.NewGraph(cmpVersion, MainModules.Versions()),
+ }
+
+ if MainModules.Len() != 1 {
+ panic("There should be exactly one main module in Vendor mode.")
+ }
+ mainModule := MainModules.Versions()[0]
+
+ if rs.pruning == pruned {
+ // The roots of a pruned module should already include every module in the
+ // vendor list, because the vendored modules are the same as those needed
+ // for graph pruning.
+ //
+ // Just to be sure, we'll double-check that here.
+ inconsistent := false
+ for _, m := range vendorList {
+ if v, ok := rs.rootSelected(m.Path); !ok || v != m.Version {
+ base.Errorf("go: vendored module %v should be required explicitly in go.mod", m)
+ inconsistent = true
+ }
+ }
+ if inconsistent {
+ base.Fatal(errGoModDirty)
+ }
+
+ // Now we can treat the rest of the module graph as effectively “pruned
+ // out”, as though we are viewing the main module from outside: in vendor
+ // mode, the root requirements *are* the complete module graph.
+ mg.g.Require(mainModule, rs.rootModules)
+ } else {
+ // The transitive requirements of the main module are not in general available
+ // from the vendor directory, and we don't actually know how we got from
+ // the roots to the final build list.
+ //
+ // Instead, we'll inject a fake "vendor/modules.txt" module that provides
+ // those transitive dependencies, and mark it as a dependency of the main
+ // module. That allows us to elide the actual structure of the module
+ // graph, but still distinguishes between direct and indirect
+ // dependencies.
+ vendorMod := module.Version{Path: "vendor/modules.txt", Version: ""}
+ mg.g.Require(mainModule, append(rs.rootModules, vendorMod))
+ mg.g.Require(vendorMod, vendorList)
+ }
+
+ rs.graph.Store(&cachedGraph{mg, nil})
+ })
+}
+
+// GoVersion returns the Go language version for the Requirements.
+func (rs *Requirements) GoVersion() string {
+ v, _ := rs.rootSelected("go")
+ if v == "" {
+ panic("internal error: missing go version in modload.Requirements")
+ }
+ return v
+}
+
+// rootSelected returns the version of the root dependency with the given module
+// path, or the empty string and ok=false if the module is not a root
+// dependency.
+func (rs *Requirements) rootSelected(path string) (version string, ok bool) {
+ if MainModules.Contains(path) {
+ return "", true
+ }
+ if v, ok := rs.maxRootVersion[path]; ok {
+ return v, true
+ }
+ return "", false
+}
+
+// hasRedundantRoot returns true if the root list contains multiple requirements
+// of the same module or a requirement on any version of the main module.
+// Redundant requirements should be pruned, but they may influence version
+// selection.
+func (rs *Requirements) hasRedundantRoot() bool {
+ for i, m := range rs.rootModules {
+ if MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) {
+ return true
+ }
+ }
+ return false
+}
+
+// Graph returns the graph of module requirements loaded from the current
+// root modules (as reported by RootModules).
+//
+// Graph always makes a best effort to load the requirement graph despite any
+// errors, and always returns a non-nil *ModuleGraph.
+//
+// If the requirements of any relevant module fail to load, Graph also
+// returns a non-nil error of type *mvs.BuildListError.
+func (rs *Requirements) Graph(ctx context.Context) (*ModuleGraph, error) {
+ rs.graphOnce.Do(func() {
+ mg, mgErr := readModGraph(ctx, rs.pruning, rs.rootModules, nil)
+ rs.graph.Store(&cachedGraph{mg, mgErr})
+ })
+ cached := rs.graph.Load()
+ return cached.mg, cached.err
+}
+
+// IsDirect returns whether the given module provides a package directly
+// imported by a package or test in the main module.
+func (rs *Requirements) IsDirect(path string) bool {
+ return rs.direct[path]
+}
+
+// A ModuleGraph represents the complete graph of module dependencies
+// of a main module.
+//
+// If the main module supports module graph pruning, the graph does not include
+// transitive dependencies of non-root (implicit) dependencies.
+type ModuleGraph struct {
+ g *mvs.Graph
+ loadCache par.ErrCache[module.Version, *modFileSummary]
+
+ buildListOnce sync.Once
+ buildList []module.Version
+}
+
+var readModGraphDebugOnce sync.Once
+
+// readModGraph reads and returns the module dependency graph starting at the
+// given roots.
+//
+// The requirements of the module versions found in the unprune map are included
+// in the graph even if they would normally be pruned out.
+//
+// Unlike LoadModGraph, readModGraph does not attempt to diagnose or update
+// inconsistent roots.
+func readModGraph(ctx context.Context, pruning modPruning, roots []module.Version, unprune map[module.Version]bool) (*ModuleGraph, error) {
+ mustHaveGoRoot(roots)
+ if pruning == pruned {
+ // Enable diagnostics for lazy module loading
+ // (https://golang.org/ref/mod#lazy-loading) only if the module graph is
+ // pruned.
+ //
+ // In unpruned modules, we load the module graph much more aggressively (in
+ // order to detect inconsistencies that wouldn't be feasible to spot-check),
+ // so it wouldn't be useful to log when that occurs (because it happens in
+ // normal operation all the time).
+ readModGraphDebugOnce.Do(func() {
+ for _, f := range strings.Split(os.Getenv("GODEBUG"), ",") {
+ switch f {
+ case "lazymod=log":
+ debug.PrintStack()
+ fmt.Fprintf(os.Stderr, "go: read full module graph.\n")
+ case "lazymod=strict":
+ debug.PrintStack()
+ base.Fatalf("go: read full module graph (forbidden by GODEBUG=lazymod=strict).")
+ }
+ }
+ })
+ }
+
+ var graphRoots []module.Version
+ if inWorkspaceMode() {
+ graphRoots = roots
+ } else {
+ graphRoots = MainModules.Versions()
+ }
+ var (
+ mu sync.Mutex // guards mg.g and hasError during loading
+ hasError bool
+ mg = &ModuleGraph{
+ g: mvs.NewGraph(cmpVersion, graphRoots),
+ }
+ )
+
+ if pruning != workspace {
+ if inWorkspaceMode() {
+ panic("pruning is not workspace in workspace mode")
+ }
+ mg.g.Require(MainModules.mustGetSingleMainModule(), roots)
+ }
+
+ type dedupKey struct {
+ m module.Version
+ pruning modPruning
+ }
+ var (
+ loadQueue = par.NewQueue(runtime.GOMAXPROCS(0))
+ loading sync.Map // dedupKey → nil; the set of modules that have been or are being loaded
+ )
+
+ // loadOne synchronously loads the explicit requirements for module m.
+ // It does not load the transitive requirements of m even if the go version in
+ // m's go.mod file indicates that it supports graph pruning.
+ loadOne := func(m module.Version) (*modFileSummary, error) {
+ return mg.loadCache.Do(m, func() (*modFileSummary, error) {
+ summary, err := goModSummary(m)
+
+ mu.Lock()
+ if err == nil {
+ mg.g.Require(m, summary.require)
+ } else {
+ hasError = true
+ }
+ mu.Unlock()
+
+ return summary, err
+ })
+ }
+
+ var enqueue func(m module.Version, pruning modPruning)
+ enqueue = func(m module.Version, pruning modPruning) {
+ if m.Version == "none" {
+ return
+ }
+
+ if _, dup := loading.LoadOrStore(dedupKey{m, pruning}, nil); dup {
+ // m has already been enqueued for loading. Since unpruned loading may
+ // follow cycles in the requirement graph, we need to return early
+ // to avoid making the load queue infinitely long.
+ return
+ }
+
+ loadQueue.Add(func() {
+ summary, err := loadOne(m)
+ if err != nil {
+ return // findError will report the error later.
+ }
+
+ // If the version in m's go.mod file does not support pruning, then we
+ // cannot assume that the explicit requirements of m (added by loadOne)
+ // are sufficient to build the packages it contains. We must load its full
+ // transitive dependency graph to be sure that we see all relevant
+ // dependencies. In addition, we must load the requirements of any module
+ // that is explicitly marked as unpruned.
+ nextPruning := summary.pruning
+ if pruning == unpruned {
+ nextPruning = unpruned
+ }
+ for _, r := range summary.require {
+ if pruning != pruned || summary.pruning == unpruned || unprune[r] {
+ enqueue(r, nextPruning)
+ }
+ }
+ })
+ }
+
+ mustHaveGoRoot(roots)
+ for _, m := range roots {
+ enqueue(m, pruning)
+ }
+ <-loadQueue.Idle()
+
+ // Reload any dependencies of the main modules which are not
+ // at their selected versions in workspace mode, because the
+ // requirements don't accurately reflect the transitive imports.
+ if pruning == workspace {
+ // hasDepsInAll contains the set of modules that need to be loaded
+ // at workspace pruning because any of their dependencies may
+ // provide packages in all.
+ hasDepsInAll := make(map[string]bool)
+ seen := map[module.Version]bool{}
+ for _, m := range roots {
+ hasDepsInAll[m.Path] = true
+ }
+ // This loop will terminate because it will call enqueue on each version of
+ // each dependency of the modules in hasDepsInAll at most once (and only
+ // calls enqueue on successively increasing versions of each dependency).
+ for {
+ needsEnqueueing := map[module.Version]bool{}
+ for p := range hasDepsInAll {
+ m := module.Version{Path: p, Version: mg.g.Selected(p)}
+ if !seen[m] {
+ needsEnqueueing[m] = true
+ continue
+ }
+ reqs, _ := mg.g.RequiredBy(m)
+ for _, r := range reqs {
+ s := module.Version{Path: r.Path, Version: mg.g.Selected(r.Path)}
+ if gover.ModCompare(r.Path, s.Version, r.Version) > 0 && !seen[s] {
+ needsEnqueueing[s] = true
+ }
+ }
+ }
+			// If nothing new needs enqueueing, we have converged; otherwise enqueue
+			// the new modules and add their paths to hasDepsInAll.
+ if len(needsEnqueueing) == 0 {
+ break
+ }
+
+ for p := range needsEnqueueing {
+ enqueue(p, workspace)
+ seen[p] = true
+ hasDepsInAll[p.Path] = true
+ }
+ <-loadQueue.Idle()
+ }
+ }
+
+ if hasError {
+ return mg, mg.findError()
+ }
+ return mg, nil
+}
+
+// RequiredBy returns the dependencies required by module m in the graph,
+// or ok=false if module m's dependencies are pruned out.
+//
+// The caller must not modify the returned slice, but may safely append to it
+// and may rely on it not to be modified.
+func (mg *ModuleGraph) RequiredBy(m module.Version) (reqs []module.Version, ok bool) {
+ return mg.g.RequiredBy(m)
+}
+
+// Selected returns the selected version of the module with the given path.
+//
+// If no version is selected, Selected returns version "none".
+func (mg *ModuleGraph) Selected(path string) (version string) {
+ return mg.g.Selected(path)
+}
+
+// WalkBreadthFirst invokes f once, in breadth-first order, for each module
+// version other than "none" that appears in the graph, regardless of whether
+// that version is selected.
+func (mg *ModuleGraph) WalkBreadthFirst(f func(m module.Version)) {
+ mg.g.WalkBreadthFirst(f)
+}
+
+// BuildList returns the selected versions of all modules present in the graph,
+// beginning with the main modules.
+//
+// The order of the remaining elements in the list is deterministic
+// but arbitrary.
+//
+// The caller must not modify the returned list, but may safely append to it
+// and may rely on it not to be modified.
+func (mg *ModuleGraph) BuildList() []module.Version {
+ mg.buildListOnce.Do(func() {
+ mg.buildList = slices.Clip(mg.g.BuildList())
+ })
+ return mg.buildList
+}
+
+func (mg *ModuleGraph) findError() error {
+ errStack := mg.g.FindPath(func(m module.Version) bool {
+ _, err := mg.loadCache.Get(m)
+ return err != nil && err != par.ErrCacheEntryNotFound
+ })
+ if len(errStack) > 0 {
+ _, err := mg.loadCache.Get(errStack[len(errStack)-1])
+ var noUpgrade func(from, to module.Version) bool
+ return mvs.NewBuildListError(err, errStack, noUpgrade)
+ }
+
+ return nil
+}
+
+func (mg *ModuleGraph) allRootsSelected() bool {
+ var roots []module.Version
+ if inWorkspaceMode() {
+ roots = MainModules.Versions()
+ } else {
+ roots, _ = mg.g.RequiredBy(MainModules.mustGetSingleMainModule())
+ }
+ for _, m := range roots {
+ if mg.Selected(m.Path) != m.Version {
+ return false
+ }
+ }
+ return true
+}
+
+// LoadModGraph loads and returns the graph of module dependencies of the main module,
+// without loading any packages.
+//
+// If the goVersion string is non-empty, the returned graph is the graph
+// as interpreted by the given Go version (instead of the version indicated
+// in the go.mod file).
+//
+// Modules are loaded automatically (and lazily) in LoadPackages:
+// LoadModGraph need only be called if LoadPackages is not,
+// typically in commands that care about modules but no particular package.
+func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) {
+ rs, err := loadModFile(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+
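+	// If a specific Go version was requested, reinterpret the requirements under
+	// that version's pruning rules and return the resulting graph without
+	// updating the global requirements.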
+ if goVersion != "" {
+ v, _ := rs.rootSelected("go")
+ if gover.Compare(v, gover.GoStrictVersion) >= 0 && gover.Compare(goVersion, v) < 0 {
+ return nil, fmt.Errorf("requested Go version %s cannot load module graph (requires Go >= %s)", goVersion, v)
+ }
+
+ pruning := pruningForGoVersion(goVersion)
+ if pruning == unpruned && rs.pruning != unpruned {
+ // Use newRequirements instead of convertDepth because convertDepth
+ // also updates roots; here, we want to report the unmodified roots
+ // even though they may seem inconsistent.
+ rs = newRequirements(unpruned, rs.rootModules, rs.direct)
+ }
+
+ return rs.Graph(ctx)
+ }
+
+ rs, mg, err := expandGraph(ctx, rs)
+ if err != nil {
+ return nil, err
+ }
+ requirements = rs
+ return mg, err
+}
+
+// expandGraph loads the complete module graph from rs.
+//
+// If the complete graph reveals that some root of rs is not actually the
+// selected version of its path, expandGraph computes a new set of roots that
+// are consistent. (With a pruned module graph, this may result in upgrades to
+// other modules due to requirements that were previously pruned out.)
+//
+// expandGraph returns the updated roots, along with the module graph loaded
+// from those roots and any error encountered while loading that graph.
+// expandGraph returns non-nil requirements and a non-nil graph regardless of
+// errors. On error, the roots might not be updated to be consistent.
+func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleGraph, error) {
+ mg, mgErr := rs.Graph(ctx)
+ if mgErr != nil {
+ // Without the graph, we can't update the roots: we don't know which
+ // versions of transitive dependencies would be selected.
+ return rs, mg, mgErr
+ }
+
+ if !mg.allRootsSelected() {
+ // The roots of rs are not consistent with the rest of the graph. Update
+ // them. In an unpruned module this is a no-op for the build list as a whole —
+ // it just promotes what were previously transitive requirements to be
+ // roots — but in a pruned module it may pull in previously-irrelevant
+ // transitive dependencies.
+
+ newRS, rsErr := updateRoots(ctx, rs.direct, rs, nil, nil, false)
+ if rsErr != nil {
+ // Failed to update roots, perhaps because of an error in a transitive
+ // dependency needed for the update. Return the original Requirements
+ // instead.
+ return rs, mg, rsErr
+ }
+ rs = newRS
+ mg, mgErr = rs.Graph(ctx)
+ }
+
+ return rs, mg, mgErr
+}
+
+// EditBuildList edits the global build list by first adding every module in add
+// to the existing build list, then adjusting versions (and adding or removing
+// requirements as needed) until every module in mustSelect is selected at the
+// given version.
+//
+// (Note that the newly-added modules might not be selected in the resulting
+// build list: they could be lower than existing requirements or conflict with
+// versions in mustSelect.)
+//
+// If the versions listed in mustSelect are mutually incompatible (due to one of
+// the listed modules requiring a higher version of another), EditBuildList
+// returns a *ConstraintError and leaves the build list in its previous state.
+//
+// On success, EditBuildList reports whether the selected version of any module
+// in the build list may have been changed (possibly to or from "none") as a
+// result.
+func EditBuildList(ctx context.Context, add, mustSelect []module.Version) (changed bool, err error) {
+ rs, changed, err := editRequirements(ctx, LoadModFile(ctx), add, mustSelect)
+ if err != nil {
+ return false, err
+ }
+ requirements = rs
+ return changed, err
+}
+
+// OverrideRoots edits the global requirement roots by replacing the specific module versions.
+func OverrideRoots(ctx context.Context, replace []module.Version) {
+ requirements = overrideRoots(ctx, requirements, replace)
+}
+
+func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Version) *Requirements {
+ drop := make(map[string]bool)
+ for _, m := range replace {
+ drop[m.Path] = true
+ }
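+	// Keep every existing root whose path is not being replaced, then append the
+	// replacements and restore sorted order.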
+ var roots []module.Version
+ for _, m := range rs.rootModules {
+ if !drop[m.Path] {
+ roots = append(roots, m)
+ }
+ }
+ roots = append(roots, replace...)
+ gover.ModSort(roots)
+ return newRequirements(rs.pruning, roots, rs.direct)
+}
+
+// A ConstraintError describes inconsistent constraints in EditBuildList.
+type ConstraintError struct {
+	// Conflicts lists the source of the conflict for each version in mustSelect
+ // that could not be selected due to the requirements of some other version in
+ // mustSelect.
+ Conflicts []Conflict
+}
+
+func (e *ConstraintError) Error() string {
+ b := new(strings.Builder)
+ b.WriteString("version constraints conflict:")
+ for _, c := range e.Conflicts {
+ fmt.Fprintf(b, "\n\t%s", c.Summary())
+ }
+ return b.String()
+}
+
+// A Conflict is a path of requirements starting at a root or proposed root in
+// the requirement graph, explaining why that root either causes a module passed
+// in the mustSelect list to EditBuildList to be unattainable, or introduces an
+// unresolvable error in loading the requirement graph.
+type Conflict struct {
+ // Path is a path of requirements starting at some module version passed in
+ // the mustSelect argument and ending at a module whose requirements make that
+ // version unacceptable. (Path always has len ≥ 1.)
+ Path []module.Version
+
+ // If Err is nil, Constraint is a module version passed in the mustSelect
+ // argument that has the same module path as, and a lower version than,
+ // the last element of the Path slice.
+ Constraint module.Version
+
+ // If Constraint is unset, Err is an error encountered when loading the
+ // requirements of the last element in Path.
+ Err error
+}
+
+// UnwrapModuleError returns c.Err, but unwraps it if it is a module.ModuleError
+// with a version and path matching the last entry in the Path slice.
+func (c Conflict) UnwrapModuleError() error {
+ me, ok := c.Err.(*module.ModuleError)
+ if ok && len(c.Path) > 0 {
+ last := c.Path[len(c.Path)-1]
+ if me.Path == last.Path && me.Version == last.Version {
+ return me.Err
+ }
+ }
+ return c.Err
+}
+
+// Summary returns a string that describes only the first and last modules in
+// the conflict path.
+func (c Conflict) Summary() string {
+ if len(c.Path) == 0 {
+ return "(internal error: invalid Conflict struct)"
+ }
+ first := c.Path[0]
+ last := c.Path[len(c.Path)-1]
+ if len(c.Path) == 1 {
+ if c.Err != nil {
+ return fmt.Sprintf("%s: %v", first, c.UnwrapModuleError())
+ }
+ return fmt.Sprintf("%s is above %s", first, c.Constraint.Version)
+ }
+
+ adverb := ""
+ if len(c.Path) > 2 {
+ adverb = "indirectly "
+ }
+ if c.Err != nil {
+ return fmt.Sprintf("%s %srequires %s: %v", first, adverb, last, c.UnwrapModuleError())
+ }
+ return fmt.Sprintf("%s %srequires %s, but %s is requested", first, adverb, last, c.Constraint.Version)
+}
+
+// String returns a string that describes the full conflict path.
+func (c Conflict) String() string {
+ if len(c.Path) == 0 {
+ return "(internal error: invalid Conflict struct)"
+ }
+ b := new(strings.Builder)
+ fmt.Fprintf(b, "%v", c.Path[0])
+ if len(c.Path) == 1 {
+ fmt.Fprintf(b, " found")
+ } else {
+ for _, r := range c.Path[1:] {
+ fmt.Fprintf(b, " requires\n\t%v", r)
+ }
+ }
+ if c.Constraint != (module.Version{}) {
+ fmt.Fprintf(b, ", but %v is requested", c.Constraint.Version)
+ }
+ if c.Err != nil {
+ fmt.Fprintf(b, ": %v", c.UnwrapModuleError())
+ }
+ return b.String()
+}
+
+// tidyRoots trims the root dependencies to the minimal requirements needed to
+// both retain the same versions of all packages in pkgs and satisfy the
+// graph-pruning invariants (if applicable).
+func tidyRoots(ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) {
+ mainModule := MainModules.mustGetSingleMainModule()
+ if rs.pruning == unpruned {
+ return tidyUnprunedRoots(ctx, mainModule, rs, pkgs)
+ }
+ return tidyPrunedRoots(ctx, mainModule, rs, pkgs)
+}
+
+func updateRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) {
+ switch rs.pruning {
+ case unpruned:
+ return updateUnprunedRoots(ctx, direct, rs, add)
+ case pruned:
+ return updatePrunedRoots(ctx, direct, rs, pkgs, add, rootsImported)
+ case workspace:
+ return updateWorkspaceRoots(ctx, rs, add)
+ default:
+ panic(fmt.Sprintf("unsupported pruning mode: %v", rs.pruning))
+ }
+}
+
+func updateWorkspaceRoots(ctx context.Context, rs *Requirements, add []module.Version) (*Requirements, error) {
+ if len(add) != 0 {
+ // add should be empty in workspace mode because workspace mode implies
+ // -mod=readonly, which in turn implies no new requirements. The code path
+ // that would result in add being non-empty returns an error before it
+ // reaches this point: The set of modules to add comes from
+ // resolveMissingImports, which in turn resolves each package by calling
+ // queryImport. But queryImport explicitly checks for -mod=readonly, and
+		// returns an error.
+ panic("add is not empty")
+ }
+ return rs, nil
+}
+
+// tidyPrunedRoots returns a minimal set of root requirements that maintains the
+// invariants of the go.mod file needed to support graph pruning for the given
+// packages:
+//
+// 1. For each package marked with pkgInAll, the module path that provided that
+// package is included as a root.
+// 2. For all packages, the module that provided that package either remains
+// selected at the same version or is upgraded by the dependencies of a
+// root.
+//
+// If any module that provided a package has been upgraded above its previous
+// version, the caller may need to reload and recompute the package graph.
+//
+// To ensure that the loading process eventually converges, the caller should
+// add any needed roots from the tidy root set (without removing existing untidy
+// roots) until the set of roots has converged.
+func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) {
+ var (
+ roots []module.Version
+ pathIsRoot = map[string]bool{mainModule.Path: true}
+ )
+ if v, ok := old.rootSelected("go"); ok {
+ roots = append(roots, module.Version{Path: "go", Version: v})
+ pathIsRoot["go"] = true
+ }
+ if v, ok := old.rootSelected("toolchain"); ok {
+ roots = append(roots, module.Version{Path: "toolchain", Version: v})
+ pathIsRoot["toolchain"] = true
+ }
+ // We start by adding roots for every package in "all".
+ //
+ // Once that is done, we may still need to add more roots to cover upgraded or
+ // otherwise-missing test dependencies for packages in "all". For those test
+ // dependencies, we prefer to add roots for packages with shorter import
+ // stacks first, on the theory that the module requirements for those will
+ // tend to fill in the requirements for their transitive imports (which have
+ // deeper import stacks). So we add the missing dependencies for one depth at
+ // a time, starting with the packages actually in "all" and expanding outwards
+ // until we have scanned every package that was loaded.
+ var (
+ queue []*loadPkg
+ queued = map[*loadPkg]bool{}
+ )
+ for _, pkg := range pkgs {
+ if !pkg.flags.has(pkgInAll) {
+ continue
+ }
+ if pkg.fromExternalModule() && !pathIsRoot[pkg.mod.Path] {
+ roots = append(roots, pkg.mod)
+ pathIsRoot[pkg.mod.Path] = true
+ }
+ queue = append(queue, pkg)
+ queued[pkg] = true
+ }
+ gover.ModSort(roots)
+ tidy := newRequirements(pruned, roots, old.direct)
+
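+	// Walk the package graph outward from "all" one level of imports at a time,
+	// promoting a module to a root whenever the tidied graph would otherwise
+	// select a lower version than the one that provided a loaded package.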
+ for len(queue) > 0 {
+ roots = tidy.rootModules
+ mg, err := tidy.Graph(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ prevQueue := queue
+ queue = nil
+ for _, pkg := range prevQueue {
+ m := pkg.mod
+ if m.Path == "" {
+ continue
+ }
+ for _, dep := range pkg.imports {
+ if !queued[dep] {
+ queue = append(queue, dep)
+ queued[dep] = true
+ }
+ }
+ if pkg.test != nil && !queued[pkg.test] {
+ queue = append(queue, pkg.test)
+ queued[pkg.test] = true
+ }
+
+ if !pathIsRoot[m.Path] {
+ if s := mg.Selected(m.Path); gover.ModCompare(m.Path, s, m.Version) < 0 {
+ roots = append(roots, m)
+ pathIsRoot[m.Path] = true
+ }
+ }
+ }
+
+ if len(roots) > len(tidy.rootModules) {
+ gover.ModSort(roots)
+ tidy = newRequirements(pruned, roots, tidy.direct)
+ }
+ }
+
+ roots = tidy.rootModules
+ _, err := tidy.Graph(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // We try to avoid adding explicit requirements for test-only dependencies of
+ // packages in external modules. However, if we drop the explicit
+ // requirements, that may change an import from unambiguous (due to lazy
+ // module loading) to ambiguous (because lazy module loading no longer
+ // disambiguates it). For any package that has become ambiguous, we try
+ // to fix it by promoting its module to an explicit root.
+ // (See https://go.dev/issue/60313.)
+ q := par.NewQueue(runtime.GOMAXPROCS(0))
+ for {
+ var disambiguateRoot sync.Map
+ for _, pkg := range pkgs {
+ if pkg.mod.Path == "" || pathIsRoot[pkg.mod.Path] {
+ // Lazy module loading will cause pkg.mod to be checked before any other modules
+ // that are only indirectly required. It is as unambiguous as possible.
+ continue
+ }
+ pkg := pkg
+ q.Add(func() {
+ skipModFile := true
+ _, _, _, _, err := importFromModules(ctx, pkg.path, tidy, nil, skipModFile)
+ if aie := (*AmbiguousImportError)(nil); errors.As(err, &aie) {
+ disambiguateRoot.Store(pkg.mod, true)
+ }
+ })
+ }
+ <-q.Idle()
+
+ disambiguateRoot.Range(func(k, _ any) bool {
+ m := k.(module.Version)
+ roots = append(roots, m)
+ pathIsRoot[m.Path] = true
+ return true
+ })
+
+ if len(roots) > len(tidy.rootModules) {
+ module.Sort(roots)
+ tidy = newRequirements(pruned, roots, tidy.direct)
+ _, err = tidy.Graph(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // Adding these roots may have pulled additional modules into the module
+ // graph, causing additional packages to become ambiguous. Keep iterating
+ // until we reach a fixed point.
+ continue
+ }
+
+ break
+ }
+
+ return tidy, nil
+}
+
+// updatePrunedRoots returns a set of root requirements that maintains the
+// invariants of the go.mod file needed to support graph pruning:
+//
+// 1. The selected version of the module providing each package marked with
+// either pkgInAll or pkgIsRoot is included as a root.
+// Note that certain root patterns (such as '...') may explode the root set
+// to contain every module that provides any package imported (or merely
+// required) by any other module.
+// 2. Each root appears only once: at the selected version of its path if
+// rs.graph is non-nil, or otherwise at the highest version that appears
+// among the existing roots for that path.
+// 3. Every module path that appears as a root in rs remains a root.
+// 4. Every version in add is selected at its given version unless upgraded by
+// (the dependencies of) an existing root or another module in add.
+//
+// The packages in pkgs are assumed to have been loaded from either the roots of
+// rs or the modules selected in the graph of rs.
+//
+// The above invariants together imply the graph-pruning invariants for the
+// go.mod file:
+//
+// 1. (The import invariant.) Every module that provides a package transitively
+// imported by any package or test in the main module is included as a root.
+// This follows by induction from (1) and (3) above. Transitively-imported
+// packages loaded during this invocation are marked with pkgInAll (1),
+// and by hypothesis any transitively-imported packages loaded in previous
+// invocations were already roots in rs (3).
+//
+// 2. (The argument invariant.) Every module that provides a package matching
+// an explicit package pattern is included as a root. This follows directly
+// from (1): packages matching explicit package patterns are marked with
+// pkgIsRoot.
+//
+// 3. (The completeness invariant.) Every module that contributed any package
+// to the build is required by either the main module or one of the modules
+// it requires explicitly. This invariant is left up to the caller, who must
+// not load packages from outside the module graph but may add roots to the
+// graph; maintaining it is facilitated by (3). If the caller adds roots in
+// order to resolve missing packages, then updatePrunedRoots will retain them,
+// the selected versions of those roots cannot regress, and they will
+// eventually be written back to the main module's go.mod file.
+//
+// (See https://golang.org/design/36460-lazy-module-loading#invariants for more
+// detail.)
+func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) {
+ roots := rs.rootModules
+ rootsUpgraded := false
+
+ spotCheckRoot := map[module.Version]bool{}
+
+ // “The selected version of the module providing each package marked with
+ // either pkgInAll or pkgIsRoot is included as a root.”
+ needSort := false
+ for _, pkg := range pkgs {
+ if !pkg.fromExternalModule() {
+ // pkg was not loaded from a module dependency, so we don't need
+ // to do anything special to maintain that dependency.
+ continue
+ }
+
+ switch {
+ case pkg.flags.has(pkgInAll):
+ // pkg is transitively imported by a package or test in the main module.
+ // We need to promote the module that maintains it to a root: if some
+ // other module depends on the main module, and that other module also
+ // uses a pruned module graph, it will expect to find all of our
+ // transitive dependencies by reading just our go.mod file, not the go.mod
+ // files of everything we depend on.
+ //
+ // (This is the “import invariant” that makes graph pruning possible.)
+
+ case rootsImported && pkg.flags.has(pkgFromRoot):
+ // pkg is a transitive dependency of some root, and we are treating the
+ // roots as if they are imported by the main module (as in 'go get').
+
+ case pkg.flags.has(pkgIsRoot):
+ // pkg is a root of the package-import graph. (Generally this means that
+ // it matches a command-line argument.) We want future invocations of the
+ // 'go' command — such as 'go test' on the same package — to continue to
+ // use the same versions of its dependencies that we are using right now.
+ // So we need to bring this package's dependencies inside the pruned
+ // module graph.
+ //
+ // Making the module containing this package a root of the module graph
+ // does exactly that: if the module containing the package supports graph
+ // pruning then it should satisfy the import invariant itself, so all of
+ // its dependencies should be in its go.mod file, and if the module
+ // containing the package does not support pruning then if we make it a
+ // root we will load all of its (unpruned) transitive dependencies into
+ // the module graph.
+ //
+ // (This is the “argument invariant”, and is important for
+ // reproducibility.)
+
+ default:
+ // pkg is a dependency of some other package outside of the main module.
+ // As far as we know it's not relevant to the main module (and thus not
+ // relevant to consumers of the main module either), and its dependencies
+ // should already be in the module graph — included in the dependencies of
+ // the package that imported it.
+ continue
+ }
+
+ if _, ok := rs.rootSelected(pkg.mod.Path); ok {
+ // It is possible that the main module's go.mod file is incomplete or
+ // otherwise erroneous — for example, perhaps the author forgot to 'git
+ // add' their updated go.mod file after adding a new package import, or
+ // perhaps they made an edit to the go.mod file using a third-party tool
+ // ('git merge'?) that doesn't maintain consistency for module
+ // dependencies. If that happens, ideally we want to detect the missing
+ // requirements and fix them up here.
+ //
+ // However, we also need to be careful not to be too aggressive. For
+ // transitive dependencies of external tests, the go.mod file for the
+ // module containing the test itself is expected to provide all of the
+ // relevant dependencies, and we explicitly don't want to pull in
+ // requirements on *irrelevant* requirements that happen to occur in the
+ // go.mod files for these transitive-test-only dependencies. (See the test
+			// in mod_lazy_test_horizon.txt for a concrete example.)
+ //
+ // The “goldilocks zone” seems to be to spot-check exactly the same
+ // modules that we promote to explicit roots: namely, those that provide
+ // packages transitively imported by the main module, and those that
+ // provide roots of the package-import graph. That will catch erroneous
+ // edits to the main module's go.mod file and inconsistent requirements in
+ // dependencies that provide imported packages, but will ignore erroneous
+ // or misleading requirements in dependencies that aren't obviously
+ // relevant to the packages in the main module.
+ spotCheckRoot[pkg.mod] = true
+ } else {
+ roots = append(roots, pkg.mod)
+ rootsUpgraded = true
+ // The roots slice was initially sorted because rs.rootModules was sorted,
+ // but the root we just added could be out of order.
+ needSort = true
+ }
+ }
+
+ for _, m := range add {
+ if v, ok := rs.rootSelected(m.Path); !ok || gover.ModCompare(m.Path, v, m.Version) < 0 {
+ roots = append(roots, m)
+ rootsUpgraded = true
+ needSort = true
+ }
+ }
+ if needSort {
+ gover.ModSort(roots)
+ }
+
+ // "Each root appears only once, at the selected version of its path ….”
+ for {
+ var mg *ModuleGraph
+ if rootsUpgraded {
+ // We've added or upgraded one or more roots, so load the full module
+ // graph so that we can update those roots to be consistent with other
+ // requirements.
+ if mustHaveCompleteRequirements() {
+ // Our changes to the roots may have moved dependencies into or out of
+ // the graph-pruning horizon, which could in turn change the selected
+ // versions of other modules. (For pruned modules adding or removing an
+ // explicit root is a semantic change, not just a cosmetic one.)
+ return rs, errGoModDirty
+ }
+
+ rs = newRequirements(pruned, roots, direct)
+ var err error
+ mg, err = rs.Graph(ctx)
+ if err != nil {
+ return rs, err
+ }
+ } else {
+ // Since none of the roots have been upgraded, we have no reason to
+ // suspect that they are inconsistent with the requirements of any other
+ // roots. Only look at the full module graph if we've already loaded it;
+ // otherwise, just spot-check the explicit requirements of the roots from
+ // which we loaded packages.
+ if rs.graph.Load() != nil {
+ // We've already loaded the full module graph, which includes the
+ // requirements of all of the root modules — even the transitive
+ // requirements, if they are unpruned!
+ mg, _ = rs.Graph(ctx)
+ } else if cfg.BuildMod == "vendor" {
+ // We can't spot-check the requirements of other modules because we
+ // don't in general have their go.mod files available in the vendor
+				// directory. (Fortunately this case is impossible, because rs.graph is
+ // always non-nil in vendor mode!)
+ panic("internal error: rs.graph is unexpectedly nil with -mod=vendor")
+ } else if !spotCheckRoots(ctx, rs, spotCheckRoot) {
+ // We spot-checked the explicit requirements of the roots that are
+ // relevant to the packages we've loaded. Unfortunately, they're
+ // inconsistent in some way; we need to load the full module graph
+ // so that we can fix the roots properly.
+ var err error
+ mg, err = rs.Graph(ctx)
+ if err != nil {
+ return rs, err
+ }
+ }
+ }
+
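+		// Rebuild the root list with a single entry per module path, at the
+		// version selected by the module graph if we loaded it, or at the
+		// root-selected version otherwise.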
+ roots = make([]module.Version, 0, len(rs.rootModules))
+ rootsUpgraded = false
+ inRootPaths := make(map[string]bool, len(rs.rootModules)+1)
+ for _, mm := range MainModules.Versions() {
+ inRootPaths[mm.Path] = true
+ }
+ for _, m := range rs.rootModules {
+ if inRootPaths[m.Path] {
+ // This root specifies a redundant path. We already retained the
+ // selected version of this path when we saw it before, so omit the
+ // redundant copy regardless of its version.
+ //
+ // When we read the full module graph, we include the dependencies of
+ // every root even if that root is redundant. That better preserves
+ // reproducibility if, say, some automated tool adds a redundant
+ // 'require' line and then runs 'go mod tidy' to try to make everything
+ // consistent, since the requirements of the older version are carried
+ // over.
+ //
+ // So omitting a root that was previously present may *reduce* the
+ // selected versions of non-roots, but merely removing a requirement
+ // cannot *increase* the selected versions of other roots as a result —
+ // we don't need to mark this change as an upgrade. (This particular
+ // change cannot invalidate any other roots.)
+ continue
+ }
+
+ var v string
+ if mg == nil {
+ v, _ = rs.rootSelected(m.Path)
+ } else {
+ v = mg.Selected(m.Path)
+ }
+ roots = append(roots, module.Version{Path: m.Path, Version: v})
+ inRootPaths[m.Path] = true
+ if v != m.Version {
+ rootsUpgraded = true
+ }
+ }
+ // Note that rs.rootModules was already sorted by module path and version,
+ // and we appended to the roots slice in the same order and guaranteed that
+ // each path has only one version, so roots is also sorted by module path
+ // and (trivially) version.
+
+ if !rootsUpgraded {
+ if cfg.BuildMod != "mod" {
+ // The only changes to the root set (if any) were to remove duplicates.
+ // The requirements are consistent (if perhaps redundant), so keep the
+ // original rs to preserve its ModuleGraph.
+ return rs, nil
+ }
+ // The root set has converged: every root going into this iteration was
+			// already at its selected version, although we may have removed other
+ // (redundant) roots for the same path.
+ break
+ }
+ }
+
+ if rs.pruning == pruned && reflect.DeepEqual(roots, rs.rootModules) && reflect.DeepEqual(direct, rs.direct) {
+ // The root set is unchanged and rs was already pruned, so keep rs to
+ // preserve its cached ModuleGraph (if any).
+ return rs, nil
+ }
+ return newRequirements(pruned, roots, direct), nil
+}
+
+// spotCheckRoots reports whether the versions of the roots in rs satisfy the
+// explicit requirements of the modules in mods.
+func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Version]bool) bool {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
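+	// Check each module's requirements in parallel; the first violation (or
+	// error reading a go.mod file) cancels the remaining work.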
+ work := par.NewQueue(runtime.GOMAXPROCS(0))
+ for m := range mods {
+ m := m
+ work.Add(func() {
+ if ctx.Err() != nil {
+ return
+ }
+
+ summary, err := goModSummary(m)
+ if err != nil {
+ cancel()
+ return
+ }
+
+ for _, r := range summary.require {
+ if v, ok := rs.rootSelected(r.Path); ok && gover.ModCompare(r.Path, v, r.Version) < 0 {
+ cancel()
+ return
+ }
+ }
+ })
+ }
+ <-work.Idle()
+
+ if ctx.Err() != nil {
+ // Either we failed a spot-check, or the caller no longer cares about our
+ // answer anyway.
+ return false
+ }
+
+ return true
+}
+
+// tidyUnprunedRoots returns a minimal set of root requirements that maintains
+// the selected version of every module that provided or lexically could have
+// provided a package in pkgs, and includes the selected version of every such
+// module in direct as a root.
+func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) {
+ var (
+		// keep is a set of modules that provide packages or are needed to
+ // disambiguate imports.
+ keep []module.Version
+ keptPath = map[string]bool{}
+
+ // rootPaths is a list of module paths that provide packages directly
+ // imported from the main module. They should be included as roots.
+ rootPaths []string
+ inRootPaths = map[string]bool{}
+
+ // altMods is a set of paths of modules that lexically could have provided
+ // imported packages. It may be okay to remove these from the list of
+ // explicit requirements if that removes them from the module graph. If they
+ // are present in the module graph reachable from rootPaths, they must not
+ // be at a lower version. That could cause a missing sum error or a new
+ // import ambiguity.
+ //
+ // For example, suppose a developer rewrites imports from example.com/m to
+ // example.com/m/v2, then runs 'go mod tidy'. Tidy may delete the
+ // requirement on example.com/m if there is no other transitive requirement
+ // on it. However, if example.com/m were downgraded to a version not in
+ // go.sum, when package example.com/m/v2/p is loaded, we'd get an error
+ // trying to disambiguate the import, since we can't check example.com/m
+ // without its sum. See #47738.
+ altMods = map[string]string{}
+ )
+ if v, ok := old.rootSelected("go"); ok {
+ keep = append(keep, module.Version{Path: "go", Version: v})
+ keptPath["go"] = true
+ }
+ if v, ok := old.rootSelected("toolchain"); ok {
+ keep = append(keep, module.Version{Path: "toolchain", Version: v})
+ keptPath["toolchain"] = true
+ }
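+	// Record the module that provided each loaded package, marking direct
+	// dependencies of the main module as candidate roots, and remember any
+	// modules that lexically could also have provided an imported package.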
+ for _, pkg := range pkgs {
+ if !pkg.fromExternalModule() {
+ continue
+ }
+ if m := pkg.mod; !keptPath[m.Path] {
+ keep = append(keep, m)
+ keptPath[m.Path] = true
+ if old.direct[m.Path] && !inRootPaths[m.Path] {
+ rootPaths = append(rootPaths, m.Path)
+ inRootPaths[m.Path] = true
+ }
+ }
+ for _, m := range pkg.altMods {
+ altMods[m.Path] = m.Version
+ }
+ }
+
+ // Construct a build list with a minimal set of roots.
+ // This may remove or downgrade modules in altMods.
+ reqs := &mvsReqs{roots: keep}
+ min, err := mvs.Req(mainModule, rootPaths, reqs)
+ if err != nil {
+ return nil, err
+ }
+ buildList, err := mvs.BuildList([]module.Version{mainModule}, reqs)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check if modules in altMods were downgraded but not removed.
+ // If so, add them to roots, which will retain an "// indirect" requirement
+ // in go.mod. See comment on altMods above.
+ keptAltMod := false
+ for _, m := range buildList {
+ if v, ok := altMods[m.Path]; ok && gover.ModCompare(m.Path, m.Version, v) < 0 {
+ keep = append(keep, module.Version{Path: m.Path, Version: v})
+ keptAltMod = true
+ }
+ }
+ if keptAltMod {
+ // We must run mvs.Req again instead of simply adding altMods to min.
+ // It's possible that a requirement in altMods makes some other
+ // explicit indirect requirement unnecessary.
+ reqs.roots = keep
+ min, err = mvs.Req(mainModule, rootPaths, reqs)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return newRequirements(unpruned, min, old.direct), nil
+}
+
+// updateUnprunedRoots returns a set of root requirements that includes the selected
+// version of every module path in direct as a root, and maintains the selected
+// version of every module selected in the graph of rs.
+//
+// The roots are updated such that:
+//
+// 1. The selected version of every module path in direct is included as a root
+// (if it is not "none").
+// 2. Each root is the selected version of its path. (We say that such a root
+// set is “consistent”.)
+// 3. Every version selected in the graph of rs remains selected unless upgraded
+// by a dependency in add.
+// 4. Every version in add is selected at its given version unless upgraded by
+// (the dependencies of) an existing root or another module in add.
+func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) {
+ mg, err := rs.Graph(ctx)
+ if err != nil {
+ // We can't ignore errors in the module graph even if the user passed the -e
+ // flag to try to push past them. If we can't load the complete module
+ // dependencies, then we can't reliably compute a minimal subset of them.
+ return rs, err
+ }
+
+ if mustHaveCompleteRequirements() {
+ // Instead of actually updating the requirements, just check that no updates
+ // are needed.
+ if rs == nil {
+ // We're being asked to reconstruct the requirements from scratch,
+ // but we aren't even allowed to modify them.
+ return rs, errGoModDirty
+ }
+ for _, m := range rs.rootModules {
+ if m.Version != mg.Selected(m.Path) {
+ // The root version v is misleading: the actual selected version is higher.
+ return rs, errGoModDirty
+ }
+ }
+ for _, m := range add {
+ if m.Version != mg.Selected(m.Path) {
+ return rs, errGoModDirty
+ }
+ }
+ for mPath := range direct {
+ if _, ok := rs.rootSelected(mPath); !ok {
+ // Module m is supposed to be listed explicitly, but isn't.
+ //
+ // Note that this condition is also detected (and logged with more
+ // detail) earlier during package loading, so it shouldn't actually be
+ // possible at this point — this is just a defense in depth.
+ return rs, errGoModDirty
+ }
+ }
+
+ // No explicit roots are missing and all roots are already at the versions
+ // we want to keep. Any other changes we would make are purely cosmetic,
+ // such as pruning redundant indirect dependencies. Per issue #34822, we
+ // ignore cosmetic changes when we cannot update the go.mod file.
+ return rs, nil
+ }
+
+ var (
+ rootPaths []string // module paths that should be included as roots
+ inRootPaths = map[string]bool{}
+ )
+ for _, root := range rs.rootModules {
+ // If the selected version of the root is the same as what was already
+ // listed in the go.mod file, retain it as a root (even if redundant) to
+ // avoid unnecessary churn. (See https://golang.org/issue/34822.)
+ //
+ // We do this even for indirect requirements, since we don't know why they
+ // were added and they could become direct at any time.
+ if !inRootPaths[root.Path] && mg.Selected(root.Path) == root.Version {
+ rootPaths = append(rootPaths, root.Path)
+ inRootPaths[root.Path] = true
+ }
+ }
+
+ // “The selected version of every module path in direct is included as a root.”
+ //
+ // This is only for convenience and clarity for end users: in an unpruned module,
+ // the choice of explicit vs. implicit dependency has no impact on MVS
+ // selection (for itself or any other module).
+ keep := append(mg.BuildList()[MainModules.Len():], add...)
+ for _, m := range keep {
+ if direct[m.Path] && !inRootPaths[m.Path] {
+ rootPaths = append(rootPaths, m.Path)
+ inRootPaths[m.Path] = true
+ }
+ }
+
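+	// Compute a minimal explicit requirement set for each main module and merge
+	// the results; the paths in rootPaths must remain listed explicitly.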
+ var roots []module.Version
+ for _, mainModule := range MainModules.Versions() {
+ min, err := mvs.Req(mainModule, rootPaths, &mvsReqs{roots: keep})
+ if err != nil {
+ return rs, err
+ }
+ roots = append(roots, min...)
+ }
+ if MainModules.Len() > 1 {
+ gover.ModSort(roots)
+ }
+ if rs.pruning == unpruned && reflect.DeepEqual(roots, rs.rootModules) && reflect.DeepEqual(direct, rs.direct) {
+ // The root set is unchanged and rs was already unpruned, so keep rs to
+ // preserve its cached ModuleGraph (if any).
+ return rs, nil
+ }
+
+ return newRequirements(unpruned, roots, direct), nil
+}
+
+// convertPruning returns a version of rs with the given pruning behavior.
+// If rs already has the given pruning, convertPruning returns rs unmodified.
+func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) (*Requirements, error) {
+ if rs.pruning == pruning {
+ return rs, nil
+ } else if rs.pruning == workspace || pruning == workspace {
+ panic("attempting to convert to/from workspace pruning and another pruning type")
+ }
+
+ if pruning == unpruned {
+ // We are converting a pruned module to an unpruned one. The roots of a
+ // pruned module graph are a superset of the roots of an unpruned one, so
+ // we don't need to add any new roots — we just need to drop the ones that
+ // are redundant, which is exactly what updateUnprunedRoots does.
+ return updateUnprunedRoots(ctx, rs.direct, rs, nil)
+ }
+
+ // We are converting an unpruned module to a pruned one.
+ //
+ // An unpruned module graph includes the transitive dependencies of every
+ // module in the build list. As it turns out, we can express that as a pruned
+ // root set! “Include the transitive dependencies of every module in the build
+ // list” is exactly what happens in a pruned module if we promote every module
+ // in the build list to a root.
+ mg, err := rs.Graph(ctx)
+ if err != nil {
+ return rs, err
+ }
+ return newRequirements(pruned, mg.BuildList()[MainModules.Len():], rs.direct), nil
+}
diff --git a/src/cmd/go/internal/modload/edit.go b/src/cmd/go/internal/modload/edit.go
new file mode 100644
index 0000000..63ee15c
--- /dev/null
+++ b/src/cmd/go/internal/modload/edit.go
@@ -0,0 +1,855 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/mvs"
+ "cmd/go/internal/par"
+ "context"
+ "errors"
+ "fmt"
+ "maps"
+ "os"
+ "slices"
+
+ "golang.org/x/mod/module"
+)
+
+// editRequirements returns an edited version of rs such that:
+//
+// 1. Each module version in mustSelect is selected.
+//
+// 2. Each module version in tryUpgrade is upgraded toward the indicated
+// version as far as can be done without violating (1).
+// (Other upgrades are also allowed if they are caused by
+// transitive requirements of versions in mustSelect or
+// tryUpgrade.)
+//
+// 3. Each module version in rs.rootModules (or rs.graph, if rs is unpruned)
+// is downgraded or upgraded from its original version only to the extent
+// needed to satisfy (1) and (2).
+//
+// Generally, the module versions in mustSelect are due to the module or a
+// package within the module matching an explicit command line argument to 'go
+// get', and the versions in tryUpgrade are transitive dependencies that are
+// either being upgraded by 'go get -u' or being added to satisfy some
+// otherwise-missing package import.
+//
+// If pruning is enabled, the roots of the edited requirements include an
+// explicit entry for each module path in tryUpgrade, mustSelect, and the roots
+// of rs, unless the selected version for the module path is "none".
+func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (edited *Requirements, changed bool, err error) {
+ if rs.pruning == workspace {
+ panic("editRequirements cannot edit workspace requirements")
+ }
+
+ orig := rs
+ // If we already know what go version we will end up on after the edit, and
+ // the pruning for that version is different, go ahead and apply it now.
+ //
+ // If we are changing from pruned to unpruned, then we MUST check the unpruned
+ // graph for conflicts from the start. (Checking only for pruned conflicts
+ // would miss some that would be introduced later.)
+ //
+ // If we are changing from unpruned to pruned, then we would like to avoid
+ // unnecessary downgrades due to conflicts that would be pruned out of the
+ // final graph anyway.
+ //
+ // Note that even if we don't find a go version in mustSelect, it is possible
+ // that we will switch from unpruned to pruned (but not the other way around!)
+ // after applying the edits if we find a dependency that requires a high
+ // enough go version to trigger an upgrade.
+ rootPruning := orig.pruning
+ for _, m := range mustSelect {
+ if m.Path == "go" {
+ rootPruning = pruningForGoVersion(m.Version)
+ break
+ } else if m.Path == "toolchain" && pruningForGoVersion(gover.FromToolchain(m.Version)) == unpruned {
+ // We don't know exactly what go version we will end up at, but we know
+ // that it must be a version supported by the requested toolchain, and
+ // that toolchain does not support pruning.
+ //
+ // TODO(bcmills): 'go get' ought to reject explicit toolchain versions
+ // older than gover.GoStrictVersion. Once that is fixed, is this still
+ // needed?
+ rootPruning = unpruned
+ break
+ }
+ }
+
+ if rootPruning != rs.pruning {
+ rs, err = convertPruning(ctx, rs, rootPruning)
+ if err != nil {
+ return orig, false, err
+ }
+ }
+
+ // selectedRoot records the edited version (possibly "none") for each module
+ // path that would be a root in the edited requirements.
+ var selectedRoot map[string]string // module path → edited version
+ if rootPruning == pruned {
+ selectedRoot = maps.Clone(rs.maxRootVersion)
+ } else {
+ // In a module without graph pruning, modules that provide packages imported
+ // by the main module may either be explicit roots or implicit transitive
+ // dependencies. To the extent possible, we want to preserve those implicit
+ // dependencies, so we need to treat everything in the build list as
+ // potentially relevant — that is, as what would be a “root” in a module
+ // with graph pruning enabled.
+ mg, err := rs.Graph(ctx)
+ if err != nil {
+ // If we couldn't load the graph, we don't know what its requirements were
+ // to begin with, so we can't edit those requirements in a coherent way.
+ return orig, false, err
+ }
+ bl := mg.BuildList()[MainModules.Len():]
+ selectedRoot = make(map[string]string, len(bl))
+ for _, m := range bl {
+ selectedRoot[m.Path] = m.Version
+ }
+ }
+
+ for _, r := range tryUpgrade {
+ if v, ok := selectedRoot[r.Path]; ok && gover.ModCompare(r.Path, v, r.Version) >= 0 {
+ continue
+ }
+ if cfg.BuildV {
+ fmt.Fprintf(os.Stderr, "go: trying upgrade to %v\n", r)
+ }
+ selectedRoot[r.Path] = r.Version
+ }
+
+ // conflicts is a list of conflicts that we cannot resolve without violating
+ // some version in mustSelect. It may be incomplete, but we want to report
+ // as many conflicts as we can so that the user can solve more of them at once.
+ var conflicts []Conflict
+
+ // mustSelectVersion is an index of the versions in mustSelect.
+ mustSelectVersion := make(map[string]string, len(mustSelect))
+ for _, r := range mustSelect {
+ if v, ok := mustSelectVersion[r.Path]; ok && v != r.Version {
+ prev := module.Version{Path: r.Path, Version: v}
+ if gover.ModCompare(r.Path, v, r.Version) > 0 {
+ conflicts = append(conflicts, Conflict{Path: []module.Version{prev}, Constraint: r})
+ } else {
+ conflicts = append(conflicts, Conflict{Path: []module.Version{r}, Constraint: prev})
+ }
+ continue
+ }
+
+ mustSelectVersion[r.Path] = r.Version
+ selectedRoot[r.Path] = r.Version
+ }
+
+ // We've indexed all of the data we need and we've computed the initial
+ // versions of the roots. Now we need to load the actual module graph and
+ // restore the invariant that every root is the selected version of its path.
+ //
+ // For 'go mod tidy' we would do that using expandGraph, which upgrades the
+ // roots until their requirements are internally consistent and then drops out
+ // the old roots. However, here we need to do more: we also need to make sure
+ // the modules in mustSelect don't get upgraded above their intended versions.
+ // To do that, we repeatedly walk the module graph, identify paths of
+ // requirements that result in versions that are too high, and downgrade the
+ // roots that lead to those paths. When no conflicts remain, we're done.
+ //
+ // Since we want to report accurate paths to each conflict, we don't drop out
+	// older-than-selected roots until the process completes. That might mean
+	// that we do some extra downgrades when they could have been skipped, but
+	// being able to explain the reason for every downgrade seems worthwhile.
+ //
+ // Graph pruning adds an extra wrinkle: a given node in the module graph
+ // may be reached from a root whose dependencies are pruned, and from a root
+ // whose dependencies are not pruned. It may be the case that the path from
+ // the unpruned root leads to a conflict, while the path from the pruned root
+ // prunes out the requirements that would lead to that conflict.
+ // So we need to track the two kinds of paths independently.
+ // They join back together at the roots of the graph: if a root r1 with pruned
+ // requirements depends on a root r2 with unpruned requirements, then
+ // selecting r1 would cause r2 to become a root and pull in all of its
+ // unpruned dependencies.
+ //
+ // The dqTracker type implements the logic for propagating conflict paths
+ // through the pruned and unpruned parts of the module graph.
+ //
+ // We make a best effort to fix incompatibilities, subject to two properties:
+ //
+ // 1. If the user runs 'go get' with a set of mutually-compatible module
+ // versions, we should accept those versions.
+ //
+ // 2. If we end up upgrading or downgrading a module, it should be
+ // clear why we did so.
+ //
+ // We don't try to find an optimal SAT solution,
+ // especially given the complex interactions with graph pruning.
+
+ var (
+ roots []module.Version // the current versions in selectedRoot, in sorted order
+ rootsDirty = true // true if roots does not match selectedRoot
+ )
+
+ // rejectedRoot records the set of module versions that have been disqualified
+ // as roots of the module graph. When downgrading due to a conflict or error,
+ // we skip any version that has already been rejected.
+ //
+ // NOTE(bcmills): I am not sure that the rejectedRoot map is really necessary,
+ // since we normally only downgrade roots or accept indirect upgrades to
+ // known-good versions. However, I am having trouble proving that accepting an
+ // indirect upgrade never introduces a conflict that leads to further
+ // downgrades. I really want to be able to prove that editRequirements
+ // terminates, and the easiest way to prove it is to add this map.
+ //
+ // Then the proof of termination is this:
+ // On every iteration where we mark the roots as dirty, we add some new module
+ // version to the map. The universe of module versions is finite, so we must
+ // eventually reach a state in which we do not add any version to the map.
+ // In that state, we either report a conflict or succeed in the edit.
+ rejectedRoot := map[module.Version]bool{}
+
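+	// Iterate until the root set is stable or we have conflicts to report.
+	// Each iteration rebuilds the candidate roots, loads the extended module
+	// graph, and either downgrades offending roots or accepts upgrades.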
+ for rootsDirty && len(conflicts) == 0 {
+ roots = roots[:0]
+ for p, v := range selectedRoot {
+ if v != "none" {
+ roots = append(roots, module.Version{Path: p, Version: v})
+ }
+ }
+ gover.ModSort(roots)
+
+ // First, we extend the graph so that it includes the selected version
+ // of every root. The upgraded roots are in addition to the original
+ // roots, so we will have enough information to trace a path to each
+ // conflict we discover from one or more of the original roots.
+ mg, upgradedRoots, err := extendGraph(ctx, rootPruning, roots, selectedRoot)
+ if err != nil {
+ var tooNew *gover.TooNewError
+ if mg == nil || errors.As(err, &tooNew) {
+ return orig, false, err
+ }
+ // We're about to walk the entire extended module graph, so we will find
+ // any error then — and we will either try to resolve it by downgrading
+ // something or report it as a conflict with more detail.
+ }
+
+ // extendedRootPruning is an index of the pruning used to load each root in
+ // the extended module graph.
+ extendedRootPruning := make(map[module.Version]modPruning, len(roots)+len(upgradedRoots))
+ findPruning := func(m module.Version) modPruning {
+ if rootPruning == pruned {
+ summary, _ := mg.loadCache.Get(m)
+ if summary != nil && summary.pruning == unpruned {
+ return unpruned
+ }
+ }
+ return rootPruning
+ }
+ for _, m := range roots {
+ extendedRootPruning[m] = findPruning(m)
+ }
+ for m := range upgradedRoots {
+ extendedRootPruning[m] = findPruning(m)
+ }
+
+ // Now check the resulting extended graph for errors and incompatibilities.
+ t := dqTracker{extendedRootPruning: extendedRootPruning}
+ mg.g.WalkBreadthFirst(func(m module.Version) {
+ if max, ok := mustSelectVersion[m.Path]; ok && gover.ModCompare(m.Path, m.Version, max) > 0 {
+ // m itself violates mustSelect, so it cannot appear in the module graph
+ // even if its transitive dependencies would be pruned out.
+ t.disqualify(m, pruned, dqState{dep: m})
+ return
+ }
+
+ summary, err := mg.loadCache.Get(m)
+ if err != nil && err != par.ErrCacheEntryNotFound {
+ // We can't determine the requirements of m, so we don't know whether
+ // they would be allowed. This may be a transient error reaching the
+ // repository, rather than a permanent error with the retrieved version.
+ //
+ // TODO(golang.org/issue/31730, golang.org/issue/30134):
+ // decide what to do based on the actual error.
+ t.disqualify(m, pruned, dqState{err: err})
+ return
+ }
+
+ reqs, ok := mg.RequiredBy(m)
+ if !ok {
+ // The dependencies of m do not appear in the module graph, so they
+ // can't be causing any problems this time.
+ return
+ }
+
+ if summary == nil {
+ if m.Version != "" {
+ panic(fmt.Sprintf("internal error: %d reqs present for %v, but summary is nil", len(reqs), m))
+ }
+ // m is the main module: we are editing its dependencies, so it cannot
+ // become disqualified.
+ return
+ }
+
+ // Before we check for problems due to transitive dependencies, first
+ // check m's direct requirements. A requirement on a version r that
+ // violates mustSelect disqualifies m, even if the requirements of r are
+ // themselves pruned out.
+ for _, r := range reqs {
+ if max, ok := mustSelectVersion[r.Path]; ok && gover.ModCompare(r.Path, r.Version, max) > 0 {
+ t.disqualify(m, pruned, dqState{dep: r})
+ return
+ }
+ }
+ for _, r := range reqs {
+ if !t.require(m, r) {
+ break
+ }
+ }
+ })
+
+ // We have now marked all of the versions in the graph that have conflicts,
+ // with a path to each conflict from one or more roots that introduce it.
+ // Now we need to identify those roots and change their versions
+ // (if possible) in order to resolve the conflicts.
+ rootsDirty = false
+ for _, m := range roots {
+ path, err := t.path(m, extendedRootPruning[m])
+ if len(path) == 0 && err == nil {
+ continue // Nothing wrong with m; we can keep it.
+ }
+
+ // path leads to a module with a problem: either it violates a constraint,
+ // or some error prevents us from determining whether it violates a
+ // constraint. We might end up logging or returning the conflict
+ // information, so go ahead and fill in the details about it.
+ conflict := Conflict{
+ Path: path,
+ Err: err,
+ }
+ if err == nil {
+ var last module.Version = path[len(path)-1]
+ mustV, ok := mustSelectVersion[last.Path]
+ if !ok {
+ fmt.Fprintf(os.Stderr, "go: %v\n", conflict)
+ panic("internal error: found a version conflict, but no constraint it violates")
+ }
+ conflict.Constraint = module.Version{
+ Path: last.Path,
+ Version: mustV,
+ }
+ }
+
+ if v, ok := mustSelectVersion[m.Path]; ok && v == m.Version {
+ // m is in mustSelect, but is marked as disqualified due to a transitive
+ // dependency.
+ //
+ // In theory we could try removing module paths that don't appear in
+ // mustSelect (added by tryUpgrade or already present in rs) in order to
+ // get graph pruning to take effect, but (a) it is likely that 'go mod
+ // tidy' would re-add those roots and reintroduce unwanted upgrades,
+ // causing confusion, and (b) deciding which roots to try to eliminate
+ // would add a lot of complexity.
+ //
+ // Instead, we report the path to the conflict as an error.
+ // If users want to explicitly prune out nodes from the dependency
+ // graph, they can always add an explicit 'exclude' directive.
+ conflicts = append(conflicts, conflict)
+ continue
+ }
+
+ // If m is not the selected version of its path, we have two options: we
+ // can either upgrade to the version that actually is selected (dropping m
+ // itself out of the bottom of the module graph), or we can try
+ // downgrading it.
+ //
+ // If the version we would be upgrading to is ok to use, we will just plan
+ // to do that and avoid the overhead of trying to find some lower version
+ // to downgrade to.
+ //
+ // However, it is possible that m depends on something that leads to its
+ // own upgrade, so if the upgrade isn't viable we should go ahead and try
+ // to downgrade (like with any other root).
+ if v := mg.Selected(m.Path); v != m.Version {
+ u := module.Version{Path: m.Path, Version: v}
+ uPruning, ok := t.extendedRootPruning[m]
+ if !ok {
+ fmt.Fprintf(os.Stderr, "go: %v\n", conflict)
+ panic(fmt.Sprintf("internal error: selected version of root %v is %v, but it was not expanded as a new root", m, u))
+ }
+ if !t.check(u, uPruning).isDisqualified() && !rejectedRoot[u] {
+ // Applying the upgrade from m to u will resolve the conflict,
+ // so plan to do that if there are no other conflicts to resolve.
+ continue
+ }
+ }
+
+ // Figure out what version of m's path was present before we started
+ // the edit. We want to make sure we consider keeping it as-is,
+ // even if it wouldn't normally be included. (For example, it might
+ // be a pseudo-version or pre-release.)
+ origMG, _ := orig.Graph(ctx)
+ origV := origMG.Selected(m.Path)
+
+ if conflict.Err != nil && origV == m.Version {
+ // This version of m.Path was already in the module graph before we
+ // started editing, and the problem with it is that we can't load its
+ // (transitive) requirements.
+ //
+ // If this conflict was just one step in a longer chain of downgrades,
+ // then we would want to keep going past it until we find a version
+ // that doesn't have that problem. However, we only want to downgrade
+ // away from an *existing* requirement if we can confirm that it actually
+ // conflicts with mustSelect. (For example, we don't want
+ // 'go get -u ./...' to incidentally downgrade some dependency whose
+ // go.mod file is unavailable or has a bad checksum.)
+ conflicts = append(conflicts, conflict)
+ continue
+ }
+
+ // We need to downgrade m's path to some lower version to try to resolve
+ // the conflict. Find the next-lowest candidate and apply it.
+ rejectedRoot[m] = true
+ prev := m
+ for {
+ prev, err = previousVersion(ctx, prev)
+ if gover.ModCompare(m.Path, m.Version, origV) > 0 && (gover.ModCompare(m.Path, prev.Version, origV) < 0 || err != nil) {
+ // previousVersion skipped over origV. Insert it into the order.
+ prev.Version = origV
+ } else if err != nil {
+ // We don't know the next downgrade to try. Give up.
+ return orig, false, err
+ }
+ if rejectedRoot[prev] {
+ // We already rejected prev in a previous round.
+ // To ensure that this algorithm terminates, don't try it again.
+ continue
+ }
+ pruning := rootPruning
+ if pruning == pruned {
+ if summary, err := mg.loadCache.Get(m); err == nil {
+ pruning = summary.pruning
+ }
+ }
+ if t.check(prev, pruning).isDisqualified() {
+ // We found a problem with prev this round that would also disqualify
+ // it as a root. Don't bother trying it next round.
+ rejectedRoot[prev] = true
+ continue
+ }
+ break
+ }
+ selectedRoot[m.Path] = prev.Version
+ rootsDirty = true
+
+ // If this downgrade is potentially interesting, log the reason for it.
+ if conflict.Err != nil || cfg.BuildV {
+ var action string
+ if prev.Version == "none" {
+ action = fmt.Sprintf("removing %s", m)
+ } else if prev.Version == origV {
+ action = fmt.Sprintf("restoring %s", prev)
+ } else {
+ action = fmt.Sprintf("trying %s", prev)
+ }
+ fmt.Fprintf(os.Stderr, "go: %s\n\t%s\n", conflict.Summary(), action)
+ }
+ }
+ if rootsDirty {
+ continue
+ }
+
+ // We didn't resolve any issues by downgrading, but we may still need to
+ // resolve some conflicts by locking in upgrades. Do that now.
+ //
+ // We don't do these upgrades until we're done downgrading because the
+ // downgrade process might reveal or remove conflicts (by changing which
+ // requirement edges are pruned out).
+ var upgradedFrom []module.Version // for logging only
+ for p, v := range selectedRoot {
+ if _, ok := mustSelectVersion[p]; !ok {
+ if actual := mg.Selected(p); actual != v {
+ if cfg.BuildV {
+ upgradedFrom = append(upgradedFrom, module.Version{Path: p, Version: v})
+ }
+ selectedRoot[p] = actual
+ // Accepting the upgrade to m.Path might cause the selected versions
+ // of other modules to fall, because they were being increased by
+ // dependencies of m that are no longer present in the graph.
+ //
+ // TODO(bcmills): Can removing m as a root also cause the selected
+ // versions of other modules to rise? I think not: we're strictly
+ // removing non-root nodes from the module graph, which can't cause
+ // any root to decrease (because they're roots), and the dependencies
+ // of non-roots don't matter because they're either always unpruned or
+ // always pruned out.
+ //
+ // At any rate, it shouldn't cost much to reload the module graph one
+ // last time and confirm that it is stable.
+ rootsDirty = true
+ }
+ }
+ }
+ if rootsDirty {
+ if cfg.BuildV {
+ gover.ModSort(upgradedFrom) // Make logging deterministic.
+ for _, m := range upgradedFrom {
+ fmt.Fprintf(os.Stderr, "go: accepting indirect upgrade from %v to %s\n", m, selectedRoot[m.Path])
+ }
+ }
+ continue
+ }
+ break
+ }
+ if len(conflicts) > 0 {
+ return orig, false, &ConstraintError{Conflicts: conflicts}
+ }
+
+ if rootPruning == unpruned {
+ // An unpruned go.mod file lists only a subset of the requirements needed
+ // for building packages. Figure out which requirements need to be explicit.
+ var rootPaths []string
+
+ // The modules in mustSelect are always promoted to be explicit.
+ for _, m := range mustSelect {
+ if m.Version != "none" && !MainModules.Contains(m.Path) {
+ rootPaths = append(rootPaths, m.Path)
+ }
+ }
+
+ for _, m := range roots {
+ if v, ok := rs.rootSelected(m.Path); ok && (v == m.Version || rs.direct[m.Path]) {
+ // m.Path was formerly a root, and either its version hasn't changed or
+ // we believe that it provides a package directly imported by a package
+ // or test in the main module. For now we'll assume that it is still
+ // relevant enough to remain a root. If we actually load all of the
+ // packages and tests in the main module (which we are not doing here),
+ // we can revise the explicit roots at that point.
+ rootPaths = append(rootPaths, m.Path)
+ }
+ }
+
+ roots, err = mvs.Req(MainModules.mustGetSingleMainModule(), rootPaths, &mvsReqs{roots: roots})
+ if err != nil {
+ return nil, false, err
+ }
+ }
+
+ changed = rootPruning != orig.pruning || !slices.Equal(roots, orig.rootModules)
+ if !changed {
+ // Because the roots we just computed are unchanged, the entire graph must
+ // be the same as it was before. Save the original rs, since we have
+ // probably already loaded its requirement graph.
+ return orig, false, nil
+ }
+
+ // A module that is not even in the build list necessarily cannot provide
+ // any imported packages. Mark as direct only the direct modules that are
+ // still in the build list. (We assume that any module path that provided a
+ // direct import before the edit continues to do so after. There are a few
+ // edge cases where that can change, such as if a package moves into or out of
+ // a nested module or disappears entirely. If that happens, the user can run
+ // 'go mod tidy' to clean up the direct/indirect annotations.)
+ //
+ // TODO(bcmills): Would it make more sense to leave the direct map as-is
+ // but allow it to refer to modules that are no longer in the build list?
+ // That might complicate updateRoots, but it may be cleaner in other ways.
+ direct := make(map[string]bool, len(rs.direct))
+ for _, m := range roots {
+ if rs.direct[m.Path] {
+ direct[m.Path] = true
+ }
+ }
+ edited = newRequirements(rootPruning, roots, direct)
+
+ // If we ended up adding a dependency that upgrades our go version far enough
+ // to activate pruning, we must convert the edited Requirements in order to
+ // avoid dropping transitive dependencies from the build list the next time
+ // someone uses the updated go.mod file.
+ //
+ // Note that it isn't possible to go in the other direction (from pruned to
+ // unpruned) unless the "go" or "toolchain" module is explicitly listed in
+ // mustSelect, which we already handled at the very beginning of the edit.
+ // That is because the virtual "go" module only requires a "toolchain",
+ // and the "toolchain" module never requires anything else, which means that
+ // those two modules will never be downgraded due to a conflict with any other
+ // constraint.
+ if rootPruning == unpruned {
+ if v, ok := edited.rootSelected("go"); ok && pruningForGoVersion(v) == pruned {
+ // Since we computed the edit with the unpruned graph, and the pruned
+ // graph is a strict subset of the unpruned graph, this conversion
+ // preserves the exact (edited) build list that we already computed.
+ //
+ // However, it does that by shoving the whole build list into the roots of
+ // the graph. 'go get' will check for that sort of transition and log a
+ // message reminding the user how to clean up this mess we're about to
+ // make. 😅
+ edited, err = convertPruning(ctx, edited, pruned)
+ if err != nil {
+ return orig, false, err
+ }
+ }
+ }
+ return edited, true, nil
+}
+
+// extendGraph loads the module graph from roots, and iteratively extends it by
+// unpruning the selected version of each module path that is a root in rs or in
+// the roots slice until the graph reaches a fixed point.
+//
+// The graph is guaranteed to converge to a fixed point because unpruning a
+// module version can only increase (never decrease) the selected versions,
+// and the set of versions for each module is finite.
+//
+// The extended graph is useful for diagnosing version conflicts: for each
+// selected module version, it can provide a complete path of requirements from
+// some root to that version.
+func extendGraph(ctx context.Context, rootPruning modPruning, roots []module.Version, selectedRoot map[string]string) (mg *ModuleGraph, upgradedRoot map[module.Version]bool, err error) {
+ for {
+ mg, err = readModGraph(ctx, rootPruning, roots, upgradedRoot)
+ // We keep on going even if err is non-nil until we reach a steady state.
+ // (Note that readModGraph returns a non-nil *ModuleGraph even in case of
+ // errors.) The caller may be able to fix the errors by adjusting versions,
+ // so we really want to return as complete a result as we can.
+
+ if rootPruning == unpruned {
+ // Everything is already unpruned, so there isn't anything we can do to
+ // extend it further.
+ break
+ }
+
+ nPrevRoots := len(upgradedRoot)
+ for p := range selectedRoot {
+ // Since p is a root path, when we fix up the module graph to be
+ // consistent with the selected versions, p will be promoted to a root,
+ // which will pull in its dependencies. Ensure that its dependencies are
+ // included in the module graph.
+ v := mg.g.Selected(p)
+ if v == "none" {
+ // Version “none” always has no requirements, so it doesn't need
+ // an explicit node in the module graph.
+ continue
+ }
+ m := module.Version{Path: p, Version: v}
+ if _, ok := mg.g.RequiredBy(m); !ok && !upgradedRoot[m] {
+ // The dependencies of the selected version of p were not loaded.
+ // Mark it as an upgrade so that we will load its dependencies
+ // in the next iteration.
+ //
+ // Note that we don't remove any of the existing roots, even if they are
+ // no longer the selected version: with graph pruning in effect this may
+ // leave some spurious dependencies in the graph, but it at least
+ // preserves enough of the graph to explain why each upgrade occurred:
+ // this way, we can report a complete path from the passed-in roots
+ // to every node in the module graph.
+ //
+ // This process is guaranteed to reach a fixed point: since we are only
+ // adding roots (never removing them), the selected version of each module
+ // can only increase, never decrease, and the set of module versions in the
+ // universe is finite.
+ if upgradedRoot == nil {
+ upgradedRoot = make(map[module.Version]bool)
+ }
+ upgradedRoot[m] = true
+ }
+ }
+ if len(upgradedRoot) == nPrevRoots {
+ break
+ }
+ }
+
+ return mg, upgradedRoot, err
+}
+
+type perPruning[T any] struct {
+ pruned T
+ unpruned T
+}
+
+func (pp perPruning[T]) from(p modPruning) T {
+ if p == unpruned {
+ return pp.unpruned
+ }
+ return pp.pruned
+}
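+
+// For illustration (field values here are hypothetical), from selects the
+// field that matches the given pruning mode:
+//
+//	pp := perPruning[string]{pruned: "a", unpruned: "b"}
+//	_ = pp.from(pruned)   // "a"
+//	_ = pp.from(unpruned) // "b"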
+
+// A dqTracker tracks and propagates the reason that each module version
+// cannot be included in the module graph.
+type dqTracker struct {
+ // extendedRootPruning is the modPruning given the go.mod file for each root
+ // in the extended module graph.
+ extendedRootPruning map[module.Version]modPruning
+
+ // dqReason records whether and why each encountered version is
+ // disqualified in a pruned or unpruned context.
+ dqReason map[module.Version]perPruning[dqState]
+
+ // requiring maps each not-yet-disqualified module version to the versions
+ // that would cause that module's requirements to be included in a pruned or
+ // unpruned context. If that version becomes disqualified, the
+ // disqualification will be propagated to all of the versions in the
+ // corresponding list.
+ //
+ // This map is similar to the module requirement graph, but includes more
+ // detail about whether a given dependency edge appears in a pruned or
+ // unpruned context. (Other commands do not need this level of detail.)
+ requiring map[module.Version][]module.Version
+}
+
+// A dqState indicates whether and why a module version is “disqualified” from
+// being used in a way that would incorporate its requirements.
+//
+// The zero dqState indicates that the module version is not known to be
+// disqualified, either because it is ok or because we are currently traversing
+// a cycle that includes it.
+type dqState struct {
+ err error // if non-nil, disqualified because the requirements of the module could not be read
+ dep module.Version // disqualified because the module is or requires dep
+}
+
+func (dq dqState) isDisqualified() bool {
+ return dq != dqState{}
+}
+
+func (dq dqState) String() string {
+ if dq.err != nil {
+ return dq.err.Error()
+ }
+ if dq.dep != (module.Version{}) {
+ return dq.dep.String()
+ }
+ return "(no conflict)"
+}
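+
+// For illustration (the module path and version below are hypothetical), the
+// zero dqState reports no conflict, while a dqState carrying a dep reports
+// that dependency:
+//
+//	var ok dqState
+//	_ = ok.isDisqualified() // false
+//	_ = ok.String()         // "(no conflict)"
+//
+//	dq := dqState{dep: module.Version{Path: "example.com/m", Version: "v1.2.3"}}
+//	_ = dq.isDisqualified() // true
+//	_ = dq.String()         // "example.com/m@v1.2.3"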
+
+// require records that m directly requires r, in case r becomes disqualified.
+// (These edges are in the opposite direction from the edges in an mvs.Graph.)
+//
+// If r is already disqualified, require propagates the disqualification to m
+// and returns the reason for the disqualification.
+func (t *dqTracker) require(m, r module.Version) (ok bool) {
+ rdq := t.dqReason[r]
+ rootPruning, isRoot := t.extendedRootPruning[r]
+ if isRoot && rdq.from(rootPruning).isDisqualified() {
+ // When we pull in m's dependencies, we will have an edge from m to r, and r
+ // is disqualified (it is a root, which causes its problematic dependencies
+ // to always be included). So we cannot pull in m's dependencies at all:
+ // m is completely disqualified.
+ t.disqualify(m, pruned, dqState{dep: r})
+ return false
+ }
+
+ if dq := rdq.from(unpruned); dq.isDisqualified() {
+ t.disqualify(m, unpruned, dqState{dep: r})
+ if _, ok := t.extendedRootPruning[m]; !ok {
+ // Since m is not a root, its dependencies can't be included in the pruned
+ // part of the module graph, and will never be disqualified for a pruned
+ // reason. We've already disqualified everything that matters.
+ return false
+ }
+ }
+
+ // Record that m is a dependent of r, so that if r is later disqualified,
+ // m will be disqualified as well.
+ if t.requiring == nil {
+ t.requiring = make(map[module.Version][]module.Version)
+ }
+ t.requiring[r] = append(t.requiring[r], m)
+ return true
+}
+
+// disqualify records why the dependencies of m cannot be included in the module
+// graph if reached from a part of the graph with the given pruning.
+//
+// Since the pruned graph is a subgraph of the unpruned graph, disqualifying a
+// module from a pruned part of the graph also disqualifies it in the unpruned
+// parts.
+func (t *dqTracker) disqualify(m module.Version, fromPruning modPruning, reason dqState) {
+ if !reason.isDisqualified() {
+ panic("internal error: disqualify called with a non-disqualifying dqState")
+ }
+
+ dq := t.dqReason[m]
+ if dq.from(fromPruning).isDisqualified() {
+ return // Already disqualified for some other reason; don't overwrite it.
+ }
+ rootPruning, isRoot := t.extendedRootPruning[m]
+ if fromPruning == pruned {
+ dq.pruned = reason
+ if !dq.unpruned.isDisqualified() {
+ // Since the pruned graph of m is a subgraph of the unpruned graph, if it
+ // is disqualified due to something in the pruned graph, it is certainly
+ // disqualified in the unpruned graph for the same reason.
+ dq.unpruned = reason
+ }
+ } else {
+ dq.unpruned = reason
+ if dq.pruned.isDisqualified() {
+ panic(fmt.Sprintf("internal error: %v is marked as disqualified when pruned, but not when unpruned", m))
+ }
+ if isRoot && rootPruning == unpruned {
+ // Since m is a root that is always unpruned, any other roots — even
+ // pruned ones! — that cause it to be selected would also cause the reason
+ // for its disqualification to be included in the module graph.
+ dq.pruned = reason
+ }
+ }
+ if t.dqReason == nil {
+ t.dqReason = make(map[module.Version]perPruning[dqState])
+ }
+ t.dqReason[m] = dq
+
+ if isRoot && (fromPruning == pruned || rootPruning == unpruned) {
+ // Either m is disqualified even when its dependencies are pruned,
+ // or m's go.mod file causes its dependencies to *always* be unpruned.
+ // Everything that depends on it must be disqualified.
+ for _, p := range t.requiring[m] {
+ t.disqualify(p, pruned, dqState{dep: m})
+ // Note that since the pruned graph is a subset of the unpruned graph,
+ // disqualifying p in the pruned graph also disqualifies it in the
+ // unpruned graph.
+ }
+ // Everything in t.requiring[m] is now fully disqualified.
+ // We won't need to use it again.
+ delete(t.requiring, m)
+ return
+ }
+
+ // Either m is not a root, or it is a pruned root that is only disqualified
+ // when reached from the unpruned parts of the module graph.
+ // Either way, the reason for this disqualification is only visible to the
+ // unpruned parts of the module graph.
+ for _, p := range t.requiring[m] {
+ t.disqualify(p, unpruned, dqState{dep: m})
+ }
+ if !isRoot {
+ // Since m is not a root, its dependencies can't be included in the pruned
+ // part of the module graph, and will never be disqualified for a pruned
+ // reason. We've already disqualified everything that matters.
+ delete(t.requiring, m)
+ }
+}
+
+// check reports whether m is disqualified in the given pruning context.
+func (t *dqTracker) check(m module.Version, pruning modPruning) dqState {
+ return t.dqReason[m].from(pruning)
+}
+
+// path returns the path from m to the reason it is disqualified, which may be
+// either a module that violates constraints or an error in loading
+// requirements.
+//
+// If m is not disqualified, path returns (nil, nil).
+func (t *dqTracker) path(m module.Version, pruning modPruning) (path []module.Version, err error) {
+ for {
+ dq := t.dqReason[m].from(pruning)
+ if !dq.isDisqualified() {
+ return path, nil
+ }
+ path = append(path, m)
+ if dq.err != nil || dq.dep == m {
+ return path, dq.err // m itself is the conflict.
+ }
+ m = dq.dep
+ }
+}
diff --git a/src/cmd/go/internal/modload/help.go b/src/cmd/go/internal/modload/help.go
new file mode 100644
index 0000000..886ad62
--- /dev/null
+++ b/src/cmd/go/internal/modload/help.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import "cmd/go/internal/base"
+
+var HelpModules = &base.Command{
+ UsageLine: "modules",
+ Short: "modules, module versions, and more",
+ Long: `
+Modules are how Go manages dependencies.
+
+A module is a collection of packages that are released, versioned, and
+distributed together. Modules may be downloaded directly from version control
+repositories or from module proxy servers.
+
+For a series of tutorials on modules, see
+https://golang.org/doc/tutorial/create-module.
+
+For a detailed reference on modules, see https://golang.org/ref/mod.
+
+By default, the go command may download modules from https://proxy.golang.org.
+It may authenticate modules using the checksum database at
+https://sum.golang.org. Both services are operated by the Go team at Google.
+The privacy policies for these services are available at
+https://proxy.golang.org/privacy and https://sum.golang.org/privacy,
+respectively.
+
+The go command's download behavior may be configured using GOPROXY, GOSUMDB,
+GOPRIVATE, and other environment variables. See 'go help environment'
+and https://golang.org/ref/mod#private-module-privacy for more information.
+ `,
+}
+
+var HelpGoMod = &base.Command{
+ UsageLine: "go.mod",
+ Short: "the go.mod file",
+ Long: `
+A module version is defined by a tree of source files, with a go.mod
+file in its root. When the go command is run, it looks in the current
+directory and then successive parent directories to find the go.mod
+marking the root of the main (current) module.
+
+The go.mod file format is described in detail at
+https://golang.org/ref/mod#go-mod-file.
+
+To create a new go.mod file, use 'go mod init'. For details see
+'go help mod init' or https://golang.org/ref/mod#go-mod-init.
+
+To add missing module requirements or remove unneeded requirements,
+use 'go mod tidy'. For details, see 'go help mod tidy' or
+https://golang.org/ref/mod#go-mod-tidy.
+
+To add, upgrade, downgrade, or remove a specific module requirement, use
+'go get'. For details, see 'go help module-get' or
+https://golang.org/ref/mod#go-get.
+
+To make other changes or to parse go.mod as JSON for use by other tools,
+use 'go mod edit'. See 'go help mod edit' or
+https://golang.org/ref/mod#go-mod-edit.
+ `,
+}
diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go
new file mode 100644
index 0000000..83b9ad4
--- /dev/null
+++ b/src/cmd/go/internal/modload/import.go
@@ -0,0 +1,773 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "go/build"
+ "io/fs"
+ "os"
+ pathpkg "path"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modindex"
+ "cmd/go/internal/par"
+ "cmd/go/internal/search"
+ "cmd/go/internal/str"
+
+ "golang.org/x/mod/module"
+)
+
+type ImportMissingError struct {
+ Path string
+ Module module.Version
+ QueryErr error
+
+ ImportingMainModule module.Version
+
+ // isStd indicates whether we would expect to find the package in the standard
+ // library. This is normally true for all dotless import paths, but replace
+ // directives can cause us to treat the replaced paths as also being in
+ // modules.
+ isStd bool
+
+ // importerGoVersion is the go version specified by the module containing
+ // the import error. It is only set when isStd is true.
+ importerGoVersion string
+
+ // replaced is the highest replaced version of the module where the
+ // replacement contains the package. replaced is only set if the
+ // replacement is unused.
+ replaced module.Version
+
+ // newMissingVersion is set to a newer version of Module if one is present
+ // in the build list. When set, we can't automatically upgrade.
+ newMissingVersion string
+}
+
+func (e *ImportMissingError) Error() string {
+ if e.Module.Path == "" {
+ if e.isStd {
+ msg := fmt.Sprintf("package %s is not in std (%s)", e.Path, filepath.Join(cfg.GOROOT, "src", e.Path))
+ if e.importerGoVersion != "" {
+ msg += fmt.Sprintf("\nnote: imported by a module that requires go %s", e.importerGoVersion)
+ }
+ return msg
+ }
+ if e.QueryErr != nil && e.QueryErr != ErrNoModRoot {
+ return fmt.Sprintf("cannot find module providing package %s: %v", e.Path, e.QueryErr)
+ }
+ if cfg.BuildMod == "mod" || (cfg.BuildMod == "readonly" && allowMissingModuleImports) {
+ return "cannot find module providing package " + e.Path
+ }
+
+ if e.replaced.Path != "" {
+ suggestArg := e.replaced.Path
+ if !module.IsZeroPseudoVersion(e.replaced.Version) {
+ suggestArg = e.replaced.String()
+ }
+ return fmt.Sprintf("module %s provides package %s and is replaced but not required; to add it:\n\tgo get %s", e.replaced.Path, e.Path, suggestArg)
+ }
+
+ message := fmt.Sprintf("no required module provides package %s", e.Path)
+ if e.QueryErr != nil {
+ return fmt.Sprintf("%s: %v", message, e.QueryErr)
+ }
+ if e.ImportingMainModule.Path != "" && e.ImportingMainModule != MainModules.ModContainingCWD() {
+ return fmt.Sprintf("%s; to add it:\n\tcd %s\n\tgo get %s", message, MainModules.ModRoot(e.ImportingMainModule), e.Path)
+ }
+ return fmt.Sprintf("%s; to add it:\n\tgo get %s", message, e.Path)
+ }
+
+ if e.newMissingVersion != "" {
+ return fmt.Sprintf("package %s provided by %s at latest version %s but not at required version %s", e.Path, e.Module.Path, e.Module.Version, e.newMissingVersion)
+ }
+
+ return fmt.Sprintf("missing module for import: %s@%s provides %s", e.Module.Path, e.Module.Version, e.Path)
+}
+
+func (e *ImportMissingError) Unwrap() error {
+ return e.QueryErr
+}
+
+func (e *ImportMissingError) ImportPath() string {
+ return e.Path
+}
+
+// An AmbiguousImportError indicates an import of a package found in multiple
+// modules in the build list, or found in both the main module and its vendor
+// directory.
+type AmbiguousImportError struct {
+ importPath string
+ Dirs []string
+ Modules []module.Version // Either empty or 1:1 with Dirs.
+}
+
+func (e *AmbiguousImportError) ImportPath() string {
+ return e.importPath
+}
+
+func (e *AmbiguousImportError) Error() string {
+ locType := "modules"
+ if len(e.Modules) == 0 {
+ locType = "directories"
+ }
+
+ var buf strings.Builder
+ fmt.Fprintf(&buf, "ambiguous import: found package %s in multiple %s:", e.importPath, locType)
+
+ for i, dir := range e.Dirs {
+ buf.WriteString("\n\t")
+ if i < len(e.Modules) {
+ m := e.Modules[i]
+ buf.WriteString(m.Path)
+ if m.Version != "" {
+ fmt.Fprintf(&buf, " %s", m.Version)
+ }
+ fmt.Fprintf(&buf, " (%s)", dir)
+ } else {
+ buf.WriteString(dir)
+ }
+ }
+
+ return buf.String()
+}
+
+// A DirectImportFromImplicitDependencyError indicates a package directly
+// imported by a package or test in the main module that is satisfied by a
+// dependency that is not explicit in the main module's go.mod file.
+type DirectImportFromImplicitDependencyError struct {
+ ImporterPath string
+ ImportedPath string
+ Module module.Version
+}
+
+func (e *DirectImportFromImplicitDependencyError) Error() string {
+ return fmt.Sprintf("package %s imports %s from implicitly required module; to add missing requirements, run:\n\tgo get %s@%s", e.ImporterPath, e.ImportedPath, e.Module.Path, e.Module.Version)
+}
+
+func (e *DirectImportFromImplicitDependencyError) ImportPath() string {
+ return e.ImporterPath
+}
+
+// ImportMissingSumError is reported in readonly mode when we need to check
+// if a module contains a package, but we don't have a sum for its .zip file.
+// We might need sums for multiple modules to verify the package is unique.
+//
+// TODO(#43653): consolidate multiple errors of this type into a single error
+// that suggests a 'go get' command for root packages that transitively import
+// packages from modules with missing sums. load.CheckPackageErrors would be
+// a good place to consolidate errors, but we'll need to attach the import
+// stack here.
+type ImportMissingSumError struct {
+ importPath string
+ found bool
+ mods []module.Version
+ importer, importerVersion string // optional, but used for additional context
+ importerIsTest bool
+}
+
+func (e *ImportMissingSumError) Error() string {
+ var importParen string
+ if e.importer != "" {
+ importParen = fmt.Sprintf(" (imported by %s)", e.importer)
+ }
+ var message string
+ if e.found {
+ message = fmt.Sprintf("missing go.sum entry needed to verify package %s%s is provided by exactly one module", e.importPath, importParen)
+ } else {
+ message = fmt.Sprintf("missing go.sum entry for module providing package %s%s", e.importPath, importParen)
+ }
+ var hint string
+ if e.importer == "" {
+ // Importing package is unknown, or the missing package was named on the
+ // command line. Recommend 'go mod download' for the modules that could
+ // provide the package, since that shouldn't change go.mod.
+ if len(e.mods) > 0 {
+ args := make([]string, len(e.mods))
+ for i, mod := range e.mods {
+ args[i] = mod.Path
+ }
+ hint = fmt.Sprintf("; to add:\n\tgo mod download %s", strings.Join(args, " "))
+ }
+ } else {
+ // Importing package is known (common case). Recommend 'go get' on the
+ // current version of the importing package.
+ tFlag := ""
+ if e.importerIsTest {
+ tFlag = " -t"
+ }
+ version := ""
+ if e.importerVersion != "" {
+ version = "@" + e.importerVersion
+ }
+ hint = fmt.Sprintf("; to add:\n\tgo get%s %s%s", tFlag, e.importer, version)
+ }
+ return message + hint
+}
+
+func (e *ImportMissingSumError) ImportPath() string {
+ return e.importPath
+}
+
+type invalidImportError struct {
+ importPath string
+ err error
+}
+
+func (e *invalidImportError) ImportPath() string {
+ return e.importPath
+}
+
+func (e *invalidImportError) Error() string {
+ return e.err.Error()
+}
+
+func (e *invalidImportError) Unwrap() error {
+ return e.err
+}
+
+// importFromModules finds the module and directory in the dependency graph of
+// rs containing the package with the given import path. If mg is nil,
+// importFromModules attempts to locate the module using only the main module
+// and the roots of rs before it loads the full graph.
+//
+// The answer must be unique: importFromModules returns an error if multiple
+// modules are observed to provide the same package.
+//
+// importFromModules can return a module with an empty m.Path, for packages in
+// the standard library.
+//
+// importFromModules can return an empty directory string, for fake packages
+// like "C" and "unsafe".
+//
+// If the package is not present in any module selected from the requirement
+// graph, importFromModules returns an *ImportMissingError.
+//
+// If the package is present in exactly one module, importFromModules will
+// return the module, its root directory, and a list of other modules that
+// lexically could have provided the package but did not.
+//
+// If skipModFile is true, the go.mod file for the package is not loaded. This
+// allows 'go mod tidy' to preserve a minor checksum-preservation bug
+// (https://go.dev/issue/56222) for modules with 'go' versions between 1.17 and
+// 1.20, preventing unnecessary go.sum churn and network access in those
+// modules.
+func importFromModules(ctx context.Context, path string, rs *Requirements, mg *ModuleGraph, skipModFile bool) (m module.Version, modroot, dir string, altMods []module.Version, err error) {
+ invalidf := func(format string, args ...interface{}) (module.Version, string, string, []module.Version, error) {
+ return module.Version{}, "", "", nil, &invalidImportError{
+ importPath: path,
+ err: fmt.Errorf(format, args...),
+ }
+ }
+
+ if strings.Contains(path, "@") {
+ return invalidf("import path %q should not have @version", path)
+ }
+ if build.IsLocalImport(path) {
+ return invalidf("%q is relative, but relative import paths are not supported in module mode", path)
+ }
+ if filepath.IsAbs(path) {
+ return invalidf("%q is not a package path; see 'go help packages'", path)
+ }
+ if search.IsMetaPackage(path) {
+ return invalidf("%q is not an importable package; see 'go help packages'", path)
+ }
+
+ if path == "C" {
+ // There's no directory for import "C".
+ return module.Version{}, "", "", nil, nil
+ }
+ // Before any further lookup, check that the path is valid.
+ if err := module.CheckImportPath(path); err != nil {
+ return module.Version{}, "", "", nil, &invalidImportError{importPath: path, err: err}
+ }
+
+ // Check each module on the build list.
+ var dirs, roots []string
+ var mods []module.Version
+
+ // Is the package in the standard library?
+ pathIsStd := search.IsStandardImportPath(path)
+ if pathIsStd && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
+ for _, mainModule := range MainModules.Versions() {
+ if MainModules.InGorootSrc(mainModule) {
+ if dir, ok, err := dirInModule(path, MainModules.PathPrefix(mainModule), MainModules.ModRoot(mainModule), true); err != nil {
+ return module.Version{}, MainModules.ModRoot(mainModule), dir, nil, err
+ } else if ok {
+ return mainModule, MainModules.ModRoot(mainModule), dir, nil, nil
+ }
+ }
+ }
+ dir := filepath.Join(cfg.GOROOTsrc, path)
+ modroot = cfg.GOROOTsrc
+ if str.HasPathPrefix(path, "cmd") {
+ modroot = filepath.Join(cfg.GOROOTsrc, "cmd")
+ }
+ dirs = append(dirs, dir)
+ roots = append(roots, modroot)
+ mods = append(mods, module.Version{})
+ }
+ // -mod=vendor is special.
+ // Everything must be in the main module or the main module's vendor directory.
+ if cfg.BuildMod == "vendor" {
+ mainModule := MainModules.mustGetSingleMainModule()
+ modRoot := MainModules.ModRoot(mainModule)
+ var mainErr error
+ if modRoot != "" {
+ mainDir, mainOK, err := dirInModule(path, MainModules.PathPrefix(mainModule), modRoot, true)
+ mainErr = err
+ if mainOK {
+ mods = append(mods, mainModule)
+ dirs = append(dirs, mainDir)
+ roots = append(roots, modRoot)
+ }
+ vendorDir, vendorOK, _ := dirInModule(path, "", filepath.Join(modRoot, "vendor"), false)
+ if vendorOK {
+ readVendorList(mainModule)
+ mods = append(mods, vendorPkgModule[path])
+ dirs = append(dirs, vendorDir)
+ roots = append(roots, modRoot)
+ }
+ }
+
+ if len(dirs) > 1 {
+ return module.Version{}, modRoot, "", nil, &AmbiguousImportError{importPath: path, Dirs: dirs}
+ }
+
+ if mainErr != nil {
+ return module.Version{}, "", "", nil, mainErr
+ }
+
+ if len(dirs) == 0 {
+ return module.Version{}, modRoot, "", nil, &ImportMissingError{Path: path}
+ }
+
+ return mods[0], roots[0], dirs[0], nil, nil
+ }
+
+ // Iterate over possible modules for the path, not all selected modules.
+ // Iterating over selected modules would make the overall loading time
+ // O(M × P) for M modules providing P imported packages, whereas iterating
+ // over path prefixes is only O(P × k) with maximum path depth k. For
+ // large projects both M and P may be very large (note that M ≤ P), but k
+ // will tend to remain smallish (if for no other reason than filesystem
+ // path limitations).
+ //
+ // We perform this iteration either one or two times. If mg is initially nil,
+ // then we first attempt to load the package using only the main module and
+ // its root requirements. If that does not identify the package, or if mg is
+ // already non-nil, then we attempt to load the package using the full
+ // requirements in mg.
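+ //
+ // For a hypothetical import path "example.com/a/b/c", the prefix loop below
+ // considers "example.com/a/b/c", "example.com/a/b", "example.com/a", and
+ // finally "example.com", in that order.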
+ for {
+ var sumErrMods, altMods []module.Version
+ for prefix := path; prefix != "."; prefix = pathpkg.Dir(prefix) {
+ if gover.IsToolchain(prefix) {
+ // Do not use the synthetic "go" module for "go/ast".
+ continue
+ }
+ var (
+ v string
+ ok bool
+ )
+ if mg == nil {
+ v, ok = rs.rootSelected(prefix)
+ } else {
+ v, ok = mg.Selected(prefix), true
+ }
+ if !ok || v == "none" {
+ continue
+ }
+ m := module.Version{Path: prefix, Version: v}
+
+ root, isLocal, err := fetch(ctx, m)
+ if err != nil {
+ if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) {
+ // We are missing a sum needed to fetch a module in the build list.
+ // We can't verify that the package is unique, and we may not find
+ // the package at all. Keep checking other modules to decide which
+ // error to report. Multiple sums may be missing if we need to look in
+ // multiple nested modules to resolve the import; we'll report them all.
+ sumErrMods = append(sumErrMods, m)
+ continue
+ }
+ // Report fetch error.
+ // Note that we don't know for sure this module is necessary,
+ // but it certainly _could_ provide the package, and even if we
+ // continue the loop and find the package in some other module,
+ // we need to look at this module to make sure the import is
+ // not ambiguous.
+ return module.Version{}, "", "", nil, err
+ }
+ if dir, ok, err := dirInModule(path, m.Path, root, isLocal); err != nil {
+ return module.Version{}, "", "", nil, err
+ } else if ok {
+ mods = append(mods, m)
+ roots = append(roots, root)
+ dirs = append(dirs, dir)
+ } else {
+ altMods = append(altMods, m)
+ }
+ }
+
+ if len(mods) > 1 {
+ // We produce the list of directories from longest to shortest candidate
+ // module path, but the AmbiguousImportError should report them from
+ // shortest to longest. Reverse them now.
+ for i := 0; i < len(mods)/2; i++ {
+ j := len(mods) - 1 - i
+ mods[i], mods[j] = mods[j], mods[i]
+ roots[i], roots[j] = roots[j], roots[i]
+ dirs[i], dirs[j] = dirs[j], dirs[i]
+ }
+ return module.Version{}, "", "", nil, &AmbiguousImportError{importPath: path, Dirs: dirs, Modules: mods}
+ }
+
+ if len(sumErrMods) > 0 {
+ for i := 0; i < len(sumErrMods)/2; i++ {
+ j := len(sumErrMods) - 1 - i
+ sumErrMods[i], sumErrMods[j] = sumErrMods[j], sumErrMods[i]
+ }
+ return module.Version{}, "", "", nil, &ImportMissingSumError{
+ importPath: path,
+ mods: sumErrMods,
+ found: len(mods) > 0,
+ }
+ }
+
+ if len(mods) == 1 {
+ // We've found the unique module containing the package.
+ // However, in order to actually compile it we need to know what
+ // Go language version to use, which requires its go.mod file.
+ //
+ // If the module graph is pruned and this is a test-only dependency
+ // of a package in "all", we didn't necessarily load that file
+ // when we read the module graph, so do it now to be sure.
+ if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !MainModules.Contains(mods[0].Path) {
+ if _, err := goModSummary(mods[0]); err != nil {
+ return module.Version{}, "", "", nil, err
+ }
+ }
+ return mods[0], roots[0], dirs[0], altMods, nil
+ }
+
+ if mg != nil {
+ // We checked the full module graph and still didn't find the
+ // requested package.
+ var queryErr error
+ if !HasModRoot() {
+ queryErr = ErrNoModRoot
+ }
+ return module.Version{}, "", "", nil, &ImportMissingError{Path: path, QueryErr: queryErr, isStd: pathIsStd}
+ }
+
+ // So far we've checked the root dependencies.
+ // Load the full module graph and try again.
+ mg, err = rs.Graph(ctx)
+ if err != nil {
+ // We might be missing one or more transitive (implicit) dependencies from
+ // the module graph, so we can't return an ImportMissingError here — one
+ // of the missing modules might actually contain the package in question,
+ // in which case we shouldn't go looking for it in some new dependency.
+ return module.Version{}, "", "", nil, err
+ }
+ }
+}
+
+// queryImport attempts to locate a module that can be added to the current
+// build list to provide the package with the given import path.
+//
+// Unlike QueryPattern, queryImport prefers to add a replaced version of a
+// module *before* checking the proxies for a version to add.
+func queryImport(ctx context.Context, path string, rs *Requirements) (module.Version, error) {
+ // To avoid spurious remote fetches, try the latest replacement for each
+ // module (golang.org/issue/26241).
+ var mods []module.Version
+ if MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check.
+ for mp, mv := range MainModules.HighestReplaced() {
+ if !maybeInModule(path, mp) {
+ continue
+ }
+ if mv == "" {
+ // The only replacement is a wildcard that doesn't specify a version, so
+ // synthesize a pseudo-version with an appropriate major version and a
+ // timestamp below any real timestamp. That way, if the main module is
+ // used from within some other module, the user will be able to upgrade
+ // the requirement to any real version they choose.
+ if _, pathMajor, ok := module.SplitPathVersion(mp); ok && len(pathMajor) > 0 {
+ mv = module.ZeroPseudoVersion(pathMajor[1:])
+ } else {
+ mv = module.ZeroPseudoVersion("v0")
+ }
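+ // For a hypothetical wildcard replacement of example.com/m/v2, the
+ // synthesized placeholder version above is v2.0.0-00010101000000-000000000000.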
+ }
+ mg, err := rs.Graph(ctx)
+ if err != nil {
+ return module.Version{}, err
+ }
+ if gover.ModCompare(mp, mg.Selected(mp), mv) >= 0 {
+ // We can't resolve the import by adding mp@mv to the module graph,
+ // because the selected version of mp is already at least mv.
+ continue
+ }
+ mods = append(mods, module.Version{Path: mp, Version: mv})
+ }
+ }
+
+ // Every module path in mods is a prefix of the import path.
+ // As in QueryPattern, prefer the longest prefix that satisfies the import.
+ sort.Slice(mods, func(i, j int) bool {
+ return len(mods[i].Path) > len(mods[j].Path)
+ })
+ for _, m := range mods {
+ root, isLocal, err := fetch(ctx, m)
+ if err != nil {
+ if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) {
+ return module.Version{}, &ImportMissingSumError{importPath: path}
+ }
+ return module.Version{}, err
+ }
+ if _, ok, err := dirInModule(path, m.Path, root, isLocal); err != nil {
+ return m, err
+ } else if ok {
+ if cfg.BuildMod == "readonly" {
+ return module.Version{}, &ImportMissingError{Path: path, replaced: m}
+ }
+ return m, nil
+ }
+ }
+ if len(mods) > 0 && module.CheckPath(path) != nil {
+ // The package path is not valid to fetch remotely,
+ // so it can only exist in a replaced module,
+ // and we know from the above loop that it is not.
+ replacement := Replacement(mods[0])
+ return module.Version{}, &PackageNotInModuleError{
+ Mod: mods[0],
+ Query: "latest",
+ Pattern: path,
+ Replacement: replacement,
+ }
+ }
+
+ if search.IsStandardImportPath(path) {
+ // This package isn't in the standard library, isn't in any module already
+ // in the build list, and isn't in any other module that the user has
+ // shimmed in via a "replace" directive.
+ // Moreover, the import path is reserved for the standard library, so
+ // QueryPattern cannot possibly find a module containing this package.
+ //
+ // Instead of trying QueryPattern, report an ImportMissingError immediately.
+ return module.Version{}, &ImportMissingError{Path: path, isStd: true}
+ }
+
+ if (cfg.BuildMod == "readonly" || cfg.BuildMod == "vendor") && !allowMissingModuleImports {
+ // In readonly mode, we can't write go.mod, so we shouldn't try to look up
+ // the module. If readonly mode was enabled explicitly, include that in
+ // the error message.
+ // In vendor mode, we cannot use the network or module cache, so we
+ // shouldn't try to look up the module.
+ var queryErr error
+ if cfg.BuildModExplicit {
+ queryErr = fmt.Errorf("import lookup disabled by -mod=%s", cfg.BuildMod)
+ } else if cfg.BuildModReason != "" {
+ queryErr = fmt.Errorf("import lookup disabled by -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason)
+ }
+ return module.Version{}, &ImportMissingError{Path: path, QueryErr: queryErr}
+ }
+
+ // Look up module containing the package, for addition to the build list.
+ // Goal is to determine the module, download it to dir,
+ // and return m, dir, ImportMissingError.
+ fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path)
+
+ mg, err := rs.Graph(ctx)
+ if err != nil {
+ return module.Version{}, err
+ }
+
+ candidates, err := QueryPackages(ctx, path, "latest", mg.Selected, CheckAllowed)
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ // Return "cannot find module providing package […]" instead of whatever
+ // low-level error QueryPattern produced.
+ return module.Version{}, &ImportMissingError{Path: path, QueryErr: err}
+ } else {
+ return module.Version{}, err
+ }
+ }
+
+ candidate0MissingVersion := ""
+ for i, c := range candidates {
+ if v := mg.Selected(c.Mod.Path); gover.ModCompare(c.Mod.Path, v, c.Mod.Version) > 0 {
+ // QueryPattern proposed that we add module c.Mod to provide the package,
+ // but we already depend on a newer version of that module (and that
+ // version doesn't have the package).
+ //
+ // This typically happens when a package is present at the "@latest"
+ // version (e.g., v1.0.0) of a module, but we have a newer version
+ // of the same module in the build list (e.g., v1.0.1-beta), and
+ // the package is not present there.
+ if i == 0 {
+ candidate0MissingVersion = v
+ }
+ continue
+ }
+ return c.Mod, nil
+ }
+ return module.Version{}, &ImportMissingError{
+ Path: path,
+ Module: candidates[0].Mod,
+ newMissingVersion: candidate0MissingVersion,
+ }
+}
+
+// maybeInModule reports whether, syntactically,
+// a package with the given import path could be supplied
+// by a module with the given module path (mpath).
+func maybeInModule(path, mpath string) bool {
+ return mpath == path ||
+ len(path) > len(mpath) && path[len(mpath)] == '/' && path[:len(mpath)] == mpath
+}
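+
+// For example, with hypothetical paths:
+//
+//	maybeInModule("example.com/m/sub/pkg", "example.com/m") // true
+//	maybeInModule("example.com/m", "example.com/m")         // true
+//	maybeInModule("example.com/mx/pkg", "example.com/m")    // false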
+
+var (
+ haveGoModCache par.Cache[string, bool] // dir → bool
+ haveGoFilesCache par.ErrCache[string, bool] // dir → haveGoFiles
+)
+
+// dirInModule locates the directory that would hold the package named by the given path,
+// if it were in the module with module path mpath and root mdir.
+// If path is syntactically not within mpath,
+// or if mdir is a local file tree (isLocal == true) and the directory
+// that would hold path is in a sub-module (covered by a go.mod below mdir),
+// dirInModule returns "", false, nil.
+//
+// Otherwise, dirInModule returns the name of the directory where
+// Go source files would be expected, along with a boolean indicating
+// whether there are in fact Go source files in that directory.
+// A non-nil error indicates that the existence of the directory and/or
+// source files could not be determined, for example due to a permission error.
+func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFiles bool, err error) {
+ // Determine where to expect the package.
+ if path == mpath {
+ dir = mdir
+ } else if mpath == "" { // vendor directory
+ dir = filepath.Join(mdir, path)
+ } else if len(path) > len(mpath) && path[len(mpath)] == '/' && path[:len(mpath)] == mpath {
+ dir = filepath.Join(mdir, path[len(mpath)+1:])
+ } else {
+ return "", false, nil
+ }
+
+ // Check that there aren't other modules in the way.
+ // This check is unnecessary inside the module cache
+ // and important to skip in the vendor directory,
+ // where all the module trees have been overlaid.
+ // So we only check local module trees
+ // (the main module, and any directory trees pointed at by replace directives).
+ if isLocal {
+ for d := dir; d != mdir && len(d) > len(mdir); {
+ haveGoMod := haveGoModCache.Do(d, func() bool {
+ fi, err := fsys.Stat(filepath.Join(d, "go.mod"))
+ return err == nil && !fi.IsDir()
+ })
+
+ if haveGoMod {
+ return "", false, nil
+ }
+ parent := filepath.Dir(d)
+ if parent == d {
+ // Break the loop, as otherwise we'd loop
+ // forever if d=="." and mdir=="".
+ break
+ }
+ d = parent
+ }
+ }
+
+ // Now committed to returning dir (not "").
+
+ // Are there Go source files in the directory?
+ // We don't care about build tags, not even "+build ignore".
+ // We're just looking for a plausible directory.
+ haveGoFiles, err = haveGoFilesCache.Do(dir, func() (bool, error) {
+ // modindex.GetPackage will return ErrNotIndexed for any directories which
+ // are reached through a symlink, so that they will be handled by
+ // fsys.IsDirWithGoFiles below.
+ if ip, err := modindex.GetPackage(mdir, dir); err == nil {
+ return ip.IsDirWithGoFiles()
+ } else if !errors.Is(err, modindex.ErrNotIndexed) {
+ return false, err
+ }
+ return fsys.IsDirWithGoFiles(dir)
+ })
+
+ return dir, haveGoFiles, err
+}
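+
+// For example, for a hypothetical module example.com/m checked out at /src/m,
+//
+//	dirInModule("example.com/m/internal/foo", "example.com/m", "/src/m", true)
+//
+// looks for Go source files in /src/m/internal/foo, and instead returns
+// ("", false, nil) if a go.mod under /src/m (such as /src/m/internal/go.mod)
+// marks that subtree as a separate module.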
+
+// fetch downloads the given module (or its replacement)
+// and returns its location.
+//
+// The isLocal return value reports whether the replacement,
+// if any, is local to the filesystem.
+func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, err error) {
+ if modRoot := MainModules.ModRoot(mod); modRoot != "" {
+ return modRoot, true, nil
+ }
+ if r := Replacement(mod); r.Path != "" {
+ if r.Version == "" {
+ dir = r.Path
+ if !filepath.IsAbs(dir) {
+ dir = filepath.Join(replaceRelativeTo(), dir)
+ }
+ // Ensure that the replacement directory actually exists:
+ // dirInModule does not report errors for missing modules,
+ // so if we don't report the error now, later failures will be
+ // very mysterious.
+ if _, err := fsys.Stat(dir); err != nil {
+ // TODO(bcmills): We should also read dir/go.mod here and check its Go version,
+ // and return a gover.TooNewError if appropriate.
+
+ if os.IsNotExist(err) {
+ // Semantically the module version itself “exists” — we just don't
+ // have its source code. Remove the equivalence to os.ErrNotExist,
+ // and make the message more concise while we're at it.
+ err = fmt.Errorf("replacement directory %s does not exist", r.Path)
+ } else {
+ err = fmt.Errorf("replacement directory %s: %w", r.Path, err)
+ }
+ return dir, true, module.VersionError(mod, err)
+ }
+ return dir, true, nil
+ }
+ mod = r
+ }
+
+ if mustHaveSums() && !modfetch.HaveSum(mod) {
+ return "", false, module.VersionError(mod, &sumMissingError{})
+ }
+
+ dir, err = modfetch.Download(ctx, mod)
+ return dir, false, err
+}
+
+// mustHaveSums reports whether we require that all checksums
+// needed to load or build packages are already present in the go.sum file.
+func mustHaveSums() bool {
+ return HasModRoot() && cfg.BuildMod == "readonly" && !inWorkspaceMode()
+}
+
+type sumMissingError struct {
+ suggestion string
+}
+
+func (e *sumMissingError) Error() string {
+ return "missing go.sum entry" + e.suggestion
+}
diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go
new file mode 100644
index 0000000..eb4f5d6
--- /dev/null
+++ b/src/cmd/go/internal/modload/import_test.go
@@ -0,0 +1,97 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "context"
+ "internal/testenv"
+ "regexp"
+ "strings"
+ "testing"
+
+ "golang.org/x/mod/module"
+)
+
+var importTests = []struct {
+ path string
+ m module.Version
+ err string
+}{
+ {
+ path: "golang.org/x/net/context",
+ m: module.Version{
+ Path: "golang.org/x/net",
+ },
+ },
+ {
+ path: "golang.org/x/net",
+ err: `module golang.org/x/net@.* found \(v[01]\.\d+\.\d+\), but does not contain package golang.org/x/net`,
+ },
+ {
+ path: "golang.org/x/text",
+ m: module.Version{
+ Path: "golang.org/x/text",
+ },
+ },
+ {
+ path: "github.com/rsc/quote/buggy",
+ m: module.Version{
+ Path: "github.com/rsc/quote",
+ Version: "v1.5.2",
+ },
+ },
+ {
+ path: "github.com/rsc/quote",
+ m: module.Version{
+ Path: "github.com/rsc/quote",
+ Version: "v1.5.2",
+ },
+ },
+ {
+ path: "golang.org/x/foo/bar",
+ err: "cannot find module providing package golang.org/x/foo/bar",
+ },
+}
+
+func TestQueryImport(t *testing.T) {
+ testenv.MustHaveExternalNetwork(t)
+ testenv.MustHaveExecPath(t, "git")
+
+ oldAllowMissingModuleImports := allowMissingModuleImports
+ oldRootMode := RootMode
+ defer func() {
+ allowMissingModuleImports = oldAllowMissingModuleImports
+ RootMode = oldRootMode
+ }()
+ allowMissingModuleImports = true
+ RootMode = NoRoot
+
+ ctx := context.Background()
+ rs := LoadModFile(ctx)
+
+ for _, tt := range importTests {
+ t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) {
+ // Note that there is no build list, so Import should always fail.
+ m, err := queryImport(ctx, tt.path, rs)
+
+ if tt.err == "" {
+ if err != nil {
+ t.Fatalf("queryImport(_, %q): %v", tt.path, err)
+ }
+ } else {
+ if err == nil {
+ t.Fatalf("queryImport(_, %q) = %v, nil; expected error", tt.path, m)
+ }
+ if !regexp.MustCompile(tt.err).MatchString(err.Error()) {
+ t.Fatalf("queryImport(_, %q): error %q, want error matching %#q", tt.path, err, tt.err)
+ }
+ }
+
+ if m.Path != tt.m.Path || (tt.m.Version != "" && m.Version != tt.m.Version) {
+ t.Errorf("queryImport(_, %q) = %v, _; want %v", tt.path, m, tt.m)
+ }
+ })
+ }
+}
diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go
new file mode 100644
index 0000000..1c6f7d9
--- /dev/null
+++ b/src/cmd/go/internal/modload/init.go
@@ -0,0 +1,1979 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "internal/lazyregexp"
+ "os"
+ "path"
+ "path/filepath"
+ "slices"
+ "strconv"
+ "strings"
+ "sync"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/modconv"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/search"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+// Variables set by other packages.
+//
+// TODO(#40775): See if these can be plumbed as explicit parameters.
+var (
+ // RootMode determines whether a module root is needed.
+ RootMode Root
+
+ // ForceUseModules may be set to force modules to be enabled when
+ // GO111MODULE=auto or to report an error when GO111MODULE=off.
+ ForceUseModules bool
+
+ allowMissingModuleImports bool
+
+ // ExplicitWriteGoMod prevents LoadPackages, ListModules, and other functions
+ // from updating go.mod and go.sum or reporting errors when updates are
+ // needed. A package should set this if it would cause go.mod to be written
+ // multiple times (for example, 'go get' calls LoadPackages multiple times) or
+ // if it needs some other operation to be successful before go.mod and go.sum
+ // can be written (for example, 'go mod download' must download modules before
+ // adding sums to go.sum). Packages that set this are responsible for calling
+ // WriteGoMod explicitly.
+ ExplicitWriteGoMod bool
+)
+
+// Variables set in Init.
+var (
+ initialized bool
+
+ // These are primarily used to initialize MainModules, and should eventually
+ // be superseded by it, but they are still used in cases where the module
+ // roots are required but MainModules hasn't been initialized yet. Set to
+ // the modRoots of the main modules.
+ // modRoots != nil implies len(modRoots) > 0
+ modRoots []string
+ gopath string
+)
+
+// EnterModule resets MainModules and requirements to refer to just this one module.
+func EnterModule(ctx context.Context, enterModroot string) {
+ MainModules = nil // reset MainModules
+ requirements = nil
+ workFilePath = "" // Force module mode
+ modfetch.Reset()
+
+ modRoots = []string{enterModroot}
+ LoadModFile(ctx)
+}
+
+// Variable set in InitWorkfile
+var (
+ // Set to the path to the go.work file, or "" if workspace mode is disabled.
+ workFilePath string
+)
+
+type MainModuleSet struct {
+ // versions are the module.Version values of each of the main modules.
+ // For each of them, the Path fields are ordinary module paths and the Version
+ // fields are empty strings.
+ // versions is clipped (len=cap).
+ versions []module.Version
+
+ // modRoot maps each module in versions to its absolute filesystem path.
+ modRoot map[module.Version]string
+
+ // pathPrefix is the path prefix for packages in the module, without a trailing
+ // slash. For most modules, pathPrefix is just version.Path, but the
+ // standard-library module "std" has an empty prefix.
+ pathPrefix map[module.Version]string
+
+ // inGorootSrc caches whether modRoot is within GOROOT/src.
+ // The "std" module is special within GOROOT/src, but not otherwise.
+ inGorootSrc map[module.Version]bool
+
+ modFiles map[module.Version]*modfile.File
+
+ modContainingCWD module.Version
+
+ workFile *modfile.WorkFile
+
+ workFileReplaceMap map[module.Version]module.Version
+ // highest replaced version of each module path; empty string for wildcard-only replacements
+ highestReplaced map[string]string
+
+ indexMu sync.Mutex
+ indices map[module.Version]*modFileIndex
+}
+
+func (mms *MainModuleSet) PathPrefix(m module.Version) string {
+ return mms.pathPrefix[m]
+}
+
+// Versions returns the module.Version values of each of the main modules.
+// For each of them, the Path fields are ordinary module paths and the Version
+// fields are empty strings.
+// Callers should not modify the returned slice.
+func (mms *MainModuleSet) Versions() []module.Version {
+ if mms == nil {
+ return nil
+ }
+ return mms.versions
+}
+
+func (mms *MainModuleSet) Contains(path string) bool {
+ if mms == nil {
+ return false
+ }
+ for _, v := range mms.versions {
+ if v.Path == path {
+ return true
+ }
+ }
+ return false
+}
+
+func (mms *MainModuleSet) ModRoot(m module.Version) string {
+ if mms == nil {
+ return ""
+ }
+ return mms.modRoot[m]
+}
+
+func (mms *MainModuleSet) InGorootSrc(m module.Version) bool {
+ if mms == nil {
+ return false
+ }
+ return mms.inGorootSrc[m]
+}
+
+func (mms *MainModuleSet) mustGetSingleMainModule() module.Version {
+ if mms == nil || len(mms.versions) == 0 {
+ panic("internal error: mustGetSingleMainModule called in context with no main modules")
+ }
+ if len(mms.versions) != 1 {
+ if inWorkspaceMode() {
+ panic("internal error: mustGetSingleMainModule called in workspace mode")
+ } else {
+ panic("internal error: multiple main modules present outside of workspace mode")
+ }
+ }
+ return mms.versions[0]
+}
+
+func (mms *MainModuleSet) GetSingleIndexOrNil() *modFileIndex {
+ if mms == nil {
+ return nil
+ }
+ if len(mms.versions) == 0 {
+ return nil
+ }
+ return mms.indices[mms.mustGetSingleMainModule()]
+}
+
+func (mms *MainModuleSet) Index(m module.Version) *modFileIndex {
+ mms.indexMu.Lock()
+ defer mms.indexMu.Unlock()
+ return mms.indices[m]
+}
+
+func (mms *MainModuleSet) SetIndex(m module.Version, index *modFileIndex) {
+ mms.indexMu.Lock()
+ defer mms.indexMu.Unlock()
+ mms.indices[m] = index
+}
+
+func (mms *MainModuleSet) ModFile(m module.Version) *modfile.File {
+ return mms.modFiles[m]
+}
+
+func (mms *MainModuleSet) Len() int {
+ if mms == nil {
+ return 0
+ }
+ return len(mms.versions)
+}
+
+// ModContainingCWD returns the main module containing the working directory,
+// or module.Version{} if none of the main modules contain the working
+// directory.
+func (mms *MainModuleSet) ModContainingCWD() module.Version {
+ return mms.modContainingCWD
+}
+
+func (mms *MainModuleSet) HighestReplaced() map[string]string {
+ return mms.highestReplaced
+}
+
+// GoVersion returns the go version set on the single module, in module mode,
+// or the go.work file in workspace mode.
+func (mms *MainModuleSet) GoVersion() string {
+ if inWorkspaceMode() {
+ return gover.FromGoWork(mms.workFile)
+ }
+ if mms != nil && len(mms.versions) == 1 {
+ f := mms.ModFile(mms.mustGetSingleMainModule())
+ if f == nil {
+ // Special case: we are outside a module, like 'go run x.go'.
+ // Assume the local Go version.
+ // TODO(#49228): Clean this up; see loadModFile.
+ return gover.Local()
+ }
+ return gover.FromGoMod(f)
+ }
+ return gover.DefaultGoModVersion
+}
+
+// Toolchain returns the toolchain set in the single main module's go.mod file
+// in module mode, or in the go.work file in workspace mode.
+func (mms *MainModuleSet) Toolchain() string {
+ if inWorkspaceMode() {
+ if mms.workFile != nil && mms.workFile.Toolchain != nil {
+ return mms.workFile.Toolchain.Name
+ }
+ return "go" + mms.GoVersion()
+ }
+ if mms != nil && len(mms.versions) == 1 {
+ f := mms.ModFile(mms.mustGetSingleMainModule())
+ if f == nil {
+ // Special case: we are outside a module, like 'go run x.go'.
+ // Assume the local Go version.
+ // TODO(#49228): Clean this up; see loadModFile.
+ return gover.LocalToolchain()
+ }
+ if f.Toolchain != nil {
+ return f.Toolchain.Name
+ }
+ }
+ return "go" + mms.GoVersion()
+}
+
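+// WorkFileReplaceMap returns the replacements declared in the go.work file.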
+func (mms *MainModuleSet) WorkFileReplaceMap() map[module.Version]module.Version {
+ return mms.workFileReplaceMap
+}
+
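+// MainModules describes the set of main modules in use, set by loadModFile
+// and CreateModFile.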
+var MainModules *MainModuleSet
+
+type Root int
+
+const (
+ // AutoRoot is the default for most commands. modload.Init will look for
+ // a go.mod file in the current directory or any parent. If none is found,
+ // modules may be disabled (GO111MODULE=auto) or commands may run in a
+ // limited module mode.
+ AutoRoot Root = iota
+
+ // NoRoot is used for commands that run in module mode and ignore any go.mod
+	// file in the current directory or in parent directories.
+ NoRoot
+
+ // NeedRoot is used for commands that must run in module mode and don't
+ // make sense without a main module.
+ NeedRoot
+)
+
+// ModFile returns the parsed go.mod file.
+//
+// Note that after calling LoadPackages or LoadModGraph,
+// the require statements in the modfile.File are no longer
+// the source of truth and will be ignored: edits made directly
+// will be lost at the next call to WriteGoMod.
+// To make permanent changes to the require statements
+// in go.mod, edit it before loading.
+func ModFile() *modfile.File {
+ Init()
+ modFile := MainModules.ModFile(MainModules.mustGetSingleMainModule())
+ if modFile == nil {
+ die()
+ }
+ return modFile
+}
+
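+// BinDir returns the directory used for installed binaries: GOBIN if set,
+// otherwise the bin subdirectory of the first GOPATH entry, or the empty
+// string if that is unavailable.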
+func BinDir() string {
+ Init()
+ if cfg.GOBIN != "" {
+ return cfg.GOBIN
+ }
+ if gopath == "" {
+ return ""
+ }
+ return filepath.Join(gopath, "bin")
+}
+
+// InitWorkfile initializes the workFilePath variable for commands that
+// operate in workspace mode. It should not be called by other commands,
+// for example 'go mod tidy', that don't operate in workspace mode.
+func InitWorkfile() {
+ workFilePath = FindGoWork(base.Cwd())
+}
+
+// FindGoWork returns the name of the go.work file for this command,
+// or the empty string if there isn't one.
+// Most code should use Init and Enabled rather than use this directly.
+// It is exported mainly for Go toolchain switching, which must process
+// the go.work very early at startup.
+func FindGoWork(wd string) string {
+ if RootMode == NoRoot {
+ return ""
+ }
+
+ switch gowork := cfg.Getenv("GOWORK"); gowork {
+ case "off":
+ return ""
+ case "", "auto":
+ return findWorkspaceFile(wd)
+ default:
+ if !filepath.IsAbs(gowork) {
+ base.Fatalf("go: invalid GOWORK: not an absolute path")
+ }
+ return gowork
+ }
+}
+
+// WorkFilePath returns the absolute path of the go.work file, or "" if not in
+// workspace mode. WorkFilePath must be called after InitWorkfile.
+func WorkFilePath() string {
+ return workFilePath
+}
+
+// Reset clears all the initialized, cached state about the use of modules,
+// so that we can start over.
+func Reset() {
+ initialized = false
+ ForceUseModules = false
+ RootMode = 0
+ modRoots = nil
+ cfg.ModulesEnabled = false
+ MainModules = nil
+ requirements = nil
+ workFilePath = ""
+ modfetch.Reset()
+}
+
+// Init determines whether module mode is enabled, locates the root of the
+// current module (if any), sets environment variables for Git subprocesses, and
+// configures the cfg, codehost, load, modfetch, and search packages for use
+// with modules.
+func Init() {
+ if initialized {
+ return
+ }
+ initialized = true
+
+ // Keep in sync with WillBeEnabled. We perform extra validation here, and
+ // there are lots of diagnostics and side effects, so we can't use
+ // WillBeEnabled directly.
+ var mustUseModules bool
+ env := cfg.Getenv("GO111MODULE")
+ switch env {
+ default:
+ base.Fatalf("go: unknown environment setting GO111MODULE=%s", env)
+ case "auto":
+ mustUseModules = ForceUseModules
+ case "on", "":
+ mustUseModules = true
+ case "off":
+ if ForceUseModules {
+ base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'")
+ }
+ mustUseModules = false
+ return
+ }
+
+ if err := fsys.Init(base.Cwd()); err != nil {
+ base.Fatal(err)
+ }
+
+ // Disable any prompting for passwords by Git.
+	// This only has an effect for Git 2.3.0 or later, but avoiding
+	// the prompt in earlier versions is just too hard.
+ // If user has explicitly set GIT_TERMINAL_PROMPT=1, keep
+ // prompting.
+ // See golang.org/issue/9341 and golang.org/issue/12706.
+ if os.Getenv("GIT_TERMINAL_PROMPT") == "" {
+ os.Setenv("GIT_TERMINAL_PROMPT", "0")
+ }
+
+ // Disable any ssh connection pooling by Git.
+ // If a Git subprocess forks a child into the background to cache a new connection,
+ // that child keeps stdout/stderr open. After the Git subprocess exits,
+	// os/exec expects to be able to read from the stdout/stderr pipe
+ // until EOF to get all the data that the Git subprocess wrote before exiting.
+ // The EOF doesn't come until the child exits too, because the child
+ // is holding the write end of the pipe.
+ // This is unfortunate, but it has come up at least twice
+ // (see golang.org/issue/13453 and golang.org/issue/16104)
+ // and confuses users when it does.
+ // If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND,
+ // assume they know what they are doing and don't step on it.
+ // But default to turning off ControlMaster.
+ if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
+ os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no -o BatchMode=yes")
+ }
+
+ if os.Getenv("GCM_INTERACTIVE") == "" {
+ os.Setenv("GCM_INTERACTIVE", "never")
+ }
+ if modRoots != nil {
+ // modRoot set before Init was called ("go mod init" does this).
+ // No need to search for go.mod.
+ } else if RootMode == NoRoot {
+ if cfg.ModFile != "" && !base.InGOFLAGS("-modfile") {
+ base.Fatalf("go: -modfile cannot be used with commands that ignore the current module")
+ }
+ modRoots = nil
+ } else if workFilePath != "" {
+ // We're in workspace mode, which implies module mode.
+ if cfg.ModFile != "" {
+ base.Fatalf("go: -modfile cannot be used in workspace mode")
+ }
+ } else {
+ if modRoot := findModuleRoot(base.Cwd()); modRoot == "" {
+ if cfg.ModFile != "" {
+ base.Fatalf("go: cannot find main module, but -modfile was set.\n\t-modfile cannot be used to set the module root directory.")
+ }
+ if RootMode == NeedRoot {
+ base.Fatal(ErrNoModRoot)
+ }
+ if !mustUseModules {
+ // GO111MODULE is 'auto', and we can't find a module root.
+ // Stay in GOPATH mode.
+ return
+ }
+ } else if search.InDir(modRoot, os.TempDir()) == "." {
+ // If you create /tmp/go.mod for experimenting,
+ // then any tests that create work directories under /tmp
+ // will find it and get modules when they're not expecting them.
+ // It's a bit of a peculiar thing to disallow but quite mysterious
+ // when it happens. See golang.org/issue/26708.
+ fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir())
+ if RootMode == NeedRoot {
+ base.Fatal(ErrNoModRoot)
+ }
+ if !mustUseModules {
+ return
+ }
+ } else {
+ modRoots = []string{modRoot}
+ }
+ }
+ if cfg.ModFile != "" && !strings.HasSuffix(cfg.ModFile, ".mod") {
+ base.Fatalf("go: -modfile=%s: file does not have .mod extension", cfg.ModFile)
+ }
+
+ // We're in module mode. Set any global variables that need to be set.
+ cfg.ModulesEnabled = true
+ setDefaultBuildMod()
+ list := filepath.SplitList(cfg.BuildContext.GOPATH)
+ if len(list) > 0 && list[0] != "" {
+ gopath = list[0]
+ if _, err := fsys.Stat(filepath.Join(gopath, "go.mod")); err == nil {
+ base.Fatalf("$GOPATH/go.mod exists but should not")
+ }
+ }
+}
+
+// WillBeEnabled checks whether modules should be enabled but does not
+// initialize modules by installing hooks. If Init has already been called,
+// WillBeEnabled returns the same result as Enabled.
+//
+// This function is needed to break a cycle. The main package needs to know
+// whether modules are enabled in order to install the module or GOPATH version
+// of 'go get', but Init reads the -modfile flag in 'go get', so it shouldn't
+// be called until the command is installed and flags are parsed. Instead of
+// calling Init and Enabled, the main package can call this function.
+func WillBeEnabled() bool {
+ if modRoots != nil || cfg.ModulesEnabled {
+ // Already enabled.
+ return true
+ }
+ if initialized {
+ // Initialized, not enabled.
+ return false
+ }
+
+ // Keep in sync with Init. Init does extra validation and prints warnings or
+ // exits, so it can't call this function directly.
+ env := cfg.Getenv("GO111MODULE")
+ switch env {
+ case "on", "":
+ return true
+ case "auto":
+ break
+ default:
+ return false
+ }
+
+ return FindGoMod(base.Cwd()) != ""
+}
+
+// FindGoMod returns the name of the go.mod file for this command,
+// or the empty string if there isn't one.
+// Most code should use Init and Enabled rather than use this directly.
+// It is exported mainly for Go toolchain switching, which must process
+// the go.mod very early at startup.
+func FindGoMod(wd string) string {
+ modRoot := findModuleRoot(wd)
+ if modRoot == "" {
+ // GO111MODULE is 'auto', and we can't find a module root.
+ // Stay in GOPATH mode.
+ return ""
+ }
+ if search.InDir(modRoot, os.TempDir()) == "." {
+ // If you create /tmp/go.mod for experimenting,
+ // then any tests that create work directories under /tmp
+ // will find it and get modules when they're not expecting them.
+ // It's a bit of a peculiar thing to disallow but quite mysterious
+ // when it happens. See golang.org/issue/26708.
+ return ""
+ }
+ return filepath.Join(modRoot, "go.mod")
+}
+
+// Enabled reports whether modules are (or must be) enabled.
+// If modules are enabled but there is no main module, Enabled returns true
+// and then the first use of module information will call die
+// (usually through MustHaveModRoot).
+func Enabled() bool {
+ Init()
+ return modRoots != nil || cfg.ModulesEnabled
+}
+
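+// VendorDir returns the path of the vendor directory of the single main module.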
+func VendorDir() string {
+ return filepath.Join(MainModules.ModRoot(MainModules.mustGetSingleMainModule()), "vendor")
+}
+
+func inWorkspaceMode() bool {
+ if !initialized {
+ panic("inWorkspaceMode called before modload.Init called")
+ }
+ if !Enabled() {
+ return false
+ }
+ return workFilePath != ""
+}
+
+// HasModRoot reports whether a main module is present.
+// HasModRoot may return false even if Enabled returns true: for example, 'get'
+// does not require a main module.
+func HasModRoot() bool {
+ Init()
+ return modRoots != nil
+}
+
+// MustHaveModRoot checks that a main module or main modules are present,
+// and calls base.Fatalf if there are no main modules.
+func MustHaveModRoot() {
+ Init()
+ if !HasModRoot() {
+ die()
+ }
+}
+
+// ModFilePath returns the path that would be used for the go.mod
+// file, if in module mode. ModFilePath calls base.Fatalf if there is no main
+// module, even if -modfile is set.
+func ModFilePath() string {
+ MustHaveModRoot()
+ return modFilePath(findModuleRoot(base.Cwd()))
+}
+
+func modFilePath(modRoot string) string {
+ if cfg.ModFile != "" {
+ return cfg.ModFile
+ }
+ return filepath.Join(modRoot, "go.mod")
+}
+
+func die() {
+ if cfg.Getenv("GO111MODULE") == "off" {
+ base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'")
+ }
+ if inWorkspaceMode() {
+ base.Fatalf("go: no modules were found in the current workspace; see 'go help work'")
+ }
+ if dir, name := findAltConfig(base.Cwd()); dir != "" {
+ rel, err := filepath.Rel(base.Cwd(), dir)
+ if err != nil {
+ rel = dir
+ }
+ cdCmd := ""
+ if rel != "." {
+ cdCmd = fmt.Sprintf("cd %s && ", rel)
+ }
+ base.Fatalf("go: cannot find main module, but found %s in %s\n\tto create a module there, run:\n\t%sgo mod init", name, dir, cdCmd)
+ }
+ base.Fatal(ErrNoModRoot)
+}
+
+var ErrNoModRoot = errors.New("go.mod file not found in current directory or any parent directory; see 'go help modules'")
+
+type goModDirtyError struct{}
+
+func (goModDirtyError) Error() string {
+ if cfg.BuildModExplicit {
+ return fmt.Sprintf("updates to go.mod needed, disabled by -mod=%v; to update it:\n\tgo mod tidy", cfg.BuildMod)
+ }
+ if cfg.BuildModReason != "" {
+ return fmt.Sprintf("updates to go.mod needed, disabled by -mod=%s\n\t(%s)\n\tto update it:\n\tgo mod tidy", cfg.BuildMod, cfg.BuildModReason)
+ }
+ return "updates to go.mod needed; to update it:\n\tgo mod tidy"
+}
+
+var errGoModDirty error = goModDirtyError{}
+
+func loadWorkFile(path string) (workFile *modfile.WorkFile, modRoots []string, err error) {
+ workDir := filepath.Dir(path)
+ wf, err := ReadWorkFile(path)
+ if err != nil {
+ return nil, nil, err
+ }
+ seen := map[string]bool{}
+ for _, d := range wf.Use {
+ modRoot := d.Path
+ if !filepath.IsAbs(modRoot) {
+ modRoot = filepath.Join(workDir, modRoot)
+ }
+
+ if seen[modRoot] {
+ return nil, nil, fmt.Errorf("path %s appears multiple times in workspace", modRoot)
+ }
+ seen[modRoot] = true
+ modRoots = append(modRoots, modRoot)
+ }
+
+ return wf, modRoots, nil
+}
+
+// ReadWorkFile reads and parses the go.work file at the given path.
+func ReadWorkFile(path string) (*modfile.WorkFile, error) {
+ workData, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ f, err := modfile.ParseWork(path, workData, nil)
+ if err != nil {
+ return nil, err
+ }
+ if f.Go != nil && gover.Compare(f.Go.Version, gover.Local()) > 0 && cfg.CmdName != "work edit" {
+ base.Fatal(&gover.TooNewError{What: base.ShortPath(path), GoVersion: f.Go.Version})
+ }
+ return f, nil
+}
+
+// WriteWorkFile cleans and writes out the go.work file to the given path.
+func WriteWorkFile(path string, wf *modfile.WorkFile) error {
+ wf.SortBlocks()
+ wf.Cleanup()
+ out := modfile.Format(wf.Syntax)
+
+ return os.WriteFile(path, out, 0666)
+}
+
+// UpdateWorkGoVersion updates the go line in wf to be at least goVers,
+// reporting whether it changed the file.
+func UpdateWorkGoVersion(wf *modfile.WorkFile, goVers string) (changed bool) {
+ old := gover.FromGoWork(wf)
+ if gover.Compare(old, goVers) >= 0 {
+ return false
+ }
+
+ wf.AddGoStmt(goVers)
+
+ // We wrote a new go line. For reproducibility,
+ // if the toolchain running right now is newer than the new toolchain line,
+ // update the toolchain line to record the newer toolchain.
+ // The user never sets the toolchain explicitly in a 'go work' command,
+ // so this is only happening as a result of a go or toolchain line found
+ // in a module.
+ // If the toolchain running right now is a dev toolchain (like "go1.21")
+ // writing 'toolchain go1.21' will not be useful, since that's not an actual
+ // toolchain you can download and run. In that case fall back to at least
+ // checking that the toolchain is new enough for the Go version.
+ toolchain := "go" + old
+ if wf.Toolchain != nil {
+ toolchain = wf.Toolchain.Name
+ }
+ if gover.IsLang(gover.Local()) {
+ toolchain = gover.ToolchainMax(toolchain, "go"+goVers)
+ } else {
+ toolchain = gover.ToolchainMax(toolchain, "go"+gover.Local())
+ }
+
+ // Drop the toolchain line if it is implied by the go line
+ // or if it is asking for a toolchain older than Go 1.21,
+ // which will not understand the toolchain line.
+ if toolchain == "go"+goVers || gover.Compare(gover.FromToolchain(toolchain), gover.GoStrictVersion) < 0 {
+ wf.DropToolchainStmt()
+ } else {
+ wf.AddToolchainStmt(toolchain)
+ }
+ return true
+}
+
+// UpdateWorkFile updates comments on the use directives in the go.work
+// file to include the associated module path.
+func UpdateWorkFile(wf *modfile.WorkFile) {
+ missingModulePaths := map[string]string{} // module directory listed in file -> abspath modroot
+
+ for _, d := range wf.Use {
+ if d.Path == "" {
+ continue // d is marked for deletion.
+ }
+ modRoot := d.Path
+ if d.ModulePath == "" {
+ missingModulePaths[d.Path] = modRoot
+ }
+ }
+
+ // Clean up and annotate directories.
+ // TODO(matloob): update x/mod to actually add module paths.
+ for moddir, absmodroot := range missingModulePaths {
+ _, f, err := ReadModFile(filepath.Join(absmodroot, "go.mod"), nil)
+ if err != nil {
+ continue // Error will be reported if modules are loaded.
+ }
+ wf.AddUse(moddir, f.Module.Mod.Path)
+ }
+}
+
+// LoadModFile initializes the set of main modules and, if there is a main
+// module, parses the initial build list from its go.mod file.
+//
+// LoadModFile may make changes in memory, like adding a go directive and
+// ensuring requirements are consistent. The caller is responsible for ensuring
+// those changes are written to disk by calling LoadPackages or ListModules
+// (unless ExplicitWriteGoMod is set) or by calling WriteGoMod directly.
+//
+// As a side-effect, LoadModFile may change cfg.BuildMod to "vendor" if
+// -mod wasn't set explicitly and automatic vendoring should be enabled.
+//
+// If LoadModFile or CreateModFile has already been called, LoadModFile returns
+// the existing in-memory requirements (rather than re-reading them from disk).
+//
+// LoadModFile checks the roots of the module graph for consistency with each
+// other, but unlike LoadModGraph does not load the full module graph or check
+// it for global consistency. Most callers outside of the modload package should
+// use LoadModGraph instead.
+func LoadModFile(ctx context.Context) *Requirements {
+ rs, err := loadModFile(ctx, nil)
+ if err != nil {
+ base.Fatal(err)
+ }
+ return rs
+}
+
+func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) {
+ if requirements != nil {
+ return requirements, nil
+ }
+
+ Init()
+ var workFile *modfile.WorkFile
+ if inWorkspaceMode() {
+ var err error
+ workFile, modRoots, err = loadWorkFile(workFilePath)
+ if err != nil {
+ return nil, fmt.Errorf("reading go.work: %w", err)
+ }
+ for _, modRoot := range modRoots {
+ sumFile := strings.TrimSuffix(modFilePath(modRoot), ".mod") + ".sum"
+ modfetch.WorkspaceGoSumFiles = append(modfetch.WorkspaceGoSumFiles, sumFile)
+ }
+ modfetch.GoSumFile = workFilePath + ".sum"
+ } else if len(modRoots) == 0 {
+ // We're in module mode, but not inside a module.
+ //
+ // Commands like 'go build', 'go run', 'go list' have no go.mod file to
+ // read or write. They would need to find and download the latest versions
+ // of a potentially large number of modules with no way to save version
+ // information. We can succeed slowly (but not reproducibly), but that's
+ // not usually a good experience.
+ //
+ // Instead, we forbid resolving import paths to modules other than std and
+ // cmd. Users may still build packages specified with .go files on the
+ // command line, but they'll see an error if those files import anything
+ // outside std.
+ //
+ // This can be overridden by calling AllowMissingModuleImports.
+ // For example, 'go get' does this, since it is expected to resolve paths.
+ //
+ // See golang.org/issue/32027.
+ } else {
+ modfetch.GoSumFile = strings.TrimSuffix(modFilePath(modRoots[0]), ".mod") + ".sum"
+ }
+ if len(modRoots) == 0 {
+ // TODO(#49228): Instead of creating a fake module with an empty modroot,
+ // make MainModules.Len() == 0 mean that we're in module mode but not inside
+ // any module.
+ mainModule := module.Version{Path: "command-line-arguments"}
+ MainModules = makeMainModules([]module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil)
+ var (
+ goVersion string
+ pruning modPruning
+ roots []module.Version
+ direct = map[string]bool{"go": true}
+ )
+ if inWorkspaceMode() {
+ // Since we are in a workspace, the Go version for the synthetic
+ // "command-line-arguments" module must not exceed the Go version
+ // for the workspace.
+ goVersion = MainModules.GoVersion()
+ pruning = workspace
+ roots = []module.Version{
+ mainModule,
+ {Path: "go", Version: goVersion},
+ {Path: "toolchain", Version: gover.LocalToolchain()},
+ }
+ } else {
+ goVersion = gover.Local()
+ pruning = pruningForGoVersion(goVersion)
+ roots = []module.Version{
+ {Path: "go", Version: goVersion},
+ {Path: "toolchain", Version: gover.LocalToolchain()},
+ }
+ }
+ rawGoVersion.Store(mainModule, goVersion)
+ requirements = newRequirements(pruning, roots, direct)
+ if cfg.BuildMod == "vendor" {
+ // For issue 56536: Some users may have GOFLAGS=-mod=vendor set.
+ // Make sure it behaves as though the fake module is vendored
+ // with no dependencies.
+ requirements.initVendor(nil)
+ }
+ return requirements, nil
+ }
+
+ var modFiles []*modfile.File
+ var mainModules []module.Version
+ var indices []*modFileIndex
+ var errs []error
+ for _, modroot := range modRoots {
+ gomod := modFilePath(modroot)
+ var fixed bool
+ data, f, err := ReadModFile(gomod, fixVersion(ctx, &fixed))
+ if err != nil {
+ if inWorkspaceMode() {
+ if tooNew, ok := err.(*gover.TooNewError); ok && !strings.HasPrefix(cfg.CmdName, "work ") {
+ // Switching to a newer toolchain won't help - the go.work has the wrong version.
+ // Report this more specific error, unless we are a command like 'go work use'
+ // or 'go work sync', which will fix the problem after the caller sees the TooNewError
+ // and switches to a newer toolchain.
+ err = errWorkTooOld(gomod, workFile, tooNew.GoVersion)
+ } else {
+ err = fmt.Errorf("cannot load module %s listed in go.work file: %w",
+ base.ShortPath(filepath.Dir(gomod)), err)
+ }
+ }
+ errs = append(errs, err)
+ continue
+ }
+ if inWorkspaceMode() && !strings.HasPrefix(cfg.CmdName, "work ") {
+ // Refuse to use workspace if its go version is too old.
+ // Disable this check if we are a workspace command like work use or work sync,
+ // which will fix the problem.
+ mv := gover.FromGoMod(f)
+ wv := gover.FromGoWork(workFile)
+ if gover.Compare(mv, wv) > 0 && gover.Compare(mv, gover.GoStrictVersion) >= 0 {
+ errs = append(errs, errWorkTooOld(gomod, workFile, mv))
+ continue
+ }
+ }
+
+ modFiles = append(modFiles, f)
+ mainModule := f.Module.Mod
+ mainModules = append(mainModules, mainModule)
+ indices = append(indices, indexModFile(data, f, mainModule, fixed))
+
+ if err := module.CheckImportPath(f.Module.Mod.Path); err != nil {
+ if pathErr, ok := err.(*module.InvalidPathError); ok {
+ pathErr.Kind = "module"
+ }
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) > 0 {
+ return nil, errors.Join(errs...)
+ }
+
+ MainModules = makeMainModules(mainModules, modRoots, modFiles, indices, workFile)
+ setDefaultBuildMod() // possibly enable automatic vendoring
+ rs := requirementsFromModFiles(ctx, workFile, modFiles, opts)
+
+ if inWorkspaceMode() {
+ // We don't need to do anything for vendor or update the mod file so
+ // return early.
+ requirements = rs
+ return rs, nil
+ }
+
+ mainModule := MainModules.mustGetSingleMainModule()
+
+ if cfg.BuildMod == "vendor" {
+ readVendorList(mainModule)
+ index := MainModules.Index(mainModule)
+ modFile := MainModules.ModFile(mainModule)
+ checkVendorConsistency(index, modFile)
+ rs.initVendor(vendorList)
+ }
+
+ if rs.hasRedundantRoot() {
+ // If any module path appears more than once in the roots, we know that the
+ // go.mod file needs to be updated even though we have not yet loaded any
+ // transitive dependencies.
+ var err error
+ rs, err = updateRoots(ctx, rs.direct, rs, nil, nil, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace {
+ // TODO(#45551): Do something more principled instead of checking
+ // cfg.CmdName directly here.
+ if cfg.BuildMod == "mod" && cfg.CmdName != "mod graph" && cfg.CmdName != "mod why" {
+ // go line is missing from go.mod; add one there and add to derived requirements.
+ v := gover.Local()
+ if opts != nil && opts.TidyGoVersion != "" {
+ v = opts.TidyGoVersion
+ }
+ addGoStmt(MainModules.ModFile(mainModule), mainModule, v)
+ rs = overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: v}})
+
+ // We need to add a 'go' version to the go.mod file, but we must assume
+ // that its existing contents match something between Go 1.11 and 1.16.
+ // Go 1.11 through 1.16 do not support graph pruning, but the latest Go
+ // version uses a pruned module graph — so we need to convert the
+ // requirements to support pruning.
+ if gover.Compare(v, gover.ExplicitIndirectVersion) >= 0 {
+ var err error
+ rs, err = convertPruning(ctx, rs, pruned)
+ if err != nil {
+ return nil, err
+ }
+ }
+ } else {
+ rawGoVersion.Store(mainModule, gover.DefaultGoModVersion)
+ }
+ }
+
+ requirements = rs
+ return requirements, nil
+}
+
+func errWorkTooOld(gomod string, wf *modfile.WorkFile, goVers string) error {
+ return fmt.Errorf("module %s listed in go.work file requires go >= %s, but go.work lists go %s; to update it:\n\tgo work use",
+ base.ShortPath(filepath.Dir(gomod)), goVers, gover.FromGoWork(wf))
+}
+
+// CreateModFile initializes a new module by creating a go.mod file.
+//
+// If modPath is empty, CreateModFile will attempt to infer the path from the
+// directory location within GOPATH.
+//
+// If a vendoring configuration file is present, CreateModFile will attempt to
+// translate it to go.mod directives. The resulting build list may not be
+// exactly the same as in the legacy configuration (for example, we can't get
+// packages at multiple versions from the same module).
+func CreateModFile(ctx context.Context, modPath string) {
+ modRoot := base.Cwd()
+ modRoots = []string{modRoot}
+ Init()
+ modFilePath := modFilePath(modRoot)
+ if _, err := fsys.Stat(modFilePath); err == nil {
+ base.Fatalf("go: %s already exists", modFilePath)
+ }
+
+ if modPath == "" {
+ var err error
+ modPath, err = findModulePath(modRoot)
+ if err != nil {
+ base.Fatal(err)
+ }
+ } else if err := module.CheckImportPath(modPath); err != nil {
+ if pathErr, ok := err.(*module.InvalidPathError); ok {
+ pathErr.Kind = "module"
+ // Same as build.IsLocalPath()
+ if pathErr.Path == "." || pathErr.Path == ".." ||
+ strings.HasPrefix(pathErr.Path, "./") || strings.HasPrefix(pathErr.Path, "../") {
+ pathErr.Err = errors.New("is a local import path")
+ }
+ }
+ base.Fatal(err)
+ } else if _, _, ok := module.SplitPathVersion(modPath); !ok {
+ if strings.HasPrefix(modPath, "gopkg.in/") {
+ invalidMajorVersionMsg := fmt.Errorf("module paths beginning with gopkg.in/ must always have a major version suffix in the form of .vN:\n\tgo mod init %s", suggestGopkgIn(modPath))
+ base.Fatalf(`go: invalid module path "%v": %v`, modPath, invalidMajorVersionMsg)
+ }
+ invalidMajorVersionMsg := fmt.Errorf("major version suffixes must be in the form of /vN and are only allowed for v2 or later:\n\tgo mod init %s", suggestModulePath(modPath))
+ base.Fatalf(`go: invalid module path "%v": %v`, modPath, invalidMajorVersionMsg)
+ }
+
+ fmt.Fprintf(os.Stderr, "go: creating new go.mod: module %s\n", modPath)
+ modFile := new(modfile.File)
+ modFile.AddModuleStmt(modPath)
+ MainModules = makeMainModules([]module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil)
+ addGoStmt(modFile, modFile.Module.Mod, gover.Local()) // Add the go directive before converted module requirements.
+
+ convertedFrom, err := convertLegacyConfig(modFile, modRoot)
+ if convertedFrom != "" {
+ fmt.Fprintf(os.Stderr, "go: copying requirements from %s\n", base.ShortPath(convertedFrom))
+ }
+ if err != nil {
+ base.Fatal(err)
+ }
+
+ rs := requirementsFromModFiles(ctx, nil, []*modfile.File{modFile}, nil)
+ rs, err = updateRoots(ctx, rs.direct, rs, nil, nil, false)
+ if err != nil {
+ base.Fatal(err)
+ }
+ requirements = rs
+ if err := commitRequirements(ctx, WriteOpts{}); err != nil {
+ base.Fatal(err)
+ }
+
+ // Suggest running 'go mod tidy' unless the project is empty. Even if we
+ // imported all the correct requirements above, we're probably missing
+ // some sums, so the next build command in -mod=readonly will likely fail.
+ //
+ // We look for non-hidden .go files or subdirectories to determine whether
+ // this is an existing project. Walking the tree for packages would be more
+ // accurate, but could take much longer.
+ empty := true
+ files, _ := os.ReadDir(modRoot)
+ for _, f := range files {
+ name := f.Name()
+ if strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_") {
+ continue
+ }
+ if strings.HasSuffix(name, ".go") || f.IsDir() {
+ empty = false
+ break
+ }
+ }
+ if !empty {
+ fmt.Fprintf(os.Stderr, "go: to add module requirements and sums:\n\tgo mod tidy\n")
+ }
+}
+
+// fixVersion returns a modfile.VersionFixer implemented using the Query function.
+//
+// It resolves commit hashes and branch names to versions,
+// canonicalizes versions that appeared in early vgo drafts,
+// and does nothing for versions that already appear to be canonical.
+//
+// The VersionFixer sets 'fixed' if it ever returns a non-canonical version.
+func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer {
+ return func(path, vers string) (resolved string, err error) {
+ defer func() {
+ if err == nil && resolved != vers {
+ *fixed = true
+ }
+ }()
+
+ // Special case: remove the old -gopkgin- hack.
+ if strings.HasPrefix(path, "gopkg.in/") && strings.Contains(vers, "-gopkgin-") {
+ vers = vers[strings.Index(vers, "-gopkgin-")+len("-gopkgin-"):]
+ }
+
+ // fixVersion is called speculatively on every
+ // module, version pair from every go.mod file.
+ // Avoid the query if it looks OK.
+ _, pathMajor, ok := module.SplitPathVersion(path)
+ if !ok {
+ return "", &module.ModuleError{
+ Path: path,
+ Err: &module.InvalidVersionError{
+ Version: vers,
+ Err: fmt.Errorf("malformed module path %q", path),
+ },
+ }
+ }
+ if vers != "" && module.CanonicalVersion(vers) == vers {
+ if err := module.CheckPathMajor(vers, pathMajor); err != nil {
+ return "", module.VersionError(module.Version{Path: path, Version: vers}, err)
+ }
+ return vers, nil
+ }
+
+ info, err := Query(ctx, path, vers, "", nil)
+ if err != nil {
+ return "", err
+ }
+ return info.Version, nil
+ }
+}
+
+// AllowMissingModuleImports allows import paths to be resolved to modules
+// when there is no module root. Normally, this is forbidden because it's slow
+// and there's no way to make the result reproducible, but some commands
+// like 'go get' are expected to do this.
+//
+// This function affects the default cfg.BuildMod when outside of a module,
+// so it can only be called prior to Init.
+func AllowMissingModuleImports() {
+ if initialized {
+ panic("AllowMissingModuleImports after Init")
+ }
+ allowMissingModuleImports = true
+}
+
+// makeMainModules creates a MainModuleSet and associated variables according to
+// the given main modules.
+func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile.File, indices []*modFileIndex, workFile *modfile.WorkFile) *MainModuleSet {
+ for _, m := range ms {
+ if m.Version != "" {
+			panic(fmt.Sprintf("makeMainModules called with module.Version with non-empty Version field: %#v", m))
+ }
+ }
+ modRootContainingCWD := findModuleRoot(base.Cwd())
+ mainModules := &MainModuleSet{
+ versions: slices.Clip(ms),
+ inGorootSrc: map[module.Version]bool{},
+ pathPrefix: map[module.Version]string{},
+ modRoot: map[module.Version]string{},
+ modFiles: map[module.Version]*modfile.File{},
+ indices: map[module.Version]*modFileIndex{},
+ highestReplaced: map[string]string{},
+ workFile: workFile,
+ }
+ var workFileReplaces []*modfile.Replace
+ if workFile != nil {
+ workFileReplaces = workFile.Replace
+ mainModules.workFileReplaceMap = toReplaceMap(workFile.Replace)
+ }
+ mainModulePaths := make(map[string]bool)
+ for _, m := range ms {
+ if mainModulePaths[m.Path] {
+ base.Errorf("go: module %s appears multiple times in workspace", m.Path)
+ }
+ mainModulePaths[m.Path] = true
+ }
+ replacedByWorkFile := make(map[string]bool)
+ replacements := make(map[module.Version]module.Version)
+ for _, r := range workFileReplaces {
+ if mainModulePaths[r.Old.Path] && r.Old.Version == "" {
+ base.Errorf("go: workspace module %v is replaced at all versions in the go.work file. To fix, remove the replacement from the go.work file or specify the version at which to replace the module.", r.Old.Path)
+ }
+ replacedByWorkFile[r.Old.Path] = true
+ v, ok := mainModules.highestReplaced[r.Old.Path]
+ if !ok || gover.ModCompare(r.Old.Path, r.Old.Version, v) > 0 {
+ mainModules.highestReplaced[r.Old.Path] = r.Old.Version
+ }
+ replacements[r.Old] = r.New
+ }
+ for i, m := range ms {
+ mainModules.pathPrefix[m] = m.Path
+ mainModules.modRoot[m] = rootDirs[i]
+ mainModules.modFiles[m] = modFiles[i]
+ mainModules.indices[m] = indices[i]
+
+ if mainModules.modRoot[m] == modRootContainingCWD {
+ mainModules.modContainingCWD = m
+ }
+
+ if rel := search.InDir(rootDirs[i], cfg.GOROOTsrc); rel != "" {
+ mainModules.inGorootSrc[m] = true
+ if m.Path == "std" {
+ // The "std" module in GOROOT/src is the Go standard library. Unlike other
+ // modules, the packages in the "std" module have no import-path prefix.
+ //
+ // Modules named "std" outside of GOROOT/src do not receive this special
+ // treatment, so it is possible to run 'go test .' in other GOROOTs to
+ // test individual packages using a combination of the modified package
+ // and the ordinary standard library.
+ // (See https://golang.org/issue/30756.)
+ mainModules.pathPrefix[m] = ""
+ }
+ }
+
+ if modFiles[i] != nil {
+ curModuleReplaces := make(map[module.Version]bool)
+ for _, r := range modFiles[i].Replace {
+ if replacedByWorkFile[r.Old.Path] {
+ continue
+ }
+ var newV module.Version = r.New
+ if WorkFilePath() != "" && newV.Version == "" && !filepath.IsAbs(newV.Path) {
+ // Since we are in a workspace, we may be loading replacements from
+ // multiple go.mod files. Relative paths in those replacement are
+ // relative to the go.mod file, not the workspace, so the same string
+ // may refer to two different paths and different strings may refer to
+ // the same path. Convert them all to be absolute instead.
+ //
+ // (We could do this outside of a workspace too, but it would mean that
+ // replacement paths in error strings needlessly differ from what's in
+ // the go.mod file.)
+ newV.Path = filepath.Join(rootDirs[i], newV.Path)
+ }
+ if prev, ok := replacements[r.Old]; ok && !curModuleReplaces[r.Old] && prev != newV {
+ base.Fatalf("go: conflicting replacements for %v:\n\t%v\n\t%v\nuse \"go work edit -replace %v=[override]\" to resolve", r.Old, prev, newV, r.Old)
+ }
+ curModuleReplaces[r.Old] = true
+ replacements[r.Old] = newV
+
+ v, ok := mainModules.highestReplaced[r.Old.Path]
+ if !ok || gover.ModCompare(r.Old.Path, r.Old.Version, v) > 0 {
+ mainModules.highestReplaced[r.Old.Path] = r.Old.Version
+ }
+ }
+ }
+ }
+ return mainModules
+}
+
+// requirementsFromModFiles returns the set of non-excluded requirements from
+// the given go.mod files (and, in workspace mode, the go.work file).
+func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, modFiles []*modfile.File, opts *PackageOpts) *Requirements {
+ var roots []module.Version
+ direct := map[string]bool{}
+ var pruning modPruning
+ var goVersion, toolchain string
+ if inWorkspaceMode() {
+ pruning = workspace
+ roots = make([]module.Version, len(MainModules.Versions()), 2+len(MainModules.Versions()))
+ copy(roots, MainModules.Versions())
+ goVersion = gover.FromGoWork(workFile)
+ if workFile.Toolchain != nil {
+ toolchain = workFile.Toolchain.Name
+ }
+ } else {
+ pruning = pruningForGoVersion(MainModules.GoVersion())
+ if len(modFiles) != 1 {
+ panic(fmt.Errorf("requirementsFromModFiles called with %v modfiles outside workspace mode", len(modFiles)))
+ }
+ modFile := modFiles[0]
+ roots = make([]module.Version, 0, 2+len(modFile.Require))
+ mm := MainModules.mustGetSingleMainModule()
+ for _, r := range modFile.Require {
+ if index := MainModules.Index(mm); index != nil && index.exclude[r.Mod] {
+ if cfg.BuildMod == "mod" {
+ fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
+ } else {
+ fmt.Fprintf(os.Stderr, "go: ignoring requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
+ }
+ continue
+ }
+
+ roots = append(roots, r.Mod)
+ if !r.Indirect {
+ direct[r.Mod.Path] = true
+ }
+ }
+ goVersion = gover.FromGoMod(modFile)
+ if modFile.Toolchain != nil {
+ toolchain = modFile.Toolchain.Name
+ }
+ }
+
+ // Add explicit go and toolchain versions, inferring as needed.
+ roots = append(roots, module.Version{Path: "go", Version: goVersion})
+ direct["go"] = true // Every module directly uses the language and runtime.
+
+ if toolchain != "" {
+ roots = append(roots, module.Version{Path: "toolchain", Version: toolchain})
+ // Leave the toolchain as indirect: nothing in the user's module directly
+ // imports a package from the toolchain, and (like an indirect dependency in
+ // a module without graph pruning) we may remove the toolchain line
+ // automatically if the 'go' version is changed so that it implies the exact
+ // same toolchain.
+ }
+
+ gover.ModSort(roots)
+ rs := newRequirements(pruning, roots, direct)
+ return rs
+}
+
+// setDefaultBuildMod sets a default value for cfg.BuildMod if the -mod flag
+// wasn't provided. setDefaultBuildMod may be called multiple times.
+func setDefaultBuildMod() {
+ if cfg.BuildModExplicit {
+ if inWorkspaceMode() && cfg.BuildMod != "readonly" {
+ base.Fatalf("go: -mod may only be set to readonly when in workspace mode, but it is set to %q"+
+ "\n\tRemove the -mod flag to use the default readonly value,"+
+ "\n\tor set GOWORK=off to disable workspace mode.", cfg.BuildMod)
+ }
+ // Don't override an explicit '-mod=' argument.
+ return
+ }
+
+ // TODO(#40775): commands should pass in the module mode as an option
+ // to modload functions instead of relying on an implicit setting
+ // based on command name.
+ switch cfg.CmdName {
+ case "get", "mod download", "mod init", "mod tidy", "work sync":
+ // These commands are intended to update go.mod and go.sum.
+ cfg.BuildMod = "mod"
+ return
+ case "mod graph", "mod verify", "mod why":
+ // These commands should not update go.mod or go.sum, but they should be
+ // able to fetch modules not in go.sum and should not report errors if
+ // go.mod is inconsistent. They're useful for debugging, and they need
+ // to work in buggy situations.
+ cfg.BuildMod = "mod"
+ return
+ case "mod vendor":
+ cfg.BuildMod = "readonly"
+ return
+ }
+ if modRoots == nil {
+ if allowMissingModuleImports {
+ cfg.BuildMod = "mod"
+ } else {
+ cfg.BuildMod = "readonly"
+ }
+ return
+ }
+
+ if len(modRoots) == 1 && !inWorkspaceMode() {
+ index := MainModules.GetSingleIndexOrNil()
+ if fi, err := fsys.Stat(filepath.Join(modRoots[0], "vendor")); err == nil && fi.IsDir() {
+ modGo := "unspecified"
+ if index != nil && index.goVersion != "" {
+ if gover.Compare(index.goVersion, "1.14") >= 0 {
+ // The Go version is at least 1.14, and a vendor directory exists.
+ // Set -mod=vendor by default.
+ cfg.BuildMod = "vendor"
+ cfg.BuildModReason = "Go version in go.mod is at least 1.14 and vendor directory exists."
+ return
+ } else {
+ modGo = index.goVersion
+ }
+ }
+
+ // Since a vendor directory exists, we should record why we didn't use it.
+ // This message won't normally be shown, but it may appear with import errors.
+ cfg.BuildModReason = fmt.Sprintf("Go version in go.mod is %s, so vendor directory was not used.", modGo)
+ }
+ }
+
+ cfg.BuildMod = "readonly"
+}
+
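+// mustHaveCompleteRequirements reports whether the requirements recorded in
+// go.mod and go.sum must be complete as written, which is the case except in
+// -mod=mod or workspace mode.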
+func mustHaveCompleteRequirements() bool {
+ return cfg.BuildMod != "mod" && !inWorkspaceMode()
+}
+
+// convertLegacyConfig imports module requirements from a legacy vendoring
+// configuration file, if one is present.
+func convertLegacyConfig(modFile *modfile.File, modRoot string) (from string, err error) {
+ noneSelected := func(path string) (version string) { return "none" }
+ queryPackage := func(path, rev string) (module.Version, error) {
+ pkgMods, modOnly, err := QueryPattern(context.Background(), path, rev, noneSelected, nil)
+ if err != nil {
+ return module.Version{}, err
+ }
+ if len(pkgMods) > 0 {
+ return pkgMods[0].Mod, nil
+ }
+ return modOnly.Mod, nil
+ }
+ for _, name := range altConfigs {
+ cfg := filepath.Join(modRoot, name)
+ data, err := os.ReadFile(cfg)
+ if err == nil {
+ convert := modconv.Converters[name]
+ if convert == nil {
+ return "", nil
+ }
+ cfg = filepath.ToSlash(cfg)
+ err := modconv.ConvertLegacyConfig(modFile, cfg, data, queryPackage)
+ return name, err
+ }
+ }
+ return "", nil
+}
+
+// addGoStmt adds a go directive recording version v to the go.mod file if the
+// file does not already include one.
+func addGoStmt(modFile *modfile.File, mod module.Version, v string) {
+ if modFile.Go != nil && modFile.Go.Version != "" {
+ return
+ }
+ forceGoStmt(modFile, mod, v)
+}
+
+func forceGoStmt(modFile *modfile.File, mod module.Version, v string) {
+ if err := modFile.AddGoStmt(v); err != nil {
+ base.Fatalf("go: internal error: %v", err)
+ }
+ rawGoVersion.Store(mod, v)
+}
+
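+// altConfigs lists configuration files left behind by legacy (pre-modules)
+// dependency tools; their presence is used to suggest 'go mod init' and to
+// seed requirements when converting to modules.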
+var altConfigs = []string{
+ "Gopkg.lock",
+
+ "GLOCKFILE",
+ "Godeps/Godeps.json",
+ "dependencies.tsv",
+ "glide.lock",
+ "vendor.conf",
+ "vendor.yml",
+ "vendor/manifest",
+ "vendor/vendor.json",
+
+ ".git/config",
+}
+
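+// findModuleRoot searches dir and its parents for a go.mod file and returns
+// the directory containing it, or the empty string if none is found.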
+func findModuleRoot(dir string) (roots string) {
+ if dir == "" {
+ panic("dir not set")
+ }
+ dir = filepath.Clean(dir)
+
+ // Look for enclosing go.mod.
+ for {
+ if fi, err := fsys.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
+ return dir
+ }
+ d := filepath.Dir(dir)
+ if d == dir {
+ break
+ }
+ dir = d
+ }
+ return ""
+}
+
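+// findWorkspaceFile searches dir and its parents for a go.work file and
+// returns its path, or the empty string if none is found before reaching
+// GOROOT or the filesystem root.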
+func findWorkspaceFile(dir string) (root string) {
+ if dir == "" {
+ panic("dir not set")
+ }
+ dir = filepath.Clean(dir)
+
+	// Look for enclosing go.work.
+ for {
+ f := filepath.Join(dir, "go.work")
+ if fi, err := fsys.Stat(f); err == nil && !fi.IsDir() {
+ return f
+ }
+ d := filepath.Dir(dir)
+ if d == dir {
+ break
+ }
+ if d == cfg.GOROOT {
+ // As a special case, don't cross GOROOT to find a go.work file.
+			// The standard library and the commands built into the go distribution
+			// always use their vendored dependencies, so avoid picking up a most
+			// likely irrelevant go.work file.
+ return ""
+ }
+ dir = d
+ }
+ return ""
+}
+
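+// findAltConfig searches dir and its parents for one of the legacy
+// configuration files listed in altConfigs and returns the directory and file
+// name found, or empty strings if there is none.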
+func findAltConfig(dir string) (root, name string) {
+ if dir == "" {
+ panic("dir not set")
+ }
+ dir = filepath.Clean(dir)
+ if rel := search.InDir(dir, cfg.BuildContext.GOROOT); rel != "" {
+ // Don't suggest creating a module from $GOROOT/.git/config
+ // or a config file found in any parent of $GOROOT (see #34191).
+ return "", ""
+ }
+ for {
+ for _, name := range altConfigs {
+ if fi, err := fsys.Stat(filepath.Join(dir, name)); err == nil && !fi.IsDir() {
+ return dir, name
+ }
+ }
+ d := filepath.Dir(dir)
+ if d == dir {
+ break
+ }
+ dir = d
+ }
+ return "", ""
+}
+
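+// findModulePath attempts to infer a module path for the module rooted at dir
+// from import comments, legacy vendoring metadata, or dir's location within
+// GOPATH.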
+func findModulePath(dir string) (string, error) {
+ // TODO(bcmills): once we have located a plausible module path, we should
+ // query version control (if available) to verify that it matches the major
+ // version of the most recent tag.
+ // See https://golang.org/issue/29433, https://golang.org/issue/27009, and
+ // https://golang.org/issue/31549.
+
+ // Cast about for import comments,
+ // first in top-level directory, then in subdirectories.
+ list, _ := os.ReadDir(dir)
+ for _, info := range list {
+ if info.Type().IsRegular() && strings.HasSuffix(info.Name(), ".go") {
+ if com := findImportComment(filepath.Join(dir, info.Name())); com != "" {
+ return com, nil
+ }
+ }
+ }
+ for _, info1 := range list {
+ if info1.IsDir() {
+ files, _ := os.ReadDir(filepath.Join(dir, info1.Name()))
+ for _, info2 := range files {
+ if info2.Type().IsRegular() && strings.HasSuffix(info2.Name(), ".go") {
+ if com := findImportComment(filepath.Join(dir, info1.Name(), info2.Name())); com != "" {
+ return path.Dir(com), nil
+ }
+ }
+ }
+ }
+ }
+
+ // Look for Godeps.json declaring import path.
+ data, _ := os.ReadFile(filepath.Join(dir, "Godeps/Godeps.json"))
+ var cfg1 struct{ ImportPath string }
+ json.Unmarshal(data, &cfg1)
+ if cfg1.ImportPath != "" {
+ return cfg1.ImportPath, nil
+ }
+
+ // Look for vendor.json declaring import path.
+ data, _ = os.ReadFile(filepath.Join(dir, "vendor/vendor.json"))
+ var cfg2 struct{ RootPath string }
+ json.Unmarshal(data, &cfg2)
+ if cfg2.RootPath != "" {
+ return cfg2.RootPath, nil
+ }
+
+ // Look for path in GOPATH.
+ var badPathErr error
+ for _, gpdir := range filepath.SplitList(cfg.BuildContext.GOPATH) {
+ if gpdir == "" {
+ continue
+ }
+ if rel := search.InDir(dir, filepath.Join(gpdir, "src")); rel != "" && rel != "." {
+ path := filepath.ToSlash(rel)
+ // gorelease will alert users publishing their modules to fix their paths.
+ if err := module.CheckImportPath(path); err != nil {
+ badPathErr = err
+ break
+ }
+ return path, nil
+ }
+ }
+
+ reason := "outside GOPATH, module path must be specified"
+ if badPathErr != nil {
+ // return a different error message if the module was in GOPATH, but
+ // the module path determined above would be an invalid path.
+ reason = fmt.Sprintf("bad module path inferred from directory in GOPATH: %v", badPathErr)
+ }
+ msg := `cannot determine module path for source directory %s (%s)
+
+Example usage:
+ 'go mod init example.com/m' to initialize a v0 or v1 module
+ 'go mod init example.com/m/v2' to initialize a v2 module
+
+Run 'go help mod init' for more information.
+`
+ return "", fmt.Errorf(msg, dir, reason)
+}
+
+var (
+ importCommentRE = lazyregexp.New(`(?m)^package[ \t]+[^ \t\r\n/]+[ \t]+//[ \t]+import[ \t]+(\"[^"]+\")[ \t]*\r?\n`)
+)
+
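+// findImportComment returns the import path declared in an import comment in
+// file, or the empty string if there is none.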
+func findImportComment(file string) string {
+ data, err := os.ReadFile(file)
+ if err != nil {
+ return ""
+ }
+ m := importCommentRE.FindSubmatch(data)
+ if m == nil {
+ return ""
+ }
+ path, err := strconv.Unquote(string(m[1]))
+ if err != nil {
+ return ""
+ }
+ return path
+}
+
+// WriteOpts control the behavior of WriteGoMod.
+type WriteOpts struct {
+ DropToolchain bool // go get toolchain@none
+ ExplicitToolchain bool // go get has set explicit toolchain version
+
+ // TODO(bcmills): Make 'go mod tidy' update the go version in the Requirements
+ // instead of writing directly to the modfile.File
+ TidyWroteGo bool // Go.Version field already updated by 'go mod tidy'
+}
+
+// WriteGoMod writes the current build list back to go.mod.
+func WriteGoMod(ctx context.Context, opts WriteOpts) error {
+ requirements = LoadModFile(ctx)
+ return commitRequirements(ctx, opts)
+}
+
+// commitRequirements ensures go.mod and go.sum are up to date with the current
+// requirements.
+//
+// In "mod" mode, commitRequirements writes changes to go.mod and go.sum.
+//
+// In "readonly" and "vendor" modes, commitRequirements returns an error if
+// go.mod or go.sum are out of date in a semantically significant way.
+//
+// In workspace mode, commitRequirements only writes changes to go.work.sum.
+func commitRequirements(ctx context.Context, opts WriteOpts) (err error) {
+ if inWorkspaceMode() {
+ // go.mod files aren't updated in workspace mode, but we still want to
+ // update the go.work.sum file.
+ return modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements())
+ }
+ if MainModules.Len() != 1 || MainModules.ModRoot(MainModules.Versions()[0]) == "" {
+ // We aren't in a module, so we don't have anywhere to write a go.mod file.
+ return nil
+ }
+ mainModule := MainModules.mustGetSingleMainModule()
+ modFile := MainModules.ModFile(mainModule)
+ if modFile == nil {
+ // command-line-arguments has no .mod file to write.
+ return nil
+ }
+ modFilePath := modFilePath(MainModules.ModRoot(mainModule))
+
+ var list []*modfile.Require
+ toolchain := ""
+ goVersion := ""
+ for _, m := range requirements.rootModules {
+ if m.Path == "go" {
+ goVersion = m.Version
+ continue
+ }
+ if m.Path == "toolchain" {
+ toolchain = m.Version
+ continue
+ }
+ list = append(list, &modfile.Require{
+ Mod: m,
+ Indirect: !requirements.direct[m.Path],
+ })
+ }
+
+ // Update go line.
+ // Every MVS graph we consider should have go as a root,
+ // and toolchain is either implied by the go line or explicitly a root.
+ if goVersion == "" {
+ base.Fatalf("go: internal error: missing go root module in WriteGoMod")
+ }
+ if gover.Compare(goVersion, gover.Local()) > 0 {
+ // We cannot assume that we know how to update a go.mod to a newer version.
+ return &gover.TooNewError{What: "updating go.mod", GoVersion: goVersion}
+ }
+ wroteGo := opts.TidyWroteGo
+ if !wroteGo && modFile.Go == nil || modFile.Go.Version != goVersion {
+ alwaysUpdate := cfg.BuildMod == "mod" || cfg.CmdName == "mod tidy" || cfg.CmdName == "get"
+ if modFile.Go == nil && goVersion == gover.DefaultGoModVersion && !alwaysUpdate {
+ // The go.mod has no go line, the implied default Go version matches
+ // what we've computed for the graph, and we're not in one of the
+ // traditional go.mod-updating programs, so leave it alone.
+ } else {
+ wroteGo = true
+ forceGoStmt(modFile, mainModule, goVersion)
+ }
+ }
+ if toolchain == "" {
+ toolchain = "go" + goVersion
+ }
+
+ // For reproducibility, if we are writing a new go line,
+ // and we're not explicitly modifying the toolchain line with 'go get toolchain@something',
+ // and the go version is one that supports switching toolchains,
+ // and the toolchain running right now is newer than the current toolchain line,
+ // then update the toolchain line to record the newer toolchain.
+ //
+ // TODO(#57001): This condition feels too complicated. Can we simplify it?
+ // TODO(#57001): Add more tests for toolchain lines.
+ toolVers := gover.FromToolchain(toolchain)
+ if wroteGo && !opts.DropToolchain && !opts.ExplicitToolchain &&
+ gover.Compare(goVersion, gover.GoStrictVersion) >= 0 &&
+ (gover.Compare(gover.Local(), toolVers) > 0 && !gover.IsLang(gover.Local())) {
+ toolchain = "go" + gover.Local()
+ toolVers = gover.FromToolchain(toolchain)
+ }
+
+ if opts.DropToolchain || toolchain == "go"+goVersion || (gover.Compare(toolVers, gover.GoStrictVersion) < 0 && !opts.ExplicitToolchain) {
+ // go get toolchain@none or toolchain matches go line or isn't valid; drop it.
+ // TODO(#57001): 'go get' should reject explicit toolchains below GoStrictVersion.
+ modFile.DropToolchainStmt()
+ } else {
+ modFile.AddToolchainStmt(toolchain)
+ }
+
+ // Update require blocks.
+ if gover.Compare(goVersion, gover.SeparateIndirectVersion) < 0 {
+ modFile.SetRequire(list)
+ } else {
+ modFile.SetRequireSeparateIndirect(list)
+ }
+ modFile.Cleanup()
+
+ index := MainModules.GetSingleIndexOrNil()
+ dirty := index.modFileIsDirty(modFile)
+ if dirty && cfg.BuildMod != "mod" {
+ // If we're about to fail due to -mod=readonly,
+ // prefer to report a dirty go.mod over a dirty go.sum
+ return errGoModDirty
+ }
+
+ if !dirty && cfg.CmdName != "mod tidy" {
+ // The go.mod file has the same semantic content that it had before
+ // (but not necessarily the same exact bytes).
+ // Don't write go.mod, but write go.sum in case we added or trimmed sums.
+ // 'go mod init' shouldn't write go.sum, since it will be incomplete.
+ if cfg.CmdName != "mod init" {
+ if err := modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ if _, ok := fsys.OverlayPath(modFilePath); ok {
+ if dirty {
+ return errors.New("updates to go.mod needed, but go.mod is part of the overlay specified with -overlay")
+ }
+ return nil
+ }
+
+ new, err := modFile.Format()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ // At this point we have determined to make the go.mod file on disk equal to new.
+ MainModules.SetIndex(mainModule, indexModFile(new, modFile, mainModule, false))
+
+ // Update go.sum after releasing the side lock and refreshing the index.
+ // 'go mod init' shouldn't write go.sum, since it will be incomplete.
+ if cfg.CmdName != "mod init" {
+ if err == nil {
+ err = modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements())
+ }
+ }
+ }()
+
+ // Make a best-effort attempt to acquire the side lock, only to exclude
+ // previous versions of the 'go' command from making simultaneous edits.
+ if unlock, err := modfetch.SideLock(ctx); err == nil {
+ defer unlock()
+ }
+
+ errNoChange := errors.New("no update needed")
+
+ err = lockedfile.Transform(modFilePath, func(old []byte) ([]byte, error) {
+ if bytes.Equal(old, new) {
+ // The go.mod file is already equal to new, possibly as the result of some
+ // other process.
+ return nil, errNoChange
+ }
+
+ if index != nil && !bytes.Equal(old, index.data) {
+ // The contents of the go.mod file have changed. In theory we could add all
+ // of the new modules to the build list, recompute, and check whether any
+ // module in *our* build list got bumped to a different version, but that's
+ // a lot of work for marginal benefit. Instead, fail the command: if users
+ // want to run concurrent commands, they need to start with a complete,
+ // consistent module definition.
+ return nil, fmt.Errorf("existing contents have changed since last read")
+ }
+
+ return new, nil
+ })
+
+ if err != nil && err != errNoChange {
+ return fmt.Errorf("updating go.mod: %w", err)
+ }
+ return nil
+}
+
+// keepSums returns the set of modules (and go.mod file entries) for which
+// checksums would be needed in order to reload the same set of packages
+// loaded by the most recent call to LoadPackages or ImportFromFiles,
+// including any go.mod files needed to reconstruct the MVS result
+// or identify go versions,
+// in addition to the checksums for every module in keepMods.
+func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool {
+ // Every module in the full module graph contributes its requirements,
+ // so in order to ensure that the build list itself is reproducible,
+ // we need sums for every go.mod in the graph (regardless of whether
+ // that version is selected).
+ keep := make(map[module.Version]bool)
+
+ // Add entries for modules in the build list with paths that are prefixes of
+ // paths of loaded packages. We need to retain sums for all of these modules —
+ // not just the modules containing the actual packages — in order to rule out
+ // ambiguous import errors the next time we load the package.
+ keepModSumsForZipSums := true
+ if ld == nil {
+ if gover.Compare(MainModules.GoVersion(), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" {
+ keepModSumsForZipSums = false
+ }
+ } else {
+ keepPkgGoModSums := true
+ if gover.Compare(ld.requirements.GoVersion(), gover.TidyGoModSumVersion) < 0 && (ld.Tidy || cfg.BuildMod != "mod") {
+ keepPkgGoModSums = false
+ keepModSumsForZipSums = false
+ }
+ for _, pkg := range ld.pkgs {
+ // We check pkg.mod.Path here instead of pkg.inStd because the
+ // pseudo-package "C" is not in std, but not provided by any module (and
+ // shouldn't force loading the whole module graph).
+ if pkg.testOf != nil || (pkg.mod.Path == "" && pkg.err == nil) || module.CheckImportPath(pkg.path) != nil {
+ continue
+ }
+
+ // We need the checksum for the go.mod file for pkg.mod
+ // so that we know what Go version to use to compile pkg.
+ // However, we didn't do so before Go 1.21, and the bug is relatively
+ // minor, so we maintain the previous (buggy) behavior in 'go mod tidy' to
+ // avoid introducing unnecessary churn.
+ if keepPkgGoModSums {
+ r := resolveReplacement(pkg.mod)
+ keep[modkey(r)] = true
+ }
+
+ if rs.pruning == pruned && pkg.mod.Path != "" {
+ if v, ok := rs.rootSelected(pkg.mod.Path); ok && v == pkg.mod.Version {
+ // pkg was loaded from a root module, and because the main module has
+ // a pruned module graph we do not check non-root modules for
+ // conflicts for packages that can be found in roots. So we only need
+ // the checksums for the root modules that may contain pkg, not all
+ // possible modules.
+ for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) {
+ if v, ok := rs.rootSelected(prefix); ok && v != "none" {
+ m := module.Version{Path: prefix, Version: v}
+ r := resolveReplacement(m)
+ keep[r] = true
+ }
+ }
+ continue
+ }
+ }
+
+ mg, _ := rs.Graph(ctx)
+ for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) {
+ if v := mg.Selected(prefix); v != "none" {
+ m := module.Version{Path: prefix, Version: v}
+ r := resolveReplacement(m)
+ keep[r] = true
+ }
+ }
+ }
+ }
+
+ if rs.graph.Load() == nil {
+ // We haven't needed to load the module graph so far.
+ // Save sums for the root modules (or their replacements), but don't
+ // incur the cost of loading the graph just to find and retain the sums.
+ for _, m := range rs.rootModules {
+ r := resolveReplacement(m)
+ keep[modkey(r)] = true
+ if which == addBuildListZipSums {
+ keep[r] = true
+ }
+ }
+ } else {
+ mg, _ := rs.Graph(ctx)
+ mg.WalkBreadthFirst(func(m module.Version) {
+ if _, ok := mg.RequiredBy(m); ok {
+ // The requirements from m's go.mod file are present in the module graph,
+ // so they are relevant to the MVS result regardless of whether m was
+ // actually selected.
+ r := resolveReplacement(m)
+ keep[modkey(r)] = true
+ }
+ })
+
+ if which == addBuildListZipSums {
+ for _, m := range mg.BuildList() {
+ r := resolveReplacement(m)
+ if keepModSumsForZipSums {
+ keep[modkey(r)] = true // we need the go version from the go.mod file to do anything useful with the zipfile
+ }
+ keep[r] = true
+ }
+ }
+ }
+
+ return keep
+}
+
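+// A whichSums value indicates whether keepSums should retain zip checksums
+// only for the modules needed by the most recently loaded packages
+// (loadedZipSumsOnly) or also for every module in the build list
+// (addBuildListZipSums).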
+type whichSums int8
+
+const (
+ loadedZipSumsOnly = whichSums(iota)
+ addBuildListZipSums
+)
+
+// modkey returns the module.Version under which the checksum for m's go.mod
+// file is stored in the go.sum file.
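+// For example, the checksum for the go.mod file of example.com/m v1.2.3 is
+// stored under the version "v1.2.3/go.mod".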
+func modkey(m module.Version) module.Version {
+ return module.Version{Path: m.Path, Version: m.Version + "/go.mod"}
+}
+
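+// suggestModulePath suggests a module path carrying an explicit major-version
+// suffix, derived from any version digits trailing path. When no major
+// version above 1 can be inferred, it suggests a "/v2" suffix.
+// For example, suggestModulePath("example.com/m") returns "example.com/m/v2",
+// and suggestModulePath("example.com/m/v3") returns "example.com/m/v3".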
+func suggestModulePath(path string) string {
+ var m string
+
+ i := len(path)
+ for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
+ i--
+ }
+ url := path[:i]
+ url = strings.TrimSuffix(url, "/v")
+ url = strings.TrimSuffix(url, "/")
+
+ f := func(c rune) bool {
+ return c > '9' || c < '0'
+ }
+ s := strings.FieldsFunc(path[i:], f)
+ if len(s) > 0 {
+ m = s[0]
+ }
+ m = strings.TrimLeft(m, "0")
+ if m == "" || m == "1" {
+ return url + "/v2"
+ }
+
+ return url + "/v" + m
+}
+
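+// suggestGopkgIn suggests a gopkg.in-style import path carrying a ".vN"
+// suffix, derived from any version digits in path; when none are found it
+// suggests ".v1". For example, suggestGopkgIn("gopkg.in/yaml") returns
+// "gopkg.in/yaml.v1", and suggestGopkgIn("gopkg.in/yaml.v2") returns
+// "gopkg.in/yaml.v2".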
+func suggestGopkgIn(path string) string {
+ var m string
+ i := len(path)
+ for i > 0 && (('0' <= path[i-1] && path[i-1] <= '9') || (path[i-1] == '.')) {
+ i--
+ }
+ url := path[:i]
+ url = strings.TrimSuffix(url, ".v")
+ url = strings.TrimSuffix(url, "/v")
+ url = strings.TrimSuffix(url, "/")
+
+ f := func(c rune) bool {
+ return c > '9' || c < '0'
+ }
+ s := strings.FieldsFunc(path, f)
+ if len(s) > 0 {
+ m = s[0]
+ }
+
+ m = strings.TrimLeft(m, "0")
+
+ if m == "" {
+ return url + ".v1"
+ }
+ return url + ".v" + m
+}
diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go
new file mode 100644
index 0000000..e8872ba
--- /dev/null
+++ b/src/cmd/go/internal/modload/list.go
@@ -0,0 +1,311 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modfetch/codehost"
+ "cmd/go/internal/modinfo"
+ "cmd/go/internal/search"
+ "cmd/internal/pkgpattern"
+
+ "golang.org/x/mod/module"
+)
+
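+// A ListMode indicates which optional information ListModules should compute
+// and attach to each reported module.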
+type ListMode int
+
+const (
+ ListU ListMode = 1 << iota
+ ListRetracted
+ ListDeprecated
+ ListVersions
+ ListRetractedVersions
+)
+
+// ListModules returns a description of the modules matching args, if known,
+// along with any error preventing additional matches from being identified.
+//
+// The returned slice can be nonempty even if the error is non-nil.
+func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile string) ([]*modinfo.ModulePublic, error) {
+ var reuse map[module.Version]*modinfo.ModulePublic
+ if reuseFile != "" {
+ data, err := os.ReadFile(reuseFile)
+ if err != nil {
+ return nil, err
+ }
+ dec := json.NewDecoder(bytes.NewReader(data))
+ reuse = make(map[module.Version]*modinfo.ModulePublic)
+ for {
+ var m modinfo.ModulePublic
+ if err := dec.Decode(&m); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("parsing %s: %v", reuseFile, err)
+ }
+ if m.Origin == nil || !m.Origin.Checkable() {
+ // Nothing to check to validate reuse.
+ continue
+ }
+ m.Reuse = true
+ reuse[module.Version{Path: m.Path, Version: m.Version}] = &m
+ if m.Query != "" {
+ reuse[module.Version{Path: m.Path, Version: m.Query}] = &m
+ }
+ }
+ }
+
+ rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode, reuse)
+
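+ // Attach the extra information requested by mode (updates, available
+ // versions, retractions, deprecations) to the modules concurrently, using a
+ // buffered channel as a semaphore to cap the number of in-flight lookups at
+ // GOMAXPROCS.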
+ type token struct{}
+ sem := make(chan token, runtime.GOMAXPROCS(0))
+ if mode != 0 {
+ for _, m := range mods {
+ if m.Reuse {
+ continue
+ }
+ add := func(m *modinfo.ModulePublic) {
+ sem <- token{}
+ go func() {
+ if mode&ListU != 0 {
+ addUpdate(ctx, m)
+ }
+ if mode&ListVersions != 0 {
+ addVersions(ctx, m, mode&ListRetractedVersions != 0)
+ }
+ if mode&ListRetracted != 0 {
+ addRetraction(ctx, m)
+ }
+ if mode&ListDeprecated != 0 {
+ addDeprecation(ctx, m)
+ }
+ <-sem
+ }()
+ }
+
+ add(m)
+ if m.Replace != nil {
+ add(m.Replace)
+ }
+ }
+ }
+ // Fill semaphore channel to wait for all tasks to finish.
+ for n := cap(sem); n > 0; n-- {
+ sem <- token{}
+ }
+
+ if err == nil {
+ requirements = rs
+ // TODO(#61605): The extra ListU clause fixes a problem with Go 1.21rc3
+ // where "go mod tidy" and "go list -m -u all" fight over whether the go.sum
+ // should be considered up-to-date. The fix for now is to always treat the
+ // go.sum as up-to-date during list -m -u. Probably the right fix is more targeted,
+ // but in general list -u is looking up other checksums in the checksum database
+ // that won't be necessary later, so it makes sense not to write the go.sum back out.
+ if !ExplicitWriteGoMod && mode&ListU == 0 {
+ err = commitRequirements(ctx, WriteOpts{})
+ }
+ }
+ return mods, err
+}
+
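+// listModules resolves each of the module arguments in args against rs,
+// expanding rs to the full module graph only when a pattern or query
+// requires it, and returns the (possibly expanded) requirements along with a
+// description of each matched module.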
+func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) {
+ if len(args) == 0 {
+ var ms []*modinfo.ModulePublic
+ for _, m := range MainModules.Versions() {
+ if gover.IsToolchain(m.Path) {
+ continue
+ }
+ ms = append(ms, moduleInfo(ctx, rs, m, mode, reuse))
+ }
+ return rs, ms, nil
+ }
+
+ needFullGraph := false
+ for _, arg := range args {
+ if strings.Contains(arg, `\`) {
+ base.Fatalf("go: module paths never use backslash")
+ }
+ if search.IsRelativePath(arg) {
+ base.Fatalf("go: cannot use relative path %s to specify module", arg)
+ }
+ if arg == "all" || strings.Contains(arg, "...") {
+ needFullGraph = true
+ if !HasModRoot() {
+ base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot)
+ }
+ continue
+ }
+ if path, vers, found := strings.Cut(arg, "@"); found {
+ if vers == "upgrade" || vers == "patch" {
+ if _, ok := rs.rootSelected(path); !ok || rs.pruning == unpruned {
+ needFullGraph = true
+ if !HasModRoot() {
+ base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot)
+ }
+ }
+ }
+ continue
+ }
+ if _, ok := rs.rootSelected(arg); !ok || rs.pruning == unpruned {
+ needFullGraph = true
+ if mode&ListVersions == 0 && !HasModRoot() {
+ base.Fatalf("go: cannot match %q without -versions or an explicit version: %v", arg, ErrNoModRoot)
+ }
+ }
+ }
+
+ var mg *ModuleGraph
+ if needFullGraph {
+ rs, mg, mgErr = expandGraph(ctx, rs)
+ }
+
+ matchedModule := map[module.Version]bool{}
+ for _, arg := range args {
+ if path, vers, found := strings.Cut(arg, "@"); found {
+ var current string
+ if mg == nil {
+ current, _ = rs.rootSelected(path)
+ } else {
+ current = mg.Selected(path)
+ }
+ if current == "none" && mgErr != nil {
+ if vers == "upgrade" || vers == "patch" {
+ // The module graph is incomplete, so we don't know what version we're
+ // actually upgrading from.
+ // mgErr is already set, so just skip this module.
+ continue
+ }
+ }
+
+ allowed := CheckAllowed
+ if IsRevisionQuery(path, vers) || mode&ListRetracted != 0 {
+ // Allow excluded and retracted versions if the user asked for a
+ // specific revision or used 'go list -retracted'.
+ allowed = nil
+ }
+ info, err := queryReuse(ctx, path, vers, current, allowed, reuse)
+ if err != nil {
+ var origin *codehost.Origin
+ if info != nil {
+ origin = info.Origin
+ }
+ mods = append(mods, &modinfo.ModulePublic{
+ Path: path,
+ Version: vers,
+ Error: modinfoError(path, vers, err),
+ Origin: origin,
+ })
+ continue
+ }
+
+ // Indicate that m was resolved from outside of rs by passing a nil
+ // *Requirements instead.
+ var noRS *Requirements
+
+ mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse)
+ if vers != mod.Version {
+ mod.Query = vers
+ }
+ mod.Origin = info.Origin
+ mods = append(mods, mod)
+ continue
+ }
+
+ // Module path or pattern.
+ var match func(string) bool
+ if arg == "all" {
+ match = func(p string) bool { return !gover.IsToolchain(p) }
+ } else if strings.Contains(arg, "...") {
+ mp := pkgpattern.MatchPattern(arg)
+ match = func(p string) bool { return mp(p) && !gover.IsToolchain(p) }
+ } else {
+ var v string
+ if mg == nil {
+ var ok bool
+ v, ok = rs.rootSelected(arg)
+ if !ok {
+ // We checked rootSelected(arg) in the earlier args loop, so if there
+ // is no such root we should have loaded a non-nil mg.
+ panic(fmt.Sprintf("internal error: root requirement expected but not found for %v", arg))
+ }
+ } else {
+ v = mg.Selected(arg)
+ }
+ if v == "none" && mgErr != nil {
+ // mgErr is already set, so just skip this module.
+ continue
+ }
+ if v != "none" {
+ mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse))
+ } else if cfg.BuildMod == "vendor" {
+ // In vendor mode, we can't determine whether a missing module is “a
+ // known dependency” because the module graph is incomplete.
+ // Give a more explicit error message.
+ mods = append(mods, &modinfo.ModulePublic{
+ Path: arg,
+ Error: modinfoError(arg, "", errors.New("can't resolve module using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)")),
+ })
+ } else if mode&ListVersions != 0 {
+ // Don't make the user provide an explicit '@latest' when they're
+ // explicitly asking what the available versions are. Instead, return a
+ // module with version "none", to which we can add the requested list.
+ mods = append(mods, &modinfo.ModulePublic{Path: arg})
+ } else {
+ mods = append(mods, &modinfo.ModulePublic{
+ Path: arg,
+ Error: modinfoError(arg, "", errors.New("not a known dependency")),
+ })
+ }
+ continue
+ }
+
+ matched := false
+ for _, m := range mg.BuildList() {
+ if match(m.Path) {
+ matched = true
+ if !matchedModule[m] {
+ matchedModule[m] = true
+ mods = append(mods, moduleInfo(ctx, rs, m, mode, reuse))
+ }
+ }
+ }
+ if !matched {
+ fmt.Fprintf(os.Stderr, "warning: pattern %q matched no module dependencies\n", arg)
+ }
+ }
+
+ return rs, mods, mgErr
+}
+
+// modinfoError wraps an error to create an error message in
+// modinfo.ModuleError with minimal redundancy.
+func modinfoError(path, vers string, err error) *modinfo.ModuleError {
+ var nerr *NoMatchingVersionError
+ var merr *module.ModuleError
+ if errors.As(err, &nerr) {
+ // NoMatchingVersionError contains the query, so we don't mention the
+ // query again in ModuleError.
+ err = &module.ModuleError{Path: path, Err: err}
+ } else if !errors.As(err, &merr) {
+ // If the error does not contain path and version, wrap it in a
+ // module.ModuleError.
+ err = &module.ModuleError{Path: path, Version: vers, Err: err}
+ }
+
+ return &modinfo.ModuleError{Err: err.Error()}
+}
diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go
new file mode 100644
index 0000000..a993fe8
--- /dev/null
+++ b/src/cmd/go/internal/modload/load.go
@@ -0,0 +1,2343 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+// This file contains the module-mode package loader, as well as some accessory
+// functions pertaining to the package import graph.
+//
+// There are two exported entry points into package loading — LoadPackages and
+// ImportFromFiles — both implemented in terms of loadFromRoots, which itself
+// manipulates an instance of the loader struct.
+//
+// Although most of the loading state is maintained in the loader struct,
+// one key piece - the build list - is a global, so that it can be modified
+// separately from the loading operation, such as during "go get"
+// upgrades/downgrades or in "go mod" operations.
+// TODO(#40775): It might be nice to make the loader take and return
+// a buildList rather than hard-coding use of the global.
+//
+// Loading is an iterative process. On each iteration, we try to load the
+// requested packages and their transitive imports, then try to resolve modules
+// for any imported packages that are still missing.
+//
+// The first step of each iteration identifies a set of “root” packages.
+// Normally the root packages are exactly those matching the named pattern
+// arguments. However, for the "all" meta-pattern, the final set of packages is
+// computed from the package import graph, and therefore cannot be an initial
+// input to loading that graph. Instead, the root packages for the "all" pattern
+// are those contained in the main module, and the allPatternIsRoot parameter to the
+// loader instructs it to dynamically expand those roots to the full "all"
+// pattern as loading progresses.
+//
+// The pkgInAll flag on each loadPkg instance tracks whether that
+// package is known to match the "all" meta-pattern.
+// A package matches the "all" pattern if:
+// - it is in the main module, or
+// - it is imported by any test in the main module, or
+// - it is imported by another package in "all", or
+// - the main module specifies a go version ≤ 1.15, and the package is imported
+// by a *test of* another package in "all".
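+//
+// For example, suppose the main module imports package A from another module,
+// and a test of A imports package B. If the main module's go version is 1.15
+// or lower, B is in "all"; from Go 1.16 on it is not, unless B is also
+// imported by some package in "all" or by a test in the main module.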
+//
+// When graph pruning is in effect, we want to spot-check the graph-pruning
+// invariants — which depend on which packages are known to be in "all" — even
+// when we are only loading individual packages, so we set the pkgInAll flag
+// regardless of whether the "all" pattern is a root.
+// (This is necessary to maintain the “import invariant” described in
+// https://golang.org/design/36460-lazy-module-loading.)
+//
+// Because "go mod vendor" prunes out the tests of vendored packages, the
+// behavior of the "all" pattern with -mod=vendor in Go 1.11–1.15 is the same
+// as the "all" pattern (regardless of the -mod flag) in 1.16+.
+// The loader uses the GoVersion parameter to determine whether the "all"
+// pattern should close over tests (as in Go 1.11–1.15) or stop at only those
+// packages transitively imported by the packages and tests in the main module
+// ("all" in Go 1.16+ and "go mod vendor" in Go 1.11+).
+//
+// Note that it is possible for a loaded package NOT to be in "all" even when we
+// are loading the "all" pattern. For example, packages that are transitive
+// dependencies of other roots named on the command line must be loaded, but are
+// not in "all". (The mod_notall test illustrates this behavior.)
+// Similarly, if the LoadTests flag is set but the "all" pattern does not close
+// over test dependencies, then when we load the test of a package that is in
+// "all" but outside the main module, the dependencies of that test will not
+// necessarily themselves be in "all". (That configuration does not arise in Go
+// 1.11–1.15, but it will be possible in Go 1.16+.)
+//
+// Loading proceeds from the roots, using a parallel work-queue with a limit on
+// the amount of active work (to avoid saturating disks, CPU cores, and/or
+// network connections). Each package is added to the queue the first time it is
+// imported by another package. When we have finished identifying the imports of
+// a package, we add the test for that package if it is needed. A test may be
+// needed if:
+// - the package matches a root pattern and tests of the roots were requested, or
+// - the package is in the main module and the "all" pattern is requested
+// (because the "all" pattern includes the dependencies of tests in the main
+// module), or
+// - the package is in "all" and the definition of "all" we are using includes
+// dependencies of tests (as is the case in Go ≤1.15).
+//
+// After all available packages have been loaded, we examine the results to
+// identify any requested or imported packages that are still missing, and if
+// so, which modules we could add to the module graph in order to make the
+// missing packages available. We add those to the module graph and iterate,
+// until either all packages resolve successfully or we cannot identify any
+// module that would resolve any remaining missing package.
+//
+// If the main module is “tidy” (that is, if "go mod tidy" is a no-op for it)
+// and all requested packages are in "all", then loading completes in a single
+// iteration.
+// TODO(bcmills): We should also be able to load in a single iteration if the
+// requested packages all come from modules that are themselves tidy, regardless
+// of whether those packages are in "all". Today, that requires two iterations
+// if those packages are not found in existing dependencies of the main module.
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "go/build"
+ "io/fs"
+ "os"
+ "path"
+ pathpkg "path"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modindex"
+ "cmd/go/internal/mvs"
+ "cmd/go/internal/par"
+ "cmd/go/internal/search"
+ "cmd/go/internal/str"
+
+ "golang.org/x/mod/module"
+)
+
+// loaded is the most recently-used package loader.
+// It holds details about individual packages.
+//
+// This variable should only be accessed directly in top-level exported
+// functions. All other functions that require or produce a *loader should pass
+// or return it as an explicit parameter.
+var loaded *loader
+
+// PackageOpts control the behavior of the LoadPackages function.
+type PackageOpts struct {
+ // TidyGoVersion is the Go version to which the go.mod file should be updated
+ // after packages have been loaded.
+ //
+ // An empty TidyGoVersion means to use the Go version already specified in the
+ // main module's go.mod file, or the latest Go version if there is no main
+ // module.
+ TidyGoVersion string
+
+ // Tags are the build tags in effect (as interpreted by the
+ // cmd/go/internal/imports package).
+ // If nil, treated as equivalent to imports.Tags().
+ Tags map[string]bool
+
+ // Tidy, if true, requests that the build list and go.sum file be reduced to
+ // the minimal dependencies needed to reproducibly reload the requested
+ // packages.
+ Tidy bool
+
+ // TidyCompatibleVersion is the oldest Go version that must be able to
+ // reproducibly reload the requested packages.
+ //
+ // If empty, the compatible version is the Go version immediately prior to the
+ // 'go' version listed in the go.mod file.
+ TidyCompatibleVersion string
+
+ // VendorModulesInGOROOTSrc indicates that if we are within a module in
+ // GOROOT/src, packages in the module's vendor directory should be resolved as
+ // actual module dependencies (instead of standard-library packages).
+ VendorModulesInGOROOTSrc bool
+
+ // ResolveMissingImports indicates that we should attempt to add module
+ // dependencies as needed to resolve imports of packages that are not found.
+ //
+ // For commands that support the -mod flag, resolving imports may still fail
+ // if the flag is set to "readonly" (the default) or "vendor".
+ ResolveMissingImports bool
+
+ // AssumeRootsImported indicates that the transitive dependencies of the root
+ // packages should be treated as if those roots will be imported by the main
+ // module.
+ AssumeRootsImported bool
+
+ // AllowPackage, if non-nil, is called after identifying the module providing
+ // each package. If AllowPackage returns a non-nil error, that error is set
+ // for the package, and the imports and test of that package will not be
+ // loaded.
+ //
+ // AllowPackage may be invoked concurrently by multiple goroutines,
+ // and may be invoked multiple times for a given package path.
+ AllowPackage func(ctx context.Context, path string, mod module.Version) error
+
+ // LoadTests loads the test dependencies of each package matching a requested
+ // pattern. If ResolveMissingImports is also true, test dependencies will be
+ // resolved if missing.
+ LoadTests bool
+
+ // UseVendorAll causes the "all" package pattern to be interpreted as if
+ // running "go mod vendor" (or building with "-mod=vendor").
+ //
+ // This is a no-op for modules that declare 'go 1.16' or higher, for which this
+ // is the default (and only) interpretation of the "all" pattern in module mode.
+ UseVendorAll bool
+
+ // AllowErrors indicates that LoadPackages should not terminate the process if
+ // an error occurs.
+ AllowErrors bool
+
+ // SilencePackageErrors indicates that LoadPackages should not print errors
+ // that occur while matching or loading packages, and should not terminate the
+ // process if such an error occurs.
+ //
+ // Errors encountered in the module graph will still be reported.
+ //
+ // The caller may retrieve the silenced package errors using the Lookup
+ // function, and matching errors are still populated in the Errs field of the
+ // associated search.Match.
+ SilencePackageErrors bool
+
+ // SilenceMissingStdImports indicates that LoadPackages should not print
+ // errors or terminate the process if an imported package is missing, and the
+ // import path looks like it might be in the standard library (perhaps in a
+ // future version).
+ SilenceMissingStdImports bool
+
+ // SilenceNoGoErrors indicates that LoadPackages should not print
+ // imports.ErrNoGo errors.
+ // This allows the caller to invoke LoadPackages (and report other errors)
+ // without knowing whether the requested packages exist for the given tags.
+ //
+ // Note that if a requested package does not exist *at all*, it will fail
+ // during module resolution and the error will not be suppressed.
+ SilenceNoGoErrors bool
+
+ // SilenceUnmatchedWarnings suppresses the warnings normally emitted for
+ // patterns that did not match any packages.
+ SilenceUnmatchedWarnings bool
+
+ // Resolve the query against this module.
+ MainModule module.Version
+
+ // If Switcher is non-nil, then LoadPackages passes all encountered errors
+ // to Switcher.Error and tries Switcher.Switch before base.ExitIfErrors.
+ Switcher gover.Switcher
+}
+
+// LoadPackages identifies the set of packages matching the given patterns and
+// loads the packages in the import graph rooted at that set.
+func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (matches []*search.Match, loadedPackages []string) {
+ if opts.Tags == nil {
+ opts.Tags = imports.Tags()
+ }
+
+ patterns = search.CleanPatterns(patterns)
+ matches = make([]*search.Match, 0, len(patterns))
+ allPatternIsRoot := false
+ for _, pattern := range patterns {
+ matches = append(matches, search.NewMatch(pattern))
+ if pattern == "all" {
+ allPatternIsRoot = true
+ }
+ }
+
+ updateMatches := func(rs *Requirements, ld *loader) {
+ for _, m := range matches {
+ switch {
+ case m.IsLocal():
+ // Evaluate list of file system directories on first iteration.
+ if m.Dirs == nil {
+ matchModRoots := modRoots
+ if opts.MainModule != (module.Version{}) {
+ matchModRoots = []string{MainModules.ModRoot(opts.MainModule)}
+ }
+ matchLocalDirs(ctx, matchModRoots, m, rs)
+ }
+
+ // Make a copy of the directory list and translate to import paths.
+ // Note that whether a directory corresponds to an import path
+ // changes as the build list is updated, and a directory can change
+ // from not being in the build list to being in it and back as
+ // the exact version of a particular module increases during
+ // the loader iterations.
+ m.Pkgs = m.Pkgs[:0]
+ for _, dir := range m.Dirs {
+ pkg, err := resolveLocalPackage(ctx, dir, rs)
+ if err != nil {
+ if !m.IsLiteral() && (err == errPkgIsBuiltin || err == errPkgIsGorootSrc) {
+ continue // Don't include "builtin" or GOROOT/src in wildcard patterns.
+ }
+
+ // If we're outside of a module, ensure that the failure mode
+ // indicates that.
+ if !HasModRoot() {
+ die()
+ }
+
+ if ld != nil {
+ m.AddError(err)
+ }
+ continue
+ }
+ m.Pkgs = append(m.Pkgs, pkg)
+ }
+
+ case m.IsLiteral():
+ m.Pkgs = []string{m.Pattern()}
+
+ case strings.Contains(m.Pattern(), "..."):
+ m.Errs = m.Errs[:0]
+ mg, err := rs.Graph(ctx)
+ if err != nil {
+ // The module graph is (or may be) incomplete — perhaps we failed to
+ // load the requirements of some module. This is an error in matching
+ // the patterns to packages, because we may be missing some packages
+ // or we may erroneously match packages in the wrong versions of
+ // modules. However, for cases like 'go list -e', the error should not
+ // necessarily prevent us from loading the packages we could find.
+ m.Errs = append(m.Errs, err)
+ }
+ matchPackages(ctx, m, opts.Tags, includeStd, mg.BuildList())
+
+ case m.Pattern() == "all":
+ if ld == nil {
+ // The initial roots are the packages in the main module.
+ // loadFromRoots will expand that to "all".
+ m.Errs = m.Errs[:0]
+ matchModules := MainModules.Versions()
+ if opts.MainModule != (module.Version{}) {
+ matchModules = []module.Version{opts.MainModule}
+ }
+ matchPackages(ctx, m, opts.Tags, omitStd, matchModules)
+ } else {
+ // Starting with the packages in the main module,
+ // enumerate the full list of "all".
+ m.Pkgs = ld.computePatternAll()
+ }
+
+ case m.Pattern() == "std" || m.Pattern() == "cmd":
+ if m.Pkgs == nil {
+ m.MatchPackages() // Locate the packages within GOROOT/src.
+ }
+
+ default:
+ panic(fmt.Sprintf("internal error: modload missing case for pattern %s", m.Pattern()))
+ }
+ }
+ }
+
+ initialRS, err := loadModFile(ctx, &opts)
+ if err != nil {
+ base.Fatal(err)
+ }
+
+ ld := loadFromRoots(ctx, loaderParams{
+ PackageOpts: opts,
+ requirements: initialRS,
+
+ allPatternIsRoot: allPatternIsRoot,
+
+ listRoots: func(rs *Requirements) (roots []string) {
+ updateMatches(rs, nil)
+ for _, m := range matches {
+ roots = append(roots, m.Pkgs...)
+ }
+ return roots
+ },
+ })
+
+ // One last pass to finalize wildcards.
+ updateMatches(ld.requirements, ld)
+
+ // List errors in matching patterns (such as directory permission
+ // errors for wildcard patterns).
+ if !ld.SilencePackageErrors {
+ for _, match := range matches {
+ for _, err := range match.Errs {
+ ld.error(err)
+ }
+ }
+ }
+ ld.exitIfErrors(ctx)
+
+ if !opts.SilenceUnmatchedWarnings {
+ search.WarnUnmatched(matches)
+ }
+
+ if opts.Tidy {
+ if cfg.BuildV {
+ mg, _ := ld.requirements.Graph(ctx)
+ for _, m := range initialRS.rootModules {
+ var unused bool
+ if ld.requirements.pruning == unpruned {
+ // m is unused if it was dropped from the module graph entirely. If it
+ // was only demoted from direct to indirect, it may still be in use via
+ // a transitive import.
+ unused = mg.Selected(m.Path) == "none"
+ } else {
+ // m is unused if it was dropped from the roots. If it is still present
+ // as a transitive dependency, that transitive dependency is not needed
+ // by any package or test in the main module.
+ _, ok := ld.requirements.rootSelected(m.Path)
+ unused = !ok
+ }
+ if unused {
+ fmt.Fprintf(os.Stderr, "unused %s\n", m.Path)
+ }
+ }
+ }
+
+ keep := keepSums(ctx, ld, ld.requirements, loadedZipSumsOnly)
+ compatVersion := ld.TidyCompatibleVersion
+ goVersion := ld.requirements.GoVersion()
+ if compatVersion == "" {
+ if gover.Compare(goVersion, gover.GoStrictVersion) < 0 {
+ compatVersion = gover.Prev(goVersion)
+ } else {
+ // Starting at GoStrictVersion, we no longer maintain compatibility with
+ // versions older than what is listed in the go.mod file.
+ compatVersion = goVersion
+ }
+ }
+ if gover.Compare(compatVersion, goVersion) > 0 {
+ // Each version of the Go toolchain knows how to interpret go.mod and
+ // go.sum files produced by all previous versions, so a compatibility
+ // version higher than the go.mod version adds nothing.
+ compatVersion = goVersion
+ }
+ if compatPruning := pruningForGoVersion(compatVersion); compatPruning != ld.requirements.pruning {
+ compatRS := newRequirements(compatPruning, ld.requirements.rootModules, ld.requirements.direct)
+ ld.checkTidyCompatibility(ctx, compatRS, compatVersion)
+
+ for m := range keepSums(ctx, ld, compatRS, loadedZipSumsOnly) {
+ keep[m] = true
+ }
+ }
+
+ if !ExplicitWriteGoMod {
+ modfetch.TrimGoSum(keep)
+
+ // commitRequirements below will also call WriteGoSum, but the "keep" map
+ // we have here could be strictly larger: commitRequirements only commits
+ // loaded.requirements, but here we may have also loaded (and want to
+ // preserve checksums for) additional entities from compatRS, which are
+ // only needed for compatibility with ld.TidyCompatibleVersion.
+ if err := modfetch.WriteGoSum(ctx, keep, mustHaveCompleteRequirements()); err != nil {
+ base.Fatal(err)
+ }
+ }
+ }
+
+ // Success! Update go.mod and go.sum (if needed) and return the results.
+ // We'll skip updating if ExplicitWriteGoMod is true (the caller has opted
+ // to call WriteGoMod itself) or if ResolveMissingImports is false (the
+ // command wants to examine the package graph as-is).
+ loaded = ld
+ requirements = loaded.requirements
+
+ for _, pkg := range ld.pkgs {
+ if !pkg.isTest() {
+ loadedPackages = append(loadedPackages, pkg.path)
+ }
+ }
+ sort.Strings(loadedPackages)
+
+ if !ExplicitWriteGoMod && opts.ResolveMissingImports {
+ if err := commitRequirements(ctx, WriteOpts{}); err != nil {
+ base.Fatal(err)
+ }
+ }
+
+ return matches, loadedPackages
+}
+
+// matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories
+// outside of the standard library and active modules.
+func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs *Requirements) {
+ if !m.IsLocal() {
+ panic(fmt.Sprintf("internal error: resolveLocalDirs on non-local pattern %s", m.Pattern()))
+ }
+
+ if i := strings.Index(m.Pattern(), "..."); i >= 0 {
+ // The pattern is local, but it is a wildcard. Its packages will
+ // only resolve to paths if they are inside of the standard
+ // library, the main module, or some dependency of the main
+ // module. Verify that before we walk the filesystem: a filesystem
+ // walk in a directory like /var or /etc can be very expensive!
+ dir := filepath.Dir(filepath.Clean(m.Pattern()[:i+3]))
+ absDir := dir
+ if !filepath.IsAbs(dir) {
+ absDir = filepath.Join(base.Cwd(), dir)
+ }
+
+ modRoot := findModuleRoot(absDir)
+ found := false
+ for _, mainModuleRoot := range modRoots {
+ if mainModuleRoot == modRoot {
+ found = true
+ break
+ }
+ }
+ if !found && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(ctx, absDir, rs) == "" {
+ m.Dirs = []string{}
+ scope := "main module or its selected dependencies"
+ if inWorkspaceMode() {
+ scope = "modules listed in go.work or their selected dependencies"
+ }
+ m.AddError(fmt.Errorf("directory prefix %s does not contain %s", base.ShortPath(absDir), scope))
+ return
+ }
+ }
+
+ m.MatchDirs(modRoots)
+}
+
+// resolveLocalPackage resolves a filesystem path to a package path.
+func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (string, error) {
+ var absDir string
+ if filepath.IsAbs(dir) {
+ absDir = filepath.Clean(dir)
+ } else {
+ absDir = filepath.Join(base.Cwd(), dir)
+ }
+
+ bp, err := cfg.BuildContext.ImportDir(absDir, 0)
+ if err != nil && (bp == nil || len(bp.IgnoredGoFiles) == 0) {
+ // golang.org/issue/32917: We should resolve a relative path to a
+ // package path only if the relative path actually contains the code
+ // for that package.
+ //
+ // If the named directory does not exist or contains no Go files,
+ // the package does not exist.
+ // Other errors may affect package loading, but not resolution.
+ if _, err := fsys.Stat(absDir); err != nil {
+ if os.IsNotExist(err) {
+ // Canonicalize OS-specific errors to errDirectoryNotFound so that error
+ // messages will be easier for users to search for.
+ return "", &fs.PathError{Op: "stat", Path: absDir, Err: errDirectoryNotFound}
+ }
+ return "", err
+ }
+ if _, noGo := err.(*build.NoGoError); noGo {
+ // A directory that does not contain any Go source files — even ignored
+ // ones! — is not a Go package, and we can't resolve it to a package
+ // path because that path could plausibly be provided by some other
+ // module.
+ //
+ // Any other error indicates that the package “exists” (at least in the
+ // sense that it cannot exist in any other module), but has some other
+ // problem (such as a syntax error).
+ return "", err
+ }
+ }
+
+ for _, mod := range MainModules.Versions() {
+ modRoot := MainModules.ModRoot(mod)
+ if modRoot != "" && absDir == modRoot {
+ if absDir == cfg.GOROOTsrc {
+ return "", errPkgIsGorootSrc
+ }
+ return MainModules.PathPrefix(mod), nil
+ }
+ }
+
+ // Note: The checks for @ here are just to avoid misinterpreting
+ // the module cache directories (formerly GOPATH/src/mod/foo@v1.5.2/bar).
+ // It's not strictly necessary but helpful to keep the checks.
+ var pkgNotFoundErr error
+ pkgNotFoundLongestPrefix := ""
+ for _, mainModule := range MainModules.Versions() {
+ modRoot := MainModules.ModRoot(mainModule)
+ if modRoot != "" && str.HasFilePathPrefix(absDir, modRoot) && !strings.Contains(absDir[len(modRoot):], "@") {
+ suffix := filepath.ToSlash(str.TrimFilePathPrefix(absDir, modRoot))
+ if pkg, found := strings.CutPrefix(suffix, "vendor/"); found {
+ if cfg.BuildMod != "vendor" {
+ return "", fmt.Errorf("without -mod=vendor, directory %s has no package path", absDir)
+ }
+
+ readVendorList(mainModule)
+ if _, ok := vendorPkgModule[pkg]; !ok {
+ return "", fmt.Errorf("directory %s is not a package listed in vendor/modules.txt", absDir)
+ }
+ return pkg, nil
+ }
+
+ mainModulePrefix := MainModules.PathPrefix(mainModule)
+ if mainModulePrefix == "" {
+ pkg := suffix
+ if pkg == "builtin" {
+ // "builtin" is a pseudo-package with a real source file.
+ // It's not included in "std", so it shouldn't resolve from "."
+ // within module "std" either.
+ return "", errPkgIsBuiltin
+ }
+ return pkg, nil
+ }
+
+ pkg := pathpkg.Join(mainModulePrefix, suffix)
+ if _, ok, err := dirInModule(pkg, mainModulePrefix, modRoot, true); err != nil {
+ return "", err
+ } else if !ok {
+ // This main module could contain the directory but doesn't. Other main
+ // modules might contain the directory, so wait until we finish the loop
+ // to see if another main module contains the directory. If none does,
+ // return an error.
+ if len(mainModulePrefix) > len(pkgNotFoundLongestPrefix) {
+ pkgNotFoundLongestPrefix = mainModulePrefix
+ pkgNotFoundErr = &PackageNotInModuleError{MainModules: []module.Version{mainModule}, Pattern: pkg}
+ }
+ continue
+ }
+ return pkg, nil
+ }
+ }
+ if pkgNotFoundErr != nil {
+ return "", pkgNotFoundErr
+ }
+
+ if sub := search.InDir(absDir, cfg.GOROOTsrc); sub != "" && sub != "." && !strings.Contains(sub, "@") {
+ pkg := filepath.ToSlash(sub)
+ if pkg == "builtin" {
+ return "", errPkgIsBuiltin
+ }
+ return pkg, nil
+ }
+
+ pkg := pathInModuleCache(ctx, absDir, rs)
+ if pkg == "" {
+ dirstr := fmt.Sprintf("directory %s", base.ShortPath(absDir))
+ if dirstr == "directory ." {
+ dirstr = "current directory"
+ }
+ if inWorkspaceMode() {
+ if mr := findModuleRoot(absDir); mr != "" {
+ return "", fmt.Errorf("%s is contained in a module that is not one of the workspace modules listed in go.work. You can add the module to the workspace using:\n\tgo work use %s", dirstr, base.ShortPath(mr))
+ }
+ return "", fmt.Errorf("%s outside modules listed in go.work or their selected dependencies", dirstr)
+ }
+ return "", fmt.Errorf("%s outside main module or its selected dependencies", dirstr)
+ }
+ return pkg, nil
+}
+
+var (
+ errDirectoryNotFound = errors.New("directory not found")
+ errPkgIsGorootSrc = errors.New("GOROOT/src is not an importable package")
+ errPkgIsBuiltin = errors.New(`"builtin" is a pseudo-package, not an importable package`)
+)
+
+// pathInModuleCache returns the import path of the directory dir,
+// if dir is in the module cache copy of a module in our build list.
+func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string {
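+ // tryMod reports the import path corresponding to dir within module m
+ // (using m's replacement or module cache directory as the root), if dir
+ // does in fact lie inside that module.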
+ tryMod := func(m module.Version) (string, bool) {
+ if gover.IsToolchain(m.Path) {
+ return "", false
+ }
+ var root string
+ var err error
+ if repl := Replacement(m); repl.Path != "" && repl.Version == "" {
+ root = repl.Path
+ if !filepath.IsAbs(root) {
+ root = filepath.Join(replaceRelativeTo(), root)
+ }
+ } else if repl.Path != "" {
+ root, err = modfetch.DownloadDir(ctx, repl)
+ } else {
+ root, err = modfetch.DownloadDir(ctx, m)
+ }
+ if err != nil {
+ return "", false
+ }
+
+ sub := search.InDir(dir, root)
+ if sub == "" {
+ return "", false
+ }
+ sub = filepath.ToSlash(sub)
+ if strings.Contains(sub, "/vendor/") || strings.HasPrefix(sub, "vendor/") || strings.Contains(sub, "@") {
+ return "", false
+ }
+
+ return path.Join(m.Path, filepath.ToSlash(sub)), true
+ }
+
+ if rs.pruning == pruned {
+ for _, m := range rs.rootModules {
+ if v, _ := rs.rootSelected(m.Path); v != m.Version {
+ continue // m is a root, but we have a higher root for the same path.
+ }
+ if importPath, ok := tryMod(m); ok {
+ // checkMultiplePaths ensures that a module can be used for at most one
+ // requirement, so this must be it.
+ return importPath
+ }
+ }
+ }
+
+ // None of the roots contained dir, or the graph is unpruned (so we don't want
+ // to distinguish between roots and transitive dependencies). Either way,
+ // check the full graph to see if the directory is a non-root dependency.
+ //
+ // If the roots are not consistent with the full module graph, the selected
+ // versions of root modules may differ from what we already checked above.
+ // Re-check those paths too.
+
+ mg, _ := rs.Graph(ctx)
+ var importPath string
+ for _, m := range mg.BuildList() {
+ var found bool
+ importPath, found = tryMod(m)
+ if found {
+ break
+ }
+ }
+ return importPath
+}
+
+// ImportFromFiles adds modules to the build list as needed
+// to satisfy the imports in the named Go source files.
+//
+// Errors in missing dependencies are silenced.
+//
+// TODO(bcmills): Silencing errors seems off. Take a closer look at this and
+// figure out what the error-reporting actually ought to be.
+func ImportFromFiles(ctx context.Context, gofiles []string) {
+ rs := LoadModFile(ctx)
+
+ tags := imports.Tags()
+ imports, testImports, err := imports.ScanFiles(gofiles, tags)
+ if err != nil {
+ base.Fatal(err)
+ }
+
+ loaded = loadFromRoots(ctx, loaderParams{
+ PackageOpts: PackageOpts{
+ Tags: tags,
+ ResolveMissingImports: true,
+ SilencePackageErrors: true,
+ },
+ requirements: rs,
+ listRoots: func(*Requirements) (roots []string) {
+ roots = append(roots, imports...)
+ roots = append(roots, testImports...)
+ return roots
+ },
+ })
+ requirements = loaded.requirements
+
+ if !ExplicitWriteGoMod {
+ if err := commitRequirements(ctx, WriteOpts{}); err != nil {
+ base.Fatal(err)
+ }
+ }
+}
+
+// DirImportPath returns the effective import path for dir,
+// provided it is within a main module, or else returns ".".
+func (mms *MainModuleSet) DirImportPath(ctx context.Context, dir string) (path string, m module.Version) {
+ if !HasModRoot() {
+ return ".", module.Version{}
+ }
+ LoadModFile(ctx) // Sets targetPrefix.
+
+ if !filepath.IsAbs(dir) {
+ dir = filepath.Join(base.Cwd(), dir)
+ } else {
+ dir = filepath.Clean(dir)
+ }
+
+ var longestPrefix string
+ var longestPrefixPath string
+ var longestPrefixVersion module.Version
+ for _, v := range mms.Versions() {
+ modRoot := mms.ModRoot(v)
+ if dir == modRoot {
+ return mms.PathPrefix(v), v
+ }
+ if str.HasFilePathPrefix(dir, modRoot) {
+ pathPrefix := MainModules.PathPrefix(v)
+ if pathPrefix > longestPrefix {
+ longestPrefix = pathPrefix
+ longestPrefixVersion = v
+ suffix := filepath.ToSlash(str.TrimFilePathPrefix(dir, modRoot))
+ if strings.HasPrefix(suffix, "vendor/") {
+ longestPrefixPath = strings.TrimPrefix(suffix, "vendor/")
+ continue
+ }
+ longestPrefixPath = pathpkg.Join(mms.PathPrefix(v), suffix)
+ }
+ }
+ }
+ if len(longestPrefix) > 0 {
+ return longestPrefixPath, longestPrefixVersion
+ }
+
+ return ".", module.Version{}
+}
+
+// PackageModule returns the module providing the package named by the import path.
+func PackageModule(path string) module.Version {
+ pkg, ok := loaded.pkgCache.Get(path)
+ if !ok {
+ return module.Version{}
+ }
+ return pkg.mod
+}
+
+// Lookup returns the source directory, import path, and any loading error for
+// the package at path as imported from the package in parentDir.
+// Lookup requires that one of the Load functions in this package has already
+// been called.
+func Lookup(parentPath string, parentIsStd bool, path string) (dir, realPath string, err error) {
+ if path == "" {
+ panic("Lookup called with empty package path")
+ }
+
+ if parentIsStd {
+ path = loaded.stdVendor(parentPath, path)
+ }
+ pkg, ok := loaded.pkgCache.Get(path)
+ if !ok {
+ // The loader should have found all the relevant paths.
+ // There are a few exceptions, though:
+ // - during go list without -test, the p.Resolve calls to process p.TestImports and p.XTestImports
+ // end up here to canonicalize the import paths.
+ // - during any load, non-loaded packages like "unsafe" end up here.
+ // - during any load, build-injected dependencies like "runtime/cgo" end up here.
+ // - because we ignore appengine/* in the module loader,
+ // the dependencies of any actual appengine/* library end up here.
+ dir := findStandardImportPath(path)
+ if dir != "" {
+ return dir, path, nil
+ }
+ return "", "", errMissing
+ }
+ return pkg.dir, pkg.path, pkg.err
+}
+
+// A loader manages the process of loading information about
+// the required packages for a particular build,
+// checking that the packages are available in the module set,
+// and updating the module set if needed.
+type loader struct {
+ loaderParams
+
+ // allClosesOverTests indicates whether the "all" pattern includes
+ // dependencies of tests outside the main module (as in Go 1.11–1.15).
+ // (Otherwise — as in Go 1.16+ — the "all" pattern includes only the packages
+ // transitively *imported by* the packages and tests in the main module.)
+ allClosesOverTests bool
+
+ // skipImportModFiles indicates whether we may skip loading go.mod files
+ // for imported packages (as in 'go mod tidy' in Go 1.17–1.20).
+ skipImportModFiles bool
+
+ work *par.Queue
+
+ // reset on each iteration
+ roots []*loadPkg
+ pkgCache *par.Cache[string, *loadPkg]
+ pkgs []*loadPkg // transitive closure of loaded packages and tests; populated in buildStacks
+}
+
+// loaderParams configure the packages loaded by, and the properties reported
+// by, a loader instance.
+type loaderParams struct {
+ PackageOpts
+ requirements *Requirements
+
+ allPatternIsRoot bool // Is the "all" pattern an additional root?
+
+ listRoots func(rs *Requirements) []string
+}
+
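+// reset clears the loader state that is rebuilt on each iteration (the roots,
+// the package cache, and the flattened package list). It panics if the work
+// queue is still active.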
+func (ld *loader) reset() {
+ select {
+ case <-ld.work.Idle():
+ default:
+ panic("loader.reset when not idle")
+ }
+
+ ld.roots = nil
+ ld.pkgCache = new(par.Cache[string, *loadPkg])
+ ld.pkgs = nil
+}
+
+// error reports an error: via os.Stderr if ld.AllowErrors is set, via the
+// Switcher if one is configured, or via base.Error otherwise.
+func (ld *loader) error(err error) {
+ if ld.AllowErrors {
+ fmt.Fprintf(os.Stderr, "go: %v\n", err)
+ } else if ld.Switcher != nil {
+ ld.Switcher.Error(err)
+ } else {
+ base.Error(err)
+ }
+}
+
+// switchIfErrors switches toolchains if a switch is needed.
+func (ld *loader) switchIfErrors(ctx context.Context) {
+ if ld.Switcher != nil {
+ ld.Switcher.Switch(ctx)
+ }
+}
+
+// exitIfErrors switches toolchains if a switch is needed
+// or else exits if any errors have been reported.
+func (ld *loader) exitIfErrors(ctx context.Context) {
+ ld.switchIfErrors(ctx)
+ base.ExitIfErrors()
+}
+
+// goVersion reports the Go version that should be used for the loader's
+// requirements: ld.TidyGoVersion if set, or ld.requirements.GoVersion()
+// otherwise.
+func (ld *loader) goVersion() string {
+ if ld.TidyGoVersion != "" {
+ return ld.TidyGoVersion
+ }
+ return ld.requirements.GoVersion()
+}
+
+// A loadPkg records information about a single loaded package.
+type loadPkg struct {
+ // Populated at construction time:
+ path string // import path
+ testOf *loadPkg
+
+ // Populated at construction time and updated by (*loader).applyPkgFlags:
+ flags atomicLoadPkgFlags
+
+ // Populated by (*loader).load:
+ mod module.Version // module providing package
+ dir string // directory containing source code
+ err error // error loading package
+ imports []*loadPkg // packages imported by this one
+ testImports []string // test-only imports, saved for use by pkg.test.
+ inStd bool
+ altMods []module.Version // modules that could have contained the package but did not
+
+ // Populated by (*loader).pkgTest:
+ testOnce sync.Once
+ test *loadPkg
+
+ // Populated by postprocessing in (*loader).buildStacks:
+ stack *loadPkg // package importing this one in minimal import stack for this pkg
+}
+
+// loadPkgFlags is a set of flags tracking metadata about a package.
+type loadPkgFlags int8
+
+const (
+ // pkgInAll indicates that the package is in the "all" package pattern,
+ // regardless of whether we are loading the "all" package pattern.
+ //
+ // When the pkgInAll flag and pkgImportsLoaded flags are both set, the caller
+ // who set the last of those flags must propagate the pkgInAll marking to all
+ // of the imports of the marked package.
+ //
+ // A test is marked with pkgInAll if that test would promote the packages it
+ // imports to be in "all" (such as when the test is itself within the main
+ // module, or when ld.allClosesOverTests is true).
+ pkgInAll loadPkgFlags = 1 << iota
+
+ // pkgIsRoot indicates that the package matches one of the root package
+ // patterns requested by the caller.
+ //
+ // If LoadTests is set, then when pkgIsRoot and pkgImportsLoaded are both set,
+ // the caller who set the last of those flags must populate a test for the
+ // package (in the pkg.test field).
+ //
+ // If the "all" pattern is included as a root, then non-test packages in "all"
+ // are also roots (and must be marked pkgIsRoot).
+ pkgIsRoot
+
+ // pkgFromRoot indicates that the package is in the transitive closure of
+ // imports starting at the roots. (Note that every package marked as pkgIsRoot
+ // is also trivially marked pkgFromRoot.)
+ pkgFromRoot
+
+ // pkgImportsLoaded indicates that the imports and testImports fields of a
+ // loadPkg have been populated.
+ pkgImportsLoaded
+)
+
+// has reports whether all of the flags in cond are set in f.
+func (f loadPkgFlags) has(cond loadPkgFlags) bool {
+ return f&cond == cond
+}
+
+// An atomicLoadPkgFlags stores a loadPkgFlags for which individual flags can be
+// added atomically.
+type atomicLoadPkgFlags struct {
+ bits atomic.Int32
+}
+
+// update sets the given flags in af (in addition to any flags already set).
+//
+// update returns the previous flag state so that the caller may determine which
+// flags were newly-set.
+func (af *atomicLoadPkgFlags) update(flags loadPkgFlags) (old loadPkgFlags) {
+ for {
+ old := af.bits.Load()
+ new := old | int32(flags)
+ if new == old || af.bits.CompareAndSwap(old, new) {
+ return loadPkgFlags(old)
+ }
+ }
+}
+
+// has reports whether all of the flags in cond are set in af.
+func (af *atomicLoadPkgFlags) has(cond loadPkgFlags) bool {
+ return loadPkgFlags(af.bits.Load())&cond == cond
+}
+
+// isTest reports whether pkg is a test of another package.
+func (pkg *loadPkg) isTest() bool {
+ return pkg.testOf != nil
+}
+
+// fromExternalModule reports whether pkg was loaded from a module other than
+// the main module.
+func (pkg *loadPkg) fromExternalModule() bool {
+ if pkg.mod.Path == "" {
+ return false // loaded from the standard library, not a module
+ }
+ return !MainModules.Contains(pkg.mod.Path)
+}
+
+var errMissing = errors.New("cannot find package")
+
+// loadFromRoots attempts to load the build graph needed to process a set of
+// root packages and their dependencies.
+//
+// The set of root packages is returned by the params.listRoots function, and
+// expanded to the full set of packages by tracing imports (and possibly tests)
+// as needed.
+func loadFromRoots(ctx context.Context, params loaderParams) *loader {
+ ld := &loader{
+ loaderParams: params,
+ work: par.NewQueue(runtime.GOMAXPROCS(0)),
+ }
+
+ if ld.requirements.pruning == unpruned {
+ // If the module graph does not support pruning, we assume that we will need
+ // the full module graph in order to load package dependencies.
+ //
+ // This might not be strictly necessary, but it matches the historical
+ // behavior of the 'go' command and keeps the go.mod file more consistent in
+ // case of erroneous hand-edits — which are less likely to be detected by
+ // spot-checks in modules that do not maintain the expanded go.mod
+ // requirements needed for graph pruning.
+ var err error
+ ld.requirements, _, err = expandGraph(ctx, ld.requirements)
+ if err != nil {
+ ld.error(err)
+ }
+ }
+ ld.exitIfErrors(ctx)
+
+ updateGoVersion := func() {
+ goVersion := ld.goVersion()
+
+ if ld.requirements.pruning != workspace {
+ var err error
+ ld.requirements, err = convertPruning(ctx, ld.requirements, pruningForGoVersion(goVersion))
+ if err != nil {
+ ld.error(err)
+ ld.exitIfErrors(ctx)
+ }
+ }
+
+ // If the module's Go version omits go.sum entries for go.mod files for test
+ // dependencies of external packages, avoid loading those files in the first
+ // place.
+ ld.skipImportModFiles = ld.Tidy && gover.Compare(goVersion, gover.TidyGoModSumVersion) < 0
+
+ // If the module's go version explicitly predates the change in "all" for
+ // graph pruning, continue to use the older interpretation.
+ ld.allClosesOverTests = gover.Compare(goVersion, gover.NarrowAllVersion) < 0 && !ld.UseVendorAll
+ }
+
+ for {
+ ld.reset()
+ updateGoVersion()
+
+ // Load the root packages and their imports.
+ // Note: the returned roots can change on each iteration,
+ // since the expansion of package patterns depends on the
+ // build list we're using.
+ rootPkgs := ld.listRoots(ld.requirements)
+
+ if ld.requirements.pruning == pruned && cfg.BuildMod == "mod" {
+ // Before we start loading transitive imports of packages, locate all of
+ // the root packages and promote their containing modules to root modules
+ // dependencies. If their go.mod files are tidy (the common case) and the
+ // set of root packages does not change then we can select the correct
+ // versions of all transitive imports on the first try and complete
+ // loading in a single iteration.
+ changedBuildList := ld.preloadRootModules(ctx, rootPkgs)
+ if changedBuildList {
+ // The build list has changed, so the set of root packages may have also
+ // changed. Start over to pick up the changes. (Preloading roots is much
+ // cheaper than loading the full import graph, so we would rather pay
+ // for an extra iteration of preloading than potentially end up
+ // discarding the result of a full iteration of loading.)
+ continue
+ }
+ }
+
+ inRoots := map[*loadPkg]bool{}
+ for _, path := range rootPkgs {
+ root := ld.pkg(ctx, path, pkgIsRoot)
+ if !inRoots[root] {
+ ld.roots = append(ld.roots, root)
+ inRoots[root] = true
+ }
+ }
+
+ // ld.pkg adds imported packages to the work queue and calls applyPkgFlags,
+ // which adds tests (and test dependencies) as needed.
+ //
+ // When all of the work in the queue has completed, we'll know that the
+ // transitive closure of dependencies has been loaded.
+ <-ld.work.Idle()
+
+ ld.buildStacks()
+
+ changed, err := ld.updateRequirements(ctx)
+ if err != nil {
+ ld.error(err)
+ break
+ }
+ if changed {
+ // Don't resolve missing imports until the module graph has stabilized.
+ // If the roots are still changing, they may turn out to specify a
+ // requirement on the missing package(s), and we would rather use a
+ // version specified by a new root than add a new dependency on an
+ // unrelated version.
+ continue
+ }
+
+ if !ld.ResolveMissingImports || (!HasModRoot() && !allowMissingModuleImports) {
+ // We've loaded as much as we can without resolving missing imports.
+ break
+ }
+
+ modAddedBy, err := ld.resolveMissingImports(ctx)
+ if err != nil {
+ ld.error(err)
+ break
+ }
+ if len(modAddedBy) == 0 {
+ // The roots are stable, and we've resolved all of the missing packages
+ // that we can.
+ break
+ }
+
+ toAdd := make([]module.Version, 0, len(modAddedBy))
+ for m := range modAddedBy {
+ toAdd = append(toAdd, m)
+ }
+ gover.ModSort(toAdd) // to make errors deterministic
+
+ // We ran updateRequirements before resolving missing imports and it didn't
+ // make any changes, so we know that the requirement graph is already
+ // consistent with ld.pkgs: we don't need to pass ld.pkgs to updateRoots
+ // again. (That would waste time looking for changes that we have already
+ // applied.)
+ var noPkgs []*loadPkg
+ // We also know that we're going to call updateRequirements again next
+ // iteration so we don't need to also update it here. (That would waste time
+ // computing a "direct" map that we'll have to recompute later anyway.)
+ direct := ld.requirements.direct
+ rs, err := updateRoots(ctx, direct, ld.requirements, noPkgs, toAdd, ld.AssumeRootsImported)
+ if err != nil {
+ // If an error was found in a newly added module, report the package
+ // import stack instead of the module requirement stack. Packages
+ // are more descriptive.
+ if err, ok := err.(*mvs.BuildListError); ok {
+ if pkg := modAddedBy[err.Module()]; pkg != nil {
+ ld.error(fmt.Errorf("%s: %w", pkg.stackText(), err.Err))
+ break
+ }
+ }
+ ld.error(err)
+ break
+ }
+ if reflect.DeepEqual(rs.rootModules, ld.requirements.rootModules) {
+ // Something is deeply wrong. resolveMissingImports gave us a non-empty
+ // set of modules to add to the graph, but adding those modules had no
+ // effect — either they were already in the graph, or updateRoots did not
+ // add them as requested.
+ panic(fmt.Sprintf("internal error: adding %v to module graph had no effect on root requirements (%v)", toAdd, rs.rootModules))
+ }
+ ld.requirements = rs
+ }
+ ld.exitIfErrors(ctx)
+
+ // Tidy the build list, if applicable, before we report errors.
+ // (The process of tidying may remove errors from irrelevant dependencies.)
+ if ld.Tidy {
+ rs, err := tidyRoots(ctx, ld.requirements, ld.pkgs)
+ if err != nil {
+ ld.error(err)
+ } else {
+ if ld.TidyGoVersion != "" {
+ // Attempt to switch to the requested Go version. We have been using its
+ // pruning and semantics all along, but there may have been — and may
+ // still be — requirements on higher versions in the graph.
+ tidy := overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: ld.TidyGoVersion}})
+ mg, err := tidy.Graph(ctx)
+ if err != nil {
+ ld.error(err)
+ }
+ if v := mg.Selected("go"); v == ld.TidyGoVersion {
+ rs = tidy
+ } else {
+ conflict := Conflict{
+ Path: mg.g.FindPath(func(m module.Version) bool {
+ return m.Path == "go" && m.Version == v
+ })[1:],
+ Constraint: module.Version{Path: "go", Version: ld.TidyGoVersion},
+ }
+ msg := conflict.Summary()
+ if cfg.BuildV {
+ msg = conflict.String()
+ }
+ ld.error(errors.New(msg))
+ }
+ }
+
+ if ld.requirements.pruning == pruned {
+ // We continuously add tidy roots to ld.requirements during loading, so
+ // at this point the tidy roots (other than possibly the "go" version
+ // edited above) should be a subset of the roots of ld.requirements,
+ // ensuring that no new dependencies are brought inside the
+ // graph-pruning horizon.
+ // If that is not the case, there is a bug in the loading loop above.
+ for _, m := range rs.rootModules {
+ if m.Path == "go" && ld.TidyGoVersion != "" {
+ continue
+ }
+ if v, ok := ld.requirements.rootSelected(m.Path); !ok || v != m.Version {
+ ld.error(fmt.Errorf("internal error: a requirement on %v is needed but was not added during package loading (selected %s)", m, v))
+ }
+ }
+ }
+
+ ld.requirements = rs
+ }
+
+ ld.exitIfErrors(ctx)
+ }
+
+ // Report errors, if any.
+ for _, pkg := range ld.pkgs {
+ if pkg.err == nil {
+ continue
+ }
+
+ // Add importer information to checksum errors.
+ if sumErr := (*ImportMissingSumError)(nil); errors.As(pkg.err, &sumErr) {
+ if importer := pkg.stack; importer != nil {
+ sumErr.importer = importer.path
+ sumErr.importerVersion = importer.mod.Version
+ sumErr.importerIsTest = importer.testOf != nil
+ }
+ }
+
+ if stdErr := (*ImportMissingError)(nil); errors.As(pkg.err, &stdErr) && stdErr.isStd {
+ // Add importer go version information to import errors of standard
+ // library packages arising from newer releases.
+ if importer := pkg.stack; importer != nil {
+ if v, ok := rawGoVersion.Load(importer.mod); ok && gover.Compare(gover.Local(), v.(string)) < 0 {
+ stdErr.importerGoVersion = v.(string)
+ }
+ }
+ if ld.SilenceMissingStdImports {
+ continue
+ }
+ }
+ if ld.SilencePackageErrors {
+ continue
+ }
+ if ld.SilenceNoGoErrors && errors.Is(pkg.err, imports.ErrNoGo) {
+ continue
+ }
+
+ ld.error(fmt.Errorf("%s: %w", pkg.stackText(), pkg.err))
+ }
+
+ ld.checkMultiplePaths()
+ return ld
+}
+
+// updateRequirements ensures that ld.requirements is consistent with the
+// information gained from ld.pkgs.
+//
+// In particular:
+//
+// - Modules that provide packages directly imported from the main module are
+// marked as direct, and are promoted to explicit roots. If a needed root
+// cannot be promoted due to -mod=readonly or -mod=vendor, the importing
+// package is marked with an error.
+//
+// - If ld scanned the "all" pattern independent of build constraints, it is
+// guaranteed to have seen every direct import. Module dependencies that did
+// not provide any directly-imported package are then marked as indirect.
+//
+// - Root dependencies are updated to their selected versions.
+//
+// The "changed" return value reports whether the update changed the selected
+// version of any module that either provided a loaded package or may now
+// provide a package that was previously unresolved.
+func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err error) {
+ rs := ld.requirements
+
+ // direct contains the set of modules believed to provide packages directly
+ // imported by the main module.
+ var direct map[string]bool
+
+ // If we didn't scan all of the imports from the main module, or didn't use
+ // imports.AnyTags, then we didn't necessarily load every package that
+ // contributes “direct” imports — so we can't safely mark existing direct
+ // dependencies in ld.requirements as indirect-only. Propagate them as direct.
+ loadedDirect := ld.allPatternIsRoot && reflect.DeepEqual(ld.Tags, imports.AnyTags())
+ if loadedDirect {
+ direct = make(map[string]bool)
+ } else {
+ // TODO(bcmills): It seems like a shame to allocate and copy a map here when
+ // it will only rarely actually vary from rs.direct. Measure this cost and
+ // maybe avoid the copy.
+ direct = make(map[string]bool, len(rs.direct))
+ for mPath := range rs.direct {
+ direct[mPath] = true
+ }
+ }
+
+ var maxTooNew *gover.TooNewError
+ for _, pkg := range ld.pkgs {
+ if pkg.err != nil {
+ if tooNew := (*gover.TooNewError)(nil); errors.As(pkg.err, &tooNew) {
+ if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 {
+ maxTooNew = tooNew
+ }
+ }
+ }
+ if pkg.mod.Version != "" || !MainModules.Contains(pkg.mod.Path) {
+ continue
+ }
+
+ for _, dep := range pkg.imports {
+ if !dep.fromExternalModule() {
+ continue
+ }
+
+ if inWorkspaceMode() {
+ // In workspace mode / workspace pruning mode, the roots are the main modules
+ // rather than the main module's direct dependencies. The check below on the selected
+ // roots does not apply.
+ if mg, err := rs.Graph(ctx); err != nil {
+ return false, err
+ } else if _, ok := mg.RequiredBy(dep.mod); !ok {
+ // dep.mod is not an explicit dependency, but needs to be.
+ // See comment on error returned below.
+ pkg.err = &DirectImportFromImplicitDependencyError{
+ ImporterPath: pkg.path,
+ ImportedPath: dep.path,
+ Module: dep.mod,
+ }
+ }
+ continue
+ }
+
+ if pkg.err == nil && cfg.BuildMod != "mod" {
+ if v, ok := rs.rootSelected(dep.mod.Path); !ok || v != dep.mod.Version {
+ // dep.mod is not an explicit dependency, but needs to be.
+ // Because we are not in "mod" mode, we will not be able to update it.
+ // Instead, mark the importing package with an error.
+ //
+ // TODO(#41688): The resulting error message fails to include the file
+ // position of the import statement (because that information is not
+ // tracked by the module loader). Figure out how to plumb the import
+ // position through.
+ pkg.err = &DirectImportFromImplicitDependencyError{
+ ImporterPath: pkg.path,
+ ImportedPath: dep.path,
+ Module: dep.mod,
+ }
+ // cfg.BuildMod does not allow us to change dep.mod to be a direct
+ // dependency, so don't mark it as such.
+ continue
+ }
+ }
+
+ // dep is a package directly imported by a package or test in the main
+ // module and loaded from some other module (not the standard library).
+ // Mark its module as a direct dependency.
+ direct[dep.mod.Path] = true
+ }
+ }
+ if maxTooNew != nil {
+ return false, maxTooNew
+ }
+
+ var addRoots []module.Version
+ if ld.Tidy {
+ // When we are tidying a module with a pruned dependency graph, we may need
+ // to add roots to preserve the versions of indirect, test-only dependencies
+ // that are upgraded above or otherwise missing from the go.mod files of
+ // direct dependencies. (For example, the direct dependency might be a very
+ // stable codebase that predates modules and thus lacks a go.mod file, or
+ // the author of the direct dependency may have forgotten to commit a change
+ // to the go.mod file, or may have made an erroneous hand-edit that causes
+ // it to be untidy.)
+ //
+ // Promoting an indirect dependency to a root adds the next layer of its
+ // dependencies to the module graph, which may increase the selected
+ // versions of other modules from which we have already loaded packages.
+ // So after we promote an indirect dependency to a root, we need to reload
+ // packages, which means another iteration of loading.
+ //
+ // As an extra wrinkle, the upgrades due to promoting a root can cause
+ // previously-resolved packages to become unresolved. For example, the
+ // module providing an unstable package might be upgraded to a version
+ // that no longer contains that package. If we then resolve the missing
+ // package, we might add yet another root that upgrades away some other
+ // dependency. (The tests in mod_tidy_convergence*.txt illustrate some
+ // particularly worrisome cases.)
+ //
+ // To ensure that this process of promoting, adding, and upgrading roots
+ // eventually terminates, during iteration we only ever add modules to the
+ // root set — we only remove irrelevant roots at the very end of
+ // iteration, after we have already added every root that we plan to need
+ // in the (eventual) tidy root set.
+ //
+ // Since we do not remove any roots during iteration, even if they no
+ // longer provide any imported packages, the selected versions of the
+ // roots can only increase and the set of roots can only expand. The set
+ // of extant root paths is finite and the set of versions of each path is
+ // finite, so the iteration *must* reach a stable fixed-point.
+ tidy, err := tidyRoots(ctx, rs, ld.pkgs)
+ if err != nil {
+ return false, err
+ }
+ addRoots = tidy.rootModules
+ }
+
+ rs, err = updateRoots(ctx, direct, rs, ld.pkgs, addRoots, ld.AssumeRootsImported)
+ if err != nil {
+		// We don't even know what the root requirements are supposed to be,
+		// so we can't proceed with loading. Return the error to the caller.
+ return false, err
+ }
+
+ if rs.GoVersion() != ld.requirements.GoVersion() {
+ // A change in the selected Go version may or may not affect the set of
+ // loaded packages, but in some cases it can change the meaning of the "all"
+ // pattern, the level of pruning in the module graph, and even the set of
+ // packages present in the standard library. If it has changed, it's best to
+ // reload packages once more to be sure everything is stable.
+ changed = true
+ } else if rs != ld.requirements && !reflect.DeepEqual(rs.rootModules, ld.requirements.rootModules) {
+ // The roots of the module graph have changed in some way (not just the
+ // "direct" markings). Check whether the changes affected any of the loaded
+ // packages.
+ mg, err := rs.Graph(ctx)
+ if err != nil {
+ return false, err
+ }
+ for _, pkg := range ld.pkgs {
+ if pkg.fromExternalModule() && mg.Selected(pkg.mod.Path) != pkg.mod.Version {
+ changed = true
+ break
+ }
+ if pkg.err != nil {
+ // Promoting a module to a root may resolve an import that was
+				// previously missing (by pulling in a previously-pruned dependency that
+ // provides it) or ambiguous (by promoting exactly one of the
+ // alternatives to a root and ignoring the second-level alternatives) or
+ // otherwise errored out (by upgrading from a version that cannot be
+ // fetched to one that can be).
+ //
+ // Instead of enumerating all of the possible errors, we'll just check
+ // whether importFromModules returns nil for the package.
+ // False-positives are ok: if we have a false-positive here, we'll do an
+ // extra iteration of package loading this time, but we'll still
+ // converge when the root set stops changing.
+ //
+				// In some sense, we can think of this as ‘upgrading the module providing
+ // pkg.path from "none" to a version higher than "none"’.
+ if _, _, _, _, err = importFromModules(ctx, pkg.path, rs, nil, ld.skipImportModFiles); err == nil {
+ changed = true
+ break
+ }
+ }
+ }
+ }
+
+ ld.requirements = rs
+ return changed, nil
+}
+
+// resolveMissingImports returns a set of modules that could be added as
+// dependencies in order to resolve missing packages from pkgs.
+//
+// resolveMissingImports returns a map from each such module version to the
+// first missing package that the module would resolve.
+func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[module.Version]*loadPkg, err error) {
+ type pkgMod struct {
+ pkg *loadPkg
+ mod *module.Version
+ }
+ var pkgMods []pkgMod
+ for _, pkg := range ld.pkgs {
+ if pkg.err == nil {
+ continue
+ }
+ if pkg.isTest() {
+ // If we are missing a test, we are also missing its non-test version, and
+ // we should only add the missing import once.
+ continue
+ }
+ if !errors.As(pkg.err, new(*ImportMissingError)) {
+ // Leave other errors for Import or load.Packages to report.
+ continue
+ }
+
+ pkg := pkg
+ var mod module.Version
+ ld.work.Add(func() {
+ var err error
+ mod, err = queryImport(ctx, pkg.path, ld.requirements)
+ if err != nil {
+ var ime *ImportMissingError
+ if errors.As(err, &ime) {
+ for curstack := pkg.stack; curstack != nil; curstack = curstack.stack {
+ if MainModules.Contains(curstack.mod.Path) {
+ ime.ImportingMainModule = curstack.mod
+ break
+ }
+ }
+ }
+ // pkg.err was already non-nil, so we can reasonably attribute the error
+ // for pkg to either the original error or the one returned by
+ // queryImport. The existing error indicates only that we couldn't find
+ // the package, whereas the query error also explains why we didn't fix
+ // the problem — so we prefer the latter.
+ pkg.err = err
+ }
+
+			// Even if err is nil, we intentionally leave pkg.err non-nil and pkg.mod
+ // unset: we still haven't satisfied other invariants of a
+ // successfully-loaded package, such as scanning and loading the imports
+ // of that package. If we succeed in resolving the new dependency graph,
+ // the caller can reload pkg and update the error at that point.
+ //
+ // Even then, the package might not be loaded from the version we've
+ // identified here. The module may be upgraded by some other dependency,
+ // or by a transitive dependency of mod itself, or — less likely — the
+ // package may be rejected by an AllowPackage hook or rendered ambiguous
+ // by some other newly-added or newly-upgraded dependency.
+ })
+
+ pkgMods = append(pkgMods, pkgMod{pkg: pkg, mod: &mod})
+ }
+ <-ld.work.Idle()
+
+ modAddedBy = map[module.Version]*loadPkg{}
+
+ var (
+ maxTooNew *gover.TooNewError
+ maxTooNewPkg *loadPkg
+ )
+ for _, pm := range pkgMods {
+ if tooNew := (*gover.TooNewError)(nil); errors.As(pm.pkg.err, &tooNew) {
+ if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 {
+ maxTooNew = tooNew
+ maxTooNewPkg = pm.pkg
+ }
+ }
+ }
+ if maxTooNew != nil {
+ fmt.Fprintf(os.Stderr, "go: toolchain upgrade needed to resolve %s\n", maxTooNewPkg.path)
+ return nil, maxTooNew
+ }
+
+ for _, pm := range pkgMods {
+ pkg, mod := pm.pkg, *pm.mod
+ if mod.Path == "" {
+ continue
+ }
+
+ fmt.Fprintf(os.Stderr, "go: found %s in %s %s\n", pkg.path, mod.Path, mod.Version)
+ if modAddedBy[mod] == nil {
+ modAddedBy[mod] = pkg
+ }
+ }
+
+ return modAddedBy, nil
+}
+
+// pkg locates the *loadPkg for path, creating and queuing it for loading if
+// needed, and updates its state to reflect the given flags.
+//
+// The imports of the returned *loadPkg will be loaded asynchronously in the
+// ld.work queue, and its test (if requested) will also be populated once
+// imports have been resolved. When ld.work goes idle, all transitive imports of
+// the requested package (and its test, if requested) will have been loaded.
+func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loadPkg {
+ if flags.has(pkgImportsLoaded) {
+ panic("internal error: (*loader).pkg called with pkgImportsLoaded flag set")
+ }
+
+ pkg := ld.pkgCache.Do(path, func() *loadPkg {
+ pkg := &loadPkg{
+ path: path,
+ }
+ ld.applyPkgFlags(ctx, pkg, flags)
+
+ ld.work.Add(func() { ld.load(ctx, pkg) })
+ return pkg
+ })
+
+ ld.applyPkgFlags(ctx, pkg, flags)
+ return pkg
+}
+
+// applyPkgFlags updates pkg.flags to set the given flags and propagate the
+// (transitive) effects of those flags, possibly loading or enqueueing further
+// packages as a result.
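+//
+// For example (an illustrative sketch of the propagation performed below, not
+// additional behavior): once a package's imports have been loaded, marking the
+// package with pkgInAll causes each of its imports to be marked with pkgInAll
+// as well:
+//
+//	ld.applyPkgFlags(ctx, pkg, pkgInAll) // pkg.imports gain pkgInAll too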
+func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkgFlags) {
+ if flags == 0 {
+ return
+ }
+
+ if flags.has(pkgInAll) && ld.allPatternIsRoot && !pkg.isTest() {
+ // This package matches a root pattern by virtue of being in "all".
+ flags |= pkgIsRoot
+ }
+ if flags.has(pkgIsRoot) {
+ flags |= pkgFromRoot
+ }
+
+ old := pkg.flags.update(flags)
+ new := old | flags
+ if new == old || !new.has(pkgImportsLoaded) {
+ // We either didn't change the state of pkg, or we don't know anything about
+ // its dependencies yet. Either way, we can't usefully load its test or
+ // update its dependencies.
+ return
+ }
+
+ if !pkg.isTest() {
+ // Check whether we should add (or update the flags for) a test for pkg.
+ // ld.pkgTest is idempotent and extra invocations are inexpensive,
+ // so it's ok if we call it more than is strictly necessary.
+ wantTest := false
+ switch {
+ case ld.allPatternIsRoot && MainModules.Contains(pkg.mod.Path):
+ // We are loading the "all" pattern, which includes packages imported by
+ // tests in the main module. This package is in the main module, so we
+ // need to identify the imports of its test even if LoadTests is not set.
+ //
+ // (We will filter out the extra tests explicitly in computePatternAll.)
+ wantTest = true
+
+ case ld.allPatternIsRoot && ld.allClosesOverTests && new.has(pkgInAll):
+ // This variant of the "all" pattern includes imports of tests of every
+ // package that is itself in "all", and pkg is in "all", so its test is
+ // also in "all" (as above).
+ wantTest = true
+
+ case ld.LoadTests && new.has(pkgIsRoot):
+			// LoadTests explicitly requests tests of “the root packages”.
+ wantTest = true
+ }
+
+ if wantTest {
+ var testFlags loadPkgFlags
+ if MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) {
+ // Tests of packages in the main module are in "all", in the sense that
+ // they cause the packages they import to also be in "all". So are tests
+ // of packages in "all" if "all" closes over test dependencies.
+ testFlags |= pkgInAll
+ }
+ ld.pkgTest(ctx, pkg, testFlags)
+ }
+ }
+
+ if new.has(pkgInAll) && !old.has(pkgInAll|pkgImportsLoaded) {
+ // We have just marked pkg with pkgInAll, or we have just loaded its
+ // imports, or both. Now is the time to propagate pkgInAll to the imports.
+ for _, dep := range pkg.imports {
+ ld.applyPkgFlags(ctx, dep, pkgInAll)
+ }
+ }
+
+ if new.has(pkgFromRoot) && !old.has(pkgFromRoot|pkgImportsLoaded) {
+ for _, dep := range pkg.imports {
+ ld.applyPkgFlags(ctx, dep, pkgFromRoot)
+ }
+ }
+}
+
+// preloadRootModules loads the module requirements needed to identify the
+// selected version of each module providing a package in rootPkgs,
+// adding new root modules to the module graph if needed.
+func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (changedBuildList bool) {
+ needc := make(chan map[module.Version]bool, 1)
+ needc <- map[module.Version]bool{}
+ for _, path := range rootPkgs {
+ path := path
+ ld.work.Add(func() {
+ // First, try to identify the module containing the package using only roots.
+ //
+ // If the main module is tidy and the package is in "all" — or if we're
+ // lucky — we can identify all of its imports without actually loading the
+ // full module graph.
+ m, _, _, _, err := importFromModules(ctx, path, ld.requirements, nil, ld.skipImportModFiles)
+ if err != nil {
+ var missing *ImportMissingError
+ if errors.As(err, &missing) && ld.ResolveMissingImports {
+ // This package isn't provided by any selected module.
+ // If we can find it, it will be a new root dependency.
+ m, err = queryImport(ctx, path, ld.requirements)
+ }
+ if err != nil {
+ // We couldn't identify the root module containing this package.
+ // Leave it unresolved; we will report it during loading.
+ return
+ }
+ }
+ if m.Path == "" {
+ // The package is in std or cmd. We don't need to change the root set.
+ return
+ }
+
+ v, ok := ld.requirements.rootSelected(m.Path)
+ if !ok || v != m.Version {
+ // We found the requested package in m, but m is not a root, so
+ // loadModGraph will not load its requirements. We need to promote the
+ // module to a root to ensure that any other packages this package
+ // imports are resolved from correct dependency versions.
+ //
+ // (This is the “argument invariant” from
+ // https://golang.org/design/36460-lazy-module-loading.)
+ need := <-needc
+ need[m] = true
+ needc <- need
+ }
+ })
+ }
+ <-ld.work.Idle()
+
+ need := <-needc
+ if len(need) == 0 {
+ return false // No roots to add.
+ }
+
+ toAdd := make([]module.Version, 0, len(need))
+ for m := range need {
+ toAdd = append(toAdd, m)
+ }
+ gover.ModSort(toAdd)
+
+ rs, err := updateRoots(ctx, ld.requirements.direct, ld.requirements, nil, toAdd, ld.AssumeRootsImported)
+ if err != nil {
+ // We are missing some root dependency, and for some reason we can't load
+ // enough of the module dependency graph to add the missing root. Package
+ // loading is doomed to fail, so fail quickly.
+ ld.error(err)
+ ld.exitIfErrors(ctx)
+ return false
+ }
+ if reflect.DeepEqual(rs.rootModules, ld.requirements.rootModules) {
+		// Something is deeply wrong. The loop above gave us a non-empty set of
+		// modules to add to the graph, but adding those modules had no effect —
+		// either they were already in the graph, or updateRoots did not add them
+		// as requested.
+ panic(fmt.Sprintf("internal error: adding %v to module graph had no effect on root requirements (%v)", toAdd, rs.rootModules))
+ }
+
+ ld.requirements = rs
+ return true
+}
+
+// load loads an individual package.
+func (ld *loader) load(ctx context.Context, pkg *loadPkg) {
+ var mg *ModuleGraph
+ if ld.requirements.pruning == unpruned {
+ var err error
+ mg, err = ld.requirements.Graph(ctx)
+ if err != nil {
+ // We already checked the error from Graph in loadFromRoots and/or
+ // updateRequirements, so we ignored the error on purpose and we should
+ // keep trying to push past it.
+ //
+ // However, because mg may be incomplete (and thus may select inaccurate
+ // versions), we shouldn't use it to load packages. Instead, we pass a nil
+			// *ModuleGraph, which causes importFromModules to first try loading from
+			// only the main module and root dependencies.
+ mg = nil
+ }
+ }
+
+ var modroot string
+ pkg.mod, modroot, pkg.dir, pkg.altMods, pkg.err = importFromModules(ctx, pkg.path, ld.requirements, mg, ld.skipImportModFiles)
+ if pkg.dir == "" {
+ return
+ }
+ if MainModules.Contains(pkg.mod.Path) {
+ // Go ahead and mark pkg as in "all". This provides the invariant that a
+ // package that is *only* imported by other packages in "all" is always
+ // marked as such before loading its imports.
+ //
+ // We don't actually rely on that invariant at the moment, but it may
+ // improve efficiency somewhat and makes the behavior a bit easier to reason
+ // about (by reducing churn on the flag bits of dependencies), and costs
+ // essentially nothing (these atomic flag ops are essentially free compared
+ // to scanning source code for imports).
+ ld.applyPkgFlags(ctx, pkg, pkgInAll)
+ }
+ if ld.AllowPackage != nil {
+ if err := ld.AllowPackage(ctx, pkg.path, pkg.mod); err != nil {
+ pkg.err = err
+ }
+ }
+
+ pkg.inStd = (search.IsStandardImportPath(pkg.path) && search.InDir(pkg.dir, cfg.GOROOTsrc) != "")
+
+ var imports, testImports []string
+
+ if cfg.BuildContext.Compiler == "gccgo" && pkg.inStd {
+ // We can't scan standard packages for gccgo.
+ } else {
+ var err error
+ imports, testImports, err = scanDir(modroot, pkg.dir, ld.Tags)
+ if err != nil {
+ pkg.err = err
+ return
+ }
+ }
+
+ pkg.imports = make([]*loadPkg, 0, len(imports))
+ var importFlags loadPkgFlags
+ if pkg.flags.has(pkgInAll) {
+ importFlags = pkgInAll
+ }
+ for _, path := range imports {
+ if pkg.inStd {
+ // Imports from packages in "std" and "cmd" should resolve using
+ // GOROOT/src/vendor even when "std" is not the main module.
+ path = ld.stdVendor(pkg.path, path)
+ }
+ pkg.imports = append(pkg.imports, ld.pkg(ctx, path, importFlags))
+ }
+ pkg.testImports = testImports
+
+ ld.applyPkgFlags(ctx, pkg, pkgImportsLoaded)
+}
+
+// pkgTest locates the test of pkg, creating it if needed, and updates its state
+// to reflect the given flags.
+//
+// pkgTest requires that the imports of pkg have already been loaded (flagged
+// with pkgImportsLoaded).
+func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFlags) *loadPkg {
+ if pkg.isTest() {
+ panic("pkgTest called on a test package")
+ }
+
+ createdTest := false
+ pkg.testOnce.Do(func() {
+ pkg.test = &loadPkg{
+ path: pkg.path,
+ testOf: pkg,
+ mod: pkg.mod,
+ dir: pkg.dir,
+ err: pkg.err,
+ inStd: pkg.inStd,
+ }
+ ld.applyPkgFlags(ctx, pkg.test, testFlags)
+ createdTest = true
+ })
+
+ test := pkg.test
+ if createdTest {
+ test.imports = make([]*loadPkg, 0, len(pkg.testImports))
+ var importFlags loadPkgFlags
+ if test.flags.has(pkgInAll) {
+ importFlags = pkgInAll
+ }
+ for _, path := range pkg.testImports {
+ if pkg.inStd {
+ path = ld.stdVendor(test.path, path)
+ }
+ test.imports = append(test.imports, ld.pkg(ctx, path, importFlags))
+ }
+ pkg.testImports = nil
+ ld.applyPkgFlags(ctx, test, pkgImportsLoaded)
+ } else {
+ ld.applyPkgFlags(ctx, test, testFlags)
+ }
+
+ return test
+}
+
+// stdVendor returns the canonical import path for the package with the given
+// path when imported from the standard-library package at parentPath.
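+//
+// For example (illustrative): an import of "golang.org/x/net/http2/hpack" from
+// "net/http" resolves to "vendor/golang.org/x/net/http2/hpack" when that
+// directory exists under GOROOT/src, and an import from a package under "cmd"
+// resolves to the corresponding "cmd/vendor/..." path when that directory
+// exists.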
+func (ld *loader) stdVendor(parentPath, path string) string {
+ if search.IsStandardImportPath(path) {
+ return path
+ }
+
+ if str.HasPathPrefix(parentPath, "cmd") {
+ if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("cmd") {
+ vendorPath := pathpkg.Join("cmd", "vendor", path)
+
+ if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil {
+ return vendorPath
+ }
+ }
+ } else if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") {
+ // If we are outside of the 'std' module, resolve imports from within 'std'
+ // to the vendor directory.
+ //
+ // Do the same for importers beginning with the prefix 'vendor/' even if we
+ // are *inside* of the 'std' module: the 'vendor/' packages that resolve
+ // globally from GOROOT/src/vendor (and are listed as part of 'go list std')
+ // are distinct from the real module dependencies, and cannot import
+ // internal packages from the real module.
+ //
+ // (Note that although the 'vendor/' packages match the 'std' *package*
+ // pattern, they are not part of the std *module*, and do not affect
+ // 'go mod tidy' and similar module commands when working within std.)
+ vendorPath := pathpkg.Join("vendor", path)
+ if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil {
+ return vendorPath
+ }
+ }
+
+ // Not vendored: resolve from modules.
+ return path
+}
+
+// computePatternAll returns the list of packages matching pattern "all",
+// starting with a list of the import paths for the packages in the main module.
+func (ld *loader) computePatternAll() (all []string) {
+ for _, pkg := range ld.pkgs {
+ if pkg.flags.has(pkgInAll) && !pkg.isTest() {
+ all = append(all, pkg.path)
+ }
+ }
+ sort.Strings(all)
+ return all
+}
+
+// checkMultiplePaths verifies that a given module path is used as itself
+// or as a replacement for another module, but not both at the same time.
+//
+// (See https://golang.org/issue/26607 and https://golang.org/issue/34650.)
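+//
+// For example (a hypothetical module graph): if the build list contains
+// example.com/a v1.2.0 and example.com/b v1.0.0, and example.com/b is replaced
+// by example.com/a v1.2.0, then example.com/a v1.2.0 is used both as itself
+// and as a replacement for example.com/b, and checkMultiplePaths reports an
+// error.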
+func (ld *loader) checkMultiplePaths() {
+ mods := ld.requirements.rootModules
+ if cached := ld.requirements.graph.Load(); cached != nil {
+ if mg := cached.mg; mg != nil {
+ mods = mg.BuildList()
+ }
+ }
+
+ firstPath := map[module.Version]string{}
+ for _, mod := range mods {
+ src := resolveReplacement(mod)
+ if prev, ok := firstPath[src]; !ok {
+ firstPath[src] = mod.Path
+ } else if prev != mod.Path {
+ ld.error(fmt.Errorf("%s@%s used for two different module paths (%s and %s)", src.Path, src.Version, prev, mod.Path))
+ }
+ }
+}
+
+// checkTidyCompatibility emits an error if any package would be loaded from a
+// different module under rs than under ld.requirements.
+func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, compatVersion string) {
+ goVersion := rs.GoVersion()
+ suggestUpgrade := false
+ suggestEFlag := false
+ suggestFixes := func() {
+ if ld.AllowErrors {
+ // The user is explicitly ignoring these errors, so don't bother them with
+ // other options.
+ return
+ }
+
+ // We print directly to os.Stderr because this information is advice about
+ // how to fix errors, not actually an error itself.
+ // (The actual errors should have been logged already.)
+
+ fmt.Fprintln(os.Stderr)
+
+ goFlag := ""
+ if goVersion != MainModules.GoVersion() {
+ goFlag = " -go=" + goVersion
+ }
+
+ compatFlag := ""
+ if compatVersion != gover.Prev(goVersion) {
+ compatFlag = " -compat=" + compatVersion
+ }
+ if suggestUpgrade {
+ eDesc := ""
+ eFlag := ""
+ if suggestEFlag {
+ eDesc = ", leaving some packages unresolved"
+ eFlag = " -e"
+ }
+ fmt.Fprintf(os.Stderr, "To upgrade to the versions selected by go %s%s:\n\tgo mod tidy%s -go=%s && go mod tidy%s -go=%s%s\n", compatVersion, eDesc, eFlag, compatVersion, eFlag, goVersion, compatFlag)
+ } else if suggestEFlag {
+ // If some packages are missing but no package is upgraded, then we
+ // shouldn't suggest upgrading to the Go 1.16 versions explicitly — that
+ // wouldn't actually fix anything for Go 1.16 users, and *would* break
+ // something for Go 1.17 users.
+ fmt.Fprintf(os.Stderr, "To proceed despite packages unresolved in go %s:\n\tgo mod tidy -e%s%s\n", compatVersion, goFlag, compatFlag)
+ }
+
+ fmt.Fprintf(os.Stderr, "If reproducibility with go %s is not needed:\n\tgo mod tidy%s -compat=%s\n", compatVersion, goFlag, goVersion)
+
+ // TODO(#46141): Populate the linked wiki page.
+ fmt.Fprintf(os.Stderr, "For other options, see:\n\thttps://golang.org/doc/modules/pruning\n")
+ }
+
+ mg, err := rs.Graph(ctx)
+ if err != nil {
+ ld.error(fmt.Errorf("error loading go %s module graph: %w", compatVersion, err))
+ ld.switchIfErrors(ctx)
+ suggestFixes()
+ ld.exitIfErrors(ctx)
+ return
+ }
+
+ // Re-resolve packages in parallel.
+ //
+ // We re-resolve each package — rather than just checking versions — to ensure
+ // that we have fetched module source code (and, importantly, checksums for
+ // that source code) for all modules that are necessary to ensure that imports
+ // are unambiguous. That also produces clearer diagnostics, since we can say
+ // exactly what happened to the package if it became ambiguous or disappeared
+ // entirely.
+ //
+ // We re-resolve the packages in parallel because this process involves disk
+ // I/O to check for package sources, and because the process of checking for
+ // ambiguous imports may require us to download additional modules that are
+ // otherwise pruned out in Go 1.17 — we don't want to block progress on other
+ // packages while we wait for a single new download.
+ type mismatch struct {
+ mod module.Version
+ err error
+ }
+ mismatchMu := make(chan map[*loadPkg]mismatch, 1)
+ mismatchMu <- map[*loadPkg]mismatch{}
+ for _, pkg := range ld.pkgs {
+ if pkg.mod.Path == "" && pkg.err == nil {
+ // This package is from the standard library (which does not vary based on
+ // the module graph).
+ continue
+ }
+
+ pkg := pkg
+ ld.work.Add(func() {
+ mod, _, _, _, err := importFromModules(ctx, pkg.path, rs, mg, ld.skipImportModFiles)
+ if mod != pkg.mod {
+ mismatches := <-mismatchMu
+ mismatches[pkg] = mismatch{mod: mod, err: err}
+ mismatchMu <- mismatches
+ }
+ })
+ }
+ <-ld.work.Idle()
+
+ mismatches := <-mismatchMu
+ if len(mismatches) == 0 {
+ // Since we're running as part of 'go mod tidy', the roots of the module
+ // graph should contain only modules that are relevant to some package in
+ // the package graph. We checked every package in the package graph and
+ // didn't find any mismatches, so that must mean that all of the roots of
+ // the module graph are also consistent.
+ //
+ // If we're wrong, Go 1.16 in -mod=readonly mode will error out with
+ // "updates to go.mod needed", which would be very confusing. So instead,
+ // we'll double-check that our reasoning above actually holds — if it
+ // doesn't, we'll emit an internal error and hopefully the user will report
+ // it as a bug.
+ for _, m := range ld.requirements.rootModules {
+ if v := mg.Selected(m.Path); v != m.Version {
+ fmt.Fprintln(os.Stderr)
+ base.Fatalf("go: internal error: failed to diagnose selected-version mismatch for module %s: go %s selects %s, but go %s selects %s\n\tPlease report this at https://golang.org/issue.", m.Path, goVersion, m.Version, compatVersion, v)
+ }
+ }
+ return
+ }
+
+ // Iterate over the packages (instead of the mismatches map) to emit errors in
+ // deterministic order.
+ for _, pkg := range ld.pkgs {
+ mismatch, ok := mismatches[pkg]
+ if !ok {
+ continue
+ }
+
+ if pkg.isTest() {
+ // We already did (or will) report an error for the package itself,
+ // so don't report a duplicate (and more verbose) error for its test.
+ if _, ok := mismatches[pkg.testOf]; !ok {
+ base.Fatalf("go: internal error: mismatch recorded for test %s, but not its non-test package", pkg.path)
+ }
+ continue
+ }
+
+ switch {
+ case mismatch.err != nil:
+ // pkg resolved successfully, but errors out using the requirements in rs.
+ //
+ // This could occur because the import is provided by a single root (and
+ // is thus unambiguous in a main module with a pruned module graph) and
+ // also one or more transitive dependencies (and is ambiguous with an
+ // unpruned graph).
+ //
+ // It could also occur because some transitive dependency upgrades the
+ // module that previously provided the package to a version that no
+ // longer does, or to a version for which the module source code (but
+ // not the go.mod file in isolation) has a checksum error.
+ if missing := (*ImportMissingError)(nil); errors.As(mismatch.err, &missing) {
+ selected := module.Version{
+ Path: pkg.mod.Path,
+ Version: mg.Selected(pkg.mod.Path),
+ }
+ ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it in %s", pkg.stackText(), pkg.mod, compatVersion, selected))
+ } else {
+ if ambiguous := (*AmbiguousImportError)(nil); errors.As(mismatch.err, &ambiguous) {
+ // TODO: Is this check needed?
+ }
+ ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it:\n\t%v", pkg.stackText(), pkg.mod, compatVersion, mismatch.err))
+ }
+
+ suggestEFlag = true
+
+ // Even if we press ahead with the '-e' flag, the older version will
+ // error out in readonly mode if it thinks the go.mod file contains
+ // any *explicit* dependency that is not at its selected version,
+ // even if that dependency is not relevant to any package being loaded.
+ //
+ // We check for that condition here. If all of the roots are consistent
+ // the '-e' flag suffices, but otherwise we need to suggest an upgrade.
+ if !suggestUpgrade {
+ for _, m := range ld.requirements.rootModules {
+ if v := mg.Selected(m.Path); v != m.Version {
+ suggestUpgrade = true
+ break
+ }
+ }
+ }
+
+ case pkg.err != nil:
+			// pkg had an error with the pruned module graph (presumably suppressed
+ // with the -e flag), but the error went away using an unpruned graph.
+ //
+ // This is possible, if, say, the import is unresolved in the pruned graph
+ // (because the "latest" version of each candidate module either is
+ // unavailable or does not contain the package), but is resolved in the
+ // unpruned graph due to a newer-than-latest dependency that is normally
+ // pruned out.
+ //
+ // This could also occur if the source code for the module providing the
+ // package in the pruned graph has a checksum error, but the unpruned
+ // graph upgrades that module to a version with a correct checksum.
+ //
+ // pkg.err should have already been logged elsewhere — along with a
+ // stack trace — so log only the import path and non-error info here.
+ suggestUpgrade = true
+ ld.error(fmt.Errorf("%s failed to load from any module,\n\tbut go %s would load it from %v", pkg.path, compatVersion, mismatch.mod))
+
+ case pkg.mod != mismatch.mod:
+ // The package is loaded successfully by both Go versions, but from a
+ // different module in each. This could lead to subtle (and perhaps even
+ // unnoticed!) variations in behavior between builds with different
+ // toolchains.
+ suggestUpgrade = true
+ ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would select %v\n", pkg.stackText(), pkg.mod, compatVersion, mismatch.mod.Version))
+
+ default:
+ base.Fatalf("go: internal error: mismatch recorded for package %s, but no differences found", pkg.path)
+ }
+ }
+
+ ld.switchIfErrors(ctx)
+ suggestFixes()
+ ld.exitIfErrors(ctx)
+}
+
+// scanDir is like imports.ScanDir but elides known magic imports from the list,
+// so that we do not go looking for packages that don't really exist.
+//
+// The standard magic import is "C", for cgo.
+//
+// The only other known magic imports are appengine and appengine/*.
+// These are so old that they predate "go get" and did not use URL-like paths.
+// Most code now uses google.golang.org/appengine instead,
+// but not all code has been so updated. When we mostly ignore build tags
+// during "go mod vendor", we look into "// +build appengine" files and
+// may see these legacy imports. We drop them so that the module
+// search does not look for modules to try to satisfy them.
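+//
+// For example (illustrative, with a hypothetical import path), a file
+// containing
+//
+//	import (
+//		"C"
+//		"appengine/datastore"
+//		"example.com/foo/bar"
+//	)
+//
+// contributes only "example.com/foo/bar" to the returned import list.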
+func scanDir(modroot string, dir string, tags map[string]bool) (imports_, testImports []string, err error) {
+ if ip, mierr := modindex.GetPackage(modroot, dir); mierr == nil {
+ imports_, testImports, err = ip.ScanDir(tags)
+ goto Happy
+ } else if !errors.Is(mierr, modindex.ErrNotIndexed) {
+ return nil, nil, mierr
+ }
+
+ imports_, testImports, err = imports.ScanDir(dir, tags)
+Happy:
+
+ filter := func(x []string) []string {
+ w := 0
+ for _, pkg := range x {
+ if pkg != "C" && pkg != "appengine" && !strings.HasPrefix(pkg, "appengine/") &&
+ pkg != "appengine_internal" && !strings.HasPrefix(pkg, "appengine_internal/") {
+ x[w] = pkg
+ w++
+ }
+ }
+ return x[:w]
+ }
+
+ return filter(imports_), filter(testImports), err
+}
+
+// buildStacks computes minimal import stacks for each package,
+// for use in error messages. When it completes, packages that
+// are part of the original root set have pkg.stack == nil,
+// and other packages have pkg.stack pointing at the next
+// package up the import stack in their minimal chain.
+// As a side effect, buildStacks also constructs ld.pkgs,
+// the list of all packages loaded.
+func (ld *loader) buildStacks() {
+ if len(ld.pkgs) > 0 {
+ panic("buildStacks")
+ }
+ for _, pkg := range ld.roots {
+ pkg.stack = pkg // sentinel to avoid processing in next loop
+ ld.pkgs = append(ld.pkgs, pkg)
+ }
+ for i := 0; i < len(ld.pkgs); i++ { // not range: appending to ld.pkgs in loop
+ pkg := ld.pkgs[i]
+ for _, next := range pkg.imports {
+ if next.stack == nil {
+ next.stack = pkg
+ ld.pkgs = append(ld.pkgs, next)
+ }
+ }
+ if next := pkg.test; next != nil && next.stack == nil {
+ next.stack = pkg
+ ld.pkgs = append(ld.pkgs, next)
+ }
+ }
+ for _, pkg := range ld.roots {
+ pkg.stack = nil
+ }
+}
+
+// stackText builds the import stack text to use when
+// reporting an error in pkg. It has the general form
+//
+// root imports
+// other imports
+// other2 tested by
+// other2.test imports
+// pkg
+func (pkg *loadPkg) stackText() string {
+ var stack []*loadPkg
+ for p := pkg; p != nil; p = p.stack {
+ stack = append(stack, p)
+ }
+
+ var buf strings.Builder
+ for i := len(stack) - 1; i >= 0; i-- {
+ p := stack[i]
+ fmt.Fprint(&buf, p.path)
+ if p.testOf != nil {
+ fmt.Fprint(&buf, ".test")
+ }
+ if i > 0 {
+ if stack[i-1].testOf == p {
+ fmt.Fprint(&buf, " tested by\n\t")
+ } else {
+ fmt.Fprint(&buf, " imports\n\t")
+ }
+ }
+ }
+ return buf.String()
+}
+
+// why returns the text to use in "go mod why" output about the given package.
+// It is less ornate than the stackText but contains the same information.
+func (pkg *loadPkg) why() string {
+ var buf strings.Builder
+ var stack []*loadPkg
+ for p := pkg; p != nil; p = p.stack {
+ stack = append(stack, p)
+ }
+
+ for i := len(stack) - 1; i >= 0; i-- {
+ p := stack[i]
+ if p.testOf != nil {
+ fmt.Fprintf(&buf, "%s.test\n", p.testOf.path)
+ } else {
+ fmt.Fprintf(&buf, "%s\n", p.path)
+ }
+ }
+ return buf.String()
+}
+
+// Why returns the "go mod why" output stanza for the given package,
+// without the leading # comment.
+// The package graph must have been loaded already, usually by LoadPackages.
+// If there is no reason for the package to be in the current build,
+// Why returns an empty string.
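+//
+// For example (a hypothetical module), Why("golang.org/x/text/language") might
+// return:
+//
+//	example.com/mymodule
+//	example.com/mymodule/internal/app
+//	golang.org/x/text/language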
+func Why(path string) string {
+ pkg, ok := loaded.pkgCache.Get(path)
+ if !ok {
+ return ""
+ }
+ return pkg.why()
+}
+
+// WhyDepth returns the number of steps in the Why listing.
+// If there is no reason for the package to be in the current build,
+// WhyDepth returns 0.
+func WhyDepth(path string) int {
+ n := 0
+ pkg, _ := loaded.pkgCache.Get(path)
+ for p := pkg; p != nil; p = p.stack {
+ n++
+ }
+ return n
+}
diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go
new file mode 100644
index 0000000..d6c395f
--- /dev/null
+++ b/src/cmd/go/internal/modload/modfile.go
@@ -0,0 +1,806 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "unicode"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/par"
+ "cmd/go/internal/trace"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+)
+
+// ReadModFile reads and parses the mod file at gomod. ReadModFile properly applies the
+// overlay, locks the file while reading, and applies fix, if applicable.
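+//
+// A minimal illustrative call (modRoot is hypothetical; fix may be nil when no
+// version fixing is needed):
+//
+//	data, f, err := ReadModFile(filepath.Join(modRoot, "go.mod"), nil)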
+func ReadModFile(gomod string, fix modfile.VersionFixer) (data []byte, f *modfile.File, err error) {
+ gomod = base.ShortPath(gomod) // use short path in any errors
+ if gomodActual, ok := fsys.OverlayPath(gomod); ok {
+ // Don't lock go.mod if it's part of the overlay.
+ // On Plan 9, locking requires chmod, and we don't want to modify any file
+ // in the overlay. See #44700.
+ data, err = os.ReadFile(gomodActual)
+ } else {
+ data, err = lockedfile.Read(gomodActual)
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+
+ f, err = modfile.Parse(gomod, data, fix)
+ if err != nil {
+ // Errors returned by modfile.Parse begin with file:line.
+ return nil, nil, fmt.Errorf("errors parsing %s:\n%w", gomod, err)
+ }
+ if f.Go != nil && gover.Compare(f.Go.Version, gover.Local()) > 0 {
+ toolchain := ""
+ if f.Toolchain != nil {
+ toolchain = f.Toolchain.Name
+ }
+ return nil, nil, &gover.TooNewError{What: gomod, GoVersion: f.Go.Version, Toolchain: toolchain}
+ }
+ if f.Module == nil {
+ // No module declaration. Must add module path.
+ return nil, nil, fmt.Errorf("error reading %s: missing module declaration. To specify the module path:\n\tgo mod edit -module=example.com/mod", gomod)
+ }
+
+ return data, f, err
+}
+
+// A modFileIndex is an index of data corresponding to a modFile
+// at a specific point in time.
+type modFileIndex struct {
+ data []byte
+ dataNeedsFix bool // true if fixVersion applied a change while parsing data
+ module module.Version
+ goVersion string // Go version (no "v" or "go" prefix)
+ toolchain string
+ require map[module.Version]requireMeta
+ replace map[module.Version]module.Version
+ exclude map[module.Version]bool
+}
+
+type requireMeta struct {
+ indirect bool
+}
+
+// A modPruning indicates whether transitive dependencies of Go 1.17 dependencies
+// are pruned out of the module subgraph rooted at a given module.
+// (See https://golang.org/ref/mod#graph-pruning.)
+type modPruning uint8
+
+const (
+ pruned modPruning = iota // transitive dependencies of modules at go 1.17 and higher are pruned out
+ unpruned // no transitive dependencies are pruned out
+ workspace // pruned to the union of modules in the workspace
+)
+
+func (p modPruning) String() string {
+ switch p {
+ case pruned:
+ return "pruned"
+ case unpruned:
+ return "unpruned"
+ case workspace:
+ return "workspace"
+ default:
+ return fmt.Sprintf("%T(%d)", p, p)
+ }
+}
+
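+// pruningForGoVersion returns the pruning implied by the given go version
+// declared in a module's go.mod file. For example (illustrative):
+//
+//	pruningForGoVersion("1.16") == unpruned
+//	pruningForGoVersion("1.17") == pruned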
+func pruningForGoVersion(goVersion string) modPruning {
+ if gover.Compare(goVersion, gover.ExplicitIndirectVersion) < 0 {
+ // The go.mod file does not duplicate relevant information about transitive
+ // dependencies, so they cannot be pruned out.
+ return unpruned
+ }
+ return pruned
+}
+
+// CheckAllowed returns an error equivalent to ErrDisallowed if m is excluded by
+// the main module's go.mod or retracted by its author. Most version queries use
+// this to filter out versions that should not be used.
+func CheckAllowed(ctx context.Context, m module.Version) error {
+ if err := CheckExclusions(ctx, m); err != nil {
+ return err
+ }
+ if err := CheckRetractions(ctx, m); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ErrDisallowed is returned by version predicates passed to Query and similar
+// functions to indicate that a version should not be considered.
+var ErrDisallowed = errors.New("disallowed module version")
+
+// CheckExclusions returns an error equivalent to ErrDisallowed if module m is
+// excluded by the main module's go.mod file.
+func CheckExclusions(ctx context.Context, m module.Version) error {
+ for _, mainModule := range MainModules.Versions() {
+ if index := MainModules.Index(mainModule); index != nil && index.exclude[m] {
+ return module.VersionError(m, errExcluded)
+ }
+ }
+ return nil
+}
+
+var errExcluded = &excludedError{}
+
+type excludedError struct{}
+
+func (e *excludedError) Error() string { return "excluded by go.mod" }
+func (e *excludedError) Is(err error) bool { return err == ErrDisallowed }
+
+// CheckRetractions returns an error if module m has been retracted by
+// its author.
+func CheckRetractions(ctx context.Context, m module.Version) (err error) {
+ defer func() {
+ if retractErr := (*ModuleRetractedError)(nil); err == nil || errors.As(err, &retractErr) {
+ return
+ }
+ // Attribute the error to the version being checked, not the version from
+ // which the retractions were to be loaded.
+ if mErr := (*module.ModuleError)(nil); errors.As(err, &mErr) {
+ err = mErr.Err
+ }
+ err = &retractionLoadingError{m: m, err: err}
+ }()
+
+ if m.Version == "" {
+ // Main module, standard library, or file replacement module.
+ // Cannot be retracted.
+ return nil
+ }
+ if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" {
+ // All versions of the module were replaced.
+ // Don't load retractions, since we'd just load the replacement.
+ return nil
+ }
+
+ // Find the latest available version of the module, and load its go.mod. If
+ // the latest version is replaced, we'll load the replacement.
+ //
+ // If there's an error loading the go.mod, we'll return it here. These errors
+ // should generally be ignored by callers since they happen frequently when
+ // we're offline. These errors are not equivalent to ErrDisallowed, so they
+ // may be distinguished from retraction errors.
+ //
+ // We load the raw file here: the go.mod file may have a different module
+	// path than we expect if the module or its repository was renamed.
+ // We still want to apply retractions to other aliases of the module.
+ rm, err := queryLatestVersionIgnoringRetractions(ctx, m.Path)
+ if err != nil {
+ return err
+ }
+ summary, err := rawGoModSummary(rm)
+ if err != nil {
+ return err
+ }
+
+ var rationale []string
+ isRetracted := false
+ for _, r := range summary.retract {
+ if gover.ModCompare(m.Path, r.Low, m.Version) <= 0 && gover.ModCompare(m.Path, m.Version, r.High) <= 0 {
+ isRetracted = true
+ if r.Rationale != "" {
+ rationale = append(rationale, r.Rationale)
+ }
+ }
+ }
+ if isRetracted {
+ return module.VersionError(m, &ModuleRetractedError{Rationale: rationale})
+ }
+ return nil
+}
+
+type ModuleRetractedError struct {
+ Rationale []string
+}
+
+func (e *ModuleRetractedError) Error() string {
+ msg := "retracted by module author"
+ if len(e.Rationale) > 0 {
+ // This is meant to be a short error printed on a terminal, so just
+ // print the first rationale.
+ msg += ": " + ShortMessage(e.Rationale[0], "retracted by module author")
+ }
+ return msg
+}
+
+func (e *ModuleRetractedError) Is(err error) bool {
+ return err == ErrDisallowed
+}
+
+type retractionLoadingError struct {
+ m module.Version
+ err error
+}
+
+func (e *retractionLoadingError) Error() string {
+ return fmt.Sprintf("loading module retractions for %v: %v", e.m, e.err)
+}
+
+func (e *retractionLoadingError) Unwrap() error {
+ return e.err
+}
+
+// ShortMessage returns a string from go.mod (for example, a retraction
+// rationale or deprecation message) that is safe to print in a terminal.
+//
+// If the given string is empty, ShortMessage returns the given default. If the
+// given string is too long or contains non-printable characters, ShortMessage
+// returns a hard-coded string.
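+//
+// For example (illustrative):
+//
+//	ShortMessage("", "retracted by module author") // "retracted by module author"
+//	ShortMessage("  use v2 instead\nsee README", "") // "use v2 instead"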
+func ShortMessage(message, emptyDefault string) string {
+ const maxLen = 500
+ if i := strings.Index(message, "\n"); i >= 0 {
+ message = message[:i]
+ }
+ message = strings.TrimSpace(message)
+ if message == "" {
+ return emptyDefault
+ }
+ if len(message) > maxLen {
+ return "(message omitted: too long)"
+ }
+ for _, r := range message {
+ if !unicode.IsGraphic(r) && !unicode.IsSpace(r) {
+ return "(message omitted: contains non-printable characters)"
+ }
+ }
+ // NOTE: the go.mod parser rejects invalid UTF-8, so we don't check that here.
+ return message
+}
+
+// CheckDeprecation returns a deprecation message from the go.mod file of the
+// latest version of the given module. Deprecation messages are comments
+// before or on the same line as the module directives that start with
+// "Deprecated:" and run until the end of the paragraph.
+//
+// CheckDeprecation returns an error if the message can't be loaded.
+// CheckDeprecation returns "", nil if there is no deprecation message.
+func CheckDeprecation(ctx context.Context, m module.Version) (deprecation string, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("loading deprecation for %s: %w", m.Path, err)
+ }
+ }()
+
+ if m.Version == "" {
+ // Main module, standard library, or file replacement module.
+ // Don't look up deprecation.
+ return "", nil
+ }
+ if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" {
+ // All versions of the module were replaced.
+ // We'll look up deprecation separately for the replacement.
+ return "", nil
+ }
+
+ latest, err := queryLatestVersionIgnoringRetractions(ctx, m.Path)
+ if err != nil {
+ return "", err
+ }
+ summary, err := rawGoModSummary(latest)
+ if err != nil {
+ return "", err
+ }
+ return summary.deprecated, nil
+}
+
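+// replacement returns the replacement for mod (if any) from the given replace
+// map, checking for a version-specific replacement before a wildcard
+// (version-less) one. For example (a hypothetical replace map):
+//
+//	example.com/a v1.0.0 => example.com/a v1.0.1   // matches only v1.0.0 of example.com/a
+//	example.com/b        => ../b                   // matches every version of example.com/b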
+func replacement(mod module.Version, replace map[module.Version]module.Version) (fromVersion string, to module.Version, ok bool) {
+ if r, ok := replace[mod]; ok {
+ return mod.Version, r, true
+ }
+ if r, ok := replace[module.Version{Path: mod.Path}]; ok {
+ return "", r, true
+ }
+ return "", module.Version{}, false
+}
+
+// Replacement returns the replacement for mod, if any. If the path in the
+// module.Version is relative it's relative to the single main module outside
+// workspace mode, or the workspace's directory in workspace mode.
+func Replacement(mod module.Version) module.Version {
+ foundFrom, found, foundModRoot := "", module.Version{}, ""
+ if MainModules == nil {
+ return module.Version{}
+ } else if MainModules.Contains(mod.Path) && mod.Version == "" {
+ // Don't replace the workspace version of the main module.
+ return module.Version{}
+ }
+ if _, r, ok := replacement(mod, MainModules.WorkFileReplaceMap()); ok {
+ return r
+ }
+ for _, v := range MainModules.Versions() {
+ if index := MainModules.Index(v); index != nil {
+ if from, r, ok := replacement(mod, index.replace); ok {
+ modRoot := MainModules.ModRoot(v)
+ if foundModRoot != "" && foundFrom != from && found != r {
+ base.Errorf("conflicting replacements found for %v in workspace modules defined by %v and %v",
+ mod, modFilePath(foundModRoot), modFilePath(modRoot))
+ return canonicalizeReplacePath(found, foundModRoot)
+ }
+ found, foundModRoot = r, modRoot
+ }
+ }
+ }
+ return canonicalizeReplacePath(found, foundModRoot)
+}
+
+func replaceRelativeTo() string {
+ if workFilePath := WorkFilePath(); workFilePath != "" {
+ return filepath.Dir(workFilePath)
+ }
+ return MainModules.ModRoot(MainModules.mustGetSingleMainModule())
+}
+
+// canonicalizeReplacePath ensures that relative, on-disk, replaced module paths
+// are relative to the workspace directory (in workspace mode) or to the module's
+// directory (in module mode, as they already are).
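+//
+// For example (hypothetical paths): with a workspace file at /work/go.work and
+// a module at /work/mod whose go.mod contains "replace example.com/m => ./local",
+// the replacement path "./local" is rewritten to "mod/local" so that it is
+// interpreted relative to /work rather than to /work/mod.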
+func canonicalizeReplacePath(r module.Version, modRoot string) module.Version {
+ if filepath.IsAbs(r.Path) || r.Version != "" {
+ return r
+ }
+ workFilePath := WorkFilePath()
+ if workFilePath == "" {
+ return r
+ }
+ abs := filepath.Join(modRoot, r.Path)
+ if rel, err := filepath.Rel(filepath.Dir(workFilePath), abs); err == nil {
+ return module.Version{Path: rel, Version: r.Version}
+ }
+ // We couldn't make the version's path relative to the workspace's path,
+ // so just return the absolute path. It's the best we can do.
+ return module.Version{Path: abs, Version: r.Version}
+}
+
+// resolveReplacement returns the module actually used to load the source code
+// for m: either m itself, or the replacement for m (iff m is replaced).
+func resolveReplacement(m module.Version) module.Version {
+ if r := Replacement(m); r.Path != "" {
+ return r
+ }
+ return m
+}
+
+func toReplaceMap(replacements []*modfile.Replace) map[module.Version]module.Version {
+ replaceMap := make(map[module.Version]module.Version, len(replacements))
+ for _, r := range replacements {
+ if prev, dup := replaceMap[r.Old]; dup && prev != r.New {
+ base.Fatalf("go: conflicting replacements for %v:\n\t%v\n\t%v", r.Old, prev, r.New)
+ }
+ replaceMap[r.Old] = r.New
+ }
+ return replaceMap
+}
+
+// indexModFile rebuilds the index of modFile.
+// If modFile has been changed since it was first read,
+// modFile.Cleanup must be called before indexModFile.
+func indexModFile(data []byte, modFile *modfile.File, mod module.Version, needsFix bool) *modFileIndex {
+ i := new(modFileIndex)
+ i.data = data
+ i.dataNeedsFix = needsFix
+
+ i.module = module.Version{}
+ if modFile.Module != nil {
+ i.module = modFile.Module.Mod
+ }
+
+ i.goVersion = ""
+ if modFile.Go == nil {
+ rawGoVersion.Store(mod, "")
+ } else {
+ i.goVersion = modFile.Go.Version
+ rawGoVersion.Store(mod, modFile.Go.Version)
+ }
+ if modFile.Toolchain != nil {
+ i.toolchain = modFile.Toolchain.Name
+ }
+
+ i.require = make(map[module.Version]requireMeta, len(modFile.Require))
+ for _, r := range modFile.Require {
+ i.require[r.Mod] = requireMeta{indirect: r.Indirect}
+ }
+
+ i.replace = toReplaceMap(modFile.Replace)
+
+ i.exclude = make(map[module.Version]bool, len(modFile.Exclude))
+ for _, x := range modFile.Exclude {
+ i.exclude[x.Mod] = true
+ }
+
+ return i
+}
+
+// modFileIsDirty reports whether the go.mod file differs meaningfully
+// from what was indexed.
+// If modFile has been changed (even cosmetically) since it was first read,
+// modFile.Cleanup must be called before modFileIsDirty.
+func (i *modFileIndex) modFileIsDirty(modFile *modfile.File) bool {
+ if i == nil {
+ return modFile != nil
+ }
+
+ if i.dataNeedsFix {
+ return true
+ }
+
+ if modFile.Module == nil {
+ if i.module != (module.Version{}) {
+ return true
+ }
+ } else if modFile.Module.Mod != i.module {
+ return true
+ }
+
+ var goV, toolchain string
+ if modFile.Go != nil {
+ goV = modFile.Go.Version
+ }
+ if modFile.Toolchain != nil {
+ toolchain = modFile.Toolchain.Name
+ }
+
+ if goV != i.goVersion ||
+ toolchain != i.toolchain ||
+ len(modFile.Require) != len(i.require) ||
+ len(modFile.Replace) != len(i.replace) ||
+ len(modFile.Exclude) != len(i.exclude) {
+ return true
+ }
+
+ for _, r := range modFile.Require {
+ if meta, ok := i.require[r.Mod]; !ok {
+ return true
+ } else if r.Indirect != meta.indirect {
+ if cfg.BuildMod == "readonly" {
+					// The module's requirements are consistent; it's only the "// indirect"
+					// comments that are wrong. But those are only guaranteed to be accurate
+					// after a "go mod tidy" — it's a good idea to run that before
+					// committing a change, but it's certainly not mandatory.
+ } else {
+ return true
+ }
+ }
+ }
+
+ for _, r := range modFile.Replace {
+ if r.New != i.replace[r.Old] {
+ return true
+ }
+ }
+
+ for _, x := range modFile.Exclude {
+ if !i.exclude[x.Mod] {
+ return true
+ }
+ }
+
+ return false
+}
+
+// rawGoVersion records the Go version parsed from each module's go.mod file.
+//
+// If a module is replaced, the version of the replacement is keyed by the
+// replacement module.Version, not the version being replaced.
+var rawGoVersion sync.Map // map[module.Version]string
+
+// A modFileSummary is a summary of a go.mod file for which we do not need to
+// retain complete information — for example, the go.mod file of a dependency
+// module.
+type modFileSummary struct {
+ module module.Version
+ goVersion string
+ toolchain string
+ pruning modPruning
+ require []module.Version
+ retract []retraction
+ deprecated string
+}
+
+// A retraction consists of a retracted version interval and rationale.
+// retraction is like modfile.Retract, but it doesn't point to the syntax tree.
+type retraction struct {
+ modfile.VersionInterval
+ Rationale string
+}
+
+// goModSummary returns a summary of the go.mod file for module m,
+// taking into account any replacements for m, exclusions of its dependencies,
+// and/or vendoring.
+//
+// m must be a version in the module graph, reachable from the Target module.
+// In readonly mode, the go.sum file must contain an entry for m's go.mod file
+// (or its replacement). goModSummary must not be called for the Target module
+// itself, as its requirements may change. Use rawGoModSummary for other
+// module versions.
+//
+// The caller must not modify the returned summary.
+func goModSummary(m module.Version) (*modFileSummary, error) {
+ if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) {
+ panic("internal error: goModSummary called on a main module")
+ }
+ if gover.IsToolchain(m.Path) {
+ return rawGoModSummary(m)
+ }
+
+ if cfg.BuildMod == "vendor" {
+ summary := &modFileSummary{
+ module: module.Version{Path: m.Path},
+ }
+
+ readVendorList(MainModules.mustGetSingleMainModule())
+ if vendorVersion[m.Path] != m.Version {
+ // This module is not vendored, so packages cannot be loaded from it and
+ // it cannot be relevant to the build.
+ return summary, nil
+ }
+
+ // For every module other than the target,
+ // return the full list of modules from modules.txt.
+ // We don't know what versions the vendored module actually relies on,
+ // so assume that it requires everything.
+ summary.require = vendorList
+ return summary, nil
+ }
+
+ actual := resolveReplacement(m)
+ if mustHaveSums() && actual.Version != "" {
+ key := module.Version{Path: actual.Path, Version: actual.Version + "/go.mod"}
+ if !modfetch.HaveSum(key) {
+ suggestion := fmt.Sprintf(" for go.mod file; to add it:\n\tgo mod download %s", m.Path)
+ return nil, module.VersionError(actual, &sumMissingError{suggestion: suggestion})
+ }
+ }
+ summary, err := rawGoModSummary(actual)
+ if err != nil {
+ return nil, err
+ }
+
+ if actual.Version == "" {
+ // The actual module is a filesystem-local replacement, for which we have
+ // unfortunately not enforced any sort of invariants about module lines or
+ // matching module paths. Anything goes.
+ //
+ // TODO(bcmills): Remove this special-case, update tests, and add a
+ // release note.
+ } else {
+ if summary.module.Path == "" {
+ return nil, module.VersionError(actual, errors.New("parsing go.mod: missing module line"))
+ }
+
+ // In theory we should only allow mpath to be unequal to m.Path here if the
+ // version that we fetched lacks an explicit go.mod file: if the go.mod file
+ // is explicit, then it should match exactly (to ensure that imports of other
+ // packages within the module are interpreted correctly). Unfortunately, we
+ // can't determine that information from the module proxy protocol: we'll have
+ // to leave that validation for when we load actual packages from within the
+ // module.
+ if mpath := summary.module.Path; mpath != m.Path && mpath != actual.Path {
+ return nil, module.VersionError(actual,
+ fmt.Errorf("parsing go.mod:\n"+
+ "\tmodule declares its path as: %s\n"+
+ "\t but was required as: %s", mpath, m.Path))
+ }
+ }
+
+ for _, mainModule := range MainModules.Versions() {
+ if index := MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 {
+ // Drop any requirements on excluded versions.
+ // Don't modify the cached summary though, since we might need the raw
+ // summary separately.
+ haveExcludedReqs := false
+ for _, r := range summary.require {
+ if index.exclude[r] {
+ haveExcludedReqs = true
+ break
+ }
+ }
+ if haveExcludedReqs {
+ s := new(modFileSummary)
+ *s = *summary
+ s.require = make([]module.Version, 0, len(summary.require))
+ for _, r := range summary.require {
+ if !index.exclude[r] {
+ s.require = append(s.require, r)
+ }
+ }
+ summary = s
+ }
+ }
+ }
+ return summary, nil
+}
+
+// rawGoModSummary returns a new summary of the go.mod file for module m,
+// ignoring all replacements that may apply to m and excludes that may apply to
+// its dependencies.
+//
+// rawGoModSummary cannot be used on the main module outside of workspace mode.
+func rawGoModSummary(m module.Version) (*modFileSummary, error) {
+ if gover.IsToolchain(m.Path) {
+ if m.Path == "go" && gover.Compare(m.Version, gover.GoStrictVersion) >= 0 {
+ // Declare that go 1.21.3 requires toolchain 1.21.3,
+ // so that go get knows that downgrading toolchain implies downgrading go
+ // and similarly upgrading go requires upgrading the toolchain.
+ return &modFileSummary{module: m, require: []module.Version{{Path: "toolchain", Version: "go" + m.Version}}}, nil
+ }
+ return &modFileSummary{module: m}, nil
+ }
+ if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) {
+ // Calling rawGoModSummary implies that we are treating m as a module whose
+ // requirements aren't the roots of the module graph and can't be modified.
+ //
+ // If we are not in workspace mode, then the requirements of the main module
+ // are the roots of the module graph and we expect them to be kept consistent.
+ panic("internal error: rawGoModSummary called on a main module")
+ }
+ if m.Version == "" && inWorkspaceMode() && m.Path == "command-line-arguments" {
+ // "go work sync" calls LoadModGraph to make sure the module graph is valid.
+ // If there are no modules in the workspace, we synthesize an empty
+ // command-line-arguments module, which rawGoModData cannot read a go.mod for.
+ return &modFileSummary{module: m}, nil
+ }
+ return rawGoModSummaryCache.Do(m, func() (*modFileSummary, error) {
+ summary := new(modFileSummary)
+ name, data, err := rawGoModData(m)
+ if err != nil {
+ return nil, err
+ }
+ f, err := modfile.ParseLax(name, data, nil)
+ if err != nil {
+ return nil, module.VersionError(m, fmt.Errorf("parsing %s: %v", base.ShortPath(name), err))
+ }
+ if f.Module != nil {
+ summary.module = f.Module.Mod
+ summary.deprecated = f.Module.Deprecated
+ }
+ if f.Go != nil {
+ rawGoVersion.LoadOrStore(m, f.Go.Version)
+ summary.goVersion = f.Go.Version
+ summary.pruning = pruningForGoVersion(f.Go.Version)
+ } else {
+ summary.pruning = unpruned
+ }
+ if f.Toolchain != nil {
+ summary.toolchain = f.Toolchain.Name
+ }
+ if len(f.Require) > 0 {
+ summary.require = make([]module.Version, 0, len(f.Require)+1)
+ for _, req := range f.Require {
+ summary.require = append(summary.require, req.Mod)
+ }
+ }
+ if summary.goVersion != "" && gover.Compare(summary.goVersion, gover.GoStrictVersion) >= 0 {
+ if gover.Compare(summary.goVersion, gover.Local()) > 0 {
+ return nil, &gover.TooNewError{What: "module " + m.String(), GoVersion: summary.goVersion}
+ }
+ summary.require = append(summary.require, module.Version{Path: "go", Version: summary.goVersion})
+ }
+ if len(f.Retract) > 0 {
+ summary.retract = make([]retraction, 0, len(f.Retract))
+ for _, ret := range f.Retract {
+ summary.retract = append(summary.retract, retraction{
+ VersionInterval: ret.VersionInterval,
+ Rationale: ret.Rationale,
+ })
+ }
+ }
+
+ return summary, nil
+ })
+}
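+
+// Illustrative example with a hypothetical dependency: for a go.mod that declares
+// "module example.com/m", "go 1.20", and two require lines, rawGoModSummary returns
+// a summary with module path example.com/m, goVersion "1.20", pruning derived from
+// that go version, and the two required module versions.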
+
+var rawGoModSummaryCache par.ErrCache[module.Version, *modFileSummary]
+
+// rawGoModData returns the content of the go.mod file for module m, ignoring
+// all replacements that may apply to m.
+//
+// rawGoModData cannot be used on the main module outside of workspace mode.
+//
+// Unlike rawGoModSummary, rawGoModData does not cache its results in memory.
+// Use rawGoModSummary instead unless you specifically need these bytes.
+func rawGoModData(m module.Version) (name string, data []byte, err error) {
+ if m.Version == "" {
+ dir := m.Path
+ if !filepath.IsAbs(dir) {
+ if inWorkspaceMode() && MainModules.Contains(m.Path) {
+ dir = MainModules.ModRoot(m)
+ } else {
+ // m is a replacement module with only a file path.
+ dir = filepath.Join(replaceRelativeTo(), dir)
+ }
+ }
+ name = filepath.Join(dir, "go.mod")
+ if gomodActual, ok := fsys.OverlayPath(name); ok {
+ // Don't lock go.mod if it's part of the overlay.
+ // On Plan 9, locking requires chmod, and we don't want to modify any file
+ // in the overlay. See #44700.
+ data, err = os.ReadFile(gomodActual)
+ } else {
+ data, err = lockedfile.Read(gomodActual)
+ }
+ if err != nil {
+ return "", nil, module.VersionError(m, fmt.Errorf("reading %s: %v", base.ShortPath(name), err))
+ }
+ } else {
+ if !gover.ModIsValid(m.Path, m.Version) {
+ // Disallow the broader queries supported by fetch.Lookup.
+ base.Fatalf("go: internal error: %s@%s: unexpected invalid semantic version", m.Path, m.Version)
+ }
+ name = "go.mod"
+ data, err = modfetch.GoMod(context.TODO(), m.Path, m.Version)
+ }
+ return name, data, err
+}
+
+// queryLatestVersionIgnoringRetractions looks up the latest version of the
+// module with the given path without considering retracted or excluded
+// versions.
+//
+// If all versions of the module are replaced,
+// queryLatestVersionIgnoringRetractions returns the replacement without making
+// a query.
+//
+// If the queried latest version is replaced,
+// queryLatestVersionIgnoringRetractions returns the replacement.
+func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (latest module.Version, err error) {
+ return latestVersionIgnoringRetractionsCache.Do(path, func() (module.Version, error) {
+ ctx, span := trace.StartSpan(ctx, "queryLatestVersionIgnoringRetractions "+path)
+ defer span.Done()
+
+ if repl := Replacement(module.Version{Path: path}); repl.Path != "" {
+ // All versions of the module were replaced.
+ // No need to query.
+ return repl, nil
+ }
+
+ // Find the latest version of the module.
+ // Ignore exclusions from the main module's go.mod.
+ const ignoreSelected = ""
+ var allowAll AllowedFunc
+ rev, err := Query(ctx, path, "latest", ignoreSelected, allowAll)
+ if err != nil {
+ return module.Version{}, err
+ }
+ latest := module.Version{Path: path, Version: rev.Version}
+ if repl := resolveReplacement(latest); repl.Path != "" {
+ latest = repl
+ }
+ return latest, nil
+ })
+}
+
+var latestVersionIgnoringRetractionsCache par.ErrCache[string, module.Version] // path → queryLatestVersionIgnoringRetractions result
+
+// ToDirectoryPath adds a prefix if necessary so that path is unambiguously
+// an absolute path or a relative path starting with a '.' or '..'
+// path component.
+func ToDirectoryPath(path string) string {
+ if path == "." || modfile.IsDirectoryPath(path) {
+ return path
+ }
+ // The path is not a relative path or an absolute path, so make it relative
+ // to the current directory.
+ return "./" + filepath.ToSlash(filepath.Clean(path))
+}
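+
+// For illustration, with hypothetical arguments: ToDirectoryPath("a/b") returns
+// "./a/b", while ToDirectoryPath("./a/b") and ToDirectoryPath("/tmp/a") are already
+// unambiguous and are returned unchanged.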
diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go
new file mode 100644
index 0000000..8ae2dbf
--- /dev/null
+++ b/src/cmd/go/internal/modload/mvs.go
@@ -0,0 +1,136 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "context"
+ "errors"
+ "os"
+ "sort"
+
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modfetch/codehost"
+
+ "golang.org/x/mod/module"
+)
+
+// cmpVersion implements the comparison for versions in the module loader.
+//
+// It is consistent with gover.ModCompare except that as a special case,
+// the version "" is considered higher than all other versions.
+// The main module (also known as the target) has no version and must be chosen
+// over other versions of the same module in the module dependency graph.
+func cmpVersion(p string, v1, v2 string) int {
+ if v2 == "" {
+ if v1 == "" {
+ return 0
+ }
+ return -1
+ }
+ if v1 == "" {
+ return 1
+ }
+ return gover.ModCompare(p, v1, v2)
+}
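+
+// Illustrative ordering for a hypothetical path p: cmpVersion(p, "v1.0.0", "v1.1.0") < 0,
+// cmpVersion(p, "", "v99.0.0") > 0 (the empty version denotes the main module and wins),
+// and cmpVersion(p, "", "") == 0.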
+
+// mvsReqs implements mvs.Reqs for module semantic versions,
+// with any exclusions or replacements applied internally.
+type mvsReqs struct {
+ roots []module.Version
+}
+
+func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) {
+ if mod.Version == "" && MainModules.Contains(mod.Path) {
+ // Use the build list as it existed when r was constructed, not the current
+ // global build list.
+ return r.roots, nil
+ }
+
+ if mod.Version == "none" {
+ return nil, nil
+ }
+
+ summary, err := goModSummary(mod)
+ if err != nil {
+ return nil, err
+ }
+ return summary.require, nil
+}
+
+// Max returns the maximum of v1 and v2 according to gover.ModCompare.
+//
+// As a special case, the version "" is considered higher than all other
+// versions. The main module (also known as the target) has no version and must
+// be chosen over other versions of the same module in the module dependency
+// graph.
+func (*mvsReqs) Max(p, v1, v2 string) string {
+ if cmpVersion(p, v1, v2) < 0 {
+ return v2
+ }
+ return v1
+}
+
+// Upgrade is a no-op, here to implement mvs.Reqs.
+// The upgrade logic for go get -u is in ../modget/get.go.
+func (*mvsReqs) Upgrade(m module.Version) (module.Version, error) {
+ return m, nil
+}
+
+func versions(ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) {
+ // Note: modfetch.Lookup and repo.Versions are cached,
+ // so there's no need for us to add extra caching here.
+ err = modfetch.TryProxies(func(proxy string) error {
+ repo, err := lookupRepo(ctx, proxy, path)
+ if err != nil {
+ return err
+ }
+ allVersions, err := repo.Versions(ctx, "")
+ if err != nil {
+ return err
+ }
+ allowedVersions := make([]string, 0, len(allVersions.List))
+ for _, v := range allVersions.List {
+ if err := allowed(ctx, module.Version{Path: path, Version: v}); err == nil {
+ allowedVersions = append(allowedVersions, v)
+ } else if !errors.Is(err, ErrDisallowed) {
+ return err
+ }
+ }
+ versions = allowedVersions
+ origin = allVersions.Origin
+ return nil
+ })
+ return versions, origin, err
+}
+
+// previousVersion returns the tagged version of m.Path immediately prior to
+// m.Version, or version "none" if no prior version is tagged.
+//
+// Since the version of a main module is not found in the version list,
+// it has no previous version.
+func previousVersion(ctx context.Context, m module.Version) (module.Version, error) {
+ if m.Version == "" && MainModules.Contains(m.Path) {
+ return module.Version{Path: m.Path, Version: "none"}, nil
+ }
+
+ list, _, err := versions(ctx, m.Path, CheckAllowed)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return module.Version{Path: m.Path, Version: "none"}, nil
+ }
+ return module.Version{}, err
+ }
+ i := sort.Search(len(list), func(i int) bool { return gover.ModCompare(m.Path, list[i], m.Version) >= 0 })
+ if i > 0 {
+ return module.Version{Path: m.Path, Version: list[i-1]}, nil
+ }
+ return module.Version{Path: m.Path, Version: "none"}, nil
+}
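+
+// For illustration, with a hypothetical module m: if the allowed tagged versions of
+// m.Path are v1.0.0 and v1.1.0, previousVersion of m@v1.1.0 is v1.0.0, and
+// previousVersion of m@v1.0.0 is "none".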
+
+func (*mvsReqs) Previous(m module.Version) (module.Version, error) {
+ // TODO(golang.org/issue/38714): thread tracing context through MVS.
+ return previousVersion(context.TODO(), m)
+}
diff --git a/src/cmd/go/internal/modload/mvs_test.go b/src/cmd/go/internal/modload/mvs_test.go
new file mode 100644
index 0000000..e0a38b9
--- /dev/null
+++ b/src/cmd/go/internal/modload/mvs_test.go
@@ -0,0 +1,31 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "testing"
+)
+
+func TestReqsMax(t *testing.T) {
+ type testCase struct {
+ a, b, want string
+ }
+ reqs := new(mvsReqs)
+ for _, tc := range []testCase{
+ {a: "v0.1.0", b: "v0.2.0", want: "v0.2.0"},
+ {a: "v0.2.0", b: "v0.1.0", want: "v0.2.0"},
+ {a: "", b: "v0.1.0", want: ""}, // "" is Target.Version
+ {a: "v0.1.0", b: "", want: ""},
+ {a: "none", b: "v0.1.0", want: "v0.1.0"},
+ {a: "v0.1.0", b: "none", want: "v0.1.0"},
+ {a: "none", b: "", want: ""},
+ {a: "", b: "none", want: ""},
+ } {
+ max := reqs.Max("", tc.a, tc.b)
+ if max != tc.want {
+ t.Errorf("(%T).Max(%q, %q) = %q; want %q", reqs, tc.a, tc.b, max, tc.want)
+ }
+ }
+}
diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go
new file mode 100644
index 0000000..f8ddf11
--- /dev/null
+++ b/src/cmd/go/internal/modload/query.go
@@ -0,0 +1,1265 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ pathpkg "path"
+ "slices"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modfetch/codehost"
+ "cmd/go/internal/modinfo"
+ "cmd/go/internal/search"
+ "cmd/go/internal/str"
+ "cmd/go/internal/trace"
+ "cmd/internal/pkgpattern"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
+)
+
+// Query looks up a revision of the given module using a version query string.
+// The module must be a complete module path.
+// The version must take one of the following forms:
+//
+// - the literal string "latest", denoting the latest available, allowed
+// tagged version, with non-prereleases preferred over prereleases.
+// If there are no tagged versions in the repo, latest returns the most
+// recent commit.
+//
+// - the literal string "upgrade", equivalent to "latest" except that if
+// current is a newer version, current will be returned (see below).
+//
+// - the literal string "patch", denoting the latest available tagged version
+// with the same major and minor number as current (see below).
+//
+// - v1, denoting the latest available tagged version v1.x.x.
+//
+// - v1.2, denoting the latest available tagged version v1.2.x.
+//
+// - v1.2.3, a semantic version string denoting that tagged version.
+//
+// - <v1.2.3, <=v1.2.3, >v1.2.3, >=v1.2.3,
+// denoting the version closest to the target and satisfying the given operator,
+// with non-prereleases preferred over prereleases.
+//
+// - a repository commit identifier or tag, denoting that commit.
+//
+// current denotes the currently-selected version of the module; it may be
+// "none" if no version is currently selected, or "" if the currently-selected
+// version is unknown or should not be considered. If query is
+// "upgrade" or "patch", current will be returned if it is a newer
+// semantic version or a chronologically later pseudo-version than the
+// version that would otherwise be chosen. This prevents accidental downgrades
+// from newer pre-release or development versions.
+//
+// The allowed function (which may be nil) is used to filter out unsuitable
+// versions (see AllowedFunc documentation for details). If the query refers to
+// a specific revision (for example, "master"; see IsRevisionQuery), and the
+// revision is disallowed by allowed, Query returns the error. If the query
+// does not refer to a specific revision (for example, "latest"), Query
+// acts as if versions disallowed by allowed do not exist.
+//
+// If path is the path of the main module and the query is "latest",
+// Query returns Target.Version as the version.
+//
+// Query often returns a non-nil *RevInfo with a non-nil error,
+// to provide an info.Origin that can allow the error to be cached.
+func Query(ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) {
+ ctx, span := trace.StartSpan(ctx, "modload.Query "+path)
+ defer span.Done()
+
+ return queryReuse(ctx, path, query, current, allowed, nil)
+}
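+
+// A minimal usage sketch (hypothetical module path; error handling elided):
+//
+//	info, err := Query(ctx, "example.com/m", "latest", "", CheckAllowed)
+//	if err == nil {
+//		fmt.Println("selected", info.Version)
+//	}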
+
+// queryReuse is like Query but also takes a map of module info that can be reused
+// if the validation criteria in Origin are met.
+func queryReuse(ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
+ var info *modfetch.RevInfo
+ err := modfetch.TryProxies(func(proxy string) (err error) {
+ info, err = queryProxy(ctx, proxy, path, query, current, allowed, reuse)
+ return err
+ })
+ return info, err
+}
+
+// checkReuse checks whether a revision of a given module or a version list
+// for a given module may be reused, according to the information in origin.
+func checkReuse(ctx context.Context, path string, old *codehost.Origin) error {
+ return modfetch.TryProxies(func(proxy string) error {
+ repo, err := lookupRepo(ctx, proxy, path)
+ if err != nil {
+ return err
+ }
+ return repo.CheckReuse(ctx, old)
+ })
+}
+
+// AllowedFunc is used by Query and other functions to filter out unsuitable
+// versions, for example, those listed in exclude directives in the main
+// module's go.mod file.
+//
+// An AllowedFunc returns an error equivalent to ErrDisallowed for an unsuitable
+// version. Any other error indicates the function was unable to determine
+// whether the version should be allowed, for example, the function was unable
+// to fetch or parse a go.mod file containing retractions. Typically, errors
+// other than ErrDisallowed may be ignored.
+type AllowedFunc func(context.Context, module.Version) error
+
+var errQueryDisabled error = queryDisabledError{}
+
+type queryDisabledError struct{}
+
+func (queryDisabledError) Error() string {
+ if cfg.BuildModReason == "" {
+ return fmt.Sprintf("cannot query module due to -mod=%s", cfg.BuildMod)
+ }
+ return fmt.Sprintf("cannot query module due to -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason)
+}
+
+func queryProxy(ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
+ ctx, span := trace.StartSpan(ctx, "modload.queryProxy "+path+" "+query)
+ defer span.Done()
+
+ if current != "" && current != "none" && !gover.ModIsValid(path, current) {
+ return nil, fmt.Errorf("invalid previous version %v@%v", path, current)
+ }
+ if cfg.BuildMod == "vendor" {
+ return nil, errQueryDisabled
+ }
+ if allowed == nil {
+ allowed = func(context.Context, module.Version) error { return nil }
+ }
+
+ if MainModules.Contains(path) && (query == "upgrade" || query == "patch") {
+ m := module.Version{Path: path}
+ if err := allowed(ctx, m); err != nil {
+ return nil, fmt.Errorf("internal error: main module version is not allowed: %w", err)
+ }
+ return &modfetch.RevInfo{Version: m.Version}, nil
+ }
+
+ if path == "std" || path == "cmd" {
+ return nil, fmt.Errorf("can't query specific version (%q) of standard-library module %q", query, path)
+ }
+
+ repo, err := lookupRepo(ctx, proxy, path)
+ if err != nil {
+ return nil, err
+ }
+
+ if old := reuse[module.Version{Path: path, Version: query}]; old != nil {
+ if err := repo.CheckReuse(ctx, old.Origin); err == nil {
+ info := &modfetch.RevInfo{
+ Version: old.Version,
+ Origin: old.Origin,
+ }
+ if old.Time != nil {
+ info.Time = *old.Time
+ }
+ return info, nil
+ }
+ }
+
+ // Parse query to detect parse errors (and possibly handle query)
+ // before any network I/O.
+ qm, err := newQueryMatcher(path, query, current, allowed)
+ if (err == nil && qm.canStat) || err == errRevQuery {
+ // Direct lookup of a commit identifier or complete (non-prefix) semantic
+ // version.
+
+ // If the identifier is not a canonical semver tag — including if it's a
+ // semver tag with a +metadata suffix — then modfetch.Stat will populate
+ // info.Version with a suitable pseudo-version.
+ info, err := repo.Stat(ctx, query)
+ if err != nil {
+ queryErr := err
+ // The full query doesn't correspond to a tag. If it is a semantic version
+ // with a +metadata suffix, see if there is a tag without that suffix:
+ // semantic versioning defines them to be equivalent.
+ canonicalQuery := module.CanonicalVersion(query)
+ if canonicalQuery != "" && query != canonicalQuery {
+ info, err = repo.Stat(ctx, canonicalQuery)
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return info, err
+ }
+ }
+ if err != nil {
+ return info, queryErr
+ }
+ }
+ if err := allowed(ctx, module.Version{Path: path, Version: info.Version}); errors.Is(err, ErrDisallowed) {
+ return nil, err
+ }
+ return info, nil
+ } else if err != nil {
+ return nil, err
+ }
+
+ // Load versions and execute query.
+ versions, err := repo.Versions(ctx, qm.prefix)
+ if err != nil {
+ return nil, err
+ }
+ revErr := &modfetch.RevInfo{Origin: versions.Origin} // RevInfo to return with error
+
+ releases, prereleases, err := qm.filterVersions(ctx, versions.List)
+ if err != nil {
+ return revErr, err
+ }
+
+ mergeRevOrigin := func(rev *modfetch.RevInfo, origin *codehost.Origin) *modfetch.RevInfo {
+ merged := mergeOrigin(rev.Origin, origin)
+ if merged == rev.Origin {
+ return rev
+ }
+ clone := new(modfetch.RevInfo)
+ *clone = *rev
+ clone.Origin = merged
+ return clone
+ }
+
+ lookup := func(v string) (*modfetch.RevInfo, error) {
+ rev, err := repo.Stat(ctx, v)
+ // Stat can return a non-nil rev and a non-nil err,
+ // in order to provide origin information to make the error cacheable.
+ if rev == nil && err != nil {
+ return revErr, err
+ }
+ rev = mergeRevOrigin(rev, versions.Origin)
+ if err != nil {
+ return rev, err
+ }
+
+ if (query == "upgrade" || query == "patch") && module.IsPseudoVersion(current) && !rev.Time.IsZero() {
+ // Don't allow "upgrade" or "patch" to move from a pseudo-version
+ // to a chronologically older version or pseudo-version.
+ //
+ // If the current version is a pseudo-version from an untagged branch, it
+ // may be semantically lower than the "latest" release or the latest
+ // pseudo-version on the main branch. A user on such a version is unlikely
+ // to intend to “upgrade” to a version that already existed at that point
+ // in time.
+ //
+ // We do this only if the current version is a pseudo-version: if the
+ // version is tagged, the author of the dependency module has given us
+ // explicit information about their intended precedence of this version
+ // relative to other versions, and we shouldn't contradict that
+ // information. (For example, v1.0.1 might be a backport of a fix already
+ // incorporated into v1.1.0, in which case v1.0.1 would be chronologically
+ // newer but v1.1.0 is still an “upgrade”; or v1.0.2 might be a revert of
+ // an unsuccessful fix in v1.0.1, in which case the v1.0.2 commit may be
+ // older than the v1.0.1 commit despite the tag itself being newer.)
+ currentTime, err := module.PseudoVersionTime(current)
+ if err == nil && rev.Time.Before(currentTime) {
+ if err := allowed(ctx, module.Version{Path: path, Version: current}); errors.Is(err, ErrDisallowed) {
+ return revErr, err
+ }
+ rev, err = repo.Stat(ctx, current)
+ if rev == nil && err != nil {
+ return revErr, err
+ }
+ rev = mergeRevOrigin(rev, versions.Origin)
+ return rev, err
+ }
+ }
+
+ return rev, nil
+ }
+
+ if qm.preferLower {
+ if len(releases) > 0 {
+ return lookup(releases[0])
+ }
+ if len(prereleases) > 0 {
+ return lookup(prereleases[0])
+ }
+ } else {
+ if len(releases) > 0 {
+ return lookup(releases[len(releases)-1])
+ }
+ if len(prereleases) > 0 {
+ return lookup(prereleases[len(prereleases)-1])
+ }
+ }
+
+ if qm.mayUseLatest {
+ latest, err := repo.Latest(ctx)
+ if err == nil {
+ if qm.allowsVersion(ctx, latest.Version) {
+ return lookup(latest.Version)
+ }
+ } else if !errors.Is(err, fs.ErrNotExist) {
+ return revErr, err
+ }
+ }
+
+ if (query == "upgrade" || query == "patch") && current != "" && current != "none" {
+ // "upgrade" and "patch" may stay on the current version if allowed.
+ if err := allowed(ctx, module.Version{Path: path, Version: current}); errors.Is(err, ErrDisallowed) {
+ return nil, err
+ }
+ return lookup(current)
+ }
+
+ return revErr, &NoMatchingVersionError{query: query, current: current}
+}
+
+// IsRevisionQuery returns true if vers is a version query that may refer to
+// a particular version or revision in a repository like "v1.0.0", "master",
+// or "0123abcd". IsRevisionQuery returns false if vers is a query that
+// chooses from among available versions like "latest" or ">v1.0.0".
+func IsRevisionQuery(path, vers string) bool {
+ if vers == "latest" ||
+ vers == "upgrade" ||
+ vers == "patch" ||
+ strings.HasPrefix(vers, "<") ||
+ strings.HasPrefix(vers, ">") ||
+ (gover.ModIsValid(path, vers) && gover.ModIsPrefix(path, vers)) {
+ return false
+ }
+ return true
+}
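+
+// For illustration, with a hypothetical path: IsRevisionQuery("example.com/m", "v1.0.0")
+// and IsRevisionQuery("example.com/m", "master") report true, while
+// IsRevisionQuery("example.com/m", "latest") and IsRevisionQuery("example.com/m", "v1")
+// report false.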
+
+type queryMatcher struct {
+ path string
+ prefix string
+ filter func(version string) bool
+ allowed AllowedFunc
+ canStat bool // if true, the query can be resolved by repo.Stat
+ preferLower bool // if true, choose the lowest matching version
+ mayUseLatest bool
+ preferIncompatible bool
+}
+
+var errRevQuery = errors.New("query refers to a non-semver revision")
+
+// newQueryMatcher returns a new queryMatcher that matches the versions
+// specified by the given query on the module with the given path.
+//
+// If the query can only be resolved by statting a non-SemVer revision,
+// newQueryMatcher returns errRevQuery.
+func newQueryMatcher(path string, query, current string, allowed AllowedFunc) (*queryMatcher, error) {
+ badVersion := func(v string) (*queryMatcher, error) {
+ return nil, fmt.Errorf("invalid semantic version %q in range %q", v, query)
+ }
+
+ matchesMajor := func(v string) bool {
+ _, pathMajor, ok := module.SplitPathVersion(path)
+ if !ok {
+ return false
+ }
+ return module.CheckPathMajor(v, pathMajor) == nil
+ }
+
+ qm := &queryMatcher{
+ path: path,
+ allowed: allowed,
+ preferIncompatible: strings.HasSuffix(current, "+incompatible"),
+ }
+
+ switch {
+ case query == "latest":
+ qm.mayUseLatest = true
+
+ case query == "upgrade":
+ if current == "" || current == "none" {
+ qm.mayUseLatest = true
+ } else {
+ qm.mayUseLatest = module.IsPseudoVersion(current)
+ qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, current) >= 0 }
+ }
+
+ case query == "patch":
+ if current == "" || current == "none" {
+ return nil, &NoPatchBaseError{path}
+ }
+ if current == "" {
+ qm.mayUseLatest = true
+ } else {
+ qm.mayUseLatest = module.IsPseudoVersion(current)
+ qm.prefix = gover.ModMajorMinor(qm.path, current) + "."
+ qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, current) >= 0 }
+ }
+
+ case strings.HasPrefix(query, "<="):
+ v := query[len("<="):]
+ if !gover.ModIsValid(path, v) {
+ return badVersion(v)
+ }
+ if gover.ModIsPrefix(path, v) {
+ // Refuse to say whether <=v1.2 allows v1.2.3 (remember, @v1.2 might mean v1.2.3).
+ return nil, fmt.Errorf("ambiguous semantic version %q in range %q", v, query)
+ }
+ qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, v) <= 0 }
+ if !matchesMajor(v) {
+ qm.preferIncompatible = true
+ }
+
+ case strings.HasPrefix(query, "<"):
+ v := query[len("<"):]
+ if !gover.ModIsValid(path, v) {
+ return badVersion(v)
+ }
+ qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, v) < 0 }
+ if !matchesMajor(v) {
+ qm.preferIncompatible = true
+ }
+
+ case strings.HasPrefix(query, ">="):
+ v := query[len(">="):]
+ if !gover.ModIsValid(path, v) {
+ return badVersion(v)
+ }
+ qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, v) >= 0 }
+ qm.preferLower = true
+ if !matchesMajor(v) {
+ qm.preferIncompatible = true
+ }
+
+ case strings.HasPrefix(query, ">"):
+ v := query[len(">"):]
+ if !gover.ModIsValid(path, v) {
+ return badVersion(v)
+ }
+ if gover.ModIsPrefix(path, v) {
+ // Refuse to say whether >v1.2 allows v1.2.3 (remember, @v1.2 might mean v1.2.3).
+ return nil, fmt.Errorf("ambiguous semantic version %q in range %q", v, query)
+ }
+ qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, v) > 0 }
+ qm.preferLower = true
+ if !matchesMajor(v) {
+ qm.preferIncompatible = true
+ }
+
+ case gover.ModIsValid(path, query):
+ if gover.ModIsPrefix(path, query) {
+ qm.prefix = query + "."
+ // Do not allow the query "v1.2" to match versions lower than "v1.2.0",
+ // such as prereleases for that version. (https://golang.org/issue/31972)
+ qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, query) >= 0 }
+ } else {
+ qm.canStat = true
+ qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, query) == 0 }
+ qm.prefix = semver.Canonical(query)
+ }
+ if !matchesMajor(query) {
+ qm.preferIncompatible = true
+ }
+
+ default:
+ return nil, errRevQuery
+ }
+
+ return qm, nil
+}
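+
+// Illustrative behavior for a hypothetical path p: newQueryMatcher(p, "v1.2", "", nil)
+// sets prefix "v1.2." and filters out versions below v1.2.0; newQueryMatcher(p, "v1.2.3", "", nil)
+// sets canStat so the query can be resolved directly by repo.Stat; and
+// newQueryMatcher(p, "master", "", nil) returns errRevQuery.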
+
+// allowsVersion reports whether version v is allowed by the prefix, filter, and
+// AllowedFunc of qm.
+func (qm *queryMatcher) allowsVersion(ctx context.Context, v string) bool {
+ if qm.prefix != "" && !strings.HasPrefix(v, qm.prefix) {
+ if gover.IsToolchain(qm.path) && strings.TrimSuffix(qm.prefix, ".") == v {
+ // Allow 1.21 to match "1.21." prefix.
+ } else {
+ return false
+ }
+ }
+ if qm.filter != nil && !qm.filter(v) {
+ return false
+ }
+ if qm.allowed != nil {
+ if err := qm.allowed(ctx, module.Version{Path: qm.path, Version: v}); errors.Is(err, ErrDisallowed) {
+ return false
+ }
+ }
+ return true
+}
+
+// filterVersions classifies versions into releases and pre-releases, filtering
+// out:
+// 1. versions that do not satisfy the 'allowed' predicate, and
+// 2. "+incompatible" versions, if a compatible one satisfies the predicate
+// and the incompatible version is not preferred.
+//
+// If the allowed predicate returns an error not equivalent to ErrDisallowed,
+// filterVersions returns that error.
+func (qm *queryMatcher) filterVersions(ctx context.Context, versions []string) (releases, prereleases []string, err error) {
+ needIncompatible := qm.preferIncompatible
+
+ var lastCompatible string
+ for _, v := range versions {
+ if !qm.allowsVersion(ctx, v) {
+ continue
+ }
+
+ if !needIncompatible {
+ // We're not yet sure whether we need to include +incompatible versions.
+ // Keep track of the last compatible version we've seen, and use the
+ // presence (or absence) of a go.mod file in that version to decide: a
+ // go.mod file implies that the module author is supporting modules at a
+ // compatible version (and we should ignore +incompatible versions unless
+ // requested explicitly), while a lack of go.mod file implies the
+ // potential for legacy (pre-modules) versioning without semantic import
+ // paths (and thus *with* +incompatible versions).
+ //
+ // This isn't strictly accurate if the latest compatible version has been
+ // replaced by a local file path, because we do not allow file-path
+ // replacements without a go.mod file: the user would have needed to add
+ // one. However, replacing the last compatible version while
+ // simultaneously expecting to upgrade implicitly to a +incompatible
+ // version seems like an extreme enough corner case to ignore for now.
+
+ if !strings.HasSuffix(v, "+incompatible") {
+ lastCompatible = v
+ } else if lastCompatible != "" {
+ // If the latest compatible version is allowed and has a go.mod file,
+ // ignore any version with a higher (+incompatible) major version. (See
+ // https://golang.org/issue/34165.) Note that we even prefer a
+ // compatible pre-release over an incompatible release.
+ ok, err := versionHasGoMod(ctx, module.Version{Path: qm.path, Version: lastCompatible})
+ if err != nil {
+ return nil, nil, err
+ }
+ if ok {
+ // The last compatible version has a go.mod file, so that's the
+ // highest version we're willing to consider. Don't bother even
+ // looking at higher versions, because they're all +incompatible from
+ // here onward.
+ break
+ }
+
+ // No acceptable compatible release has a go.mod file, so the versioning
+ // for the module might not be module-aware, and we should respect
+ // legacy major-version tags.
+ needIncompatible = true
+ }
+ }
+
+ if gover.ModIsPrerelease(qm.path, v) {
+ prereleases = append(prereleases, v)
+ } else {
+ releases = append(releases, v)
+ }
+ }
+
+ return releases, prereleases, nil
+}
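+
+// For illustration, given the hypothetical allowed versions
+// "v1.0.0", "v1.1.0-pre", and "v2.0.0+incompatible": filterVersions classifies v1.0.0
+// as a release and v1.1.0-pre as a prerelease, and stops before the +incompatible
+// version as long as the last compatible version seen (v1.1.0-pre) has a real go.mod file.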
+
+type QueryResult struct {
+ Mod module.Version
+ Rev *modfetch.RevInfo
+ Packages []string
+}
+
+// QueryPackages is like QueryPattern, but requires that the pattern match at
+// least one package and omits the non-package result (if any).
+func QueryPackages(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) ([]QueryResult, error) {
+ pkgMods, modOnly, err := QueryPattern(ctx, pattern, query, current, allowed)
+
+ if len(pkgMods) == 0 && err == nil {
+ replacement := Replacement(modOnly.Mod)
+ return nil, &PackageNotInModuleError{
+ Mod: modOnly.Mod,
+ Replacement: replacement,
+ Query: query,
+ Pattern: pattern,
+ }
+ }
+
+ return pkgMods, err
+}
+
+// QueryPattern looks up the module(s) containing at least one package matching
+// the given pattern at the given version. The results are sorted by module path
+// length in descending order. If any proxy provides a non-empty set of candidate
+// modules, no further proxies are tried.
+//
+// For wildcard patterns, QueryPattern looks in modules with package paths up to
+// the first "..." in the pattern. For the pattern "example.com/a/b.../c",
+// QueryPattern would consider prefixes of "example.com/a".
+//
+// If any matching package is in the main module, QueryPattern considers only
+// the main module and only the version "latest", without checking for other
+// possible modules.
+//
+// QueryPattern always returns at least one QueryResult (which may be only
+// modOnly) or a non-nil error.
+func QueryPattern(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) (pkgMods []QueryResult, modOnly *QueryResult, err error) {
+ ctx, span := trace.StartSpan(ctx, "modload.QueryPattern "+pattern+" "+query)
+ defer span.Done()
+
+ base := pattern
+
+ firstError := func(m *search.Match) error {
+ if len(m.Errs) == 0 {
+ return nil
+ }
+ return m.Errs[0]
+ }
+
+ var match func(mod module.Version, roots []string, isLocal bool) *search.Match
+ matchPattern := pkgpattern.MatchPattern(pattern)
+
+ if i := strings.Index(pattern, "..."); i >= 0 {
+ base = pathpkg.Dir(pattern[:i+3])
+ if base == "." {
+ return nil, nil, &WildcardInFirstElementError{Pattern: pattern, Query: query}
+ }
+ match = func(mod module.Version, roots []string, isLocal bool) *search.Match {
+ m := search.NewMatch(pattern)
+ matchPackages(ctx, m, imports.AnyTags(), omitStd, []module.Version{mod})
+ return m
+ }
+ } else {
+ match = func(mod module.Version, roots []string, isLocal bool) *search.Match {
+ m := search.NewMatch(pattern)
+ prefix := mod.Path
+ if MainModules.Contains(mod.Path) {
+ prefix = MainModules.PathPrefix(module.Version{Path: mod.Path})
+ }
+ for _, root := range roots {
+ if _, ok, err := dirInModule(pattern, prefix, root, isLocal); err != nil {
+ m.AddError(err)
+ } else if ok {
+ m.Pkgs = []string{pattern}
+ }
+ }
+ return m
+ }
+ }
+
+ var mainModuleMatches []module.Version
+ for _, mainModule := range MainModules.Versions() {
+ m := match(mainModule, modRoots, true)
+ if len(m.Pkgs) > 0 {
+ if query != "upgrade" && query != "patch" {
+ return nil, nil, &QueryMatchesPackagesInMainModuleError{
+ Pattern: pattern,
+ Query: query,
+ Packages: m.Pkgs,
+ }
+ }
+ if err := allowed(ctx, mainModule); err != nil {
+ return nil, nil, fmt.Errorf("internal error: package %s is in the main module (%s), but version is not allowed: %w", pattern, mainModule.Path, err)
+ }
+ return []QueryResult{{
+ Mod: mainModule,
+ Rev: &modfetch.RevInfo{Version: mainModule.Version},
+ Packages: m.Pkgs,
+ }}, nil, nil
+ }
+ if err := firstError(m); err != nil {
+ return nil, nil, err
+ }
+
+ var matchesMainModule bool
+ if matchPattern(mainModule.Path) {
+ mainModuleMatches = append(mainModuleMatches, mainModule)
+ matchesMainModule = true
+ }
+
+ if (query == "upgrade" || query == "patch") && matchesMainModule {
+ if err := allowed(ctx, mainModule); err == nil {
+ modOnly = &QueryResult{
+ Mod: mainModule,
+ Rev: &modfetch.RevInfo{Version: mainModule.Version},
+ }
+ }
+ }
+ }
+
+ var (
+ results []QueryResult
+ candidateModules = modulePrefixesExcludingTarget(base)
+ )
+ if len(candidateModules) == 0 {
+ if modOnly != nil {
+ return nil, modOnly, nil
+ } else if len(mainModuleMatches) != 0 {
+ return nil, nil, &QueryMatchesMainModulesError{
+ MainModules: mainModuleMatches,
+ Pattern: pattern,
+ Query: query,
+ }
+ } else {
+ return nil, nil, &PackageNotInModuleError{
+ MainModules: mainModuleMatches,
+ Query: query,
+ Pattern: pattern,
+ }
+ }
+ }
+
+ err = modfetch.TryProxies(func(proxy string) error {
+ queryModule := func(ctx context.Context, path string) (r QueryResult, err error) {
+ ctx, span := trace.StartSpan(ctx, "modload.QueryPattern.queryModule ["+proxy+"] "+path)
+ defer span.Done()
+
+ pathCurrent := current(path)
+ r.Mod.Path = path
+ r.Rev, err = queryProxy(ctx, proxy, path, query, pathCurrent, allowed, nil)
+ if err != nil {
+ return r, err
+ }
+ r.Mod.Version = r.Rev.Version
+ if gover.IsToolchain(r.Mod.Path) {
+ return r, nil
+ }
+ root, isLocal, err := fetch(ctx, r.Mod)
+ if err != nil {
+ return r, err
+ }
+ m := match(r.Mod, []string{root}, isLocal)
+ r.Packages = m.Pkgs
+ if len(r.Packages) == 0 && !matchPattern(path) {
+ if err := firstError(m); err != nil {
+ return r, err
+ }
+ replacement := Replacement(r.Mod)
+ return r, &PackageNotInModuleError{
+ Mod: r.Mod,
+ Replacement: replacement,
+ Query: query,
+ Pattern: pattern,
+ }
+ }
+ return r, nil
+ }
+
+ allResults, err := queryPrefixModules(ctx, candidateModules, queryModule)
+ results = allResults[:0]
+ for _, r := range allResults {
+ if len(r.Packages) == 0 {
+ modOnly = &r
+ } else {
+ results = append(results, r)
+ }
+ }
+ return err
+ })
+
+ if len(mainModuleMatches) > 0 && len(results) == 0 && modOnly == nil && errors.Is(err, fs.ErrNotExist) {
+ return nil, nil, &QueryMatchesMainModulesError{
+ Pattern: pattern,
+ Query: query,
+ }
+ }
+ return slices.Clip(results), modOnly, err
+}
+
+// modulePrefixesExcludingTarget returns all prefixes of path that may plausibly
+// exist as a module, excluding the main modules' paths but otherwise including path
+// itself, sorted by descending length. Prefixes that are not valid module paths
+// but are valid package paths (like "m" or "example.com/.gen") are included,
+// since they might be replaced.
+func modulePrefixesExcludingTarget(path string) []string {
+ prefixes := make([]string, 0, strings.Count(path, "/")+1)
+
+ mainModulePrefixes := make(map[string]bool)
+ for _, m := range MainModules.Versions() {
+ mainModulePrefixes[m.Path] = true
+ }
+
+ for {
+ if !mainModulePrefixes[path] {
+ if _, _, ok := module.SplitPathVersion(path); ok {
+ prefixes = append(prefixes, path)
+ }
+ }
+
+ j := strings.LastIndexByte(path, '/')
+ if j < 0 {
+ break
+ }
+ path = path[:j]
+ }
+
+ return prefixes
+}
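+
+// For example, with the hypothetical path "example.com/a/b" and no main module among
+// its prefixes, modulePrefixesExcludingTarget returns
+// ["example.com/a/b", "example.com/a", "example.com"].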
+
+func queryPrefixModules(ctx context.Context, candidateModules []string, queryModule func(ctx context.Context, path string) (QueryResult, error)) (found []QueryResult, err error) {
+ ctx, span := trace.StartSpan(ctx, "modload.queryPrefixModules")
+ defer span.Done()
+
+ // If the path we're attempting is not in the module cache and we don't have a
+ // fetch result cached either, we'll end up making a (potentially slow)
+ // request to the proxy or (often even slower) the origin server.
+ // To minimize latency, execute all of those requests in parallel.
+ type result struct {
+ QueryResult
+ err error
+ }
+ results := make([]result, len(candidateModules))
+ var wg sync.WaitGroup
+ wg.Add(len(candidateModules))
+ for i, p := range candidateModules {
+ ctx := trace.StartGoroutine(ctx)
+ go func(p string, r *result) {
+ r.QueryResult, r.err = queryModule(ctx, p)
+ wg.Done()
+ }(p, &results[i])
+ }
+ wg.Wait()
+
+ // Classify the results. In case of failure, identify the error that the user
+ // is most likely to find helpful: the most useful class of error at the
+ // longest matching path.
+ var (
+ noPackage *PackageNotInModuleError
+ noVersion *NoMatchingVersionError
+ noPatchBase *NoPatchBaseError
+ invalidPath *module.InvalidPathError // see comment in case below
+ notExistErr error
+ )
+ for _, r := range results {
+ switch rErr := r.err.(type) {
+ case nil:
+ found = append(found, r.QueryResult)
+ case *PackageNotInModuleError:
+ // Given the option, prefer to attribute “package not in module”
+ // to modules other than the main one.
+ if noPackage == nil || MainModules.Contains(noPackage.Mod.Path) {
+ noPackage = rErr
+ }
+ case *NoMatchingVersionError:
+ if noVersion == nil {
+ noVersion = rErr
+ }
+ case *NoPatchBaseError:
+ if noPatchBase == nil {
+ noPatchBase = rErr
+ }
+ case *module.InvalidPathError:
+ // The prefix was not a valid module path, and there was no replacement.
+ // Prefixes like this may appear in candidateModules, since we handle
+ // replaced modules that weren't required in the repo lookup process
+ // (see lookupRepo).
+ //
+ // A shorter prefix may be a valid module path and may contain a valid
+ // import path, so this is a low-priority error.
+ if invalidPath == nil {
+ invalidPath = rErr
+ }
+ default:
+ if errors.Is(rErr, fs.ErrNotExist) {
+ if notExistErr == nil {
+ notExistErr = rErr
+ }
+ } else if err == nil {
+ if len(found) > 0 || noPackage != nil {
+ // golang.org/issue/34094: If we have already found a module that
+ // could potentially contain the target package, ignore unclassified
+ // errors for modules with shorter paths.
+
+ // golang.org/issue/34383 is a special case of this: if we have
+ // already found example.com/foo/v2@v2.0.0 with a matching go.mod
+ // file, ignore the error from example.com/foo@v2.0.0.
+ } else {
+ err = r.err
+ }
+ }
+ }
+ }
+
+ // TODO(#26232): If len(found) == 0 and some of the errors are 4xx HTTP
+ // codes, have the auth package recheck the failed paths.
+ // If we obtain new credentials for any of them, re-run the above loop.
+
+ if len(found) == 0 && err == nil {
+ switch {
+ case noPackage != nil:
+ err = noPackage
+ case noVersion != nil:
+ err = noVersion
+ case noPatchBase != nil:
+ err = noPatchBase
+ case invalidPath != nil:
+ err = invalidPath
+ case notExistErr != nil:
+ err = notExistErr
+ default:
+ panic("queryPrefixModules: no modules found, but no error detected")
+ }
+ }
+
+ return found, err
+}
+
+// A NoMatchingVersionError indicates that Query found a module at the requested
+// path, but not at any versions satisfying the query string and allow-function.
+//
+// NOTE: NoMatchingVersionError MUST NOT implement Is(fs.ErrNotExist).
+//
+// If the module came from a proxy, that proxy had to return a successful status
+// code for the versions it knows about, and thus did not have the opportunity
+// to return a non-400 status code to suppress fallback.
+type NoMatchingVersionError struct {
+ query, current string
+}
+
+func (e *NoMatchingVersionError) Error() string {
+ currentSuffix := ""
+ if (e.query == "upgrade" || e.query == "patch") && e.current != "" && e.current != "none" {
+ currentSuffix = fmt.Sprintf(" (current version is %s)", e.current)
+ }
+ return fmt.Sprintf("no matching versions for query %q", e.query) + currentSuffix
+}
+
+// A NoPatchBaseError indicates that Query was called with the query "patch"
+// but with a current version of "" or "none".
+type NoPatchBaseError struct {
+ path string
+}
+
+func (e *NoPatchBaseError) Error() string {
+ return fmt.Sprintf(`can't query version "patch" of module %s: no existing version is required`, e.path)
+}
+
+// A WildcardInFirstElementError indicates that a pattern passed to QueryPattern
+// had a wildcard in its first path element, and therefore had no pattern-prefix
+// modules to search in.
+type WildcardInFirstElementError struct {
+ Pattern string
+ Query string
+}
+
+func (e *WildcardInFirstElementError) Error() string {
+ return fmt.Sprintf("no modules to query for %s@%s because first path element contains a wildcard", e.Pattern, e.Query)
+}
+
+// A PackageNotInModuleError indicates that QueryPattern found a candidate
+// module at the requested version, but that module did not contain any packages
+// matching the requested pattern.
+//
+// NOTE: PackageNotInModuleError MUST NOT implement Is(fs.ErrNotExist).
+//
+// If the module came from a proxy, that proxy had to return a successful status
+// code for the versions it knows about, and thus did not have the opportunity
+// to return a non-400 status code to suppress fallback.
+type PackageNotInModuleError struct {
+ MainModules []module.Version
+ Mod module.Version
+ Replacement module.Version
+ Query string
+ Pattern string
+}
+
+func (e *PackageNotInModuleError) Error() string {
+ if len(e.MainModules) > 0 {
+ prefix := "workspace modules do"
+ if len(e.MainModules) == 1 {
+ prefix = fmt.Sprintf("main module (%s) does", e.MainModules[0])
+ }
+ if strings.Contains(e.Pattern, "...") {
+ return fmt.Sprintf("%s not contain packages matching %s", prefix, e.Pattern)
+ }
+ return fmt.Sprintf("%s not contain package %s", prefix, e.Pattern)
+ }
+
+ found := ""
+ if r := e.Replacement; r.Path != "" {
+ replacement := r.Path
+ if r.Version != "" {
+ replacement = fmt.Sprintf("%s@%s", r.Path, r.Version)
+ }
+ if e.Query == e.Mod.Version {
+ found = fmt.Sprintf(" (replaced by %s)", replacement)
+ } else {
+ found = fmt.Sprintf(" (%s, replaced by %s)", e.Mod.Version, replacement)
+ }
+ } else if e.Query != e.Mod.Version {
+ found = fmt.Sprintf(" (%s)", e.Mod.Version)
+ }
+
+ if strings.Contains(e.Pattern, "...") {
+ return fmt.Sprintf("module %s@%s found%s, but does not contain packages matching %s", e.Mod.Path, e.Query, found, e.Pattern)
+ }
+ return fmt.Sprintf("module %s@%s found%s, but does not contain package %s", e.Mod.Path, e.Query, found, e.Pattern)
+}
+
+func (e *PackageNotInModuleError) ImportPath() string {
+ if !strings.Contains(e.Pattern, "...") {
+ return e.Pattern
+ }
+ return ""
+}
+
+// versionHasGoMod returns whether a version has a go.mod file.
+//
+// versionHasGoMod fetches the go.mod file (possibly a fake) and reports true if it
+// contains anything other than a module directive with the same path. When a
+// module does not have a real go.mod file, the go command acts as if it had one
+// that only contained a module directive. Normal go.mod files created after
+// Go 1.12 at least have a go directive.
+//
+// This function is a heuristic, since it's possible to commit a file that would
+// pass this test. However, we only need a heuristic for determining whether
+// +incompatible versions may be "latest", which is what this function is used
+// for.
+//
+// This heuristic is useful for two reasons: first, when using a proxy,
+// this lets us fetch from the .mod endpoint which is much faster than the .zip
+// endpoint. The .mod file is used anyway, even if the .zip file contains a
+// go.mod with different content. Second, if we don't fetch the .zip, then
+// we don't need to verify it in go.sum. This makes 'go list -m -u' faster
+// and simpler.
+func versionHasGoMod(_ context.Context, m module.Version) (bool, error) {
+ _, data, err := rawGoModData(m)
+ if err != nil {
+ return false, err
+ }
+ isFake := bytes.Equal(data, modfetch.LegacyGoMod(m.Path))
+ return !isFake, nil
+}
+
+// A versionRepo is a subset of modfetch.Repo that can report information about
+// available versions, but cannot fetch specific source files.
+type versionRepo interface {
+ ModulePath() string
+ CheckReuse(context.Context, *codehost.Origin) error
+ Versions(ctx context.Context, prefix string) (*modfetch.Versions, error)
+ Stat(ctx context.Context, rev string) (*modfetch.RevInfo, error)
+ Latest(context.Context) (*modfetch.RevInfo, error)
+}
+
+var _ versionRepo = modfetch.Repo(nil)
+
+func lookupRepo(ctx context.Context, proxy, path string) (repo versionRepo, err error) {
+ if path != "go" && path != "toolchain" {
+ err = module.CheckPath(path)
+ }
+ if err == nil {
+ repo = modfetch.Lookup(ctx, proxy, path)
+ } else {
+ repo = emptyRepo{path: path, err: err}
+ }
+
+ if MainModules == nil {
+ return repo, err
+ } else if _, ok := MainModules.HighestReplaced()[path]; ok {
+ return &replacementRepo{repo: repo}, nil
+ }
+
+ return repo, err
+}
+
+// An emptyRepo is a versionRepo that contains no versions.
+type emptyRepo struct {
+ path string
+ err error
+}
+
+var _ versionRepo = emptyRepo{}
+
+func (er emptyRepo) ModulePath() string { return er.path }
+func (er emptyRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error {
+ return fmt.Errorf("empty repo")
+}
+func (er emptyRepo) Versions(ctx context.Context, prefix string) (*modfetch.Versions, error) {
+ return &modfetch.Versions{}, nil
+}
+func (er emptyRepo) Stat(ctx context.Context, rev string) (*modfetch.RevInfo, error) {
+ return nil, er.err
+}
+func (er emptyRepo) Latest(ctx context.Context) (*modfetch.RevInfo, error) { return nil, er.err }
+
+// A replacementRepo augments a versionRepo to include the replacement versions
+// (if any) found in the main module's go.mod file.
+//
+// A replacementRepo suppresses "not found" errors for otherwise-nonexistent
+// modules, so a replacementRepo should only be constructed for a module that
+// actually has one or more valid replacements.
+type replacementRepo struct {
+ repo versionRepo
+}
+
+var _ versionRepo = (*replacementRepo)(nil)
+
+func (rr *replacementRepo) ModulePath() string { return rr.repo.ModulePath() }
+
+func (rr *replacementRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error {
+ return fmt.Errorf("replacement repo")
+}
+
+// Versions returns the versions from rr.repo augmented with any matching
+// replacement versions.
+func (rr *replacementRepo) Versions(ctx context.Context, prefix string) (*modfetch.Versions, error) {
+ repoVersions, err := rr.repo.Versions(ctx, prefix)
+ if err != nil {
+ if !errors.Is(err, os.ErrNotExist) {
+ return nil, err
+ }
+ repoVersions = new(modfetch.Versions)
+ }
+
+ versions := repoVersions.List
+ for _, mm := range MainModules.Versions() {
+ if index := MainModules.Index(mm); index != nil && len(index.replace) > 0 {
+ path := rr.ModulePath()
+ for m := range index.replace {
+ if m.Path == path && strings.HasPrefix(m.Version, prefix) && m.Version != "" && !module.IsPseudoVersion(m.Version) {
+ versions = append(versions, m.Version)
+ }
+ }
+ }
+ }
+
+ if len(versions) == len(repoVersions.List) { // No replacement versions were added.
+ return repoVersions, nil
+ }
+
+ path := rr.ModulePath()
+ sort.Slice(versions, func(i, j int) bool {
+ return gover.ModCompare(path, versions[i], versions[j]) < 0
+ })
+ str.Uniq(&versions)
+ return &modfetch.Versions{List: versions}, nil
+}
+
+func (rr *replacementRepo) Stat(ctx context.Context, rev string) (*modfetch.RevInfo, error) {
+ info, err := rr.repo.Stat(ctx, rev)
+ if err == nil {
+ return info, err
+ }
+ var hasReplacements bool
+ for _, v := range MainModules.Versions() {
+ if index := MainModules.Index(v); index != nil && len(index.replace) > 0 {
+ hasReplacements = true
+ }
+ }
+ if !hasReplacements {
+ return info, err
+ }
+
+ v := module.CanonicalVersion(rev)
+ if v != rev {
+ // The replacements in the go.mod file list only canonical semantic versions,
+ // so a non-canonical version can't possibly have a replacement.
+ return info, err
+ }
+
+ path := rr.ModulePath()
+ _, pathMajor, ok := module.SplitPathVersion(path)
+ if ok && pathMajor == "" {
+ if err := module.CheckPathMajor(v, pathMajor); err != nil && semver.Build(v) == "" {
+ v += "+incompatible"
+ }
+ }
+
+ if r := Replacement(module.Version{Path: path, Version: v}); r.Path == "" {
+ return info, err
+ }
+ return rr.replacementStat(v)
+}
+
+func (rr *replacementRepo) Latest(ctx context.Context) (*modfetch.RevInfo, error) {
+ info, err := rr.repo.Latest(ctx)
+ path := rr.ModulePath()
+
+ if v, ok := MainModules.HighestReplaced()[path]; ok {
+ if v == "" {
+ // The only replacement is a wildcard that doesn't specify a version, so
+ // synthesize a pseudo-version with an appropriate major version and a
+ // timestamp below any real timestamp. That way, if the main module is
+ // used from within some other module, the user will be able to upgrade
+ // the requirement to any real version they choose.
+ if _, pathMajor, ok := module.SplitPathVersion(path); ok && len(pathMajor) > 0 {
+ v = module.PseudoVersion(pathMajor[1:], "", time.Time{}, "000000000000")
+ } else {
+ v = module.PseudoVersion("v0", "", time.Time{}, "000000000000")
+ }
+ }
+
+ if err != nil || gover.ModCompare(path, v, info.Version) > 0 {
+ return rr.replacementStat(v)
+ }
+ }
+
+ return info, err
+}
+
+func (rr *replacementRepo) replacementStat(v string) (*modfetch.RevInfo, error) {
+ rev := &modfetch.RevInfo{Version: v}
+ if module.IsPseudoVersion(v) {
+ rev.Time, _ = module.PseudoVersionTime(v)
+ rev.Short, _ = module.PseudoVersionRev(v)
+ }
+ return rev, nil
+}
+
+// A QueryMatchesMainModulesError indicates that a query requests
+// a version of the main module that cannot be satisfied.
+// (The main module's version cannot be changed.)
+type QueryMatchesMainModulesError struct {
+ MainModules []module.Version
+ Pattern string
+ Query string
+}
+
+func (e *QueryMatchesMainModulesError) Error() string {
+ if MainModules.Contains(e.Pattern) {
+ return fmt.Sprintf("can't request version %q of the main module (%s)", e.Query, e.Pattern)
+ }
+
+ plural := ""
+ mainModulePaths := make([]string, len(e.MainModules))
+ for i := range e.MainModules {
+ mainModulePaths[i] = e.MainModules[i].Path
+ }
+ if len(e.MainModules) > 1 {
+ plural = "s"
+ }
+ return fmt.Sprintf("can't request version %q of pattern %q that includes the main module%s (%s)", e.Query, e.Pattern, plural, strings.Join(mainModulePaths, ", "))
+}
+
+// A QueryUpgradesAllError indicates that a query requests
+// an upgrade on the all pattern.
+// (The main module's version cannot be changed.)
+type QueryUpgradesAllError struct {
+ MainModules []module.Version
+ Query string
+}
+
+func (e *QueryUpgradesAllError) Error() string {
+ var plural string = ""
+ if len(e.MainModules) != 1 {
+ plural = "s"
+ }
+
+ return fmt.Sprintf("can't request version %q of pattern \"all\" that includes the main module%s", e.Query, plural)
+}
+
+// A QueryMatchesPackagesInMainModuleError indicates that a query cannot be
+// satisfied because it matches one or more packages found in the main module.
+type QueryMatchesPackagesInMainModuleError struct {
+ Pattern string
+ Query string
+ Packages []string
+}
+
+func (e *QueryMatchesPackagesInMainModuleError) Error() string {
+ if len(e.Packages) > 1 {
+ return fmt.Sprintf("pattern %s matches %d packages in the main module, so can't request version %s", e.Pattern, len(e.Packages), e.Query)
+ }
+
+ if search.IsMetaPackage(e.Pattern) || strings.Contains(e.Pattern, "...") {
+ return fmt.Sprintf("pattern %s matches package %s in the main module, so can't request version %s", e.Pattern, e.Packages[0], e.Query)
+ }
+
+ return fmt.Sprintf("package %s is in the main module, so can't request version %s", e.Packages[0], e.Query)
+}
diff --git a/src/cmd/go/internal/modload/query_test.go b/src/cmd/go/internal/modload/query_test.go
new file mode 100644
index 0000000..93f8f0d
--- /dev/null
+++ b/src/cmd/go/internal/modload/query_test.go
@@ -0,0 +1,202 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "context"
+ "flag"
+ "internal/testenv"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/vcweb/vcstest"
+
+ "golang.org/x/mod/module"
+)
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if err := testMain(m); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func testMain(m *testing.M) (err error) {
+ cfg.GOPROXY = "direct"
+ cfg.ModCacheRW = true
+
+ srv, err := vcstest.NewServer()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if closeErr := srv.Close(); err == nil {
+ err = closeErr
+ }
+ }()
+
+ dir, err := os.MkdirTemp("", "modload-test-")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if rmErr := os.RemoveAll(dir); err == nil {
+ err = rmErr
+ }
+ }()
+
+ os.Setenv("GOPATH", dir)
+ cfg.BuildContext.GOPATH = dir
+ cfg.GOMODCACHE = filepath.Join(dir, "pkg/mod")
+ cfg.SumdbDir = filepath.Join(dir, "pkg/sumdb")
+ m.Run()
+ return nil
+}
+
+var (
+ queryRepo = "vcs-test.golang.org/git/querytest.git"
+ queryRepoV2 = queryRepo + "/v2"
+ queryRepoV3 = queryRepo + "/v3"
+
+ // No semver tags: the version list is empty even though the repository itself is not.
+ emptyRepoPath = "vcs-test.golang.org/git/emptytest.git"
+)
+
+var queryTests = []struct {
+ path string
+ query string
+ current string
+ allow string
+ vers string
+ err string
+}{
+ {path: queryRepo, query: "<v0.0.0", vers: "v0.0.0-pre1"},
+ {path: queryRepo, query: "<v0.0.0-pre1", err: `no matching versions for query "<v0.0.0-pre1"`},
+ {path: queryRepo, query: "<=v0.0.0", vers: "v0.0.0"},
+ {path: queryRepo, query: ">v0.0.0", vers: "v0.0.1"},
+ {path: queryRepo, query: ">=v0.0.0", vers: "v0.0.0"},
+ {path: queryRepo, query: "v0.0.1", vers: "v0.0.1"},
+ {path: queryRepo, query: "v0.0.1+foo", vers: "v0.0.1"},
+ {path: queryRepo, query: "v0.0.99", err: `vcs-test.golang.org/git/querytest.git@v0.0.99: invalid version: unknown revision v0.0.99`},
+ {path: queryRepo, query: "v0", vers: "v0.3.0"},
+ {path: queryRepo, query: "v0.1", vers: "v0.1.2"},
+ {path: queryRepo, query: "v0.2", err: `no matching versions for query "v0.2"`},
+ {path: queryRepo, query: "v0.0", vers: "v0.0.3"},
+ {path: queryRepo, query: "v1.9.10-pre2+metadata", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"},
+ {path: queryRepo, query: "ed5ffdaa", vers: "v1.9.10-pre2.0.20191220134614-ed5ffdaa1f5e"},
+
+ // golang.org/issue/29262: The major version for a module without a suffix
+ // should be based on the most recent tag (v1 as appropriate, not v0
+ // unconditionally).
+ {path: queryRepo, query: "42abcb6df8ee", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"},
+
+ {path: queryRepo, query: "v1.9.10-pre2+wrongmetadata", err: `vcs-test.golang.org/git/querytest.git@v1.9.10-pre2+wrongmetadata: invalid version: unknown revision v1.9.10-pre2+wrongmetadata`},
+ {path: queryRepo, query: "v1.9.10-pre2", err: `vcs-test.golang.org/git/querytest.git@v1.9.10-pre2: invalid version: unknown revision v1.9.10-pre2`},
+ {path: queryRepo, query: "latest", vers: "v1.9.9"},
+ {path: queryRepo, query: "latest", current: "v1.9.10-pre1", vers: "v1.9.9"},
+ {path: queryRepo, query: "upgrade", vers: "v1.9.9"},
+ {path: queryRepo, query: "upgrade", current: "v1.9.10-pre1", vers: "v1.9.10-pre1"},
+ {path: queryRepo, query: "upgrade", current: "v1.9.10-pre2+metadata", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"},
+ {path: queryRepo, query: "upgrade", current: "v0.0.0-20190513201126-42abcb6df8ee", vers: "v0.0.0-20190513201126-42abcb6df8ee"},
+ {path: queryRepo, query: "upgrade", allow: "NOMATCH", err: `no matching versions for query "upgrade"`},
+ {path: queryRepo, query: "upgrade", current: "v1.9.9", allow: "NOMATCH", err: `vcs-test.golang.org/git/querytest.git@v1.9.9: disallowed module version`},
+ {path: queryRepo, query: "upgrade", current: "v1.99.99", err: `vcs-test.golang.org/git/querytest.git@v1.99.99: invalid version: unknown revision v1.99.99`},
+ {path: queryRepo, query: "patch", current: "", err: `can't query version "patch" of module vcs-test.golang.org/git/querytest.git: no existing version is required`},
+ {path: queryRepo, query: "patch", current: "v0.1.0", vers: "v0.1.2"},
+ {path: queryRepo, query: "patch", current: "v1.9.0", vers: "v1.9.9"},
+ {path: queryRepo, query: "patch", current: "v1.9.10-pre1", vers: "v1.9.10-pre1"},
+ {path: queryRepo, query: "patch", current: "v1.9.10-pre2+metadata", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"},
+ {path: queryRepo, query: "patch", current: "v1.99.99", err: `vcs-test.golang.org/git/querytest.git@v1.99.99: invalid version: unknown revision v1.99.99`},
+ {path: queryRepo, query: ">v1.9.9", vers: "v1.9.10-pre1"},
+ {path: queryRepo, query: ">v1.10.0", err: `no matching versions for query ">v1.10.0"`},
+ {path: queryRepo, query: ">=v1.10.0", err: `no matching versions for query ">=v1.10.0"`},
+ {path: queryRepo, query: "6cf84eb", vers: "v0.0.2-0.20180704023347-6cf84ebaea54"},
+
+ // golang.org/issue/27173: A pseudo-version may be based on the highest tag on
+ // any parent commit, or any existing semantically-lower tag: a given commit
+ // could have been a pre-release for a backport tag at any point.
+ {path: queryRepo, query: "3ef0cec634e0", vers: "v0.1.2-0.20180704023347-3ef0cec634e0"},
+ {path: queryRepo, query: "v0.1.2-0.20180704023347-3ef0cec634e0", vers: "v0.1.2-0.20180704023347-3ef0cec634e0"},
+ {path: queryRepo, query: "v0.1.1-0.20180704023347-3ef0cec634e0", vers: "v0.1.1-0.20180704023347-3ef0cec634e0"},
+ {path: queryRepo, query: "v0.0.4-0.20180704023347-3ef0cec634e0", vers: "v0.0.4-0.20180704023347-3ef0cec634e0"},
+
+ // Invalid tags are tested in cmd/go/testdata/script/mod_pseudo_invalid.txt.
+
+ {path: queryRepo, query: "start", vers: "v0.0.0-20180704023101-5e9e31667ddf"},
+ {path: queryRepo, query: "5e9e31667ddf", vers: "v0.0.0-20180704023101-5e9e31667ddf"},
+ {path: queryRepo, query: "v0.0.0-20180704023101-5e9e31667ddf", vers: "v0.0.0-20180704023101-5e9e31667ddf"},
+
+ {path: queryRepo, query: "7a1b6bf", vers: "v0.1.0"},
+
+ {path: queryRepoV2, query: "<v0.0.0", err: `no matching versions for query "<v0.0.0"`},
+ {path: queryRepoV2, query: "<=v0.0.0", err: `no matching versions for query "<=v0.0.0"`},
+ {path: queryRepoV2, query: ">v0.0.0", vers: "v2.0.0"},
+ {path: queryRepoV2, query: ">=v0.0.0", vers: "v2.0.0"},
+
+ {path: queryRepoV2, query: "v2", vers: "v2.5.5"},
+ {path: queryRepoV2, query: "v2.5", vers: "v2.5.5"},
+ {path: queryRepoV2, query: "v2.6", err: `no matching versions for query "v2.6"`},
+ {path: queryRepoV2, query: "v2.6.0-pre1", vers: "v2.6.0-pre1"},
+ {path: queryRepoV2, query: "latest", vers: "v2.5.5"},
+
+ // Commit e0cf3de987e6 is actually v1.19.10-pre1, not anything resembling v3,
+ // and it has a go.mod file with a non-v3 module path. Attempting to query it
+ // as the v3 module should fail.
+ {path: queryRepoV3, query: "e0cf3de987e6", err: `vcs-test.golang.org/git/querytest.git/v3@v3.0.0-20180704024501-e0cf3de987e6: invalid version: go.mod has non-.../v3 module path "vcs-test.golang.org/git/querytest.git" (and .../v3/go.mod does not exist) at revision e0cf3de987e6`},
+
+ // The querytest repo does not have any commits tagged with major version 3,
+ // and the latest commit in the repo has a go.mod file specifying a non-v3 path.
+ // That should prevent us from resolving any version for the /v3 path.
+ {path: queryRepoV3, query: "latest", err: `no matching versions for query "latest"`},
+
+ {path: emptyRepoPath, query: "latest", vers: "v0.0.0-20180704023549-7bb914627242"},
+ {path: emptyRepoPath, query: ">v0.0.0", err: `no matching versions for query ">v0.0.0"`},
+ {path: emptyRepoPath, query: "<v10.0.0", err: `no matching versions for query "<v10.0.0"`},
+}
+
+func TestQuery(t *testing.T) {
+ testenv.MustHaveExternalNetwork(t)
+ testenv.MustHaveExecPath(t, "git")
+
+ ctx := context.Background()
+
+ for _, tt := range queryTests {
+ allow := tt.allow
+ if allow == "" {
+ allow = "*"
+ }
+ allowed := func(ctx context.Context, m module.Version) error {
+ if ok, _ := path.Match(allow, m.Version); !ok {
+ return module.VersionError(m, ErrDisallowed)
+ }
+ return nil
+ }
+ tt := tt
+ t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.query+"/"+tt.current+"/"+allow, func(t *testing.T) {
+ t.Parallel()
+
+ info, err := Query(ctx, tt.path, tt.query, tt.current, allowed)
+ if tt.err != "" {
+ if err == nil {
+ t.Errorf("Query(_, %q, %q, %q, %v) = %v, want error %q", tt.path, tt.query, tt.current, allow, info.Version, tt.err)
+ } else if err.Error() != tt.err {
+ t.Errorf("Query(_, %q, %q, %q, %v): %v\nwant error %q", tt.path, tt.query, tt.current, allow, err, tt.err)
+ }
+ return
+ }
+ if err != nil {
+ t.Fatalf("Query(_, %q, %q, %q, %v): %v\nwant %v", tt.path, tt.query, tt.current, allow, err, tt.vers)
+ }
+ if info.Version != tt.vers {
+ t.Errorf("Query(_, %q, %q, %q, %v) = %v, want %v", tt.path, tt.query, tt.current, allow, info.Version, tt.vers)
+ }
+ })
+ }
+}
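As a rough companion to the comparison-style queries exercised above ("<v0.0.0", ">v1.9.9", "latest"), the following self-contained sketch evaluates such queries against a plain tag list with golang.org/x/mod/semver. It is a simplification, not the cmd/go implementation: the real Query also resolves revisions, synthesizes pseudo-versions, and applies the allowed filter.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/mod/semver"
)

// evalQuery returns the highest tag satisfying a simple comparison query,
// or "" if none matches. "latest" picks the highest non-prerelease tag.
func evalQuery(query string, tags []string) string {
	match := func(v string) bool {
		switch {
		case strings.HasPrefix(query, "<="):
			return semver.Compare(v, query[2:]) <= 0
		case strings.HasPrefix(query, ">="):
			return semver.Compare(v, query[2:]) >= 0
		case strings.HasPrefix(query, "<"):
			return semver.Compare(v, query[1:]) < 0
		case strings.HasPrefix(query, ">"):
			return semver.Compare(v, query[1:]) > 0
		case query == "latest":
			return semver.Prerelease(v) == ""
		default:
			return v == query
		}
	}
	best := ""
	for _, v := range tags {
		if match(v) && (best == "" || semver.Compare(v, best) > 0) {
			best = v
		}
	}
	return best
}

func main() {
	tags := []string{"v0.0.0-pre1", "v0.0.0", "v0.0.1", "v1.9.9", "v1.9.10-pre1"}
	for _, q := range []string{"<v0.0.0", ">v1.9.9", "latest"} {
		fmt.Printf("%-10s => %q\n", q, evalQuery(q, tags))
	}
}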
diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go
new file mode 100644
index 0000000..cb03b69
--- /dev/null
+++ b/src/cmd/go/internal/modload/search.go
@@ -0,0 +1,304 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/modindex"
+ "cmd/go/internal/par"
+ "cmd/go/internal/search"
+ "cmd/go/internal/str"
+ "cmd/go/internal/trace"
+ "cmd/internal/pkgpattern"
+
+ "golang.org/x/mod/module"
+)
+
+type stdFilter int8
+
+const (
+ omitStd = stdFilter(iota)
+ includeStd
+)
+
+// matchPackages is like m.MatchPackages, but uses a local variable (rather than
+// a global) for tags, can include or exclude packages in the standard library,
+// and is restricted to the given list of modules.
+func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, filter stdFilter, modules []module.Version) {
+ ctx, span := trace.StartSpan(ctx, "modload.matchPackages")
+ defer span.Done()
+
+ m.Pkgs = []string{}
+
+ isMatch := func(string) bool { return true }
+ treeCanMatch := func(string) bool { return true }
+ if !m.IsMeta() {
+ isMatch = pkgpattern.MatchPattern(m.Pattern())
+ treeCanMatch = pkgpattern.TreeCanMatchPattern(m.Pattern())
+ }
+
+ var mu sync.Mutex
+ have := map[string]bool{
+ "builtin": true, // ignore pseudo-package that exists only for documentation
+ }
+ addPkg := func(p string) {
+ mu.Lock()
+ m.Pkgs = append(m.Pkgs, p)
+ mu.Unlock()
+ }
+ if !cfg.BuildContext.CgoEnabled {
+ have["runtime/cgo"] = true // ignore during walk
+ }
+
+ type pruning int8
+ const (
+ pruneVendor = pruning(1 << iota)
+ pruneGoMod
+ )
+
+ q := par.NewQueue(runtime.GOMAXPROCS(0))
+
+ walkPkgs := func(root, importPathRoot string, prune pruning) {
+ _, span := trace.StartSpan(ctx, "walkPkgs "+root)
+ defer span.Done()
+
+ // If the root itself is a symlink to a directory,
+ // we want to follow it (see https://go.dev/issue/50807).
+ // Add a trailing separator to force that to happen.
+ root = str.WithFilePathSeparator(filepath.Clean(root))
+ err := fsys.Walk(root, func(pkgDir string, fi fs.FileInfo, err error) error {
+ if err != nil {
+ m.AddError(err)
+ return nil
+ }
+
+ want := true
+ elem := ""
+
+ // Don't use GOROOT/src but do walk down into it.
+ if pkgDir == root {
+ if importPathRoot == "" {
+ return nil
+ }
+ } else {
+ // Avoid .foo, _foo, and testdata subdirectory trees.
+ _, elem = filepath.Split(pkgDir)
+ if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
+ want = false
+ }
+ }
+
+ name := path.Join(importPathRoot, filepath.ToSlash(pkgDir[len(root):]))
+ if !treeCanMatch(name) {
+ want = false
+ }
+
+ if !fi.IsDir() {
+ if fi.Mode()&fs.ModeSymlink != 0 && want && strings.Contains(m.Pattern(), "...") {
+ if target, err := fsys.Stat(pkgDir); err == nil && target.IsDir() {
+ fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", pkgDir)
+ }
+ }
+ return nil
+ }
+
+ if !want {
+ return filepath.SkipDir
+ }
+ // Stop at module boundaries.
+ if (prune&pruneGoMod != 0) && pkgDir != root {
+ if fi, err := os.Stat(filepath.Join(pkgDir, "go.mod")); err == nil && !fi.IsDir() {
+ return filepath.SkipDir
+ }
+ }
+
+ if !have[name] {
+ have[name] = true
+ if isMatch(name) {
+ q.Add(func() {
+ if _, _, err := scanDir(root, pkgDir, tags); err != imports.ErrNoGo {
+ addPkg(name)
+ }
+ })
+ }
+ }
+
+ if elem == "vendor" && (prune&pruneVendor != 0) {
+ return filepath.SkipDir
+ }
+ return nil
+ })
+ if err != nil {
+ m.AddError(err)
+ }
+ }
+
+ // Wait for all in-flight operations to complete before returning.
+ defer func() {
+ <-q.Idle()
+ sort.Strings(m.Pkgs) // sort everything we added for determinism
+ }()
+
+ if filter == includeStd {
+ walkPkgs(cfg.GOROOTsrc, "", pruneGoMod)
+ if treeCanMatch("cmd") {
+ walkPkgs(filepath.Join(cfg.GOROOTsrc, "cmd"), "cmd", pruneGoMod)
+ }
+ }
+
+ if cfg.BuildMod == "vendor" {
+ mod := MainModules.mustGetSingleMainModule()
+ if modRoot := MainModules.ModRoot(mod); modRoot != "" {
+ walkPkgs(modRoot, MainModules.PathPrefix(mod), pruneGoMod|pruneVendor)
+ walkPkgs(filepath.Join(modRoot, "vendor"), "", pruneVendor)
+ }
+ return
+ }
+
+ for _, mod := range modules {
+ if gover.IsToolchain(mod.Path) || !treeCanMatch(mod.Path) {
+ continue
+ }
+
+ var (
+ root, modPrefix string
+ isLocal bool
+ )
+ if MainModules.Contains(mod.Path) {
+ if MainModules.ModRoot(mod) == "" {
+ continue // If there is no main module, we can't search in it.
+ }
+ root = MainModules.ModRoot(mod)
+ modPrefix = MainModules.PathPrefix(mod)
+ isLocal = true
+ } else {
+ var err error
+ root, isLocal, err = fetch(ctx, mod)
+ if err != nil {
+ m.AddError(err)
+ continue
+ }
+ modPrefix = mod.Path
+ }
+ if mi, err := modindex.GetModule(root); err == nil {
+ walkFromIndex(mi, modPrefix, isMatch, treeCanMatch, tags, have, addPkg)
+ continue
+ } else if !errors.Is(err, modindex.ErrNotIndexed) {
+ m.AddError(err)
+ }
+
+ prune := pruneVendor
+ if isLocal {
+ prune |= pruneGoMod
+ }
+ walkPkgs(root, modPrefix, prune)
+ }
+}
+
+// walkFromIndex matches packages in a module using the module index. index is
+// the modindex.Module for the module, and importPathRoot is the module's path
+// prefix.
+func walkFromIndex(index *modindex.Module, importPathRoot string, isMatch, treeCanMatch func(string) bool, tags, have map[string]bool, addPkg func(string)) {
+ index.Walk(func(reldir string) {
+ // Avoid .foo, _foo, and testdata subdirectory trees.
+ p := reldir
+ for {
+ elem, rest, found := strings.Cut(p, string(filepath.Separator))
+ if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
+ return
+ }
+ if found && elem == "vendor" {
+ // Ignore this path if it contains the element "vendor" anywhere
+ // except for the last element (packages named vendor are allowed
+ // for historical reasons). Note that found is true when this
+ // isn't the last path element.
+ return
+ }
+ if !found {
+ // Didn't find the separator, so we're considering the last element.
+ break
+ }
+ p = rest
+ }
+
+ // Don't use GOROOT/src.
+ if reldir == "" && importPathRoot == "" {
+ return
+ }
+
+ name := path.Join(importPathRoot, filepath.ToSlash(reldir))
+ if !treeCanMatch(name) {
+ return
+ }
+
+ if !have[name] {
+ have[name] = true
+ if isMatch(name) {
+ if _, _, err := index.Package(reldir).ScanDir(tags); err != imports.ErrNoGo {
+ addPkg(name)
+ }
+ }
+ }
+ })
+}
+
+// MatchInModule identifies the packages matching the given pattern within the
+// given module version, which does not need to be in the build list or module
+// requirement graph.
+//
+// If m is the zero module.Version, MatchInModule matches the pattern
+// against the standard library (std and cmd) in GOROOT/src.
+func MatchInModule(ctx context.Context, pattern string, m module.Version, tags map[string]bool) *search.Match {
+ match := search.NewMatch(pattern)
+ if m == (module.Version{}) {
+ matchPackages(ctx, match, tags, includeStd, nil)
+ }
+
+ LoadModFile(ctx) // Sets Target, needed by fetch and matchPackages.
+
+ if !match.IsLiteral() {
+ matchPackages(ctx, match, tags, omitStd, []module.Version{m})
+ return match
+ }
+
+ root, isLocal, err := fetch(ctx, m)
+ if err != nil {
+ match.Errs = []error{err}
+ return match
+ }
+
+ dir, haveGoFiles, err := dirInModule(pattern, m.Path, root, isLocal)
+ if err != nil {
+ match.Errs = []error{err}
+ return match
+ }
+ if haveGoFiles {
+ if _, _, err := scanDir(root, dir, tags); err != imports.ErrNoGo {
+ // ErrNoGo indicates that the directory is not actually a Go package,
+ // perhaps due to the tags in use. Any other non-nil error indicates a
+ // problem with one or more of the Go source files, but such an error does
+ // not stop the package from existing, so it has no impact on matching.
+ match.Pkgs = []string{pattern}
+ }
+ }
+ return match
+}
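A stdlib-only sketch of the pruning rules that walkPkgs applies while scanning a module tree: skip dot, underscore, and testdata directories, stop at nested go.mod files, and skip vendor subtrees. It is a simplification; the real walk above is parallel, pattern-aware, and goes through the fsys overlay.

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
)

// listPackageDirs walks root and returns candidate package directories,
// applying coarse pruning rules similar to the walk above.
func listPackageDirs(root string) ([]string, error) {
	var dirs []string
	err := filepath.WalkDir(root, func(dir string, d fs.DirEntry, err error) error {
		if err != nil || !d.IsDir() {
			return err
		}
		if dir != root {
			elem := d.Name()
			// Avoid .foo, _foo, testdata, and vendor subtrees.
			if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") ||
				elem == "testdata" || elem == "vendor" {
				return filepath.SkipDir
			}
			// Stop at nested module boundaries.
			if fi, statErr := os.Stat(filepath.Join(dir, "go.mod")); statErr == nil && !fi.IsDir() {
				return filepath.SkipDir
			}
		}
		dirs = append(dirs, dir)
		return nil
	})
	return dirs, err
}

func main() {
	dirs, err := listPackageDirs(".")
	if err != nil {
		fmt.Println("walk error:", err)
		return
	}
	for _, d := range dirs {
		fmt.Println(d)
	}
}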
diff --git a/src/cmd/go/internal/modload/stat_openfile.go b/src/cmd/go/internal/modload/stat_openfile.go
new file mode 100644
index 0000000..5773073
--- /dev/null
+++ b/src/cmd/go/internal/modload/stat_openfile.go
@@ -0,0 +1,28 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (js && wasm) || plan9
+
+// On plan9, per http://9p.io/magic/man2html/2/access: “Since file permissions
+// are checked by the server and group information is not known to the client,
+// access must open the file to check permissions.”
+//
+// js,wasm is similar, in that it does not define syscall.Access.
+
+package modload
+
+import (
+ "io/fs"
+ "os"
+)
+
+// hasWritePerm reports whether the current user has permission to write to the
+// file with the given info.
+func hasWritePerm(path string, _ fs.FileInfo) bool {
+ if f, err := os.OpenFile(path, os.O_WRONLY, 0); err == nil {
+ f.Close()
+ return true
+ }
+ return false
+}
diff --git a/src/cmd/go/internal/modload/stat_unix.go b/src/cmd/go/internal/modload/stat_unix.go
new file mode 100644
index 0000000..a0d5f4d
--- /dev/null
+++ b/src/cmd/go/internal/modload/stat_unix.go
@@ -0,0 +1,32 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package modload
+
+import (
+ "io/fs"
+ "os"
+ "syscall"
+)
+
+// hasWritePerm reports whether the current user has permission to write to the
+// file with the given info.
+//
+// Although the root user on most Unix systems can write to files even without
+// permission, hasWritePerm reports false if no appropriate permission bit is
+// set even if the current user is root.
+func hasWritePerm(path string, fi fs.FileInfo) bool {
+ if os.Getuid() == 0 {
+ // The root user can access any file, but we still want to default to
+ // read-only mode if the go.mod file is marked as globally non-writable.
+ // (If the user really intends not to be in readonly mode, they can
+ // pass -mod=mod explicitly.)
+ return fi.Mode()&0222 != 0
+ }
+
+ const W_OK = 0x2
+ return syscall.Access(path, W_OK) == nil
+}
diff --git a/src/cmd/go/internal/modload/stat_windows.go b/src/cmd/go/internal/modload/stat_windows.go
new file mode 100644
index 0000000..f29a991
--- /dev/null
+++ b/src/cmd/go/internal/modload/stat_windows.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package modload
+
+import "io/fs"
+
+// hasWritePerm reports whether the current user has permission to write to the
+// file with the given info.
+func hasWritePerm(_ string, fi fs.FileInfo) bool {
+ // Windows has a read-only attribute independent of ACLs, so use that to
+ // determine whether the file is intended to be overwritten.
+ //
+ // Per https://golang.org/pkg/os/#Chmod:
+ // “On Windows, only the 0200 bit (owner writable) of mode is used; it
+ // controls whether the file's read-only attribute is set or cleared.”
+ return fi.Mode()&0200 != 0
+}
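For callers outside cmd/go, a portable approximation of hasWritePerm can combine the two strategies used by the build-tagged files above: consult the owner-write mode bit, then confirm by opening the file for writing. This is only a sketch under that assumption, not one of the patched implementations.

package main

import (
	"fmt"
	"os"
)

// canWrite is a portable approximation of hasWritePerm: it first checks the
// owner-write (0200) bit reported by Stat, then confirms by opening the file
// for writing. Unlike the unix implementation above, it does not special-case
// the root user.
func canWrite(path string) bool {
	fi, err := os.Stat(path)
	if err != nil || fi.Mode()&0200 == 0 {
		return false
	}
	f, err := os.OpenFile(path, os.O_WRONLY, 0)
	if err != nil {
		return false
	}
	f.Close()
	return true
}

func main() {
	fmt.Println(canWrite("go.mod"))
}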
diff --git a/src/cmd/go/internal/modload/vendor.go b/src/cmd/go/internal/modload/vendor.go
new file mode 100644
index 0000000..ffc79bb
--- /dev/null
+++ b/src/cmd/go/internal/modload/vendor.go
@@ -0,0 +1,231 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modload
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/gover"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
+)
+
+var (
+ vendorOnce sync.Once
+ vendorList []module.Version // modules that contribute packages to the build, in order of appearance
+ vendorReplaced []module.Version // all replaced modules; may or may not also contribute packages
+ vendorVersion map[string]string // module path → selected version (if known)
+ vendorPkgModule map[string]module.Version // package → containing module
+ vendorMeta map[module.Version]vendorMetadata
+)
+
+type vendorMetadata struct {
+ Explicit bool
+ Replacement module.Version
+ GoVersion string
+}
+
+// readVendorList reads the list of vendored modules from vendor/modules.txt.
+func readVendorList(mainModule module.Version) {
+ vendorOnce.Do(func() {
+ vendorList = nil
+ vendorPkgModule = make(map[string]module.Version)
+ vendorVersion = make(map[string]string)
+ vendorMeta = make(map[module.Version]vendorMetadata)
+ vendorFile := filepath.Join(MainModules.ModRoot(mainModule), "vendor/modules.txt")
+ data, err := os.ReadFile(vendorFile)
+ if err != nil {
+ if !errors.Is(err, fs.ErrNotExist) {
+ base.Fatalf("go: %s", err)
+ }
+ return
+ }
+
+ var mod module.Version
+ for _, line := range strings.Split(string(data), "\n") {
+ if strings.HasPrefix(line, "# ") {
+ f := strings.Fields(line)
+
+ if len(f) < 3 {
+ continue
+ }
+ if semver.IsValid(f[2]) {
+ // A module, but we don't yet know whether it is in the build list or
+ // only included to indicate a replacement.
+ mod = module.Version{Path: f[1], Version: f[2]}
+ f = f[3:]
+ } else if f[2] == "=>" {
+ // A wildcard replacement found in the main module's go.mod file.
+ mod = module.Version{Path: f[1]}
+ f = f[2:]
+ } else {
+ // Not a version or a wildcard replacement.
+ // We don't know how to interpret this module line, so ignore it.
+ mod = module.Version{}
+ continue
+ }
+
+ if len(f) >= 2 && f[0] == "=>" {
+ meta := vendorMeta[mod]
+ if len(f) == 2 {
+ // File replacement.
+ meta.Replacement = module.Version{Path: f[1]}
+ vendorReplaced = append(vendorReplaced, mod)
+ } else if len(f) == 3 && semver.IsValid(f[2]) {
+ // Path and version replacement.
+ meta.Replacement = module.Version{Path: f[1], Version: f[2]}
+ vendorReplaced = append(vendorReplaced, mod)
+ } else {
+ // We don't understand this replacement. Ignore it.
+ }
+ vendorMeta[mod] = meta
+ }
+ continue
+ }
+
+ // Not a module line. Must be a package within a module or a metadata
+ // directive, either of which requires a preceding module line.
+ if mod.Path == "" {
+ continue
+ }
+
+ if annotations, ok := strings.CutPrefix(line, "## "); ok {
+ // Metadata. Take the union of annotations across multiple lines, if present.
+ meta := vendorMeta[mod]
+ for _, entry := range strings.Split(annotations, ";") {
+ entry = strings.TrimSpace(entry)
+ if entry == "explicit" {
+ meta.Explicit = true
+ }
+ if goVersion, ok := strings.CutPrefix(entry, "go "); ok {
+ meta.GoVersion = goVersion
+ rawGoVersion.Store(mod, meta.GoVersion)
+ if gover.Compare(goVersion, gover.Local()) > 0 {
+ base.Fatal(&gover.TooNewError{What: mod.Path + " in " + base.ShortPath(vendorFile), GoVersion: goVersion})
+ }
+ }
+ // All other tokens are reserved for future use.
+ }
+ vendorMeta[mod] = meta
+ continue
+ }
+
+ if f := strings.Fields(line); len(f) == 1 && module.CheckImportPath(f[0]) == nil {
+ // A package within the current module.
+ vendorPkgModule[f[0]] = mod
+
+ // Since this module provides a package for the build, we know that it
+ // is in the build list and is the selected version of its path.
+ // If this information is new, record it.
+ if v, ok := vendorVersion[mod.Path]; !ok || gover.ModCompare(mod.Path, v, mod.Version) < 0 {
+ vendorList = append(vendorList, mod)
+ vendorVersion[mod.Path] = mod.Version
+ }
+ }
+ }
+ })
+}
+
+// checkVendorConsistency verifies that the vendor/modules.txt file matches (if
+// go 1.14 or later) or at least does not contradict (go 1.13 or earlier) the
+// requirements and replacements listed in the main module's go.mod file.
+func checkVendorConsistency(index *modFileIndex, modFile *modfile.File) {
+ readVendorList(MainModules.mustGetSingleMainModule())
+
+ pre114 := false
+ if gover.Compare(index.goVersion, "1.14") < 0 {
+ // Go versions before 1.14 did not include enough information in
+ // vendor/modules.txt to check for consistency.
+ // If we know that we're on an earlier version, relax the consistency check.
+ pre114 = true
+ }
+
+ vendErrors := new(strings.Builder)
+ vendErrorf := func(mod module.Version, format string, args ...any) {
+ detail := fmt.Sprintf(format, args...)
+ if mod.Version == "" {
+ fmt.Fprintf(vendErrors, "\n\t%s: %s", mod.Path, detail)
+ } else {
+ fmt.Fprintf(vendErrors, "\n\t%s@%s: %s", mod.Path, mod.Version, detail)
+ }
+ }
+
+ // Iterate over the Require directives in their original (not indexed) order
+ // so that the errors match the original file.
+ for _, r := range modFile.Require {
+ if !vendorMeta[r.Mod].Explicit {
+ if pre114 {
+ // Before 1.14, modules.txt did not indicate whether modules were listed
+ // explicitly in the main module's go.mod file.
+ // However, we can at least detect a version mismatch if packages were
+ // vendored from a non-matching version.
+ if vv, ok := vendorVersion[r.Mod.Path]; ok && vv != r.Mod.Version {
+ vendErrorf(r.Mod, "is explicitly required in go.mod, but vendor/modules.txt indicates %s@%s", r.Mod.Path, vv)
+ }
+ } else {
+ vendErrorf(r.Mod, "is explicitly required in go.mod, but not marked as explicit in vendor/modules.txt")
+ }
+ }
+ }
+
+ describe := func(m module.Version) string {
+ if m.Version == "" {
+ return m.Path
+ }
+ return m.Path + "@" + m.Version
+ }
+
+ // We need to verify *all* replacements that occur in modfile: even if they
+ // don't directly apply to any module in the vendor list, the replacement
+ // go.mod file can affect the selected versions of other (transitive)
+ // dependencies.
+ for _, r := range modFile.Replace {
+ vr := vendorMeta[r.Old].Replacement
+ if vr == (module.Version{}) {
+ if pre114 && (r.Old.Version == "" || vendorVersion[r.Old.Path] != r.Old.Version) {
+ // Before 1.14, modules.txt omitted wildcard replacements and
+ // replacements for modules that did not have any packages to vendor.
+ } else {
+ vendErrorf(r.Old, "is replaced in go.mod, but not marked as replaced in vendor/modules.txt")
+ }
+ } else if vr != r.New {
+ vendErrorf(r.Old, "is replaced by %s in go.mod, but marked as replaced by %s in vendor/modules.txt", describe(r.New), describe(vr))
+ }
+ }
+
+ for _, mod := range vendorList {
+ meta := vendorMeta[mod]
+ if meta.Explicit {
+ if _, inGoMod := index.require[mod]; !inGoMod {
+ vendErrorf(mod, "is marked as explicit in vendor/modules.txt, but not explicitly required in go.mod")
+ }
+ }
+ }
+
+ for _, mod := range vendorReplaced {
+ r := Replacement(mod)
+ if r == (module.Version{}) {
+ vendErrorf(mod, "is marked as replaced in vendor/modules.txt, but not replaced in go.mod")
+ continue
+ }
+ if meta := vendorMeta[mod]; r != meta.Replacement {
+ vendErrorf(mod, "is marked as replaced by %s in vendor/modules.txt, but replaced by %s in go.mod", describe(meta.Replacement), describe(r))
+ }
+ }
+
+ if vendErrors.Len() > 0 {
+ modRoot := MainModules.ModRoot(MainModules.mustGetSingleMainModule())
+ base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo mod vendor", modRoot, vendErrors)
+ }
+}
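A small sketch of the vendor/modules.txt shape that readVendorList parses: "# path version" module lines, an optional "=> path [version]" replacement on the same line, "## ..." annotation lines, and bare package lines. The sample content below is invented.

package main

import (
	"fmt"
	"strings"
)

const sample = `# example.com/a v1.2.0
## explicit; go 1.21
example.com/a
example.com/a/sub
# example.com/b v1.0.0 => ./local/b
## go 1.20
example.com/b
`

func main() {
	current := ""
	for _, line := range strings.Split(sample, "\n") {
		switch {
		case strings.HasPrefix(line, "# "):
			f := strings.Fields(line)
			current = f[1] + "@" + f[2]
			fmt.Println("module:", current)
			if i := strings.Index(line, "=>"); i >= 0 {
				fmt.Println("  replaced by:", strings.TrimSpace(line[i+2:]))
			}
		case strings.HasPrefix(line, "## "):
			fmt.Println("  annotations:", strings.TrimPrefix(line, "## "))
		case line != "":
			fmt.Println("  package:", line, "(from "+current+")")
		}
	}
}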
diff --git a/src/cmd/go/internal/mvs/errors.go b/src/cmd/go/internal/mvs/errors.go
new file mode 100644
index 0000000..8db65d6
--- /dev/null
+++ b/src/cmd/go/internal/mvs/errors.go
@@ -0,0 +1,105 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mvs
+
+import (
+ "fmt"
+ "strings"
+
+ "golang.org/x/mod/module"
+)
+
+// BuildListError decorates an error that occurred gathering requirements
+// while constructing a build list. BuildListError prints the chain
+// of requirements to the module where the error occurred.
+type BuildListError struct {
+ Err error
+ stack []buildListErrorElem
+}
+
+type buildListErrorElem struct {
+ m module.Version
+
+ // nextReason is the reason this module depends on the next module in the
+ // stack. Typically either "requires" or "updating to".
+ nextReason string
+}
+
+// NewBuildListError returns a new BuildListError wrapping an error that
+// occurred at a module found along the given path of requirements and/or
+// upgrades, which must be non-empty.
+//
+// The isVersionChange function reports whether a path step is due to an
+// explicit upgrade or downgrade (as opposed to an existing requirement in a
+// go.mod file). A nil isVersionChange function indicates that none of the path
+// steps are due to explicit version changes.
+func NewBuildListError(err error, path []module.Version, isVersionChange func(from, to module.Version) bool) *BuildListError {
+ stack := make([]buildListErrorElem, 0, len(path))
+ for len(path) > 1 {
+ reason := "requires"
+ if isVersionChange != nil && isVersionChange(path[0], path[1]) {
+ reason = "updating to"
+ }
+ stack = append(stack, buildListErrorElem{
+ m: path[0],
+ nextReason: reason,
+ })
+ path = path[1:]
+ }
+ stack = append(stack, buildListErrorElem{m: path[0]})
+
+ return &BuildListError{
+ Err: err,
+ stack: stack,
+ }
+}
+
+// Module returns the module where the error occurred. If the module stack
+// is empty, this returns a zero value.
+func (e *BuildListError) Module() module.Version {
+ if len(e.stack) == 0 {
+ return module.Version{}
+ }
+ return e.stack[len(e.stack)-1].m
+}
+
+func (e *BuildListError) Error() string {
+ b := &strings.Builder{}
+ stack := e.stack
+
+ // Don't print modules at the beginning of the chain without a
+ // version. These always seem to be the main module or a
+ // synthetic module ("target@").
+ for len(stack) > 0 && stack[0].m.Version == "" {
+ stack = stack[1:]
+ }
+
+ if len(stack) == 0 {
+ b.WriteString(e.Err.Error())
+ } else {
+ for _, elem := range stack[:len(stack)-1] {
+ fmt.Fprintf(b, "%s %s\n\t", elem.m, elem.nextReason)
+ }
+ // Ensure that the final module path and version are included as part of the
+ // error message.
+ m := stack[len(stack)-1].m
+ if mErr, ok := e.Err.(*module.ModuleError); ok {
+ actual := module.Version{Path: mErr.Path, Version: mErr.Version}
+ if v, ok := mErr.Err.(*module.InvalidVersionError); ok {
+ actual.Version = v.Version
+ }
+ if actual == m {
+ fmt.Fprintf(b, "%v", e.Err)
+ } else {
+ fmt.Fprintf(b, "%s (replaced by %s): %v", m, actual, mErr.Err)
+ }
+ } else {
+ fmt.Fprintf(b, "%v", module.VersionError(m, e.Err))
+ }
+ }
+ return b.String()
+}
+
+func (e *BuildListError) Unwrap() error { return e.Err }
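Because the formatting above special-cases *module.ModuleError and *module.InvalidVersionError, here is a short sketch, using only the exported golang.org/x/mod/module API, of how such errors are constructed and unwrapped; the module path is an invented example.

package main

import (
	"errors"
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	m := module.Version{Path: "example.com/m", Version: "v1.2.3"}
	err := module.VersionError(m, errors.New("no matching versions"))
	fmt.Println(err) // example.com/m@v1.2.3: no matching versions

	var merr *module.ModuleError
	if errors.As(err, &merr) {
		fmt.Println("path:", merr.Path, "version:", merr.Version)
	}
}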
diff --git a/src/cmd/go/internal/mvs/graph.go b/src/cmd/go/internal/mvs/graph.go
new file mode 100644
index 0000000..56b3c60
--- /dev/null
+++ b/src/cmd/go/internal/mvs/graph.go
@@ -0,0 +1,226 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mvs
+
+import (
+ "fmt"
+ "slices"
+
+ "cmd/go/internal/gover"
+
+ "golang.org/x/mod/module"
+)
+
+// Graph implements an incremental version of the MVS algorithm, with the
+// requirements pushed by the caller instead of pulled by the MVS traversal.
+type Graph struct {
+ cmp func(p, v1, v2 string) int
+ roots []module.Version
+
+ required map[module.Version][]module.Version
+
+ isRoot map[module.Version]bool // contains true for roots and false for reachable non-roots
+ selected map[string]string // path → version
+}
+
+// NewGraph returns an incremental MVS graph containing only a set of root
+// dependencies and using the given comparison function for version strings.
+//
+// The caller must ensure that the root slice is not modified while the Graph
+// may be in use.
+func NewGraph(cmp func(p, v1, v2 string) int, roots []module.Version) *Graph {
+ g := &Graph{
+ cmp: cmp,
+ roots: slices.Clip(roots),
+ required: make(map[module.Version][]module.Version),
+ isRoot: make(map[module.Version]bool),
+ selected: make(map[string]string),
+ }
+
+ for _, m := range roots {
+ g.isRoot[m] = true
+ if g.cmp(m.Path, g.Selected(m.Path), m.Version) < 0 {
+ g.selected[m.Path] = m.Version
+ }
+ }
+
+ return g
+}
+
+// Require adds the information that module m requires all modules in reqs.
+// The reqs slice must not be modified after it is passed to Require.
+//
+// m must be reachable by some existing chain of requirements from g's target,
+// and Require must not have been called for it already.
+//
+// If any of the modules in reqs has the same path as g's target,
+// the target must have higher precedence than the version in reqs.
+func (g *Graph) Require(m module.Version, reqs []module.Version) {
+ // To help catch disconnected-graph bugs, enforce that all required versions
+ // are actually reachable from the roots (and therefore should affect the
+ // selected versions of the modules they name).
+ if _, reachable := g.isRoot[m]; !reachable {
+ panic(fmt.Sprintf("%v is not reachable from any root", m))
+ }
+
+ // Truncate reqs to its capacity to avoid aliasing bugs if it is later
+ // returned from RequiredBy and appended to.
+ reqs = slices.Clip(reqs)
+
+ if _, dup := g.required[m]; dup {
+ panic(fmt.Sprintf("requirements of %v have already been set", m))
+ }
+ g.required[m] = reqs
+
+ for _, dep := range reqs {
+ // Mark dep reachable, regardless of whether it is selected.
+ if _, ok := g.isRoot[dep]; !ok {
+ g.isRoot[dep] = false
+ }
+
+ if g.cmp(dep.Path, g.Selected(dep.Path), dep.Version) < 0 {
+ g.selected[dep.Path] = dep.Version
+ }
+ }
+}
+
+// RequiredBy returns the slice of requirements passed to Require for m, if any,
+// with its capacity reduced to its length.
+// If Require has not been called for m, RequiredBy(m) returns ok=false.
+//
+// The caller must not modify the returned slice, but may safely append to it
+// and may rely on it not to be modified.
+func (g *Graph) RequiredBy(m module.Version) (reqs []module.Version, ok bool) {
+ reqs, ok = g.required[m]
+ return reqs, ok
+}
+
+// Selected returns the selected version of the given module path.
+//
+// If no version is selected, Selected returns version "none".
+func (g *Graph) Selected(path string) (version string) {
+ v, ok := g.selected[path]
+ if !ok {
+ return "none"
+ }
+ return v
+}
+
+// BuildList returns the selected versions of all modules present in the Graph,
+// beginning with the selected versions of each module path in the roots of g.
+//
+// The order of the remaining elements in the list is deterministic
+// but arbitrary.
+func (g *Graph) BuildList() []module.Version {
+ seenRoot := make(map[string]bool, len(g.roots))
+
+ var list []module.Version
+ for _, r := range g.roots {
+ if seenRoot[r.Path] {
+ // Multiple copies of the same root, with the same or different versions,
+ // are a bit of a degenerate case: we will take the transitive
+ // requirements of both roots into account, but only the higher one can
+ // possibly be selected. However — especially given that we need the
+ // seenRoot map for later anyway — it is simpler to support this
+ // degenerate case than to forbid it.
+ continue
+ }
+
+ if v := g.Selected(r.Path); v != "none" {
+ list = append(list, module.Version{Path: r.Path, Version: v})
+ }
+ seenRoot[r.Path] = true
+ }
+ uniqueRoots := list
+
+ for path, version := range g.selected {
+ if !seenRoot[path] {
+ list = append(list, module.Version{Path: path, Version: version})
+ }
+ }
+ gover.ModSort(list[len(uniqueRoots):])
+
+ return list
+}
+
+// WalkBreadthFirst invokes f once, in breadth-first order, for each module
+// version other than "none" that appears in the graph, regardless of whether
+// that version is selected.
+func (g *Graph) WalkBreadthFirst(f func(m module.Version)) {
+ var queue []module.Version
+ enqueued := make(map[module.Version]bool)
+ for _, m := range g.roots {
+ if m.Version != "none" {
+ queue = append(queue, m)
+ enqueued[m] = true
+ }
+ }
+
+ for len(queue) > 0 {
+ m := queue[0]
+ queue = queue[1:]
+
+ f(m)
+
+ reqs, _ := g.RequiredBy(m)
+ for _, r := range reqs {
+ if !enqueued[r] && r.Version != "none" {
+ queue = append(queue, r)
+ enqueued[r] = true
+ }
+ }
+ }
+}
+
+// FindPath reports a shortest requirement path starting at one of the roots of
+// the graph and ending at a module version m for which f(m) returns true, or
+// nil if no such path exists.
+func (g *Graph) FindPath(f func(module.Version) bool) []module.Version {
+ // firstRequires[a] = b means that in a breadth-first traversal of the
+ // requirement graph, the module version a was first required by b.
+ firstRequires := make(map[module.Version]module.Version)
+
+ queue := g.roots
+ for _, m := range g.roots {
+ firstRequires[m] = module.Version{}
+ }
+
+ for len(queue) > 0 {
+ m := queue[0]
+ queue = queue[1:]
+
+ if f(m) {
+ // Construct the path reversed (because we're starting from the far
+ // endpoint), then reverse it.
+ path := []module.Version{m}
+ for {
+ m = firstRequires[m]
+ if m.Path == "" {
+ break
+ }
+ path = append(path, m)
+ }
+
+ i, j := 0, len(path)-1
+ for i < j {
+ path[i], path[j] = path[j], path[i]
+ i++
+ j--
+ }
+
+ return path
+ }
+
+ reqs, _ := g.RequiredBy(m)
+ for _, r := range reqs {
+ if _, seen := firstRequires[r]; !seen {
+ queue = append(queue, r)
+ firstRequires[r] = m
+ }
+ }
+ }
+
+ return nil
+}
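The core invariant maintained by Require and Selected, that g.selected[path] always holds the highest version pushed so far for that path, can be shown in a few lines over plain semver strings. This sketch ignores roots and reachability, which the real Graph also tracks.

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	selected := map[string]string{} // path → highest version pushed so far

	push := func(path, version string) {
		if cur, ok := selected[path]; !ok || semver.Compare(cur, version) < 0 {
			selected[path] = version
		}
	}

	// Requirement edges arrive in any order; only the maximum survives.
	push("example.com/b", "v1.1.0")
	push("example.com/c", "v1.0.0")
	push("example.com/b", "v1.3.0")
	push("example.com/b", "v1.2.0")

	fmt.Println(selected["example.com/b"], selected["example.com/c"]) // v1.3.0 v1.0.0
}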
diff --git a/src/cmd/go/internal/mvs/mvs.go b/src/cmd/go/internal/mvs/mvs.go
new file mode 100644
index 0000000..468a985
--- /dev/null
+++ b/src/cmd/go/internal/mvs/mvs.go
@@ -0,0 +1,488 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package mvs implements Minimal Version Selection.
+// See https://research.swtch.com/vgo-mvs.
+package mvs
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+
+ "cmd/go/internal/par"
+
+ "golang.org/x/mod/module"
+)
+
+// A Reqs is the requirement graph on which Minimal Version Selection (MVS) operates.
+//
+// The version strings are opaque except for the special version "none"
+// (see the documentation for module.Version). In particular, MVS does not
+// assume that the version strings are semantic versions; instead, the Max method
+// gives access to the comparison operation.
+//
+// It must be safe to call methods on a Reqs from multiple goroutines simultaneously.
+// Because a Reqs may read the underlying graph from the network on demand,
+// the MVS algorithms parallelize the traversal to overlap network delays.
+type Reqs interface {
+ // Required returns the module versions explicitly required by m itself.
+ // The caller must not modify the returned list.
+ Required(m module.Version) ([]module.Version, error)
+
+ // Max returns the maximum of v1 and v2 (it returns either v1 or v2)
+ // in the module with path p.
+ //
+ // For all versions v, Max(v, "none") must be v,
+ // and for the target passed as the first argument to MVS functions,
+ // Max(target, v) must be target.
+ //
+ // Note that v1 < v2 can be written Max(v1, v2) != v1
+ // and similarly v1 <= v2 can be written Max(v1, v2) == v2.
+ Max(p, v1, v2 string) string
+}
+
+// An UpgradeReqs is a Reqs that can also identify available upgrades.
+type UpgradeReqs interface {
+ Reqs
+
+ // Upgrade returns the upgraded version of m,
+ // for use during an UpgradeAll operation.
+ // If m should be kept as is, Upgrade returns m.
+ // If m is not yet used in the build, then m.Version will be "none".
+ // More typically, m.Version will be the version required
+ // by some other module in the build.
+ //
+ // If no module version is available for the given path,
+ // Upgrade returns a non-nil error.
+ // TODO(rsc): Upgrade must be able to return errors,
+ // but should "no latest version" just return m instead?
+ Upgrade(m module.Version) (module.Version, error)
+}
+
+// A DowngradeReqs is a Reqs that can also identify available downgrades.
+type DowngradeReqs interface {
+ Reqs
+
+ // Previous returns the version of m.Path immediately prior to m.Version,
+ // or "none" if no such version is known.
+ Previous(m module.Version) (module.Version, error)
+}
+
+// BuildList returns the build list for the target module.
+//
+// target is the root vertex of a module requirement graph. For cmd/go, this is
+// typically the main module, but note that this algorithm is not intended to
+// be Go-specific: module paths and versions are treated as opaque values.
+//
+// reqs describes the module requirement graph and provides an opaque method
+// for comparing versions.
+//
+// BuildList traverses the graph and returns a list containing the highest
+// version for each visited module. The first element of the returned list is
+// target itself; reqs.Max requires target.Version to compare higher than all
+// other versions, so no other version can be selected. The remaining elements
+// of the list are sorted by path.
+//
+// See https://research.swtch.com/vgo-mvs for details.
+func BuildList(targets []module.Version, reqs Reqs) ([]module.Version, error) {
+ return buildList(targets, reqs, nil)
+}
+
+func buildList(targets []module.Version, reqs Reqs, upgrade func(module.Version) (module.Version, error)) ([]module.Version, error) {
+ cmp := func(p, v1, v2 string) int {
+ if reqs.Max(p, v1, v2) != v1 {
+ return -1
+ }
+ if reqs.Max(p, v2, v1) != v2 {
+ return 1
+ }
+ return 0
+ }
+
+ var (
+ mu sync.Mutex
+ g = NewGraph(cmp, targets)
+ upgrades = map[module.Version]module.Version{}
+ errs = map[module.Version]error{} // (non-nil errors only)
+ )
+
+ // Explore work graph in parallel in case reqs.Required
+ // does high-latency network operations.
+ var work par.Work[module.Version]
+ for _, target := range targets {
+ work.Add(target)
+ }
+ work.Do(10, func(m module.Version) {
+
+ var required []module.Version
+ var err error
+ if m.Version != "none" {
+ required, err = reqs.Required(m)
+ }
+
+ u := m
+ if upgrade != nil {
+ upgradeTo, upErr := upgrade(m)
+ if upErr == nil {
+ u = upgradeTo
+ } else if err == nil {
+ err = upErr
+ }
+ }
+
+ mu.Lock()
+ if err != nil {
+ errs[m] = err
+ }
+ if u != m {
+ upgrades[m] = u
+ required = append([]module.Version{u}, required...)
+ }
+ g.Require(m, required)
+ mu.Unlock()
+
+ for _, r := range required {
+ work.Add(r)
+ }
+ })
+
+ // If there was an error, find the shortest path from the target to the
+ // node where the error occurred so we can report a useful error message.
+ if len(errs) > 0 {
+ errPath := g.FindPath(func(m module.Version) bool {
+ return errs[m] != nil
+ })
+ if len(errPath) == 0 {
+ panic("internal error: could not reconstruct path to module with error")
+ }
+
+ err := errs[errPath[len(errPath)-1]]
+ isUpgrade := func(from, to module.Version) bool {
+ if u, ok := upgrades[from]; ok {
+ return u == to
+ }
+ return false
+ }
+ return nil, NewBuildListError(err, errPath, isUpgrade)
+ }
+
+ // The final list is the minimum version of each module found in the graph.
+ list := g.BuildList()
+ if vs := list[:len(targets)]; !reflect.DeepEqual(vs, targets) {
+ // target.Version will be "" for modload, the main client of MVS.
+ // "" denotes the main module, which has no version. However, MVS treats
+ // version strings as opaque, so "" is not a special value here.
+ // See golang.org/issue/31491, golang.org/issue/29773.
+ panic(fmt.Sprintf("mistake: chose versions %+v instead of targets %+v", vs, targets))
+ }
+ return list, nil
+}
+
+// Req returns the minimal requirement list for the target module,
+// with the constraint that all module paths listed in base must
+// appear in the returned list.
+func Req(mainModule module.Version, base []string, reqs Reqs) ([]module.Version, error) {
+ list, err := BuildList([]module.Version{mainModule}, reqs)
+ if err != nil {
+ return nil, err
+ }
+
+ // Note: Not running in parallel because we assume
+ // that list came from a previous operation that paged
+ // in all the requirements, so there's no I/O to overlap now.
+
+ max := map[string]string{}
+ for _, m := range list {
+ max[m.Path] = m.Version
+ }
+
+ // Compute postorder, cache requirements.
+ var postorder []module.Version
+ reqCache := map[module.Version][]module.Version{}
+ reqCache[mainModule] = nil
+
+ var walk func(module.Version) error
+ walk = func(m module.Version) error {
+ _, ok := reqCache[m]
+ if ok {
+ return nil
+ }
+ required, err := reqs.Required(m)
+ if err != nil {
+ return err
+ }
+ reqCache[m] = required
+ for _, m1 := range required {
+ if err := walk(m1); err != nil {
+ return err
+ }
+ }
+ postorder = append(postorder, m)
+ return nil
+ }
+ for _, m := range list {
+ if err := walk(m); err != nil {
+ return nil, err
+ }
+ }
+
+ // Walk modules in reverse post-order, only adding those not implied already.
+ have := map[module.Version]bool{}
+ walk = func(m module.Version) error {
+ if have[m] {
+ return nil
+ }
+ have[m] = true
+ for _, m1 := range reqCache[m] {
+ walk(m1)
+ }
+ return nil
+ }
+ // First walk the base modules that must be listed.
+ var min []module.Version
+ haveBase := map[string]bool{}
+ for _, path := range base {
+ if haveBase[path] {
+ continue
+ }
+ m := module.Version{Path: path, Version: max[path]}
+ min = append(min, m)
+ walk(m)
+ haveBase[path] = true
+ }
+ // Now the reverse postorder to bring in anything else.
+ for i := len(postorder) - 1; i >= 0; i-- {
+ m := postorder[i]
+ if max[m.Path] != m.Version {
+ // Older version.
+ continue
+ }
+ if !have[m] {
+ min = append(min, m)
+ walk(m)
+ }
+ }
+ sort.Slice(min, func(i, j int) bool {
+ return min[i].Path < min[j].Path
+ })
+ return min, nil
+}
+
+// UpgradeAll returns a build list for the target module
+// in which every module is upgraded to its latest version.
+func UpgradeAll(target module.Version, reqs UpgradeReqs) ([]module.Version, error) {
+ return buildList([]module.Version{target}, reqs, func(m module.Version) (module.Version, error) {
+ if m.Path == target.Path {
+ return target, nil
+ }
+
+ return reqs.Upgrade(m)
+ })
+}
+
+// Upgrade returns a build list for the target module
+// in which the given additional modules are upgraded.
+func Upgrade(target module.Version, reqs UpgradeReqs, upgrade ...module.Version) ([]module.Version, error) {
+ list, err := reqs.Required(target)
+ if err != nil {
+ return nil, err
+ }
+
+ pathInList := make(map[string]bool, len(list))
+ for _, m := range list {
+ pathInList[m.Path] = true
+ }
+ list = append([]module.Version(nil), list...)
+
+ upgradeTo := make(map[string]string, len(upgrade))
+ for _, u := range upgrade {
+ if !pathInList[u.Path] {
+ list = append(list, module.Version{Path: u.Path, Version: "none"})
+ }
+ if prev, dup := upgradeTo[u.Path]; dup {
+ upgradeTo[u.Path] = reqs.Max(u.Path, prev, u.Version)
+ } else {
+ upgradeTo[u.Path] = u.Version
+ }
+ }
+
+ return buildList([]module.Version{target}, &override{target, list, reqs}, func(m module.Version) (module.Version, error) {
+ if v, ok := upgradeTo[m.Path]; ok {
+ return module.Version{Path: m.Path, Version: v}, nil
+ }
+ return m, nil
+ })
+}
+
+// Downgrade returns a build list for the target module
+// in which the given additional modules are downgraded,
+// potentially overriding the requirements of the target.
+//
+// The versions to be downgraded may be unreachable from reqs.Latest and
+// reqs.Previous, but the methods of reqs must otherwise handle such versions
+// correctly.
+func Downgrade(target module.Version, reqs DowngradeReqs, downgrade ...module.Version) ([]module.Version, error) {
+ // Per https://research.swtch.com/vgo-mvs#algorithm_4:
+ // “To avoid an unnecessary downgrade to E 1.1, we must also add a new
+ // requirement on E 1.2. We can apply Algorithm R to find the minimal set of
+ // new requirements to write to go.mod.”
+ //
+ // In order to generate those new requirements, we need to identify versions
+ // for every module in the build list — not just reqs.Required(target).
+ list, err := BuildList([]module.Version{target}, reqs)
+ if err != nil {
+ return nil, err
+ }
+ list = list[1:] // remove target
+
+ max := make(map[string]string)
+ for _, r := range list {
+ max[r.Path] = r.Version
+ }
+ for _, d := range downgrade {
+ if v, ok := max[d.Path]; !ok || reqs.Max(d.Path, v, d.Version) != d.Version {
+ max[d.Path] = d.Version
+ }
+ }
+
+ var (
+ added = make(map[module.Version]bool)
+ rdeps = make(map[module.Version][]module.Version)
+ excluded = make(map[module.Version]bool)
+ )
+ var exclude func(module.Version)
+ exclude = func(m module.Version) {
+ if excluded[m] {
+ return
+ }
+ excluded[m] = true
+ for _, p := range rdeps[m] {
+ exclude(p)
+ }
+ }
+ var add func(module.Version)
+ add = func(m module.Version) {
+ if added[m] {
+ return
+ }
+ added[m] = true
+ if v, ok := max[m.Path]; ok && reqs.Max(m.Path, m.Version, v) != v {
+ // m would upgrade an existing dependency — it is not a strict downgrade,
+ // and because it was already present as a dependency, it could affect the
+ // behavior of other relevant packages.
+ exclude(m)
+ return
+ }
+ list, err := reqs.Required(m)
+ if err != nil {
+ // If we can't load the requirements, we couldn't load the go.mod file.
+ // There are a number of reasons this can happen, but this usually
+ // means an older version of the module had a missing or invalid
+ // go.mod file. For example, if example.com/mod released v2.0.0 before
+ // migrating to modules (v2.0.0+incompatible), then added a valid go.mod
+ // in v2.0.1, downgrading from v2.0.1 would cause this error.
+ //
+ // TODO(golang.org/issue/31730, golang.org/issue/30134): if the error
+ // is transient (we couldn't download go.mod), return the error from
+ // Downgrade. Currently, we can't tell what kind of error it is.
+ exclude(m)
+ return
+ }
+ for _, r := range list {
+ add(r)
+ if excluded[r] {
+ exclude(m)
+ return
+ }
+ rdeps[r] = append(rdeps[r], m)
+ }
+ }
+
+ downgraded := make([]module.Version, 0, len(list)+1)
+ downgraded = append(downgraded, target)
+List:
+ for _, r := range list {
+ add(r)
+ for excluded[r] {
+ p, err := reqs.Previous(r)
+ if err != nil {
+ // This is likely a transient error reaching the repository,
+ // rather than a permanent error with the retrieved version.
+ //
+ // TODO(golang.org/issue/31730, golang.org/issue/30134):
+ // decode what to do based on the actual error.
+ return nil, err
+ }
+ // If the target version is a pseudo-version, it may not be
+ // included when iterating over prior versions using reqs.Previous.
+ // Insert it into the right place in the iteration.
+ // If v is excluded, p should be returned again by reqs.Previous on the next iteration.
+ if v := max[r.Path]; reqs.Max(r.Path, v, r.Version) != v && reqs.Max(r.Path, p.Version, v) != p.Version {
+ p.Version = v
+ }
+ if p.Version == "none" {
+ continue List
+ }
+ add(p)
+ r = p
+ }
+ downgraded = append(downgraded, r)
+ }
+
+ // The downgrades we computed above only downgrade to versions enumerated by
+ // reqs.Previous. However, reqs.Previous omits some versions — such as
+ // pseudo-versions and retracted versions — that may be selected as transitive
+ // requirements of other modules.
+ //
+ // If one of those requirements pulls the version back up above the version
+ // identified by reqs.Previous, then the transitive dependencies of that
+ // initially-downgraded version should no longer matter — in particular, we
+ // should not add new dependencies on module paths that nothing else in the
+ // updated module graph even requires.
+ //
+ // In order to eliminate those spurious dependencies, we recompute the build
+ // list with the actual versions of the downgraded modules as selected by MVS,
+ // instead of our initial downgrades.
+ // (See the downhiddenartifact and downhiddencross test cases).
+ actual, err := BuildList([]module.Version{target}, &override{
+ target: target,
+ list: downgraded,
+ Reqs: reqs,
+ })
+ if err != nil {
+ return nil, err
+ }
+ actualVersion := make(map[string]string, len(actual))
+ for _, m := range actual {
+ actualVersion[m.Path] = m.Version
+ }
+
+ downgraded = downgraded[:0]
+ for _, m := range list {
+ if v, ok := actualVersion[m.Path]; ok {
+ downgraded = append(downgraded, module.Version{Path: m.Path, Version: v})
+ }
+ }
+
+ return BuildList([]module.Version{target}, &override{
+ target: target,
+ list: downgraded,
+ Reqs: reqs,
+ })
+}
+
+type override struct {
+ target module.Version
+ list []module.Version
+ Reqs
+}
+
+func (r *override) Required(m module.Version) ([]module.Version, error) {
+ if m == r.target {
+ return r.list, nil
+ }
+ return r.Reqs.Required(m)
+}
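Since this package is internal to cmd/go, the following self-contained toy computes the same kind of rough build list for the "blog" scenario in the test data below: walk every reachable module version and keep the highest version per module. It assumes single-letter module names with single-digit versions, so plain string comparison stands in for reqs.Max.

package main

import (
	"fmt"
	"sort"
)

// reqs mirrors the "blog" scenario in the test data below:
// each "NameVersion" maps to its direct requirements.
var reqs = map[string][]string{
	"A":  {"B1", "C2"},
	"B1": {"D3"},
	"C2": {"D4"},
	"D3": {"E2"},
	"D4": {"E2", "F1"},
}

// buildList walks every module version reachable from target and keeps the
// highest version seen for each module name. With single-letter names and
// single-digit versions, string comparison is enough; the graph here is
// acyclic, so no visited set is needed.
func buildList(target string) []string {
	selected := map[byte]string{}
	var visit func(m string)
	visit = func(m string) {
		if cur, ok := selected[m[0]]; !ok || m > cur {
			selected[m[0]] = m
		}
		for _, r := range reqs[m] {
			visit(r)
		}
	}
	visit(target)

	list := make([]string, 0, len(selected))
	for _, v := range selected {
		list = append(list, v)
	}
	sort.Strings(list)
	return list
}

func main() {
	fmt.Println(buildList("A")) // [A B1 C2 D4 E2 F1]
}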
diff --git a/src/cmd/go/internal/mvs/mvs_test.go b/src/cmd/go/internal/mvs/mvs_test.go
new file mode 100644
index 0000000..6e1e71c
--- /dev/null
+++ b/src/cmd/go/internal/mvs/mvs_test.go
@@ -0,0 +1,635 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mvs
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "golang.org/x/mod/module"
+)
+
+var tests = `
+# Scenario from blog.
+name: blog
+A: B1 C2
+B1: D3
+C1: D2
+C2: D4
+C3: D5
+C4: G1
+D2: E1
+D3: E2
+D4: E2 F1
+D5: E2
+G1: C4
+A2: B1 C4 D4
+build A: A B1 C2 D4 E2 F1
+upgrade* A: A B1 C4 D5 E2 F1 G1
+upgrade A C4: A B1 C4 D4 E2 F1 G1
+build A2: A2 B1 C4 D4 E2 F1 G1
+downgrade A2 D2: A2 C4 D2 E2 F1 G1
+
+name: trim
+A: B1 C2
+B1: D3
+C2: B2
+B2:
+build A: A B2 C2 D3
+
+# Cross-dependency between D and E.
+# No matter how it arises, should get result of merging all build lists via max,
+# which leads to including both D2 and E2.
+
+name: cross1
+A: B C
+B: D1
+C: D2
+D1: E2
+D2: E1
+build A: A B C D2 E2
+
+name: cross1V
+A: B2 C D2 E1
+B1:
+B2: D1
+C: D2
+D1: E2
+D2: E1
+build A: A B2 C D2 E2
+
+name: cross1U
+A: B1 C
+B1:
+B2: D1
+C: D2
+D1: E2
+D2: E1
+build A: A B1 C D2 E1
+upgrade A B2: A B2 C D2 E2
+
+name: cross1R
+A: B C
+B: D2
+C: D1
+D1: E2
+D2: E1
+build A: A B C D2 E2
+
+name: cross1X
+A: B C
+B: D1 E2
+C: D2
+D1: E2
+D2: E1
+build A: A B C D2 E2
+
+name: cross2
+A: B D2
+B: D1
+D1: E2
+D2: E1
+build A: A B D2 E2
+
+name: cross2X
+A: B D2
+B: D1 E2
+C: D2
+D1: E2
+D2: E1
+build A: A B D2 E2
+
+name: cross3
+A: B D2 E1
+B: D1
+D1: E2
+D2: E1
+build A: A B D2 E2
+
+name: cross3X
+A: B D2 E1
+B: D1 E2
+D1: E2
+D2: E1
+build A: A B D2 E2
+
+# Should not get E2 here, because B has been updated
+# not to depend on D1 anymore.
+name: cross4
+A1: B1 D2
+A2: B2 D2
+B1: D1
+B2: D2
+D1: E2
+D2: E1
+build A1: A1 B1 D2 E2
+build A2: A2 B2 D2 E1
+
+# But the upgrade from A1 preserves the E2 dep explicitly.
+upgrade A1 B2: A1 B2 D2 E2
+upgradereq A1 B2: B2 E2
+
+name: cross5
+A: D1
+D1: E2
+D2: E1
+build A: A D1 E2
+upgrade* A: A D2 E2
+upgrade A D2: A D2 E2
+upgradereq A D2: D2 E2
+
+name: cross6
+A: D2
+D1: E2
+D2: E1
+build A: A D2 E1
+upgrade* A: A D2 E2
+upgrade A E2: A D2 E2
+
+name: cross7
+A: B C
+B: D1
+C: E1
+D1: E2
+E1: D2
+build A: A B C D2 E2
+
+# golang.org/issue/31248:
+# Even though we select X2, the requirement on I1
+# via X1 should be preserved.
+name: cross8
+M: A1 B1
+A1: X1
+B1: X2
+X1: I1
+X2:
+build M: M A1 B1 I1 X2
+
+# Upgrade from B1 to B2 should not drop the transitive dep on D.
+name: drop
+A: B1 C1
+B1: D1
+B2:
+C2:
+D2:
+build A: A B1 C1 D1
+upgrade* A: A B2 C2 D2
+
+name: simplify
+A: B1 C1
+B1: C2
+C1: D1
+C2:
+build A: A B1 C2 D1
+
+name: up1
+A: B1 C1
+B1:
+B2:
+B3:
+B4:
+B5.hidden:
+C2:
+C3:
+build A: A B1 C1
+upgrade* A: A B4 C3
+
+name: up2
+A: B5.hidden C1
+B1:
+B2:
+B3:
+B4:
+B5.hidden:
+C2:
+C3:
+build A: A B5.hidden C1
+upgrade* A: A B5.hidden C3
+
+name: down1
+A: B2
+B1: C1
+B2: C2
+build A: A B2 C2
+downgrade A C1: A B1 C1
+
+name: down2
+A: B2 E2
+B1:
+B2: C2 F2
+C1:
+D1:
+C2: D2 E2
+D2: B2
+E2: D2
+E1:
+F1:
+build A: A B2 C2 D2 E2 F2
+downgrade A F1: A B1 C1 D1 E1 F1
+
+# https://research.swtch.com/vgo-mvs#algorithm_4:
+# “[D]owngrades are constrained to only downgrade packages, not also upgrade
+# them; if an upgrade before downgrade is needed, the user must ask for it
+# explicitly.”
+#
+# Here, downgrading B2 to B1 upgrades C1 to C2, and C2 does not depend on D2.
+# However, C2 would be an upgrade — not a downgrade — so B1 must also be
+# rejected.
+name: downcross1
+A: B2 C1
+B1: C2
+B2: C1
+C1: D2
+C2:
+D1:
+D2:
+build A: A B2 C1 D2
+downgrade A D1: A D1
+
+# https://research.swtch.com/vgo-mvs#algorithm_4:
+# “Unlike upgrades, downgrades must work by removing requirements, not adding
+# them.”
+#
+# However, downgrading a requirement may introduce a new requirement on a
+# previously-unrequired module. If each dependency's requirements are complete
+# (“tidy”), that can't change the behavior of any other package whose version is
+# not also being downgraded, so we should allow it.
+name: downcross2
+A: B2
+B1: C1
+B2: D2
+C1:
+D1:
+D2:
+build A: A B2 D2
+downgrade A D1: A B1 C1 D1
+
+name: downcycle
+A: A B2
+B2: A
+B1:
+build A: A B2
+downgrade A B1: A B1
+
+# Both B3 and C2 require D2.
+# If we downgrade D to D1, then in isolation B3 would downgrade to B1,
+# because B2 is hidden — B1 is the next-highest version that is not hidden.
+# However, if we downgrade D, we will also downgrade C to C1.
+# And C1 requires B2.hidden, and B2.hidden also meets our requirements:
+# it is compatible with D1 and a strict downgrade from B3.
+#
+# Since neither the initial nor the final build list includes B1,
+# and nothing in the final downgraded build list requires E at all,
+# no dependency on E1 (required by only B1) should be introduced.
+#
+name: downhiddenartifact
+A: B3 C2
+A1: B3
+B1: E1
+B2.hidden:
+B3: D2
+C1: B2.hidden
+C2: D2
+D1:
+D2:
+build A1: A1 B3 D2
+downgrade A1 D1: A1 B1 D1 E1
+build A: A B3 C2 D2
+downgrade A D1: A B2.hidden C1 D1
+
+# Both B3 and C3 require D2.
+# If we downgrade D to D1, then in isolation B3 would downgrade to B1,
+# and C3 would downgrade to C1.
+# But C1 requires B2.hidden, and B1 requires C2.hidden, so we can't
+# downgrade to either of those without pulling the other back up a little.
+#
+# B2.hidden and C2.hidden are both compatible with D1, so that still
+# meets our requirements — but then we're in an odd state in which
+# B and C have both been downgraded to hidden versions, without any
+# remaining requirements to explain how those hidden versions got there.
+#
+# TODO(bcmills): Would it be better to force downgrades to land on non-hidden
+# versions?
+# In this case, that would remove the dependencies on B and C entirely.
+#
+name: downhiddencross
+A: B3 C3
+B1: C2.hidden
+B2.hidden:
+B3: D2
+C1: B2.hidden
+C2.hidden:
+C3: D2
+D1:
+D2:
+build A: A B3 C3 D2
+downgrade A D1: A B2.hidden C2.hidden D1
+
+# golang.org/issue/25542.
+name: noprev1
+A: B4 C2
+B2.hidden:
+C2:
+build A: A B4 C2
+downgrade A B2.hidden: A B2.hidden C2
+
+name: noprev2
+A: B4 C2
+B2.hidden:
+B1:
+C2:
+build A: A B4 C2
+downgrade A B2.hidden: A B2.hidden C2
+
+name: noprev3
+A: B4 C2
+B3:
+B2.hidden:
+C2:
+build A: A B4 C2
+downgrade A B2.hidden: A B2.hidden C2
+
+# Cycles involving the target.
+
+# The target must be the newest version of itself.
+name: cycle1
+A: B1
+B1: A1
+B2: A2
+B3: A3
+build A: A B1
+upgrade A B2: A B2
+upgrade* A: A B3
+
+# golang.org/issue/29773:
+# Requirements of older versions of the target
+# must be carried over.
+name: cycle2
+A: B1
+A1: C1
+A2: D1
+B1: A1
+B2: A2
+C1: A2
+C2:
+D2:
+build A: A B1 C1 D1
+upgrade* A: A B2 C2 D2
+
+# Cycles with multiple possible solutions.
+# (golang.org/issue/34086)
+name: cycle3
+M: A1 C2
+A1: B1
+B1: C1
+B2: C2
+C1:
+C2: B2
+build M: M A1 B2 C2
+req M: A1 B2
+req M A: A1 B2
+req M C: A1 C2
+
+# Requirement minimization.
+
+name: req1
+A: B1 C1 D1 E1 F1
+B1: C1 E1 F1
+req A: B1 D1
+req A C: B1 C1 D1
+
+name: req2
+A: G1 H1
+G1: H1
+H1: G1
+req A: G1
+req A G: G1
+req A H: H1
+
+name: req3
+M: A1 B1
+A1: X1
+B1: X2
+X1: I1
+X2:
+req M: A1 B1
+
+name: reqnone
+M: Anone B1 D1 E1
+B1: Cnone D1
+E1: Fnone
+build M: M B1 D1 E1
+req M: B1 E1
+
+name: reqdup
+M: A1 B1
+A1: B1
+B1:
+req M A A: A1
+
+name: reqcross
+M: A1 B1 C1
+A1: B1 C1
+B1: C1
+C1:
+req M A B: A1 B1
+`
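+
+// A reading aid for the DSL above (derived from the parser in Test below):
+//   - An uppercase key such as "A1: B2 C3" declares that module A at version 1
+//     requires B2 and C3.
+//   - "build M: ..." asserts the expected BuildList result for target M.
+//   - "upgrade", "upgrade*", "upgradereq", "downgrade", and "req" assert the
+//     corresponding mvs operations, with any remaining key fields as arguments.
+//   - Versions ending in ".hidden" are skipped by reqsMap.Upgrade and
+//     reqsMap.Previous, so they are never selected automatically.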
+
+func Test(t *testing.T) {
+ var (
+ name string
+ reqs reqsMap
+ fns []func(*testing.T)
+ )
+ flush := func() {
+ if name != "" {
+ t.Run(name, func(t *testing.T) {
+ for _, fn := range fns {
+ fn(t)
+ }
+ if len(fns) == 0 {
+ t.Errorf("no functions tested")
+ }
+ })
+ }
+ }
+ m := func(s string) module.Version {
+ return module.Version{Path: s[:1], Version: s[1:]}
+ }
+ ms := func(list []string) []module.Version {
+ var mlist []module.Version
+ for _, s := range list {
+ mlist = append(mlist, m(s))
+ }
+ return mlist
+ }
+ checkList := func(t *testing.T, desc string, list []module.Version, err error, val string) {
+ if err != nil {
+ t.Fatalf("%s: %v", desc, err)
+ }
+ vs := ms(strings.Fields(val))
+ if !reflect.DeepEqual(list, vs) {
+ t.Errorf("%s = %v, want %v", desc, list, vs)
+ }
+ }
+
+ for _, line := range strings.Split(tests, "\n") {
+ line = strings.TrimSpace(line)
+ if strings.HasPrefix(line, "#") || line == "" {
+ continue
+ }
+ i := strings.Index(line, ":")
+ if i < 0 {
+ t.Fatalf("missing colon: %q", line)
+ }
+ key := strings.TrimSpace(line[:i])
+ val := strings.TrimSpace(line[i+1:])
+ if key == "" {
+ t.Fatalf("missing key: %q", line)
+ }
+ kf := strings.Fields(key)
+ switch kf[0] {
+ case "name":
+ if len(kf) != 1 {
+ t.Fatalf("name takes no arguments: %q", line)
+ }
+ flush()
+ reqs = make(reqsMap)
+ fns = nil
+ name = val
+ continue
+ case "build":
+ if len(kf) != 2 {
+ t.Fatalf("build takes one argument: %q", line)
+ }
+ fns = append(fns, func(t *testing.T) {
+ list, err := BuildList([]module.Version{m(kf[1])}, reqs)
+ checkList(t, key, list, err, val)
+ })
+ continue
+ case "upgrade*":
+ if len(kf) != 2 {
+ t.Fatalf("upgrade* takes one argument: %q", line)
+ }
+ fns = append(fns, func(t *testing.T) {
+ list, err := UpgradeAll(m(kf[1]), reqs)
+ checkList(t, key, list, err, val)
+ })
+ continue
+ case "upgradereq":
+ if len(kf) < 2 {
+ t.Fatalf("upgradereq takes at least one argument: %q", line)
+ }
+ fns = append(fns, func(t *testing.T) {
+ list, err := Upgrade(m(kf[1]), reqs, ms(kf[2:])...)
+ if err == nil {
+ // Copy the reqs map, but substitute the upgraded requirements in
+ // place of the target's original requirements.
+ upReqs := make(reqsMap, len(reqs))
+ for m, r := range reqs {
+ upReqs[m] = r
+ }
+ upReqs[m(kf[1])] = list
+
+ list, err = Req(m(kf[1]), nil, upReqs)
+ }
+ checkList(t, key, list, err, val)
+ })
+ continue
+ case "upgrade":
+ if len(kf) < 2 {
+ t.Fatalf("upgrade takes at least one argument: %q", line)
+ }
+ fns = append(fns, func(t *testing.T) {
+ list, err := Upgrade(m(kf[1]), reqs, ms(kf[2:])...)
+ checkList(t, key, list, err, val)
+ })
+ continue
+ case "downgrade":
+ if len(kf) < 2 {
+ t.Fatalf("downgrade takes at least one argument: %q", line)
+ }
+ fns = append(fns, func(t *testing.T) {
+ list, err := Downgrade(m(kf[1]), reqs, ms(kf[1:])...)
+ checkList(t, key, list, err, val)
+ })
+ continue
+ case "req":
+ if len(kf) < 2 {
+ t.Fatalf("req takes at least one argument: %q", line)
+ }
+ fns = append(fns, func(t *testing.T) {
+ list, err := Req(m(kf[1]), kf[2:], reqs)
+ checkList(t, key, list, err, val)
+ })
+ continue
+ }
+ if len(kf) == 1 && 'A' <= key[0] && key[0] <= 'Z' {
+ var rs []module.Version
+ for _, f := range strings.Fields(val) {
+ r := m(f)
+ if reqs[r] == nil {
+ reqs[r] = []module.Version{}
+ }
+ rs = append(rs, r)
+ }
+ reqs[m(key)] = rs
+ continue
+ }
+ t.Fatalf("bad line: %q", line)
+ }
+ flush()
+}
+
+type reqsMap map[module.Version][]module.Version
+
+func (r reqsMap) Max(_, v1, v2 string) string {
+ if v1 == "none" || v2 == "" {
+ return v2
+ }
+ if v2 == "none" || v1 == "" {
+ return v1
+ }
+ if v1 < v2 {
+ return v2
+ }
+ return v1
+}
+
+func (r reqsMap) Upgrade(m module.Version) (module.Version, error) {
+ u := module.Version{Version: "none"}
+ for k := range r {
+ if k.Path == m.Path && r.Max(k.Path, u.Version, k.Version) == k.Version && !strings.HasSuffix(k.Version, ".hidden") {
+ u = k
+ }
+ }
+ if u.Path == "" {
+ return module.Version{}, fmt.Errorf("missing module: %v", module.Version{Path: m.Path})
+ }
+ return u, nil
+}
+
+func (r reqsMap) Previous(m module.Version) (module.Version, error) {
+ var p module.Version
+ for k := range r {
+ if k.Path == m.Path && p.Version < k.Version && k.Version < m.Version && !strings.HasSuffix(k.Version, ".hidden") {
+ p = k
+ }
+ }
+ if p.Path == "" {
+ return module.Version{Path: m.Path, Version: "none"}, nil
+ }
+ return p, nil
+}
+
+func (r reqsMap) Required(m module.Version) ([]module.Version, error) {
+ rr, ok := r[m]
+ if !ok {
+ return nil, fmt.Errorf("missing module: %v", m)
+ }
+ return rr, nil
+}
diff --git a/src/cmd/go/internal/par/queue.go b/src/cmd/go/internal/par/queue.go
new file mode 100644
index 0000000..180bc75
--- /dev/null
+++ b/src/cmd/go/internal/par/queue.go
@@ -0,0 +1,88 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package par
+
+import "fmt"
+
+// Queue manages a set of work items to be executed in parallel. The number of
+// active work items is limited, and excess items are queued sequentially.
+type Queue struct {
+ maxActive int
+ st chan queueState
+}
+
+type queueState struct {
+ active int // number of goroutines processing work; always nonzero when len(backlog) > 0
+ backlog []func()
+ idle chan struct{} // if non-nil, closed when active becomes 0
+}
+
+// NewQueue returns a Queue that executes up to maxActive items in parallel.
+//
+// maxActive must be positive.
+func NewQueue(maxActive int) *Queue {
+ if maxActive < 1 {
+ panic(fmt.Sprintf("par.NewQueue called with nonpositive limit (%d)", maxActive))
+ }
+
+ q := &Queue{
+ maxActive: maxActive,
+ st: make(chan queueState, 1),
+ }
+ q.st <- queueState{}
+ return q
+}
+
+// Add adds f as a work item in the queue.
+//
+// Add returns immediately, but the queue will be marked as non-idle until after
+// f (and any subsequently-added work) has completed.
+func (q *Queue) Add(f func()) {
+ st := <-q.st
+ if st.active == q.maxActive {
+ st.backlog = append(st.backlog, f)
+ q.st <- st
+ return
+ }
+ if st.active == 0 {
+ // Mark q as non-idle.
+ st.idle = nil
+ }
+ st.active++
+ q.st <- st
+
+ go func() {
+ for {
+ f()
+
+ st := <-q.st
+ if len(st.backlog) == 0 {
+ if st.active--; st.active == 0 && st.idle != nil {
+ close(st.idle)
+ }
+ q.st <- st
+ return
+ }
+ f, st.backlog = st.backlog[0], st.backlog[1:]
+ q.st <- st
+ }
+ }()
+}
+
+// Idle returns a channel that will be closed when q has no (active or enqueued)
+// work outstanding.
+func (q *Queue) Idle() <-chan struct{} {
+ st := <-q.st
+ defer func() { q.st <- st }()
+
+ if st.idle == nil {
+ st.idle = make(chan struct{})
+ if st.active == 0 {
+ close(st.idle)
+ }
+ }
+
+ return st.idle
+}
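+
+// A minimal usage sketch (added for illustration; "work" is a hypothetical
+// []func() and is not part of this package):
+//
+//	q := NewQueue(4) // at most 4 items run concurrently
+//	for _, f := range work {
+//		q.Add(f) // items beyond the limit wait in the backlog
+//	}
+//	<-q.Idle() // blocks until all added work has completed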
diff --git a/src/cmd/go/internal/par/queue_test.go b/src/cmd/go/internal/par/queue_test.go
new file mode 100644
index 0000000..1331e65
--- /dev/null
+++ b/src/cmd/go/internal/par/queue_test.go
@@ -0,0 +1,79 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package par
+
+import (
+ "sync"
+ "testing"
+)
+
+func TestQueueIdle(t *testing.T) {
+ q := NewQueue(1)
+ select {
+ case <-q.Idle():
+ default:
+ t.Errorf("NewQueue(1) is not initially idle.")
+ }
+
+ started := make(chan struct{})
+ unblock := make(chan struct{})
+ q.Add(func() {
+ close(started)
+ <-unblock
+ })
+
+ <-started
+ idle := q.Idle()
+ select {
+ case <-idle:
+ t.Errorf("NewQueue(1) is marked idle while processing work.")
+ default:
+ }
+
+ close(unblock)
+ <-idle // Should be closed as soon as the Add callback returns.
+}
+
+func TestQueueBacklog(t *testing.T) {
+ const (
+ maxActive = 2
+ totalWork = 3 * maxActive
+ )
+
+ q := NewQueue(maxActive)
+ t.Logf("q = NewQueue(%d)", maxActive)
+
+ var wg sync.WaitGroup
+ wg.Add(totalWork)
+ started := make([]chan struct{}, totalWork)
+ unblock := make(chan struct{})
+ for i := range started {
+ started[i] = make(chan struct{})
+ i := i
+ q.Add(func() {
+ close(started[i])
+ <-unblock
+ wg.Done()
+ })
+ }
+
+ for i, c := range started {
+ if i < maxActive {
+ <-c // Work item i should be started immediately.
+ } else {
+ select {
+ case <-c:
+ t.Errorf("Work item %d started before previous items finished.", i)
+ default:
+ }
+ }
+ }
+
+ close(unblock)
+ for _, c := range started[maxActive:] {
+ <-c
+ }
+ wg.Wait()
+}
diff --git a/src/cmd/go/internal/par/work.go b/src/cmd/go/internal/par/work.go
new file mode 100644
index 0000000..5b6de94
--- /dev/null
+++ b/src/cmd/go/internal/par/work.go
@@ -0,0 +1,223 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package par implements parallel execution helpers.
+package par
+
+import (
+ "errors"
+ "math/rand"
+ "sync"
+ "sync/atomic"
+)
+
+// Work manages a set of work items to be executed in parallel, at most once each.
+// The items in the set must all be valid map keys.
+type Work[T comparable] struct {
+ f func(T) // function to run for each item
+ running int // total number of runners
+
+ mu sync.Mutex
+ added map[T]bool // items added to set
+ todo []T // items yet to be run
+ wait sync.Cond // wait when todo is empty
+ waiting int // number of runners waiting for todo
+}
+
+func (w *Work[T]) init() {
+ if w.added == nil {
+ w.added = make(map[T]bool)
+ }
+}
+
+// Add adds item to the work set, if it hasn't already been added.
+func (w *Work[T]) Add(item T) {
+ w.mu.Lock()
+ w.init()
+ if !w.added[item] {
+ w.added[item] = true
+ w.todo = append(w.todo, item)
+ if w.waiting > 0 {
+ w.wait.Signal()
+ }
+ }
+ w.mu.Unlock()
+}
+
+// Do runs f in parallel on items from the work set,
+// with at most n invocations of f running at a time.
+// It returns when everything added to the work set has been processed.
+// At least one item should have been added to the work set
+// before calling Do (or else Do returns immediately),
+// but it is allowed for f(item) to add new items to the set.
+// Do should only be used once on a given Work.
+func (w *Work[T]) Do(n int, f func(item T)) {
+ if n < 1 {
+ panic("par.Work.Do: n < 1")
+ }
+ if w.running >= 1 {
+ panic("par.Work.Do: already called Do")
+ }
+
+ w.running = n
+ w.f = f
+ w.wait.L = &w.mu
+
+ for i := 0; i < n-1; i++ {
+ go w.runner()
+ }
+ w.runner()
+}
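+
+// A minimal usage sketch (added for illustration; "visit" is a hypothetical
+// func(string) []string and is not part of this package):
+//
+//	var w Work[string]
+//	w.Add("root")
+//	w.Do(8, func(item string) {
+//		for _, next := range visit(item) {
+//			w.Add(next) // re-adding an already-seen item is a no-op
+//		}
+//	})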
+
+// runner executes work in w until both nothing is left to do
+// and all the runners are waiting for work.
+// (Then all the runners return.)
+func (w *Work[T]) runner() {
+ for {
+ // Wait for something to do.
+ w.mu.Lock()
+ for len(w.todo) == 0 {
+ w.waiting++
+ if w.waiting == w.running {
+ // All done.
+ w.wait.Broadcast()
+ w.mu.Unlock()
+ return
+ }
+ w.wait.Wait()
+ w.waiting--
+ }
+
+ // Pick something to do at random,
+ // to eliminate pathological contention
+ // in case items added at about the same time
+ // are most likely to contend.
+ i := rand.Intn(len(w.todo))
+ item := w.todo[i]
+ w.todo[i] = w.todo[len(w.todo)-1]
+ w.todo = w.todo[:len(w.todo)-1]
+ w.mu.Unlock()
+
+ w.f(item)
+ }
+}
+
+// ErrCache is like Cache except that it also stores
+// an error value alongside the cached value V.
+type ErrCache[K comparable, V any] struct {
+ Cache[K, errValue[V]]
+}
+
+type errValue[V any] struct {
+ v V
+ err error
+}
+
+func (c *ErrCache[K, V]) Do(key K, f func() (V, error)) (V, error) {
+ v := c.Cache.Do(key, func() errValue[V] {
+ v, err := f()
+ return errValue[V]{v, err}
+ })
+ return v.v, v.err
+}
+
+var ErrCacheEntryNotFound = errors.New("cache entry not found")
+
+// Get returns the cached result associated with key.
+// It returns ErrCacheEntryNotFound if there is no such result.
+func (c *ErrCache[K, V]) Get(key K) (V, error) {
+ v, ok := c.Cache.Get(key)
+ if !ok {
+ v.err = ErrCacheEntryNotFound
+ }
+ return v.v, v.err
+}
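+
+// A minimal usage sketch (added for illustration; url and fetch are
+// hypothetical, not part of this package):
+//
+//	var c ErrCache[string, []byte]
+//	body, err := c.Do(url, func() ([]byte, error) { return fetch(url) })
+//
+// Later calls with the same url return the same body and err without
+// re-running fetch.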
+
+// Cache runs an action once per key and caches the result.
+type Cache[K comparable, V any] struct {
+ m sync.Map
+}
+
+type cacheEntry[V any] struct {
+ done atomic.Bool
+ mu sync.Mutex
+ result V
+}
+
+// Do calls the function f if and only if Do is being called for the first time with this key.
+// No call to Do with a given key returns until the one call to f returns.
+// Do returns the value returned by the one call to f.
+func (c *Cache[K, V]) Do(key K, f func() V) V {
+ entryIface, ok := c.m.Load(key)
+ if !ok {
+ entryIface, _ = c.m.LoadOrStore(key, new(cacheEntry[V]))
+ }
+ e := entryIface.(*cacheEntry[V])
+ if !e.done.Load() {
+ e.mu.Lock()
+ if !e.done.Load() {
+ e.result = f()
+ e.done.Store(true)
+ }
+ e.mu.Unlock()
+ }
+ return e.result
+}
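+
+// Added commentary: Do combines the per-entry done flag with a mutex as a
+// double-checked lock. The atomic load is the lock-free fast path once a
+// result has been published; the mutex guarantees that f runs at most once
+// per key even under concurrent calls.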
+
+// Get returns the cached result associated with key
+// and reports whether there is such a result.
+//
+// If the result for key is being computed, Get does not wait for the computation to finish.
+func (c *Cache[K, V]) Get(key K) (V, bool) {
+ entryIface, ok := c.m.Load(key)
+ if !ok {
+ return *new(V), false
+ }
+ e := entryIface.(*cacheEntry[V])
+ if !e.done.Load() {
+ return *new(V), false
+ }
+ return e.result, true
+}
+
+// Clear removes all entries in the cache.
+//
+// Concurrent calls to Get may return old values. Concurrent calls to Do
+// may return old values or store results in entries that have been deleted.
+//
+// TODO(jayconrod): Delete this after the package cache clearing functions
+// in internal/load have been removed.
+func (c *Cache[K, V]) Clear() {
+ c.m.Range(func(key, value any) bool {
+ c.m.Delete(key)
+ return true
+ })
+}
+
+// Delete removes an entry from the map. It is safe to call Delete for an
+// entry that does not exist. Delete will return quickly, even if the result
+// for a key is still being computed; the computation will finish, but the
+// result won't be accessible through the cache.
+//
+// TODO(jayconrod): Delete this after the package cache clearing functions
+// in internal/load have been removed.
+func (c *Cache[K, V]) Delete(key K) {
+ c.m.Delete(key)
+}
+
+// DeleteIf calls pred for each key in the map. If pred returns true for a key,
+// DeleteIf removes the corresponding entry. If the result for a key is
+// still being computed, DeleteIf will remove the entry without waiting for
+// the computation to finish. The result won't be accessible through the cache.
+//
+// TODO(jayconrod): Delete this after the package cache clearing functions
+// in internal/load have been removed.
+func (c *Cache[K, V]) DeleteIf(pred func(key K) bool) {
+ c.m.Range(func(key, _ any) bool {
+ if key := key.(K); pred(key) {
+ c.Delete(key)
+ }
+ return true
+ })
+}
diff --git a/src/cmd/go/internal/par/work_test.go b/src/cmd/go/internal/par/work_test.go
new file mode 100644
index 0000000..9d96ffa
--- /dev/null
+++ b/src/cmd/go/internal/par/work_test.go
@@ -0,0 +1,76 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package par
+
+import (
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+func TestWork(t *testing.T) {
+ var w Work[int]
+
+ const N = 10000
+ n := int32(0)
+ w.Add(N)
+ w.Do(100, func(i int) {
+ atomic.AddInt32(&n, 1)
+ if i >= 2 {
+ w.Add(i - 1)
+ w.Add(i - 2)
+ }
+ w.Add(i >> 1)
+ w.Add((i >> 1) ^ 1)
+ })
+ if n != N+1 {
+ t.Fatalf("ran %d items, expected %d", n, N+1)
+ }
+}
+
+func TestWorkParallel(t *testing.T) {
+ for tries := 0; tries < 10; tries++ {
+ var w Work[int]
+ const N = 100
+ for i := 0; i < N; i++ {
+ w.Add(i)
+ }
+ start := time.Now()
+ var n int32
+ w.Do(N, func(x int) {
+ time.Sleep(1 * time.Millisecond)
+ atomic.AddInt32(&n, +1)
+ })
+ if n != N {
+ t.Fatalf("par.Work.Do did not do all the work")
+ }
+ if time.Since(start) < N/2*time.Millisecond {
+ return
+ }
+ }
+ t.Fatalf("par.Work.Do does not seem to be parallel")
+}
+
+func TestCache(t *testing.T) {
+ var cache Cache[int, int]
+
+ n := 1
+ v := cache.Do(1, func() int { n++; return n })
+ if v != 2 {
+ t.Fatalf("cache.Do(1) did not run f")
+ }
+ v = cache.Do(1, func() int { n++; return n })
+ if v != 2 {
+ t.Fatalf("cache.Do(1) ran f again!")
+ }
+ v = cache.Do(2, func() int { n++; return n })
+ if v != 3 {
+ t.Fatalf("cache.Do(2) did not run f")
+ }
+ v = cache.Do(1, func() int { n++; return n })
+ if v != 2 {
+ t.Fatalf("cache.Do(1) did not return saved value from original cache.Do(1)")
+ }
+}
diff --git a/src/cmd/go/internal/robustio/robustio.go b/src/cmd/go/internal/robustio/robustio.go
new file mode 100644
index 0000000..15b3377
--- /dev/null
+++ b/src/cmd/go/internal/robustio/robustio.go
@@ -0,0 +1,53 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package robustio wraps I/O functions that are prone to failure on Windows,
+// transparently retrying errors up to an arbitrary timeout.
+//
+// Errors are classified heuristically and retries are bounded, so the functions
+// in this package do not completely eliminate spurious errors. However, they do
+// significantly reduce the rate of failure in practice.
+package robustio
+
+// Rename is like os.Rename, but on Windows retries errors that may occur if the
+// file is concurrently read or overwritten.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func Rename(oldpath, newpath string) error {
+ return rename(oldpath, newpath)
+}
+
+// ReadFile is like os.ReadFile, but on Windows retries errors that may
+// occur if the file is concurrently replaced.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func ReadFile(filename string) ([]byte, error) {
+ return readFile(filename)
+}
+
+// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur
+// if an executable file in the directory has recently been executed.
+//
+// (See golang.org/issue/19491.)
+func RemoveAll(path string) error {
+ return removeAll(path)
+}
+
+// IsEphemeralError reports whether err is one of the errors that the functions
+// in this package attempt to mitigate.
+//
+// Errors considered ephemeral include:
+// - syscall.ERROR_ACCESS_DENIED
+// - syscall.ERROR_FILE_NOT_FOUND
+// - internal/syscall/windows.ERROR_SHARING_VIOLATION
+//
+// This set may be expanded in the future; programs must not rely on the
+// non-ephemerality of any given error.
+func IsEphemeralError(err error) bool {
+ return isEphemeralError(err)
+}
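+
+// A minimal usage sketch (added for illustration; tmp and dst are hypothetical
+// paths):
+//
+//	if err := Rename(tmp, dst); err != nil {
+//		// Retries up to the internal timeout have already been attempted;
+//		// treat any remaining error as permanent.
+//		return err
+//	}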
diff --git a/src/cmd/go/internal/robustio/robustio_darwin.go b/src/cmd/go/internal/robustio/robustio_darwin.go
new file mode 100644
index 0000000..99fd8eb
--- /dev/null
+++ b/src/cmd/go/internal/robustio/robustio_darwin.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import (
+ "errors"
+ "syscall"
+)
+
+const errFileNotFound = syscall.ENOENT
+
+// isEphemeralError returns true if err may be resolved by waiting.
+func isEphemeralError(err error) bool {
+ var errno syscall.Errno
+ if errors.As(err, &errno) {
+ return errno == errFileNotFound
+ }
+ return false
+}
diff --git a/src/cmd/go/internal/robustio/robustio_flaky.go b/src/cmd/go/internal/robustio/robustio_flaky.go
new file mode 100644
index 0000000..c56e36c
--- /dev/null
+++ b/src/cmd/go/internal/robustio/robustio_flaky.go
@@ -0,0 +1,91 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows || darwin
+
+package robustio
+
+import (
+ "errors"
+ "math/rand"
+ "os"
+ "syscall"
+ "time"
+)
+
+const arbitraryTimeout = 2000 * time.Millisecond
+
+// retry retries ephemeral errors from f up to an arbitrary timeout
+// to work around filesystem flakiness on Windows and Darwin.
+func retry(f func() (err error, mayRetry bool)) error {
+ var (
+ bestErr error
+ lowestErrno syscall.Errno
+ start time.Time
+ nextSleep time.Duration = 1 * time.Millisecond
+ )
+ for {
+ err, mayRetry := f()
+ if err == nil || !mayRetry {
+ return err
+ }
+
+ var errno syscall.Errno
+ if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) {
+ bestErr = err
+ lowestErrno = errno
+ } else if bestErr == nil {
+ bestErr = err
+ }
+
+ if start.IsZero() {
+ start = time.Now()
+ } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout {
+ break
+ }
+ time.Sleep(nextSleep)
+ nextSleep += time.Duration(rand.Int63n(int64(nextSleep)))
+ }
+
+ return bestErr
+}
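+
+// Added commentary: nextSleep starts at 1ms and grows by a random amount of up
+// to its current value on each attempt (a randomized, roughly exponential
+// backoff), so early retries are cheap while the total number of attempts
+// within the 2-second window stays small.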
+
+// rename is like os.Rename, but retries ephemeral errors.
+//
+// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with
+// MOVEFILE_REPLACE_EXISTING.
+//
+// Windows also provides a different system call, ReplaceFile,
+// that provides similar semantics, but perhaps preserves more metadata. (The
+// documentation on the differences between the two is very sparse.)
+//
+// Empirical error rates with MoveFileEx are lower under modest concurrency, so
+// for now we're sticking with what the os package already provides.
+func rename(oldpath, newpath string) (err error) {
+ return retry(func() (err error, mayRetry bool) {
+ err = os.Rename(oldpath, newpath)
+ return err, isEphemeralError(err)
+ })
+}
+
+// readFile is like os.ReadFile, but retries ephemeral errors.
+func readFile(filename string) ([]byte, error) {
+ var b []byte
+ err := retry(func() (err error, mayRetry bool) {
+ b, err = os.ReadFile(filename)
+
+ // Unlike in rename, we do not retry errFileNotFound here: it can occur
+ // as a spurious error, but the file may also genuinely not exist, so the
+ // increase in robustness is probably not worth the extra latency.
+ return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound)
+ })
+ return b, err
+}
+
+func removeAll(path string) error {
+ return retry(func() (err error, mayRetry bool) {
+ err = os.RemoveAll(path)
+ return err, isEphemeralError(err)
+ })
+}
diff --git a/src/cmd/go/internal/robustio/robustio_other.go b/src/cmd/go/internal/robustio/robustio_other.go
new file mode 100644
index 0000000..da9a46e
--- /dev/null
+++ b/src/cmd/go/internal/robustio/robustio_other.go
@@ -0,0 +1,27 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows && !darwin
+
+package robustio
+
+import (
+ "os"
+)
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func readFile(filename string) ([]byte, error) {
+ return os.ReadFile(filename)
+}
+
+func removeAll(path string) error {
+ return os.RemoveAll(path)
+}
+
+func isEphemeralError(err error) bool {
+ return false
+}
diff --git a/src/cmd/go/internal/robustio/robustio_windows.go b/src/cmd/go/internal/robustio/robustio_windows.go
new file mode 100644
index 0000000..687dcb6
--- /dev/null
+++ b/src/cmd/go/internal/robustio/robustio_windows.go
@@ -0,0 +1,27 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import (
+ "errors"
+ "internal/syscall/windows"
+ "syscall"
+)
+
+const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND
+
+// isEphemeralError returns true if err may be resolved by waiting.
+func isEphemeralError(err error) bool {
+ var errno syscall.Errno
+ if errors.As(err, &errno) {
+ switch errno {
+ case syscall.ERROR_ACCESS_DENIED,
+ syscall.ERROR_FILE_NOT_FOUND,
+ windows.ERROR_SHARING_VIOLATION:
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go
new file mode 100644
index 0000000..4a3dcf0
--- /dev/null
+++ b/src/cmd/go/internal/run/run.go
@@ -0,0 +1,219 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package run implements the “go run” command.
+package run
+
+import (
+ "context"
+ "fmt"
+ "go/build"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/str"
+ "cmd/go/internal/work"
+)
+
+var CmdRun = &base.Command{
+ UsageLine: "go run [build flags] [-exec xprog] package [arguments...]",
+ Short: "compile and run Go program",
+ Long: `
+Run compiles and runs the named main Go package.
+Typically the package is specified as a list of .go source files from a single
+directory, but it may also be an import path, file system path, or pattern
+matching a single known package, as in 'go run .' or 'go run my/cmd'.
+
+If the package argument has a version suffix (like @latest or @v1.0.0),
+"go run" builds the program in module-aware mode, ignoring the go.mod file in
+the current directory or any parent directory, if there is one. This is useful
+for running programs without affecting the dependencies of the main module.
+
+If the package argument doesn't have a version suffix, "go run" may run in
+module-aware mode or GOPATH mode, depending on the GO111MODULE environment
+variable and the presence of a go.mod file. See 'go help modules' for details.
+If module-aware mode is enabled, "go run" runs in the context of the main
+module.
+
+By default, 'go run' runs the compiled binary directly: 'a.out arguments...'.
+If the -exec flag is given, 'go run' invokes the binary using xprog:
+ 'xprog a.out arguments...'.
+If the -exec flag is not given, GOOS or GOARCH is different from the system
+default, and a program named go_$GOOS_$GOARCH_exec can be found
+on the current search path, 'go run' invokes the binary using that program,
+for example 'go_js_wasm_exec a.out arguments...'. This allows execution of
+cross-compiled programs when a simulator or other execution method is
+available.
+
+By default, 'go run' compiles the binary without generating the information
+used by debuggers, to reduce build time. To include debugger information in
+the binary, use 'go build'.
+
+The exit status of Run is not the exit status of the compiled binary.
+
+For more about build flags, see 'go help build'.
+For more about specifying packages, see 'go help packages'.
+
+See also: go build.
+ `,
+}
+
+func init() {
+ CmdRun.Run = runRun // break init loop
+
+ work.AddBuildFlags(CmdRun, work.DefaultBuildFlags)
+ if cfg.Experiment != nil && cfg.Experiment.CoverageRedesign {
+ work.AddCoverFlags(CmdRun, nil)
+ }
+ CmdRun.Flag.Var((*base.StringsFlag)(&work.ExecCmd), "exec", "")
+}
+
+func printStderr(args ...any) (int, error) {
+ return fmt.Fprint(os.Stderr, args...)
+}
+
+func runRun(ctx context.Context, cmd *base.Command, args []string) {
+ if shouldUseOutsideModuleMode(args) {
+ // Set global module flags for 'go run cmd@version'.
+ // This must be done before modload.Init, but we need to call work.BuildInit
+ // before loading packages, since it affects package locations, e.g.,
+ // for -race and -msan.
+ modload.ForceUseModules = true
+ modload.RootMode = modload.NoRoot
+ modload.AllowMissingModuleImports()
+ modload.Init()
+ } else {
+ modload.InitWorkfile()
+ }
+
+ work.BuildInit()
+ b := work.NewBuilder("")
+ defer func() {
+ if err := b.Close(); err != nil {
+ base.Fatal(err)
+ }
+ }()
+ b.Print = printStderr
+
+ i := 0
+ for i < len(args) && strings.HasSuffix(args[i], ".go") {
+ i++
+ }
+ pkgOpts := load.PackageOpts{MainOnly: true}
+ var p *load.Package
+ if i > 0 {
+ files := args[:i]
+ for _, file := range files {
+ if strings.HasSuffix(file, "_test.go") {
+ // GoFilesPackage is going to assign this to TestGoFiles.
+ // Reject since it won't be part of the build.
+ base.Fatalf("go: cannot run *_test.go files (%s)", file)
+ }
+ }
+ p = load.GoFilesPackage(ctx, pkgOpts, files)
+ } else if len(args) > 0 && !strings.HasPrefix(args[0], "-") {
+ arg := args[0]
+ var pkgs []*load.Package
+ if strings.Contains(arg, "@") && !build.IsLocalImport(arg) && !filepath.IsAbs(arg) {
+ var err error
+ pkgs, err = load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args[:1])
+ if err != nil {
+ base.Fatal(err)
+ }
+ } else {
+ pkgs = load.PackagesAndErrors(ctx, pkgOpts, args[:1])
+ }
+
+ if len(pkgs) == 0 {
+ base.Fatalf("go: no packages loaded from %s", arg)
+ }
+ if len(pkgs) > 1 {
+ var names []string
+ for _, p := range pkgs {
+ names = append(names, p.ImportPath)
+ }
+ base.Fatalf("go: pattern %s matches multiple packages:\n\t%s", arg, strings.Join(names, "\n\t"))
+ }
+ p = pkgs[0]
+ i++
+ } else {
+ base.Fatalf("go: no go files listed")
+ }
+ cmdArgs := args[i:]
+ load.CheckPackageErrors([]*load.Package{p})
+
+ if cfg.Experiment.CoverageRedesign && cfg.BuildCover {
+ load.PrepareForCoverageBuild([]*load.Package{p})
+ }
+
+ p.Internal.OmitDebug = true
+ p.Target = "" // must build - not up to date
+ if p.Internal.CmdlineFiles {
+ // Set the executable name if a Go file is given as a command-line argument.
+ var src string
+ if len(p.GoFiles) > 0 {
+ src = p.GoFiles[0]
+ } else if len(p.CgoFiles) > 0 {
+ src = p.CgoFiles[0]
+ } else {
+ // this case could only happen if the provided source uses cgo
+ // while cgo is disabled.
+ hint := ""
+ if !cfg.BuildContext.CgoEnabled {
+ hint = " (cgo is disabled)"
+ }
+ base.Fatalf("go: no suitable source files%s", hint)
+ }
+ p.Internal.ExeName = src[:len(src)-len(".go")]
+ } else {
+ p.Internal.ExeName = path.Base(p.ImportPath)
+ }
+
+ a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p)
+ a := &work.Action{Mode: "go run", Actor: work.ActorFunc(buildRunProgram), Args: cmdArgs, Deps: []*work.Action{a1}}
+ b.Do(ctx, a)
+}
+
+// shouldUseOutsideModuleMode returns whether 'go run' will load packages in
+// module-aware mode, ignoring the go.mod file in the current directory. It
+// returns true if the first argument contains "@", does not begin with "-"
+// (resembling a flag) or end with ".go" (resembling a file), and is not a
+// local or absolute file path.
+//
+// These rules are slightly different than other commands. Whether or not
+// 'go run' uses this mode, it interprets arguments ending with ".go" as files
+// and uses arguments up to the last ".go" argument to comprise the package.
+// If there are no ".go" arguments, only the first argument is interpreted
+// as a package path, since there can be only one package.
+func shouldUseOutsideModuleMode(args []string) bool {
+ // NOTE: "@" not allowed in import paths, but it is allowed in non-canonical
+ // versions.
+ return len(args) > 0 &&
+ !strings.HasSuffix(args[0], ".go") &&
+ !strings.HasPrefix(args[0], "-") &&
+ strings.Contains(args[0], "@") &&
+ !build.IsLocalImport(args[0]) &&
+ !filepath.IsAbs(args[0])
+}
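+
+// For example (illustrative): "example.com/cmd@latest" and
+// "example.com/cmd@v1.2.3" select this mode, while "./tool", "main.go",
+// and "-n" do not.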
+
+// buildRunProgram is the action for running a binary that has already
+// been compiled. We ignore exit status.
+func buildRunProgram(b *work.Builder, ctx context.Context, a *work.Action) error {
+ cmdline := str.StringList(work.FindExecCmd(), a.Deps[0].Target, a.Args)
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "%s", strings.Join(cmdline, " "))
+ if cfg.BuildN {
+ return nil
+ }
+ }
+
+ base.RunStdin(cmdline)
+ return nil
+}
diff --git a/src/cmd/go/internal/script/cmds.go b/src/cmd/go/internal/script/cmds.go
new file mode 100644
index 0000000..36e16c5
--- /dev/null
+++ b/src/cmd/go/internal/script/cmds.go
@@ -0,0 +1,1125 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package script
+
+import (
+ "cmd/go/internal/robustio"
+ "errors"
+ "fmt"
+ "internal/diff"
+ "io/fs"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// DefaultCmds returns a set of broadly useful script commands.
+//
+// Run the 'help' command within a script engine to view a list of the available
+// commands.
+func DefaultCmds() map[string]Cmd {
+ return map[string]Cmd{
+ "cat": Cat(),
+ "cd": Cd(),
+ "chmod": Chmod(),
+ "cmp": Cmp(),
+ "cmpenv": Cmpenv(),
+ "cp": Cp(),
+ "echo": Echo(),
+ "env": Env(),
+ "exec": Exec(func(cmd *exec.Cmd) error { return cmd.Process.Signal(os.Interrupt) }, 100*time.Millisecond), // arbitrary grace period
+ "exists": Exists(),
+ "grep": Grep(),
+ "help": Help(),
+ "mkdir": Mkdir(),
+ "mv": Mv(),
+ "rm": Rm(),
+ "replace": Replace(),
+ "sleep": Sleep(),
+ "stderr": Stderr(),
+ "stdout": Stdout(),
+ "stop": Stop(),
+ "symlink": Symlink(),
+ "wait": Wait(),
+ }
+}
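+
+// A wiring sketch (hypothetical; it assumes the Engine type in this package
+// exposes a Cmds map alongside the Conds field referenced later in this file):
+//
+//	eng := &Engine{Cmds: DefaultCmds()}
+//	eng.Cmds["myprog"] = Program("myprog", nil, 100*time.Millisecond)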
+
+// Command returns a new Cmd with a Usage method that returns a copy of the
+// given CmdUsage and a Run method that calls the given function.
+func Command(usage CmdUsage, run func(*State, ...string) (WaitFunc, error)) Cmd {
+ return &funcCmd{
+ usage: usage,
+ run: run,
+ }
+}
+
+// A funcCmd implements Cmd using a function value.
+type funcCmd struct {
+ usage CmdUsage
+ run func(*State, ...string) (WaitFunc, error)
+}
+
+func (c *funcCmd) Run(s *State, args ...string) (WaitFunc, error) {
+ return c.run(s, args...)
+}
+
+func (c *funcCmd) Usage() *CmdUsage { return &c.usage }
+
+// firstNonFlag returns a slice containing the index of the first argument in
+// rawArgs that is not a flag, or nil if all arguments are flags.
+func firstNonFlag(rawArgs ...string) []int {
+ for i, arg := range rawArgs {
+ if !strings.HasPrefix(arg, "-") {
+ return []int{i}
+ }
+ if arg == "--" {
+ return []int{i + 1}
+ }
+ }
+ return nil
+}
+
+// Cat writes the concatenated contents of the named file(s) to the script's
+// stdout buffer.
+func Cat() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "concatenate files and print to the script's stdout buffer",
+ Args: "files...",
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) == 0 {
+ return nil, ErrUsage
+ }
+
+ paths := make([]string, 0, len(args))
+ for _, arg := range args {
+ paths = append(paths, s.Path(arg))
+ }
+
+ var buf strings.Builder
+ errc := make(chan error, 1)
+ go func() {
+ for _, p := range paths {
+ b, err := os.ReadFile(p)
+ buf.Write(b)
+ if err != nil {
+ errc <- err
+ return
+ }
+ }
+ errc <- nil
+ }()
+
+ wait := func(*State) (stdout, stderr string, err error) {
+ err = <-errc
+ return buf.String(), "", err
+ }
+ return wait, nil
+ })
+}
+
+// Cd changes the current working directory.
+func Cd() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "change the working directory",
+ Args: "dir",
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) != 1 {
+ return nil, ErrUsage
+ }
+ return nil, s.Chdir(args[0])
+ })
+}
+
+// Chmod changes the permissions of a file or a directory.
+func Chmod() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "change file mode bits",
+ Args: "perm paths...",
+ Detail: []string{
+ "Changes the permissions of the named files or directories to be equal to perm.",
+ "Only numerical permissions are supported.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) < 2 {
+ return nil, ErrUsage
+ }
+
+ perm, err := strconv.ParseUint(args[0], 0, 32)
+ if err != nil || perm&uint64(fs.ModePerm) != perm {
+ return nil, fmt.Errorf("invalid mode: %s", args[0])
+ }
+
+ for _, arg := range args[1:] {
+ err := os.Chmod(s.Path(arg), fs.FileMode(perm))
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+}
+
+// Cmp compares the contents of two files, or the contents of either the
+// "stdout" or "stderr" buffer and a file, returning a non-nil error if the
+// contents differ.
+func Cmp() Cmd {
+ return Command(
+ CmdUsage{
+ Args: "[-q] file1 file2",
+ Summary: "compare files for differences",
+ Detail: []string{
+ "By convention, file1 is the actual data and file2 is the expected data.",
+ "The command succeeds if the file contents are identical.",
+ "File1 can be 'stdout' or 'stderr' to compare the stdout or stderr buffer from the most recent command.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ return nil, doCompare(s, false, args...)
+ })
+}
+
+// Cmpenv is like Cmp, but also performs environment substitutions
+// on the contents of both arguments.
+func Cmpenv() Cmd {
+ return Command(
+ CmdUsage{
+ Args: "[-q] file1 file2",
+ Summary: "compare files for differences, with environment expansion",
+ Detail: []string{
+ "By convention, file1 is the actual data and file2 is the expected data.",
+ "The command succeeds if the file contents are identical after substituting variables from the script environment.",
+ "File1 can be 'stdout' or 'stderr' to compare the script's stdout or stderr buffer.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ return nil, doCompare(s, true, args...)
+ })
+}
+
+func doCompare(s *State, env bool, args ...string) error {
+ quiet := false
+ if len(args) > 0 && args[0] == "-q" {
+ quiet = true
+ args = args[1:]
+ }
+ if len(args) != 2 {
+ return ErrUsage
+ }
+
+ name1, name2 := args[0], args[1]
+ var text1, text2 string
+ switch name1 {
+ case "stdout":
+ text1 = s.Stdout()
+ case "stderr":
+ text1 = s.Stderr()
+ default:
+ data, err := os.ReadFile(s.Path(name1))
+ if err != nil {
+ return err
+ }
+ text1 = string(data)
+ }
+
+ data, err := os.ReadFile(s.Path(name2))
+ if err != nil {
+ return err
+ }
+ text2 = string(data)
+
+ if env {
+ text1 = s.ExpandEnv(text1, false)
+ text2 = s.ExpandEnv(text2, false)
+ }
+
+ if text1 != text2 {
+ if !quiet {
+ diffText := diff.Diff(name1, []byte(text1), name2, []byte(text2))
+ s.Logf("%s\n", diffText)
+ }
+ return fmt.Errorf("%s and %s differ", name1, name2)
+ }
+ return nil
+}
+
+// Cp copies one or more files to a new location.
+func Cp() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "copy files to a target file or directory",
+ Args: "src... dst",
+ Detail: []string{
+ "src can include 'stdout' or 'stderr' to copy from the script's stdout or stderr buffer.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) < 2 {
+ return nil, ErrUsage
+ }
+
+ dst := s.Path(args[len(args)-1])
+ info, err := os.Stat(dst)
+ dstDir := err == nil && info.IsDir()
+ if len(args) > 2 && !dstDir {
+ return nil, &fs.PathError{Op: "cp", Path: dst, Err: errors.New("destination is not a directory")}
+ }
+
+ for _, arg := range args[:len(args)-1] {
+ var (
+ src string
+ data []byte
+ mode fs.FileMode
+ )
+ switch arg {
+ case "stdout":
+ src = arg
+ data = []byte(s.Stdout())
+ mode = 0666
+ case "stderr":
+ src = arg
+ data = []byte(s.Stderr())
+ mode = 0666
+ default:
+ src = s.Path(arg)
+ info, err := os.Stat(src)
+ if err != nil {
+ return nil, err
+ }
+ mode = info.Mode() & 0777
+ data, err = os.ReadFile(src)
+ if err != nil {
+ return nil, err
+ }
+ }
+ targ := dst
+ if dstDir {
+ targ = filepath.Join(dst, filepath.Base(src))
+ }
+ err := os.WriteFile(targ, data, mode)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+ })
+}
+
+// Echo writes its arguments to stdout, followed by a newline.
+func Echo() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "display a line of text",
+ Args: "string...",
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ var buf strings.Builder
+ for i, arg := range args {
+ if i > 0 {
+ buf.WriteString(" ")
+ }
+ buf.WriteString(arg)
+ }
+ buf.WriteString("\n")
+ out := buf.String()
+
+ // Stuff the result into a callback to satisfy the OutputCommandFunc
+ // interface, even though it isn't really asynchronous even if run in the
+ // background.
+ //
+ // Nobody should be running 'echo' as a background command, but it's not worth
+ // defining yet another interface, and also doesn't seem worth shoehorning
+ // into a SimpleCommand the way we did with Wait.
+ return func(*State) (stdout, stderr string, err error) {
+ return out, "", nil
+ }, nil
+ })
+}
+
+// Env sets or logs the values of environment variables.
+//
+// With no arguments, Env reports all variables in the environment.
+// "key=value" arguments set variables, and arguments without "="
+// cause the corresponding value to be printed to the stdout buffer.
+func Env() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "set or log the values of environment variables",
+ Args: "[key[=value]...]",
+ Detail: []string{
+ "With no arguments, print the script environment to the log.",
+ "Otherwise, add the listed key=value pairs to the environment or print the listed keys.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ out := new(strings.Builder)
+ if len(args) == 0 {
+ for _, kv := range s.env {
+ fmt.Fprintf(out, "%s\n", kv)
+ }
+ } else {
+ for _, env := range args {
+ i := strings.Index(env, "=")
+ if i < 0 {
+ // Display value instead of setting it.
+ fmt.Fprintf(out, "%s=%s\n", env, s.envMap[env])
+ continue
+ }
+ if err := s.Setenv(env[:i], env[i+1:]); err != nil {
+ return nil, err
+ }
+ }
+ }
+ var wait WaitFunc
+ if out.Len() > 0 || len(args) == 0 {
+ wait = func(*State) (stdout, stderr string, err error) {
+ return out.String(), "", nil
+ }
+ }
+ return wait, nil
+ })
+}
+
+// Exec runs an arbitrary executable as a subprocess.
+//
+// When the Script's context is canceled, Exec sends the interrupt signal, then
+// waits for up to the given delay for the subprocess to flush output before
+// terminating it with os.Kill.
+func Exec(cancel func(*exec.Cmd) error, waitDelay time.Duration) Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "run an executable program with arguments",
+ Args: "program [args...]",
+ Detail: []string{
+ "Note that 'exec' does not terminate the script (unlike Unix shells).",
+ },
+ Async: true,
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) < 1 {
+ return nil, ErrUsage
+ }
+
+ // Use the script's PATH to look up the command (if it does not contain a separator)
+ // instead of the test process's PATH (see lookPath).
+ // Don't use filepath.Clean, since that changes "./foo" to "foo".
+ name := filepath.FromSlash(args[0])
+ path := name
+ if !strings.Contains(name, string(filepath.Separator)) {
+ var err error
+ path, err = lookPath(s, name)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return startCommand(s, name, path, args[1:], cancel, waitDelay)
+ })
+}
+
+func startCommand(s *State, name, path string, args []string, cancel func(*exec.Cmd) error, waitDelay time.Duration) (WaitFunc, error) {
+ var (
+ cmd *exec.Cmd
+ stdoutBuf, stderrBuf strings.Builder
+ )
+ for {
+ cmd = exec.CommandContext(s.Context(), path, args...)
+ if cancel == nil {
+ cmd.Cancel = nil
+ } else {
+ cmd.Cancel = func() error { return cancel(cmd) }
+ }
+ cmd.WaitDelay = waitDelay
+ cmd.Args[0] = name
+ cmd.Dir = s.Getwd()
+ cmd.Env = s.env
+ cmd.Stdout = &stdoutBuf
+ cmd.Stderr = &stderrBuf
+ err := cmd.Start()
+ if err == nil {
+ break
+ }
+ if isETXTBSY(err) {
+ // If the script (or its host process) just wrote the executable we're
+ // trying to run, a fork+exec in another thread may be holding open the FD
+ // that we used to write the executable (see https://go.dev/issue/22315).
+ // Since the descriptor should have CLOEXEC set, the problem should
+ // resolve as soon as the forked child reaches its exec call.
+ // Keep retrying until that happens.
+ } else {
+ return nil, err
+ }
+ }
+
+ wait := func(s *State) (stdout, stderr string, err error) {
+ err = cmd.Wait()
+ return stdoutBuf.String(), stderrBuf.String(), err
+ }
+ return wait, nil
+}
+
+// lookPath is (roughly) like exec.LookPath, but it uses the script's current
+// PATH to find the executable.
+func lookPath(s *State, command string) (string, error) {
+ var strEqual func(string, string) bool
+ if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+ // Using GOOS as a proxy for case-insensitive file system.
+ // TODO(bcmills): Remove this assumption.
+ strEqual = strings.EqualFold
+ } else {
+ strEqual = func(a, b string) bool { return a == b }
+ }
+
+ var pathExt []string
+ var searchExt bool
+ var isExecutable func(os.FileInfo) bool
+ if runtime.GOOS == "windows" {
+ // Use the test process's PathExt instead of the script's.
+ // If PathExt is set in the command's environment, cmd.Start fails with
+ // "parameter is invalid". Not sure why.
+ // If the command already has an extension in PathExt (like "cmd.exe")
+ // don't search for other extensions (not "cmd.bat.exe").
+ pathExt = strings.Split(os.Getenv("PathExt"), string(filepath.ListSeparator))
+ searchExt = true
+ cmdExt := filepath.Ext(command)
+ for _, ext := range pathExt {
+ if strEqual(cmdExt, ext) {
+ searchExt = false
+ break
+ }
+ }
+ isExecutable = func(fi os.FileInfo) bool {
+ return fi.Mode().IsRegular()
+ }
+ } else {
+ isExecutable = func(fi os.FileInfo) bool {
+ return fi.Mode().IsRegular() && fi.Mode().Perm()&0111 != 0
+ }
+ }
+
+ pathEnv, _ := s.LookupEnv(pathEnvName())
+ for _, dir := range strings.Split(pathEnv, string(filepath.ListSeparator)) {
+ if dir == "" {
+ continue
+ }
+
+ // Determine whether dir needs a trailing path separator.
+ // Note: we avoid filepath.Join in this function because it cleans the
+ // result: we want to preserve the exact dir prefix from the environment.
+ sep := string(filepath.Separator)
+ if os.IsPathSeparator(dir[len(dir)-1]) {
+ sep = ""
+ }
+
+ if searchExt {
+ ents, err := os.ReadDir(dir)
+ if err != nil {
+ continue
+ }
+ for _, ent := range ents {
+ for _, ext := range pathExt {
+ if !ent.IsDir() && strEqual(ent.Name(), command+ext) {
+ return dir + sep + ent.Name(), nil
+ }
+ }
+ }
+ } else {
+ path := dir + sep + command
+ if fi, err := os.Stat(path); err == nil && isExecutable(fi) {
+ return path, nil
+ }
+ }
+ }
+ return "", &exec.Error{Name: command, Err: exec.ErrNotFound}
+}
+
+// pathEnvName returns the platform-specific variable used by os/exec.LookPath
+// to look up executable names (either "PATH" or "path").
+//
+// TODO(bcmills): Investigate whether we can instead use PATH uniformly and
+// rewrite it to $path when executing subprocesses.
+func pathEnvName() string {
+ switch runtime.GOOS {
+ case "plan9":
+ return "path"
+ default:
+ return "PATH"
+ }
+}
+
+// Exists checks that the named file(s) exist.
+func Exists() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "check that files exist",
+ Args: "[-readonly] [-exec] file...",
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ var readonly, exec bool
+ loop:
+ for len(args) > 0 {
+ switch args[0] {
+ case "-readonly":
+ readonly = true
+ args = args[1:]
+ case "-exec":
+ exec = true
+ args = args[1:]
+ default:
+ break loop
+ }
+ }
+ if len(args) == 0 {
+ return nil, ErrUsage
+ }
+
+ for _, file := range args {
+ file = s.Path(file)
+ info, err := os.Stat(file)
+ if err != nil {
+ return nil, err
+ }
+ if readonly && info.Mode()&0222 != 0 {
+ return nil, fmt.Errorf("%s exists but is writable", file)
+ }
+ if exec && runtime.GOOS != "windows" && info.Mode()&0111 == 0 {
+ return nil, fmt.Errorf("%s exists but is not executable", file)
+ }
+ }
+
+ return nil, nil
+ })
+}
+
+// Grep checks that file content matches a regexp.
+// Like stdout/stderr and unlike Unix grep, it accepts Go regexp syntax.
+//
+// Grep does not modify the State's stdout or stderr buffers.
+// (Its output goes to the script log, not stdout.)
+func Grep() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "find lines in a file that match a pattern",
+ Args: matchUsage + " file",
+ Detail: []string{
+ "The command succeeds if at least one match (or the exact count, if given) is found.",
+ "The -q flag suppresses printing of matches.",
+ },
+ RegexpArgs: firstNonFlag,
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ return nil, match(s, args, "", "grep")
+ })
+}
+
+const matchUsage = "[-count=N] [-q] 'pattern'"
+
+// match implements the Grep, Stdout, and Stderr commands.
+func match(s *State, args []string, text, name string) error {
+ n := 0
+ if len(args) >= 1 && strings.HasPrefix(args[0], "-count=") {
+ var err error
+ n, err = strconv.Atoi(args[0][len("-count="):])
+ if err != nil {
+ return fmt.Errorf("bad -count=: %v", err)
+ }
+ if n < 1 {
+ return fmt.Errorf("bad -count=: must be at least 1")
+ }
+ args = args[1:]
+ }
+ quiet := false
+ if len(args) >= 1 && args[0] == "-q" {
+ quiet = true
+ args = args[1:]
+ }
+
+ isGrep := name == "grep"
+
+ wantArgs := 1
+ if isGrep {
+ wantArgs = 2
+ }
+ if len(args) != wantArgs {
+ return ErrUsage
+ }
+
+ pattern := `(?m)` + args[0]
+ re, err := regexp.Compile(pattern)
+ if err != nil {
+ return err
+ }
+
+ if isGrep {
+ name = args[1] // for error messages
+ data, err := os.ReadFile(s.Path(args[1]))
+ if err != nil {
+ return err
+ }
+ text = string(data)
+ }
+
+ if n > 0 {
+ count := len(re.FindAllString(text, -1))
+ if count != n {
+ return fmt.Errorf("found %d matches for %#q in %s", count, pattern, name)
+ }
+ return nil
+ }
+
+ if !re.MatchString(text) {
+ return fmt.Errorf("no match for %#q in %s", pattern, name)
+ }
+
+ if !quiet {
+ // Print the lines containing the match.
+ loc := re.FindStringIndex(text)
+ for loc[0] > 0 && text[loc[0]-1] != '\n' {
+ loc[0]--
+ }
+ for loc[1] < len(text) && text[loc[1]] != '\n' {
+ loc[1]++
+ }
+ lines := strings.TrimSuffix(text[loc[0]:loc[1]], "\n")
+ s.Logf("matched: %s\n", lines)
+ }
+ return nil
+}
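+
+// Example script usage (illustrative):
+//
+//	grep -count=2 'warning' build.log
+//	stdout -q 'ok'
+//	stderr 'permission denied'
+//
+// The first line requires exactly two matches in build.log; the other two
+// match against the stdout and stderr buffers of the most recent command.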
+
+// Help writes command documentation to the script log.
+func Help() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "log help text for commands and conditions",
+ Args: "[-v] name...",
+ Detail: []string{
+ "To display help for a specific condition, enclose it in brackets: 'help [amd64]'.",
+ "To display complete documentation when listing all commands, pass the -v flag.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if s.engine == nil {
+ return nil, errors.New("no engine configured")
+ }
+
+ verbose := false
+ if len(args) > 0 {
+ verbose = true
+ if args[0] == "-v" {
+ args = args[1:]
+ }
+ }
+
+ var cmds, conds []string
+ for _, arg := range args {
+ if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") {
+ conds = append(conds, arg[1:len(arg)-1])
+ } else {
+ cmds = append(cmds, arg)
+ }
+ }
+
+ out := new(strings.Builder)
+
+ if len(conds) > 0 || (len(args) == 0 && len(s.engine.Conds) > 0) {
+ if conds == nil {
+ out.WriteString("conditions:\n\n")
+ }
+ s.engine.ListConds(out, s, conds...)
+ }
+
+ if len(cmds) > 0 || len(args) == 0 {
+ if len(args) == 0 {
+ out.WriteString("\ncommands:\n\n")
+ }
+ s.engine.ListCmds(out, verbose, cmds...)
+ }
+
+ wait := func(*State) (stdout, stderr string, err error) {
+ return out.String(), "", nil
+ }
+ return wait, nil
+ })
+}
+
+// Mkdir creates a directory and any needed parent directories.
+func Mkdir() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "create directories, if they do not already exist",
+ Args: "path...",
+ Detail: []string{
+ "Unlike Unix mkdir, parent directories are always created if needed.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) < 1 {
+ return nil, ErrUsage
+ }
+ for _, arg := range args {
+ if err := os.MkdirAll(s.Path(arg), 0777); err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+}
+
+// Mv renames an existing file or directory to a new path.
+func Mv() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "rename a file or directory to a new path",
+ Args: "old new",
+ Detail: []string{
+ "OS-specific restrictions may apply when old and new are in different directories.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) != 2 {
+ return nil, ErrUsage
+ }
+ return nil, os.Rename(s.Path(args[0]), s.Path(args[1]))
+ })
+}
+
+// Program returns a new command that runs the named program, found from the
+// host process's PATH (not looked up in the script's PATH).
+func Program(name string, cancel func(*exec.Cmd) error, waitDelay time.Duration) Cmd {
+ var (
+ shortName string
+ summary string
+ lookPathOnce sync.Once
+ path string
+ pathErr error
+ )
+ if filepath.IsAbs(name) {
+ lookPathOnce.Do(func() { path = filepath.Clean(name) })
+ shortName = strings.TrimSuffix(filepath.Base(path), ".exe")
+ summary = "run the '" + shortName + "' program provided by the script host"
+ } else {
+ shortName = name
+ summary = "run the '" + shortName + "' program from the script host's PATH"
+ }
+
+ return Command(
+ CmdUsage{
+ Summary: summary,
+ Args: "[args...]",
+ Async: true,
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ lookPathOnce.Do(func() {
+ path, pathErr = exec.LookPath(name)
+ })
+ if pathErr != nil {
+ return nil, pathErr
+ }
+ return startCommand(s, shortName, path, args, cancel, waitDelay)
+ })
+}
+
+// Replace replaces all occurrences of a string in a file with another string.
+func Replace() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "replace strings in a file",
+ Args: "[old new]... file",
+ Detail: []string{
+ "The 'old' and 'new' arguments are unquoted as if in quoted Go strings.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args)%2 != 1 {
+ return nil, ErrUsage
+ }
+
+ oldNew := make([]string, 0, len(args)-1)
+ for _, arg := range args[:len(args)-1] {
+ s, err := strconv.Unquote(`"` + arg + `"`)
+ if err != nil {
+ return nil, err
+ }
+ oldNew = append(oldNew, s)
+ }
+
+ r := strings.NewReplacer(oldNew...)
+ file := s.Path(args[len(args)-1])
+
+ data, err := os.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+ replaced := r.Replace(string(data))
+
+ return nil, os.WriteFile(file, []byte(replaced), 0666)
+ })
+}
+
+// Rm removes a file or directory.
+//
+// If a directory, Rm also recursively removes that directory's
+// contents.
+func Rm() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "remove a file or directory",
+ Args: "path...",
+ Detail: []string{
+ "If the path is a directory, its contents are removed recursively.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) < 1 {
+ return nil, ErrUsage
+ }
+ for _, arg := range args {
+ if err := removeAll(s.Path(arg)); err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+}
+
+// removeAll removes dir and all files and directories it contains.
+//
+// Unlike os.RemoveAll, removeAll attempts to make the directories writable if
+// needed in order to remove their contents.
+func removeAll(dir string) error {
+ // module cache has 0444 directories;
+ // make them writable in order to remove content.
+ filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error {
+ // chmod not only directories, but also things that we couldn't even stat
+ // due to permission errors: they may also be unreadable directories.
+ if err != nil || info.IsDir() {
+ os.Chmod(path, 0777)
+ }
+ return nil
+ })
+ return robustio.RemoveAll(dir)
+}
+
+// Sleep sleeps for the given Go duration or until the script's context is
+// cancelled, whichever happens first.
+func Sleep() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "sleep for a specified duration",
+ Args: "duration",
+ Detail: []string{
+ "The duration must be given as a Go time.Duration string.",
+ },
+ Async: true,
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) != 1 {
+ return nil, ErrUsage
+ }
+
+ d, err := time.ParseDuration(args[0])
+ if err != nil {
+ return nil, err
+ }
+
+ timer := time.NewTimer(d)
+ wait := func(s *State) (stdout, stderr string, err error) {
+ ctx := s.Context()
+ select {
+ case <-ctx.Done():
+ timer.Stop()
+ return "", "", ctx.Err()
+ case <-timer.C:
+ return "", "", nil
+ }
+ }
+ return wait, nil
+ })
+}
+
+// Stderr searches for a regular expression in the stderr buffer.
+func Stderr() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "find lines in the stderr buffer that match a pattern",
+ Args: matchUsage + " file",
+ Detail: []string{
+ "The command succeeds if at least one match (or the exact count, if given) is found.",
+ "The -q flag suppresses printing of matches.",
+ },
+ RegexpArgs: firstNonFlag,
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ return nil, match(s, args, s.Stderr(), "stderr")
+ })
+}
+
+// Stdout searches for a regular expression in the stdout buffer.
+func Stdout() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "find lines in the stdout buffer that match a pattern",
+ Args: matchUsage + " file",
+ Detail: []string{
+ "The command succeeds if at least one match (or the exact count, if given) is found.",
+ "The -q flag suppresses printing of matches.",
+ },
+ RegexpArgs: firstNonFlag,
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ return nil, match(s, args, s.Stdout(), "stdout")
+ })
+}
+
+// Stop returns a sentinel error that causes script execution to halt
+// and s.Execute to return with a nil error.
+func Stop() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "stop execution of the script",
+ Args: "[msg]",
+ Detail: []string{
+ "The message is written to the script log, but no error is reported from the script engine.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) > 1 {
+ return nil, ErrUsage
+ }
+ // TODO(bcmills): The argument passed to stop seems redundant with comments.
+ // Either use it systematically or remove it.
+ if len(args) == 1 {
+ return nil, stopError{msg: args[0]}
+ }
+ return nil, stopError{}
+ })
+}
+
+// stopError is the sentinel error type returned by the Stop command.
+type stopError struct {
+ msg string
+}
+
+func (s stopError) Error() string {
+ if s.msg == "" {
+ return "stop"
+ }
+ return "stop: " + s.msg
+}
+
+// Symlink creates a symbolic link.
+func Symlink() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "create a symlink",
+ Args: "path -> target",
+ Detail: []string{
+ "Creates path as a symlink to target.",
+ "The '->' token (like in 'ls -l' output on Unix) is required.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) != 3 || args[1] != "->" {
+ return nil, ErrUsage
+ }
+
+ // Note that the link target args[2] is not interpreted with s.Path:
+			// it will be interpreted relative to the directory the link is created in.
+ return nil, os.Symlink(filepath.FromSlash(args[2]), s.Path(args[0]))
+ })
+}
+
+// Wait waits for the completion of background commands.
+//
+// When Wait returns, the stdout and stderr buffers contain the concatenation of
+// the background commands' respective outputs in the order in which those
+// commands were started.
+func Wait() Cmd {
+ return Command(
+ CmdUsage{
+ Summary: "wait for completion of background commands",
+ Args: "",
+ Detail: []string{
+ "Waits for all background commands to complete.",
+ "The output (and any error) from each command is printed to the log in the order in which the commands were started.",
+ "After the call to 'wait', the script's stdout and stderr buffers contain the concatenation of the background commands' outputs.",
+ },
+ },
+ func(s *State, args ...string) (WaitFunc, error) {
+ if len(args) > 0 {
+ return nil, ErrUsage
+ }
+
+ var stdouts, stderrs []string
+ var errs []*CommandError
+ for _, bg := range s.background {
+ stdout, stderr, err := bg.wait(s)
+
+ beforeArgs := ""
+ if len(bg.args) > 0 {
+ beforeArgs = " "
+ }
+ s.Logf("[background] %s%s%s\n", bg.name, beforeArgs, quoteArgs(bg.args))
+
+ if stdout != "" {
+ s.Logf("[stdout]\n%s", stdout)
+ stdouts = append(stdouts, stdout)
+ }
+ if stderr != "" {
+ s.Logf("[stderr]\n%s", stderr)
+ stderrs = append(stderrs, stderr)
+ }
+ if err != nil {
+ s.Logf("[%v]\n", err)
+ }
+ if cmdErr := checkStatus(bg.command, err); cmdErr != nil {
+ errs = append(errs, cmdErr.(*CommandError))
+ }
+ }
+
+ s.stdout = strings.Join(stdouts, "")
+ s.stderr = strings.Join(stderrs, "")
+ s.background = nil
+ if len(errs) > 0 {
+ return nil, waitError{errs: errs}
+ }
+ return nil, nil
+ })
+}
+
+// A waitError wraps one or more errors returned by background commands.
+type waitError struct {
+ errs []*CommandError
+}
+
+func (w waitError) Error() string {
+ b := new(strings.Builder)
+ for i, err := range w.errs {
+ if i != 0 {
+ b.WriteString("\n")
+ }
+ b.WriteString(err.Error())
+ }
+ return b.String()
+}
+
+func (w waitError) Unwrap() error {
+ if len(w.errs) == 1 {
+ return w.errs[0]
+ }
+ return nil
+}
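
// A minimal sketch (not part of this CL) of a custom command built with the same
// Command/CmdUsage helpers used throughout this file; the "hello" behavior and
// greeting text are illustrative assumptions.
func Hello() Cmd {
	return Command(
		CmdUsage{
			Summary: "log a greeting",
			Args:    "[name]",
		},
		func(s *State, args ...string) (WaitFunc, error) {
			if len(args) > 1 {
				return nil, ErrUsage
			}
			who := "world"
			if len(args) == 1 {
				who = args[0]
			}
			// Returning a WaitFunc (rather than writing output directly) lets the
			// engine record the result in the script's stdout buffer and log.
			wait := func(*State) (stdout, stderr string, err error) {
				return "hello, " + who + "\n", "", nil
			}
			return wait, nil
		})
}
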
diff --git a/src/cmd/go/internal/script/cmds_other.go b/src/cmd/go/internal/script/cmds_other.go
new file mode 100644
index 0000000..847b225
--- /dev/null
+++ b/src/cmd/go/internal/script/cmds_other.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(unix || windows)
+
+package script
+
+func isETXTBSY(err error) bool {
+ return false
+}
diff --git a/src/cmd/go/internal/script/cmds_posix.go b/src/cmd/go/internal/script/cmds_posix.go
new file mode 100644
index 0000000..2525f6e
--- /dev/null
+++ b/src/cmd/go/internal/script/cmds_posix.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package script
+
+import (
+ "errors"
+ "syscall"
+)
+
+func isETXTBSY(err error) bool {
+ return errors.Is(err, syscall.ETXTBSY)
+}
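
// A sketch (not part of this CL) of the kind of caller a helper like isETXTBSY
// serves: on Unix, exec'ing a binary that was written moments earlier can fail
// transiently with ETXTBSY, so callers typically retry. The retry count and
// delay are placeholder assumptions, and the snippet assumes os/exec and time
// are imported.
func runWithRetry(newCmd func() *exec.Cmd) error {
	for i := 0; ; i++ {
		err := newCmd().Run()
		if err == nil || !isETXTBSY(err) || i >= 10 {
			return err
		}
		time.Sleep(10 * time.Millisecond) // brief backoff before rebuilding the command and retrying
	}
}
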
diff --git a/src/cmd/go/internal/script/conds.go b/src/cmd/go/internal/script/conds.go
new file mode 100644
index 0000000..d70f274
--- /dev/null
+++ b/src/cmd/go/internal/script/conds.go
@@ -0,0 +1,205 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package script
+
+import (
+ "cmd/go/internal/imports"
+ "fmt"
+ "os"
+ "runtime"
+ "sync"
+)
+
+// DefaultConds returns a set of broadly useful script conditions.
+//
+// Run the 'help' command within a script engine to view a list of the available
+// conditions.
+func DefaultConds() map[string]Cond {
+ conds := make(map[string]Cond)
+
+ conds["GOOS"] = PrefixCondition(
+ "runtime.GOOS == <suffix>",
+ func(_ *State, suffix string) (bool, error) {
+ if suffix == runtime.GOOS {
+ return true, nil
+ }
+ if _, ok := imports.KnownOS[suffix]; !ok {
+ return false, fmt.Errorf("unrecognized GOOS %q", suffix)
+ }
+ return false, nil
+ })
+
+ conds["GOARCH"] = PrefixCondition(
+ "runtime.GOARCH == <suffix>",
+ func(_ *State, suffix string) (bool, error) {
+ if suffix == runtime.GOARCH {
+ return true, nil
+ }
+ if _, ok := imports.KnownArch[suffix]; !ok {
+				return false, fmt.Errorf("unrecognized GOARCH %q", suffix)
+ }
+ return false, nil
+ })
+
+ conds["compiler"] = PrefixCondition(
+ "runtime.Compiler == <suffix>",
+ func(_ *State, suffix string) (bool, error) {
+ if suffix == runtime.Compiler {
+ return true, nil
+ }
+ switch suffix {
+ case "gc", "gccgo":
+ return false, nil
+ default:
+ return false, fmt.Errorf("unrecognized compiler %q", suffix)
+ }
+ })
+
+ conds["root"] = BoolCondition("os.Geteuid() == 0", os.Geteuid() == 0)
+
+ return conds
+}
+
+// Condition returns a Cond with the given summary and evaluation function.
+func Condition(summary string, eval func(*State) (bool, error)) Cond {
+ return &funcCond{eval: eval, usage: CondUsage{Summary: summary}}
+}
+
+type funcCond struct {
+ eval func(*State) (bool, error)
+ usage CondUsage
+}
+
+func (c *funcCond) Usage() *CondUsage { return &c.usage }
+
+func (c *funcCond) Eval(s *State, suffix string) (bool, error) {
+ if suffix != "" {
+ return false, ErrUsage
+ }
+ return c.eval(s)
+}
+
+// PrefixCondition returns a Cond with the given summary and evaluation function.
+func PrefixCondition(summary string, eval func(*State, string) (bool, error)) Cond {
+ return &prefixCond{eval: eval, usage: CondUsage{Summary: summary, Prefix: true}}
+}
+
+type prefixCond struct {
+ eval func(*State, string) (bool, error)
+ usage CondUsage
+}
+
+func (c *prefixCond) Usage() *CondUsage { return &c.usage }
+
+func (c *prefixCond) Eval(s *State, suffix string) (bool, error) {
+ return c.eval(s, suffix)
+}
+
+// BoolCondition returns a Cond with the given truth value and summary.
+// The Cond rejects the use of condition suffixes.
+func BoolCondition(summary string, v bool) Cond {
+ return &boolCond{v: v, usage: CondUsage{Summary: summary}}
+}
+
+type boolCond struct {
+ v bool
+ usage CondUsage
+}
+
+func (b *boolCond) Usage() *CondUsage { return &b.usage }
+
+func (b *boolCond) Eval(s *State, suffix string) (bool, error) {
+ if suffix != "" {
+ return false, ErrUsage
+ }
+ return b.v, nil
+}
+
+// OnceCondition returns a Cond that calls eval the first time the condition is
+// evaluated. Future calls reuse the same result.
+//
+// The eval function is not passed a *State because the condition is cached
+// across all execution states and must not vary by state.
+func OnceCondition(summary string, eval func() (bool, error)) Cond {
+ return &onceCond{eval: eval, usage: CondUsage{Summary: summary}}
+}
+
+type onceCond struct {
+ once sync.Once
+ v bool
+ err error
+ eval func() (bool, error)
+ usage CondUsage
+}
+
+func (l *onceCond) Usage() *CondUsage { return &l.usage }
+
+func (l *onceCond) Eval(s *State, suffix string) (bool, error) {
+ if suffix != "" {
+ return false, ErrUsage
+ }
+ l.once.Do(func() { l.v, l.err = l.eval() })
+ return l.v, l.err
+}
+
+// CachedCondition is like Condition but only calls eval the first time the
+// condition is evaluated for a given suffix.
+// Future calls with the same suffix reuse the earlier result.
+//
+// The eval function is not passed a *State because the condition is cached
+// across all execution states and must not vary by state.
+func CachedCondition(summary string, eval func(string) (bool, error)) Cond {
+ return &cachedCond{eval: eval, usage: CondUsage{Summary: summary, Prefix: true}}
+}
+
+type cachedCond struct {
+ m sync.Map
+ eval func(string) (bool, error)
+ usage CondUsage
+}
+
+func (c *cachedCond) Usage() *CondUsage { return &c.usage }
+
+func (c *cachedCond) Eval(_ *State, suffix string) (bool, error) {
+ for {
+ var ready chan struct{}
+
+ v, loaded := c.m.Load(suffix)
+ if !loaded {
+ ready = make(chan struct{})
+ v, loaded = c.m.LoadOrStore(suffix, (<-chan struct{})(ready))
+
+ if !loaded {
+ inPanic := true
+ defer func() {
+ if inPanic {
+ c.m.Delete(suffix)
+ }
+ close(ready)
+ }()
+
+ b, err := c.eval(suffix)
+ inPanic = false
+
+ if err == nil {
+ c.m.Store(suffix, b)
+ return b, nil
+ } else {
+ c.m.Store(suffix, err)
+ return false, err
+ }
+ }
+ }
+
+ switch v := v.(type) {
+ case bool:
+ return v, nil
+ case error:
+ return false, v
+ case <-chan struct{}:
+ <-v
+ }
+ }
+}
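
// An illustrative sketch (not part of this CL) of extra conditions built from the
// constructors above; the "env" and "parallel" names are assumptions, not
// conditions registered by this CL.
func customConds() map[string]Cond {
	conds := DefaultConds()

	// Prefix condition with a per-suffix cache, usable as e.g. [env:HOME].
	// Caching across states is safe here because it reads the host process
	// environment, not the per-script environment.
	conds["env"] = CachedCondition(
		"the named variable is set in the host process environment",
		func(name string) (bool, error) {
			_, ok := os.LookupEnv(name)
			return ok, nil
		})

	// Evaluated once per process and reused across all states.
	conds["parallel"] = OnceCondition(
		"GOMAXPROCS is greater than 1",
		func() (bool, error) {
			return runtime.GOMAXPROCS(0) > 1, nil
		})

	return conds
}
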
diff --git a/src/cmd/go/internal/script/engine.go b/src/cmd/go/internal/script/engine.go
new file mode 100644
index 0000000..43054a2
--- /dev/null
+++ b/src/cmd/go/internal/script/engine.go
@@ -0,0 +1,788 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package script implements a small, customizable, platform-agnostic scripting
+// language.
+//
+// Scripts are run by an [Engine] configured with a set of available commands
+// and conditions that guard those commands. Each script has an associated
+// working directory and environment, along with a buffer containing the stdout
+// and stderr output of a prior command, tracked in a [State] that commands can
+// inspect and modify.
+//
+// The default commands configured by [NewEngine] resemble a simplified Unix
+// shell.
+//
+// # Script Language
+//
+// Each line of a script is parsed into a sequence of space-separated command
+// words, with environment variable expansion within each word and # marking an
+// end-of-line comment. Additional variables named ':' and '/' are expanded
+// within script arguments (expanding to the value of os.PathListSeparator and
+// os.PathSeparator respectively) but are not inherited in subprocess
+// environments.
+//
+// Adding single quotes around text keeps spaces in that text from being treated
+// as word separators and also disables environment variable expansion.
+// Inside a single-quoted block of text, a repeated single quote indicates
+// a literal single quote, as in:
+//
+// 'Don''t communicate by sharing memory.'
+//
+// A line beginning with # is a comment and conventionally explains what is
+// being done or tested at the start of a new section of the script.
+//
+// Commands are executed one at a time, and errors are checked for each command;
+// if any command fails unexpectedly, no subsequent commands in the script are
+// executed. The command prefix ! indicates that the command on the rest of the
+// line (typically go or a matching predicate) must fail instead of succeeding.
+// The command prefix ? indicates that the command may or may not succeed, but
+// the script should continue regardless.
+//
+// The command prefix [cond] indicates that the command on the rest of the line
+// should only run when the condition is satisfied.
+//
+// A condition can be negated: [!root] means to run the rest of the line only if
+// the user is not root. Multiple conditions may be given for a single command,
+// for example, '[linux] [amd64] skip'. The command will run if all conditions
+// are satisfied.
+package script
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "time"
+)
+
+// An Engine stores the configuration for executing a set of scripts.
+//
+// The same Engine may execute multiple scripts concurrently.
+type Engine struct {
+ Cmds map[string]Cmd
+ Conds map[string]Cond
+
+ // If Quiet is true, Execute deletes log prints from the previous
+ // section when starting a new section.
+ Quiet bool
+}
+
+// NewEngine returns an Engine configured with a basic set of commands and conditions.
+func NewEngine() *Engine {
+ return &Engine{
+ Cmds: DefaultCmds(),
+ Conds: DefaultConds(),
+ }
+}
+
+// A Cmd is a command that is available to a script.
+type Cmd interface {
+ // Run begins running the command.
+ //
+ // If the command produces output or can be run in the background, run returns
+ // a WaitFunc that will be called to obtain the result of the command and
+ // update the engine's stdout and stderr buffers.
+ //
+ // Run itself and the returned WaitFunc may inspect and/or modify the State,
+ // but the State's methods must not be called concurrently after Run has
+ // returned.
+ //
+ // Run may retain and access the args slice until the WaitFunc has returned.
+ Run(s *State, args ...string) (WaitFunc, error)
+
+ // Usage returns the usage for the command, which the caller must not modify.
+ Usage() *CmdUsage
+}
+
+// A WaitFunc is a function called to retrieve the results of a Cmd.
+type WaitFunc func(*State) (stdout, stderr string, err error)
+
+// A CmdUsage describes the usage of a Cmd, independent of its name
+// (which can change based on its registration).
+type CmdUsage struct {
+ Summary string // in the style of the Name section of a Unix 'man' page, omitting the name
+ Args string // a brief synopsis of the command's arguments (only)
+ Detail []string // zero or more sentences in the style of the Description section of a Unix 'man' page
+
+ // If Async is true, the Cmd is meaningful to run in the background, and its
+ // Run method must return either a non-nil WaitFunc or a non-nil error.
+ Async bool
+
+ // RegexpArgs reports which arguments, if any, should be treated as regular
+ // expressions. It takes as input the raw, unexpanded arguments and returns
+ // the list of argument indices that will be interpreted as regular
+ // expressions.
+ //
+ // If RegexpArgs is nil, all arguments are assumed not to be regular
+ // expressions.
+ RegexpArgs func(rawArgs ...string) []int
+}
+
+// A Cond is a condition deciding whether a command should be run.
+type Cond interface {
+ // Eval reports whether the condition applies to the given State.
+ //
+ // If the condition's usage reports that it is a prefix,
+ // the condition must be used with a suffix.
+ // Otherwise, the passed-in suffix argument is always the empty string.
+ Eval(s *State, suffix string) (bool, error)
+
+ // Usage returns the usage for the condition, which the caller must not modify.
+ Usage() *CondUsage
+}
+
+// A CondUsage describes the usage of a Cond, independent of its name
+// (which can change based on its registration).
+type CondUsage struct {
+ Summary string // a single-line summary of when the condition is true
+
+ // If Prefix is true, the condition is a prefix and requires a
+ // colon-separated suffix (like "[GOOS:linux]" for the "GOOS" condition).
+ // The suffix may be the empty string (like "[prefix:]").
+ Prefix bool
+}
+
+// Execute reads and executes script, writing the output to log.
+//
+// Execute stops and returns an error at the first command that does not succeed.
+// The returned error's text begins with "file:line: ".
+//
+// If the script runs to completion or ends by a 'stop' command,
+// Execute returns nil.
+//
+// Execute does not stop background commands started by the script
+// before returning. To stop those, use [State.CloseAndWait] or the
+// [Wait] command.
+func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Writer) (err error) {
+ defer func(prev *Engine) { s.engine = prev }(s.engine)
+ s.engine = e
+
+ var sectionStart time.Time
+ // endSection flushes the logs for the current section from s.log to log.
+ // ok indicates whether all commands in the section succeeded.
+ endSection := func(ok bool) error {
+ var err error
+ if sectionStart.IsZero() {
+ // We didn't write a section header or record a timestamp, so just dump the
+ // whole log without those.
+ if s.log.Len() > 0 {
+ err = s.flushLog(log)
+ }
+ } else if s.log.Len() == 0 {
+ // Adding elapsed time for doing nothing is meaningless, so don't.
+ _, err = io.WriteString(log, "\n")
+ } else {
+ // Insert elapsed time for section at the end of the section's comment.
+ _, err = fmt.Fprintf(log, " (%.3fs)\n", time.Since(sectionStart).Seconds())
+
+ if err == nil && (!ok || !e.Quiet) {
+ err = s.flushLog(log)
+ } else {
+ s.log.Reset()
+ }
+ }
+
+ sectionStart = time.Time{}
+ return err
+ }
+
+ var lineno int
+ lineErr := func(err error) error {
+ if errors.As(err, new(*CommandError)) {
+ return err
+ }
+ return fmt.Errorf("%s:%d: %w", file, lineno, err)
+ }
+
+ // In case of failure or panic, flush any pending logs for the section.
+ defer func() {
+ if sErr := endSection(false); sErr != nil && err == nil {
+ err = lineErr(sErr)
+ }
+ }()
+
+ for {
+ if err := s.ctx.Err(); err != nil {
+ // This error wasn't produced by any particular command,
+ // so don't wrap it in a CommandError.
+ return lineErr(err)
+ }
+
+ line, err := script.ReadString('\n')
+ if err == io.EOF {
+ if line == "" {
+ break // Reached the end of the script.
+ }
+ // If the script doesn't end in a newline, interpret the final line.
+ } else if err != nil {
+ return lineErr(err)
+ }
+ line = strings.TrimSuffix(line, "\n")
+ lineno++
+
+ // The comment character "#" at the start of the line delimits a section of
+ // the script.
+ if strings.HasPrefix(line, "#") {
+ // If there was a previous section, the fact that we are starting a new
+ // one implies the success of the previous one.
+ //
+ // At the start of the script, the state may also contain accumulated logs
+ // from commands executed on the State outside of the engine in order to
+ // set it up; flush those logs too.
+ if err := endSection(true); err != nil {
+ return lineErr(err)
+ }
+
+ // Log the section start without a newline so that we can add
+ // a timestamp for the section when it ends.
+ _, err = fmt.Fprintf(log, "%s", line)
+ sectionStart = time.Now()
+ if err != nil {
+ return lineErr(err)
+ }
+ continue
+ }
+
+ cmd, err := parse(file, lineno, line)
+ if cmd == nil && err == nil {
+ continue // Ignore blank lines.
+ }
+ s.Logf("> %s\n", line)
+ if err != nil {
+ return lineErr(err)
+ }
+
+ // Evaluate condition guards.
+ ok, err := e.conditionsActive(s, cmd.conds)
+ if err != nil {
+ return lineErr(err)
+ }
+ if !ok {
+ s.Logf("[condition not met]\n")
+ continue
+ }
+
+ impl := e.Cmds[cmd.name]
+
+ // Expand variables in arguments.
+ var regexpArgs []int
+ if impl != nil {
+ usage := impl.Usage()
+ if usage.RegexpArgs != nil {
+ // First join rawArgs without expansion to pass to RegexpArgs.
+ rawArgs := make([]string, 0, len(cmd.rawArgs))
+ for _, frags := range cmd.rawArgs {
+ var b strings.Builder
+ for _, frag := range frags {
+ b.WriteString(frag.s)
+ }
+ rawArgs = append(rawArgs, b.String())
+ }
+ regexpArgs = usage.RegexpArgs(rawArgs...)
+ }
+ }
+ cmd.args = expandArgs(s, cmd.rawArgs, regexpArgs)
+
+ // Run the command.
+ err = e.runCommand(s, cmd, impl)
+ if err != nil {
+ if stop := (stopError{}); errors.As(err, &stop) {
+ // Since the 'stop' command halts execution of the entire script,
+ // log its message separately from the section in which it appears.
+ err = endSection(true)
+ s.Logf("%v\n", stop)
+ if err == nil {
+ return nil
+ }
+ }
+ return lineErr(err)
+ }
+ }
+
+ if err := endSection(true); err != nil {
+ return lineErr(err)
+ }
+ return nil
+}
+
+// A command is a complete command parsed from a script.
+type command struct {
+ file string
+ line int
+ want expectedStatus
+ conds []condition // all must be satisfied
+ name string // the name of the command; must be non-empty
+ rawArgs [][]argFragment
+ args []string // shell-expanded arguments following name
+ background bool // command should run in background (ends with a trailing &)
+}
+
+// An expectedStatus describes the expected outcome of a command.
+// Script execution halts when a command does not match its expected status.
+type expectedStatus string
+
+const (
+ success expectedStatus = ""
+ failure expectedStatus = "!"
+ successOrFailure expectedStatus = "?"
+)
+
+type argFragment struct {
+ s string
+ quoted bool // if true, disable variable expansion for this fragment
+}
+
+type condition struct {
+ want bool
+ tag string
+}
+
+const argSepChars = " \t\r\n#"
+
+// parse parses a single line as a list of space-separated arguments,
+// subject to environment variable expansion (but not resplitting).
+// Single quotes around text disable splitting and expansion.
+// To embed a single quote, double it:
+//
+// 'Don''t communicate by sharing memory.'
+func parse(filename string, lineno int, line string) (cmd *command, err error) {
+ cmd = &command{file: filename, line: lineno}
+ var (
+ rawArg []argFragment // text fragments of current arg so far (need to add line[start:i])
+ start = -1 // if >= 0, position where current arg text chunk starts
+ quoted = false // currently processing quoted text
+ )
+
+ flushArg := func() error {
+ if len(rawArg) == 0 {
+ return nil // Nothing to flush.
+ }
+ defer func() { rawArg = nil }()
+
+ if cmd.name == "" && len(rawArg) == 1 && !rawArg[0].quoted {
+ arg := rawArg[0].s
+
+ // Command prefix ! means negate the expectations about this command:
+ // go command should fail, match should not be found, etc.
+ // Prefix ? means allow either success or failure.
+ switch want := expectedStatus(arg); want {
+ case failure, successOrFailure:
+ if cmd.want != "" {
+ return errors.New("duplicated '!' or '?' token")
+ }
+ cmd.want = want
+ return nil
+ }
+
+ // Command prefix [cond] means only run this command if cond is satisfied.
+ if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") {
+ want := true
+ arg = strings.TrimSpace(arg[1 : len(arg)-1])
+ if strings.HasPrefix(arg, "!") {
+ want = false
+ arg = strings.TrimSpace(arg[1:])
+ }
+ if arg == "" {
+ return errors.New("empty condition")
+ }
+ cmd.conds = append(cmd.conds, condition{want: want, tag: arg})
+ return nil
+ }
+
+ if arg == "" {
+ return errors.New("empty command")
+ }
+ cmd.name = arg
+ return nil
+ }
+
+ cmd.rawArgs = append(cmd.rawArgs, rawArg)
+ return nil
+ }
+
+ for i := 0; ; i++ {
+ if !quoted && (i >= len(line) || strings.ContainsRune(argSepChars, rune(line[i]))) {
+ // Found arg-separating space.
+ if start >= 0 {
+ rawArg = append(rawArg, argFragment{s: line[start:i], quoted: false})
+ start = -1
+ }
+ if err := flushArg(); err != nil {
+ return nil, err
+ }
+ if i >= len(line) || line[i] == '#' {
+ break
+ }
+ continue
+ }
+ if i >= len(line) {
+ return nil, errors.New("unterminated quoted argument")
+ }
+ if line[i] == '\'' {
+ if !quoted {
+ // starting a quoted chunk
+ if start >= 0 {
+ rawArg = append(rawArg, argFragment{s: line[start:i], quoted: false})
+ }
+ start = i + 1
+ quoted = true
+ continue
+ }
+ // 'foo''bar' means foo'bar, like in rc shell and Pascal.
+ if i+1 < len(line) && line[i+1] == '\'' {
+ rawArg = append(rawArg, argFragment{s: line[start:i], quoted: true})
+ start = i + 1
+ i++ // skip over second ' before next iteration
+ continue
+ }
+ // ending a quoted chunk
+ rawArg = append(rawArg, argFragment{s: line[start:i], quoted: true})
+ start = i + 1
+ quoted = false
+ continue
+ }
+ // found character worth saving; make sure we're saving
+ if start < 0 {
+ start = i
+ }
+ }
+
+ if cmd.name == "" {
+ if cmd.want != "" || len(cmd.conds) > 0 || len(cmd.rawArgs) > 0 || cmd.background {
+ // The line contains a command prefix or suffix, but no actual command.
+ return nil, errors.New("missing command")
+ }
+
+ // The line is blank, or contains only a comment.
+ return nil, nil
+ }
+
+ if n := len(cmd.rawArgs); n > 0 {
+ last := cmd.rawArgs[n-1]
+ if len(last) == 1 && !last[0].quoted && last[0].s == "&" {
+ cmd.background = true
+ cmd.rawArgs = cmd.rawArgs[:n-1]
+ }
+ }
+ return cmd, nil
+}
+
+// expandArgs expands the shell variables in rawArgs and joins them to form the
+// final arguments to pass to a command.
+func expandArgs(s *State, rawArgs [][]argFragment, regexpArgs []int) []string {
+ args := make([]string, 0, len(rawArgs))
+ for i, frags := range rawArgs {
+ isRegexp := false
+ for _, j := range regexpArgs {
+ if i == j {
+ isRegexp = true
+ break
+ }
+ }
+
+ var b strings.Builder
+ for _, frag := range frags {
+ if frag.quoted {
+ b.WriteString(frag.s)
+ } else {
+ b.WriteString(s.ExpandEnv(frag.s, isRegexp))
+ }
+ }
+ args = append(args, b.String())
+ }
+ return args
+}
+
+// quoteArgs returns a string that parse would parse as args when passed to a command.
+//
+// TODO(bcmills): This function should have a fuzz test.
+func quoteArgs(args []string) string {
+ var b strings.Builder
+ for i, arg := range args {
+ if i > 0 {
+ b.WriteString(" ")
+ }
+ if strings.ContainsAny(arg, "'"+argSepChars) {
+ // Quote the argument to a form that would be parsed as a single argument.
+ b.WriteString("'")
+ b.WriteString(strings.ReplaceAll(arg, "'", "''"))
+ b.WriteString("'")
+ } else {
+ b.WriteString(arg)
+ }
+ }
+ return b.String()
+}
+
+func (e *Engine) conditionsActive(s *State, conds []condition) (bool, error) {
+ for _, cond := range conds {
+ var impl Cond
+ prefix, suffix, ok := strings.Cut(cond.tag, ":")
+ if ok {
+ impl = e.Conds[prefix]
+ if impl == nil {
+ return false, fmt.Errorf("unknown condition prefix %q", prefix)
+ }
+ if !impl.Usage().Prefix {
+ return false, fmt.Errorf("condition %q cannot be used with a suffix", prefix)
+ }
+ } else {
+ impl = e.Conds[cond.tag]
+ if impl == nil {
+ return false, fmt.Errorf("unknown condition %q", cond.tag)
+ }
+ if impl.Usage().Prefix {
+ return false, fmt.Errorf("condition %q requires a suffix", cond.tag)
+ }
+ }
+ active, err := impl.Eval(s, suffix)
+
+ if err != nil {
+ return false, fmt.Errorf("evaluating condition %q: %w", cond.tag, err)
+ }
+ if active != cond.want {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func (e *Engine) runCommand(s *State, cmd *command, impl Cmd) error {
+ if impl == nil {
+ return cmdError(cmd, errors.New("unknown command"))
+ }
+
+ async := impl.Usage().Async
+ if cmd.background && !async {
+ return cmdError(cmd, errors.New("command cannot be run in background"))
+ }
+
+ wait, runErr := impl.Run(s, cmd.args...)
+ if wait == nil {
+ if async && runErr == nil {
+ return cmdError(cmd, errors.New("internal error: async command returned a nil WaitFunc"))
+ }
+ return checkStatus(cmd, runErr)
+ }
+ if runErr != nil {
+ return cmdError(cmd, errors.New("internal error: command returned both an error and a WaitFunc"))
+ }
+
+ if cmd.background {
+ s.background = append(s.background, backgroundCmd{
+ command: cmd,
+ wait: wait,
+ })
+ // Clear stdout and stderr, since they no longer correspond to the last
+ // command executed.
+ s.stdout = ""
+ s.stderr = ""
+ return nil
+ }
+
+ if wait != nil {
+ stdout, stderr, waitErr := wait(s)
+ s.stdout = stdout
+ s.stderr = stderr
+ if stdout != "" {
+ s.Logf("[stdout]\n%s", stdout)
+ }
+ if stderr != "" {
+ s.Logf("[stderr]\n%s", stderr)
+ }
+ if cmdErr := checkStatus(cmd, waitErr); cmdErr != nil {
+ return cmdErr
+ }
+ if waitErr != nil {
+ // waitErr was expected (by cmd.want), so log it instead of returning it.
+ s.Logf("[%v]\n", waitErr)
+ }
+ }
+ return nil
+}
+
+func checkStatus(cmd *command, err error) error {
+ if err == nil {
+ if cmd.want == failure {
+ return cmdError(cmd, ErrUnexpectedSuccess)
+ }
+ return nil
+ }
+
+ if s := (stopError{}); errors.As(err, &s) {
+ // This error originated in the Stop command.
+ // Propagate it as-is.
+ return cmdError(cmd, err)
+ }
+
+ if w := (waitError{}); errors.As(err, &w) {
+ // This error was surfaced from a background process by a call to Wait.
+ // Add a call frame for Wait itself, but ignore its "want" field.
+ // (Wait itself cannot fail to wait on commands or else it would leak
+ // processes and/or goroutines — so a negative assertion for it would be at
+ // best ambiguous.)
+ return cmdError(cmd, err)
+ }
+
+ if cmd.want == success {
+ return cmdError(cmd, err)
+ }
+
+ if cmd.want == failure && (errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)) {
+ // The command was terminated because the script is no longer interested in
+ // its output, so we don't know what it would have done had it run to
+ // completion — for all we know, it could have exited without error if it
+ // ran just a smidge faster.
+ return cmdError(cmd, err)
+ }
+
+ return nil
+}
+
+// ListCmds prints to w a list of the named commands,
+// annotating each with its arguments and a short usage summary.
+// If verbose is true, ListCmds prints full details for each command.
+//
+// Each of the name arguments should be a command name.
+// If no names are passed as arguments, ListCmds lists all the
+// commands registered in e.
+func (e *Engine) ListCmds(w io.Writer, verbose bool, names ...string) error {
+ if names == nil {
+ names = make([]string, 0, len(e.Cmds))
+ for name := range e.Cmds {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ }
+
+ for _, name := range names {
+ cmd := e.Cmds[name]
+ usage := cmd.Usage()
+
+ suffix := ""
+ if usage.Async {
+ suffix = " [&]"
+ }
+
+ _, err := fmt.Fprintf(w, "%s %s%s\n\t%s\n", name, usage.Args, suffix, usage.Summary)
+ if err != nil {
+ return err
+ }
+
+ if verbose {
+ if _, err := io.WriteString(w, "\n"); err != nil {
+ return err
+ }
+ for _, line := range usage.Detail {
+ if err := wrapLine(w, line, 60, "\t"); err != nil {
+ return err
+ }
+ }
+ if _, err := io.WriteString(w, "\n"); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func wrapLine(w io.Writer, line string, cols int, indent string) error {
+ line = strings.TrimLeft(line, " ")
+ for len(line) > cols {
+ bestSpace := -1
+ for i, r := range line {
+ if r == ' ' {
+ if i <= cols || bestSpace < 0 {
+ bestSpace = i
+ }
+ if i > cols {
+ break
+ }
+ }
+ }
+ if bestSpace < 0 {
+ break
+ }
+
+ if _, err := fmt.Fprintf(w, "%s%s\n", indent, line[:bestSpace]); err != nil {
+ return err
+ }
+ line = line[bestSpace+1:]
+ }
+
+ _, err := fmt.Fprintf(w, "%s%s\n", indent, line)
+ return err
+}
+
+// ListConds prints to w a list of conditions, one per line,
+// annotating each with a description and whether the condition
+// is true in the state s (if s is non-nil).
+//
+// Each of the tag arguments should be a condition string of
+// the form "name" or "name:suffix". If no tags are passed as
+// arguments, ListConds lists all conditions registered in
+// the engine e.
+func (e *Engine) ListConds(w io.Writer, s *State, tags ...string) error {
+ if tags == nil {
+ tags = make([]string, 0, len(e.Conds))
+ for name := range e.Conds {
+ tags = append(tags, name)
+ }
+ sort.Strings(tags)
+ }
+
+ for _, tag := range tags {
+ if prefix, suffix, ok := strings.Cut(tag, ":"); ok {
+ cond := e.Conds[prefix]
+ if cond == nil {
+ return fmt.Errorf("unknown condition prefix %q", prefix)
+ }
+ usage := cond.Usage()
+ if !usage.Prefix {
+ return fmt.Errorf("condition %q cannot be used with a suffix", prefix)
+ }
+
+ activeStr := ""
+ if s != nil {
+ if active, _ := cond.Eval(s, suffix); active {
+ activeStr = " (active)"
+ }
+ }
+ _, err := fmt.Fprintf(w, "[%s]%s\n\t%s\n", tag, activeStr, usage.Summary)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ cond := e.Conds[tag]
+ if cond == nil {
+ return fmt.Errorf("unknown condition %q", tag)
+ }
+ var err error
+ usage := cond.Usage()
+ if usage.Prefix {
+ _, err = fmt.Fprintf(w, "[%s:*]\n\t%s\n", tag, usage.Summary)
+ } else {
+ activeStr := ""
+ if s != nil {
+ if ok, _ := cond.Eval(s, ""); ok {
+ activeStr = " (active)"
+ }
+ }
+ _, err = fmt.Fprintf(w, "[%s]%s\n\t%s\n", tag, activeStr, usage.Summary)
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
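
// A sketch (not part of this CL) of the intended wiring of Engine, State, and
// Execute. The inline script text, the file name, and the assumption that
// DefaultCmds registers Mkdir and Stop under the names "mkdir" and "stop" are
// illustrative.
func runInlineScript(ctx context.Context, workdir string) error {
	eng := NewEngine()

	st, err := NewState(ctx, workdir, nil) // nil initialEnv copies os.Environ()
	if err != nil {
		return err
	}

	const src = "# make a directory, then stop\nmkdir sub\nstop 'done'\n"
	log := new(strings.Builder)

	execErr := eng.Execute(st, "inline.txt", bufio.NewReader(strings.NewReader(src)), log)
	if closeErr := st.CloseAndWait(log); execErr == nil {
		execErr = closeErr
	}
	fmt.Print(log.String())
	return execErr
}
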
diff --git a/src/cmd/go/internal/script/errors.go b/src/cmd/go/internal/script/errors.go
new file mode 100644
index 0000000..7f43e72
--- /dev/null
+++ b/src/cmd/go/internal/script/errors.go
@@ -0,0 +1,64 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package script
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrUnexpectedSuccess indicates that a script command that was expected to
+// fail (as indicated by a "!" prefix) instead completed successfully.
+var ErrUnexpectedSuccess = errors.New("unexpected success")
+
+// A CommandError describes an error resulting from attempting to execute a
+// specific command.
+type CommandError struct {
+ File string
+ Line int
+ Op string
+ Args []string
+ Err error
+}
+
+func cmdError(cmd *command, err error) *CommandError {
+ return &CommandError{
+ File: cmd.file,
+ Line: cmd.line,
+ Op: cmd.name,
+ Args: cmd.args,
+ Err: err,
+ }
+}
+
+func (e *CommandError) Error() string {
+ if len(e.Args) == 0 {
+ return fmt.Sprintf("%s:%d: %s: %v", e.File, e.Line, e.Op, e.Err)
+ }
+ return fmt.Sprintf("%s:%d: %s %s: %v", e.File, e.Line, e.Op, quoteArgs(e.Args), e.Err)
+}
+
+func (e *CommandError) Unwrap() error { return e.Err }
+
+// A UsageError reports the valid arguments for a command.
+//
+// It may be returned in response to invalid arguments.
+type UsageError struct {
+ Name string
+ Command Cmd
+}
+
+func (e *UsageError) Error() string {
+ usage := e.Command.Usage()
+ suffix := ""
+ if usage.Async {
+ suffix = " [&]"
+ }
+ return fmt.Sprintf("usage: %s %s%s", e.Name, usage.Args, suffix)
+}
+
+// ErrUsage may be returned by a Command to indicate that it was called with
+// invalid arguments; its Usage method may be called to obtain details.
+var ErrUsage = errors.New("invalid usage")
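
// A sketch (not part of this CL) of how a caller of Engine.Execute might
// classify the returned error using the types above; the message strings are
// illustrative.
func describeScriptError(err error) string {
	var cmdErr *CommandError
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, ErrUnexpectedSuccess):
		return "a '!' command unexpectedly succeeded"
	case errors.As(err, &cmdErr):
		return fmt.Sprintf("%s:%d: command %q failed: %v", cmdErr.File, cmdErr.Line, cmdErr.Op, cmdErr.Err)
	default:
		return err.Error()
	}
}
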
diff --git a/src/cmd/go/internal/script/scripttest/scripttest.go b/src/cmd/go/internal/script/scripttest/scripttest.go
new file mode 100644
index 0000000..0696624
--- /dev/null
+++ b/src/cmd/go/internal/script/scripttest/scripttest.go
@@ -0,0 +1,143 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scripttest adapts the script engine for use in tests.
+package scripttest
+
+import (
+ "bufio"
+ "cmd/go/internal/script"
+ "errors"
+ "io"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+// DefaultCmds returns a set of broadly useful script commands.
+//
+// This set includes all of the commands in script.DefaultCmds,
+// as well as a "skip" command that halts the script and causes the
+// testing.TB passed to Run to be skipped.
+func DefaultCmds() map[string]script.Cmd {
+ cmds := script.DefaultCmds()
+ cmds["skip"] = Skip()
+ return cmds
+}
+
+// DefaultConds returns a set of broadly useful script conditions.
+//
+// This set includes all of the conditions in script.DefaultConds,
+// as well as:
+//
+// - Conditions of the form "exec:foo" are active when the executable "foo" is
+// found in the test process's PATH, and inactive when the executable is
+// not found.
+//
+// - "short" is active when testing.Short() is true.
+//
+// - "verbose" is active when testing.Verbose() is true.
+func DefaultConds() map[string]script.Cond {
+ conds := script.DefaultConds()
+ conds["exec"] = CachedExec()
+ conds["short"] = script.BoolCondition("testing.Short()", testing.Short())
+ conds["verbose"] = script.BoolCondition("testing.Verbose()", testing.Verbose())
+ return conds
+}
+
+// Run runs the script from the given filename starting at the given initial state.
+// When the script completes, Run closes the state.
+func Run(t testing.TB, e *script.Engine, s *script.State, filename string, testScript io.Reader) {
+ t.Helper()
+ err := func() (err error) {
+ log := new(strings.Builder)
+ log.WriteString("\n") // Start output on a new line for consistent indentation.
+
+ // Defer writing to the test log in case the script engine panics during execution,
+ // but write the log before we write the final "skip" or "FAIL" line.
+ t.Helper()
+ defer func() {
+ t.Helper()
+
+ if closeErr := s.CloseAndWait(log); err == nil {
+ err = closeErr
+ }
+
+ if log.Len() > 0 {
+ t.Log(strings.TrimSuffix(log.String(), "\n"))
+ }
+ }()
+
+ if testing.Verbose() {
+ // Add the environment to the start of the script log.
+ wait, err := script.Env().Run(s)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if wait != nil {
+ stdout, stderr, err := wait(s)
+ if err != nil {
+ t.Fatalf("env: %v\n%s", err, stderr)
+ }
+ if len(stdout) > 0 {
+ s.Logf("%s\n", stdout)
+ }
+ }
+ }
+
+ return e.Execute(s, filename, bufio.NewReader(testScript), log)
+ }()
+
+ if skip := (skipError{}); errors.As(err, &skip) {
+ if skip.msg == "" {
+ t.Skip("SKIP")
+ } else {
+ t.Skipf("SKIP: %v", skip.msg)
+ }
+ }
+ if err != nil {
+ t.Errorf("FAIL: %v", err)
+ }
+}
+
+// Skip returns a sentinel error that causes Run to mark the test as skipped.
+func Skip() script.Cmd {
+ return script.Command(
+ script.CmdUsage{
+ Summary: "skip the current test",
+ Args: "[msg]",
+ },
+ func(_ *script.State, args ...string) (script.WaitFunc, error) {
+ if len(args) > 1 {
+ return nil, script.ErrUsage
+ }
+ if len(args) == 0 {
+ return nil, skipError{""}
+ }
+ return nil, skipError{args[0]}
+ })
+}
+
+type skipError struct {
+ msg string
+}
+
+func (s skipError) Error() string {
+ if s.msg == "" {
+ return "skip"
+ }
+ return s.msg
+}
+
+// CachedExec returns a Condition that reports whether the PATH of the test
+// binary itself (not the script's current environment) contains the named
+// executable.
+func CachedExec() script.Cond {
+ return script.CachedCondition(
+ "<suffix> names an executable in the test binary's PATH",
+ func(name string) (bool, error) {
+ _, err := exec.LookPath(name)
+ return err == nil, nil
+ })
+}
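
// A sketch (not part of this CL) of a test that wires the pieces together; it
// assumes the usual imports (context, strings, testing, cmd/go/internal/script)
// and that script.DefaultCmds registers Mkdir under the name "mkdir".
func TestHypotheticalScript(t *testing.T) {
	engine := &script.Engine{
		Cmds:  DefaultCmds(),
		Conds: DefaultConds(),
		Quiet: !testing.Verbose(),
	}

	state, err := script.NewState(context.Background(), t.TempDir(), nil)
	if err != nil {
		t.Fatal(err)
	}

	src := strings.NewReader("# trivial script\nmkdir sub\n[short] skip 'short mode'\n")
	Run(t, engine, state, "inline.txt", src)
}
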
diff --git a/src/cmd/go/internal/script/state.go b/src/cmd/go/internal/script/state.go
new file mode 100644
index 0000000..548f673
--- /dev/null
+++ b/src/cmd/go/internal/script/state.go
@@ -0,0 +1,236 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package script
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "internal/txtar"
+ "io"
+ "io/fs"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+// A State encapsulates the current state of a running script engine,
+// including the script environment and any running background commands.
+type State struct {
+ engine *Engine // the engine currently executing the script, if any
+
+ ctx context.Context
+ cancel context.CancelFunc
+ file string
+ log bytes.Buffer
+
+ workdir string // initial working directory
+ pwd string // current working directory during execution
+ env []string // environment list (for os/exec)
+ envMap map[string]string // environment mapping (matches env)
+ stdout string // standard output from last 'go' command; for 'stdout' command
+ stderr string // standard error from last 'go' command; for 'stderr' command
+
+ background []backgroundCmd
+}
+
+type backgroundCmd struct {
+ *command
+ wait WaitFunc
+}
+
+// NewState returns a new State permanently associated with ctx, with its
+// initial working directory in workdir and its initial environment set to
+// initialEnv (or os.Environ(), if initialEnv is nil).
+//
+// The new State also contains pseudo-environment-variables for
+// ${/} and ${:} (for the platform's path and list separators respectively),
+// but does not pass those to subprocesses.
+func NewState(ctx context.Context, workdir string, initialEnv []string) (*State, error) {
+ absWork, err := filepath.Abs(workdir)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+
+ // Make a fresh copy of the env slice to avoid aliasing bugs if we ever
+ // start modifying it in place; this also establishes the invariant that
+ // s.env contains no duplicates.
+ env := cleanEnv(initialEnv, absWork)
+
+ envMap := make(map[string]string, len(env))
+
+ // Add entries for ${:} and ${/} to make it easier to write platform-independent
+ // paths in scripts.
+ envMap["/"] = string(os.PathSeparator)
+ envMap[":"] = string(os.PathListSeparator)
+
+ for _, kv := range env {
+ if k, v, ok := strings.Cut(kv, "="); ok {
+ envMap[k] = v
+ }
+ }
+
+ s := &State{
+ ctx: ctx,
+ cancel: cancel,
+ workdir: absWork,
+ pwd: absWork,
+ env: env,
+ envMap: envMap,
+ }
+ s.Setenv("PWD", absWork)
+ return s, nil
+}
+
+// CloseAndWait cancels the State's Context and waits for any background commands to
+// finish. If any remaining background command ended in an unexpected state,
+// CloseAndWait returns a non-nil error.
+func (s *State) CloseAndWait(log io.Writer) error {
+ s.cancel()
+ wait, err := Wait().Run(s)
+ if wait != nil {
+ panic("script: internal error: Wait unexpectedly returns its own WaitFunc")
+ }
+ if flushErr := s.flushLog(log); err == nil {
+ err = flushErr
+ }
+ return err
+}
+
+// Chdir changes the State's working directory to the given path.
+func (s *State) Chdir(path string) error {
+ dir := s.Path(path)
+ if _, err := os.Stat(dir); err != nil {
+ return &fs.PathError{Op: "Chdir", Path: dir, Err: err}
+ }
+ s.pwd = dir
+ s.Setenv("PWD", dir)
+ return nil
+}
+
+// Context returns the Context with which the State was created.
+func (s *State) Context() context.Context {
+ return s.ctx
+}
+
+// Environ returns a copy of the current script environment,
+// in the form "key=value".
+func (s *State) Environ() []string {
+ return append([]string(nil), s.env...)
+}
+
+// ExpandEnv replaces ${var} or $var in the string according to the values of
+// the environment variables in s. References to undefined variables are
+// replaced by the empty string.
+func (s *State) ExpandEnv(str string, inRegexp bool) string {
+ return os.Expand(str, func(key string) string {
+ e := s.envMap[key]
+ if inRegexp {
+ // Quote to literal strings: we want paths like C:\work\go1.4 to remain
+ // paths rather than regular expressions.
+ e = regexp.QuoteMeta(e)
+ }
+ return e
+ })
+}
+
+// ExtractFiles extracts the files in ar to the state's current directory,
+// expanding any environment variables within each name.
+//
+// The files must reside within the working directory with which the State was
+// originally created.
+func (s *State) ExtractFiles(ar *txtar.Archive) error {
+ wd := s.workdir
+
+ // Add trailing separator to terminate wd.
+ // This prevents extracting to outside paths which prefix wd,
+ // e.g. extracting to /home/foobar when wd is /home/foo
+ if wd == "" {
+ panic("s.workdir is unexpectedly empty")
+ }
+ if !os.IsPathSeparator(wd[len(wd)-1]) {
+ wd += string(filepath.Separator)
+ }
+
+ for _, f := range ar.Files {
+ name := s.Path(s.ExpandEnv(f.Name, false))
+
+ if !strings.HasPrefix(name, wd) {
+ return fmt.Errorf("file %#q is outside working directory", f.Name)
+ }
+
+ if err := os.MkdirAll(filepath.Dir(name), 0777); err != nil {
+ return err
+ }
+ if err := os.WriteFile(name, f.Data, 0666); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Getwd returns the directory in which to run the next script command.
+func (s *State) Getwd() string { return s.pwd }
+
+// Logf writes output to the script's log without updating its stdout or stderr
+// buffers. (The output log functions as a kind of meta-stderr.)
+func (s *State) Logf(format string, args ...any) {
+ fmt.Fprintf(&s.log, format, args...)
+}
+
+// flushLog writes the contents of the script's log to w and clears the log.
+func (s *State) flushLog(w io.Writer) error {
+ _, err := w.Write(s.log.Bytes())
+ s.log.Reset()
+ return err
+}
+
+// LookupEnv retrieves the value of the environment variable in s named by the key.
+func (s *State) LookupEnv(key string) (string, bool) {
+ v, ok := s.envMap[key]
+ return v, ok
+}
+
+// Path returns the absolute path in the host operating system for a
+// script-based (generally slash-separated and relative) path.
+func (s *State) Path(path string) string {
+ if filepath.IsAbs(path) {
+ return filepath.Clean(path)
+ }
+ return filepath.Join(s.pwd, path)
+}
+
+// Setenv sets the value of the environment variable in s named by the key.
+func (s *State) Setenv(key, value string) error {
+ s.env = cleanEnv(append(s.env, key+"="+value), s.pwd)
+ s.envMap[key] = value
+ return nil
+}
+
+// Stdout returns the stdout output of the last command run,
+// or the empty string if no command has been run.
+func (s *State) Stdout() string { return s.stdout }
+
+// Stderr returns the stderr output of the last command run,
+// or the empty string if no command has been run.
+func (s *State) Stderr() string { return s.stderr }
+
+// cleanEnv returns a copy of env with any duplicates removed in favor of
+// later values and any required system variables defined.
+//
+// If env is nil, cleanEnv copies the environment from os.Environ().
+func cleanEnv(env []string, pwd string) []string {
+ // There are some funky edge-cases in this logic, especially on Windows (with
+ // case-insensitive environment variables and variables with keys like "=C:").
+ // Rather than duplicating exec.dedupEnv here, cheat and use exec.Cmd directly.
+ cmd := &exec.Cmd{Env: env}
+ cmd.Dir = pwd
+ return cmd.Environ()
+}
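
// A sketch (not part of this CL) of seeding a State from a txtar archive before
// running a script; the module path, file contents, and SCRIPT_FLAVOR variable
// are placeholder assumptions.
func newSeededState(ctx context.Context, dir string) (*State, error) {
	s, err := NewState(ctx, dir, nil) // nil initialEnv copies os.Environ()
	if err != nil {
		return nil, err
	}

	ar := &txtar.Archive{
		Files: []txtar.File{
			{Name: "go.mod", Data: []byte("module example.test\n\ngo 1.21\n")},
			{Name: "hello.txt", Data: []byte("hello\n")},
		},
	}
	if err := s.ExtractFiles(ar); err != nil {
		return nil, err
	}

	// Per-script environment tweaks live on the State, not the host process.
	if err := s.Setenv("SCRIPT_FLAVOR", "demo"); err != nil {
		return nil, err
	}
	return s, nil
}
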
diff --git a/src/cmd/go/internal/search/search.go b/src/cmd/go/internal/search/search.go
new file mode 100644
index 0000000..9f216d5
--- /dev/null
+++ b/src/cmd/go/internal/search/search.go
@@ -0,0 +1,512 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/str"
+ "cmd/internal/pkgpattern"
+ "fmt"
+ "go/build"
+ "io/fs"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+)
+
+// A Match represents the result of matching a single package pattern.
+type Match struct {
+ pattern string // the pattern itself
+ Dirs []string // if the pattern is local, directories that potentially contain matching packages
+ Pkgs []string // matching packages (import paths)
+ Errs []error // errors matching the patterns to packages, NOT errors loading those packages
+
+ // Errs may be non-empty even if len(Pkgs) > 0, indicating that some matching
+ // packages could be located but results may be incomplete.
+ // If len(Pkgs) == 0 && len(Errs) == 0, the pattern is well-formed but did not
+ // match any packages.
+}
+
+// NewMatch returns a Match describing the given pattern,
+// without resolving its packages or errors.
+func NewMatch(pattern string) *Match {
+ return &Match{pattern: pattern}
+}
+
+// Pattern returns the pattern to be matched.
+func (m *Match) Pattern() string { return m.pattern }
+
+// AddError appends a MatchError wrapping err to m.Errs.
+func (m *Match) AddError(err error) {
+ m.Errs = append(m.Errs, &MatchError{Match: m, Err: err})
+}
+
+// IsLiteral reports whether the pattern is free of wildcards and meta-patterns.
+//
+// A literal pattern must match at most one package.
+func (m *Match) IsLiteral() bool {
+ return !strings.Contains(m.pattern, "...") && !m.IsMeta()
+}
+
+// IsLocal reports whether the pattern must be resolved from a specific root or
+// directory, such as a filesystem path or a single module.
+func (m *Match) IsLocal() bool {
+ return build.IsLocalImport(m.pattern) || filepath.IsAbs(m.pattern)
+}
+
+// IsMeta reports whether the pattern is a “meta-package” keyword that represents
+// multiple packages, such as "std", "cmd", or "all".
+func (m *Match) IsMeta() bool {
+ return IsMetaPackage(m.pattern)
+}
+
+// IsMetaPackage checks if name is a reserved package name that expands to multiple packages.
+func IsMetaPackage(name string) bool {
+ return name == "std" || name == "cmd" || name == "all"
+}
+
+// A MatchError indicates an error that occurred while attempting to match a
+// pattern.
+type MatchError struct {
+ Match *Match
+ Err error
+}
+
+func (e *MatchError) Error() string {
+ if e.Match.IsLiteral() {
+ return fmt.Sprintf("%s: %v", e.Match.Pattern(), e.Err)
+ }
+ return fmt.Sprintf("pattern %s: %v", e.Match.Pattern(), e.Err)
+}
+
+func (e *MatchError) Unwrap() error {
+ return e.Err
+}
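
// An illustrative classification (not part of this CL) of pattern kinds using
// the predicates above; the "classify" helper and its labels are assumptions.
func classify(pattern string) string {
	m := NewMatch(pattern)
	switch {
	case m.IsMeta():
		return "meta" // "std", "cmd", or "all"
	case m.IsLocal():
		return "local" // absolute path or ./ and ../ patterns
	case m.IsLiteral():
		return "literal" // matches at most one package
	default:
		return "wildcard" // contains "..."
	}
}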
+
+// MatchPackages sets m.Pkgs to a non-nil slice containing all the packages that
+// can be found under the $GOPATH directories and $GOROOT that match the
+// pattern. The pattern must be either "all" (all packages), "std" (standard
+// packages), "cmd" (standard commands), or a path including "...".
+//
+// If any errors may have caused the set of packages to be incomplete,
+// MatchPackages appends those errors to m.Errs.
+func (m *Match) MatchPackages() {
+ m.Pkgs = []string{}
+ if m.IsLocal() {
+ m.AddError(fmt.Errorf("internal error: MatchPackages: %s is not a valid package pattern", m.pattern))
+ return
+ }
+
+ if m.IsLiteral() {
+ m.Pkgs = []string{m.pattern}
+ return
+ }
+
+ match := func(string) bool { return true }
+ treeCanMatch := func(string) bool { return true }
+ if !m.IsMeta() {
+ match = pkgpattern.MatchPattern(m.pattern)
+ treeCanMatch = pkgpattern.TreeCanMatchPattern(m.pattern)
+ }
+
+ have := map[string]bool{
+ "builtin": true, // ignore pseudo-package that exists only for documentation
+ }
+ if !cfg.BuildContext.CgoEnabled {
+ have["runtime/cgo"] = true // ignore during walk
+ }
+
+ for _, src := range cfg.BuildContext.SrcDirs() {
+ if (m.pattern == "std" || m.pattern == "cmd") && src != cfg.GOROOTsrc {
+ continue
+ }
+
+ // If the root itself is a symlink to a directory,
+ // we want to follow it (see https://go.dev/issue/50807).
+ // Add a trailing separator to force that to happen.
+ src = str.WithFilePathSeparator(filepath.Clean(src))
+ root := src
+ if m.pattern == "cmd" {
+ root += "cmd" + string(filepath.Separator)
+ }
+
+ err := fsys.Walk(root, func(path string, fi fs.FileInfo, err error) error {
+ if err != nil {
+ return err // Likely a permission error, which could interfere with matching.
+ }
+ if path == src {
+ return nil // GOROOT/src and GOPATH/src cannot contain packages.
+ }
+
+ want := true
+ // Avoid .foo, _foo, and testdata directory trees.
+ _, elem := filepath.Split(path)
+ if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
+ want = false
+ }
+
+ name := filepath.ToSlash(path[len(src):])
+ if m.pattern == "std" && (!IsStandardImportPath(name) || name == "cmd") {
+ // The name "std" is only the standard library.
+ // If the name is cmd, it's the root of the command tree.
+ want = false
+ }
+ if !treeCanMatch(name) {
+ want = false
+ }
+
+ if !fi.IsDir() {
+ if fi.Mode()&fs.ModeSymlink != 0 && want && strings.Contains(m.pattern, "...") {
+ if target, err := fsys.Stat(path); err == nil && target.IsDir() {
+ fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", path)
+ }
+ }
+ return nil
+ }
+ if !want {
+ return filepath.SkipDir
+ }
+
+ if have[name] {
+ return nil
+ }
+ have[name] = true
+ if !match(name) {
+ return nil
+ }
+ pkg, err := cfg.BuildContext.ImportDir(path, 0)
+ if err != nil {
+ if _, noGo := err.(*build.NoGoError); noGo {
+ // The package does not actually exist, so record neither the package
+ // nor the error.
+ return nil
+ }
+ // There was an error importing path, but not matching it,
+ // which is all that Match promises to do.
+ // Ignore the import error.
+ }
+
+ // If we are expanding "cmd", skip main
+ // packages under cmd/vendor. At least as of
+ // March, 2017, there is one there for the
+ // vendored pprof tool.
+ if m.pattern == "cmd" && pkg != nil && strings.HasPrefix(pkg.ImportPath, "cmd/vendor") && pkg.Name == "main" {
+ return nil
+ }
+
+ m.Pkgs = append(m.Pkgs, name)
+ return nil
+ })
+ if err != nil {
+ m.AddError(err)
+ }
+ }
+}
+
+// MatchDirs sets m.Dirs to a non-nil slice containing all directories that
+// potentially match a local pattern. The pattern must begin with an absolute
+// path, or "./", or "../". On Windows, the pattern may use slash or backslash
+// separators or a mix of both.
+//
+// If any errors may have caused the set of directories to be incomplete,
+// MatchDirs appends those errors to m.Errs.
+func (m *Match) MatchDirs(modRoots []string) {
+ m.Dirs = []string{}
+ if !m.IsLocal() {
+ m.AddError(fmt.Errorf("internal error: MatchDirs: %s is not a valid filesystem pattern", m.pattern))
+ return
+ }
+
+ if m.IsLiteral() {
+ m.Dirs = []string{m.pattern}
+ return
+ }
+
+ // Clean the path and create a matching predicate.
+ // filepath.Clean removes "./" prefixes (and ".\" on Windows). We need to
+ // preserve these, since they are meaningful in MatchPattern and in
+ // returned import paths.
+ cleanPattern := filepath.Clean(m.pattern)
+ isLocal := strings.HasPrefix(m.pattern, "./") || (os.PathSeparator == '\\' && strings.HasPrefix(m.pattern, `.\`))
+ prefix := ""
+ if cleanPattern != "." && isLocal {
+ prefix = "./"
+ cleanPattern = "." + string(os.PathSeparator) + cleanPattern
+ }
+ slashPattern := filepath.ToSlash(cleanPattern)
+ match := pkgpattern.MatchPattern(slashPattern)
+
+ // Find directory to begin the scan.
+ // Could be smarter but this one optimization
+ // is enough for now, since ... is usually at the
+ // end of a path.
+ i := strings.Index(cleanPattern, "...")
+ dir, _ := filepath.Split(cleanPattern[:i])
+
+ // pattern begins with ./ or ../.
+ // path.Clean will discard the ./ but not the ../.
+ // We need to preserve the ./ for pattern matching
+ // and in the returned import paths.
+
+ if len(modRoots) > 1 {
+ abs, err := filepath.Abs(dir)
+ if err != nil {
+ m.AddError(err)
+ return
+ }
+ var found bool
+ for _, modRoot := range modRoots {
+ if modRoot != "" && str.HasFilePathPrefix(abs, modRoot) {
+ found = true
+ }
+ }
+ if !found {
+ plural := ""
+ if len(modRoots) > 1 {
+ plural = "s"
+ }
+ m.AddError(fmt.Errorf("directory %s is outside module root%s (%s)", abs, plural, strings.Join(modRoots, ", ")))
+ }
+ }
+
+ // If dir is actually a symlink to a directory,
+ // we want to follow it (see https://go.dev/issue/50807).
+ // Add a trailing separator to force that to happen.
+ dir = str.WithFilePathSeparator(dir)
+ err := fsys.Walk(dir, func(path string, fi fs.FileInfo, err error) error {
+ if err != nil {
+ return err // Likely a permission error, which could interfere with matching.
+ }
+ if !fi.IsDir() {
+ return nil
+ }
+ top := false
+ if path == dir {
+ // Walk starts at dir and recurses. For the recursive case,
+ // the path is the result of filepath.Join, which calls filepath.Clean.
+ // The initial case is not Cleaned, though, so we do this explicitly.
+ //
+ // This converts a path like "./io/" to "io". Without this step, running
+ // "cd $GOROOT/src; go list ./io/..." would incorrectly skip the io
+ // package, because prepending the prefix "./" to the unclean path would
+ // result in "././io", and match("././io") returns false.
+ top = true
+ path = filepath.Clean(path)
+ }
+
+ // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..".
+ _, elem := filepath.Split(path)
+ dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".."
+ if dot || strings.HasPrefix(elem, "_") || elem == "testdata" {
+ return filepath.SkipDir
+ }
+
+ if !top && cfg.ModulesEnabled {
+ // Ignore other modules found in subdirectories.
+ if fi, err := fsys.Stat(filepath.Join(path, "go.mod")); err == nil && !fi.IsDir() {
+ return filepath.SkipDir
+ }
+ }
+
+ name := prefix + filepath.ToSlash(path)
+ if !match(name) {
+ return nil
+ }
+
+ // We keep the directory if we can import it, or if we can't import it
+ // due to invalid Go source files. This means that directories containing
+ // parse errors will be built (and fail) instead of being silently skipped
+ // as not matching the pattern. Go 1.5 and earlier skipped, but that
+ // behavior means people miss serious mistakes.
+ // See golang.org/issue/11407.
+ if p, err := cfg.BuildContext.ImportDir(path, 0); err != nil && (p == nil || len(p.InvalidGoFiles) == 0) {
+ if _, noGo := err.(*build.NoGoError); noGo {
+ // The package does not actually exist, so record neither the package
+ // nor the error.
+ return nil
+ }
+ // There was an error importing path, but not matching it,
+ // which is all that Match promises to do.
+ // Ignore the import error.
+ }
+ m.Dirs = append(m.Dirs, name)
+ return nil
+ })
+ if err != nil {
+ m.AddError(err)
+ }
+}
+
+// WarnUnmatched warns about patterns that didn't match any packages.
+func WarnUnmatched(matches []*Match) {
+ for _, m := range matches {
+ if len(m.Pkgs) == 0 && len(m.Errs) == 0 {
+ fmt.Fprintf(os.Stderr, "go: warning: %q matched no packages\n", m.pattern)
+ }
+ }
+}
+
+// ImportPaths returns the matching paths to use for the given command line.
+// It calls ImportPathsQuiet and then WarnUnmatched.
+func ImportPaths(patterns, modRoots []string) []*Match {
+ matches := ImportPathsQuiet(patterns, modRoots)
+ WarnUnmatched(matches)
+ return matches
+}
+
+// ImportPathsQuiet is like ImportPaths but does not warn about patterns with no matches.
+func ImportPathsQuiet(patterns, modRoots []string) []*Match {
+ var out []*Match
+ for _, a := range CleanPatterns(patterns) {
+ m := NewMatch(a)
+ if m.IsLocal() {
+ m.MatchDirs(modRoots)
+
+ // Change the file import path to a regular import path if the package
+ // is in GOPATH or GOROOT. We don't report errors here; LoadImport
+ // (or something similar) will report them later.
+ m.Pkgs = make([]string, len(m.Dirs))
+ for i, dir := range m.Dirs {
+ absDir := dir
+ if !filepath.IsAbs(dir) {
+ absDir = filepath.Join(base.Cwd(), dir)
+ }
+ if bp, _ := cfg.BuildContext.ImportDir(absDir, build.FindOnly); bp.ImportPath != "" && bp.ImportPath != "." {
+ m.Pkgs[i] = bp.ImportPath
+ } else {
+ m.Pkgs[i] = dir
+ }
+ }
+ } else {
+ m.MatchPackages()
+ }
+
+ out = append(out, m)
+ }
+ return out
+}
+
+// CleanPatterns returns the patterns to use for the given command line. It
+// canonicalizes the patterns but does not evaluate any matches. For patterns
+// that are not local or absolute paths, it preserves text after '@' to avoid
+// modifying version queries.
+func CleanPatterns(patterns []string) []string {
+ if len(patterns) == 0 {
+ return []string{"."}
+ }
+ var out []string
+ for _, a := range patterns {
+ var p, v string
+ if build.IsLocalImport(a) || filepath.IsAbs(a) {
+ p = a
+ } else if i := strings.IndexByte(a, '@'); i < 0 {
+ p = a
+ } else {
+ p = a[:i]
+ v = a[i:]
+ }
+
+ // Arguments may be either file paths or import paths.
+ // As a courtesy to Windows developers, rewrite \ to /
+ // in arguments that look like import paths.
+ // Don't replace slashes in absolute paths.
+ if filepath.IsAbs(p) {
+ p = filepath.Clean(p)
+ } else {
+ if filepath.Separator == '\\' {
+ p = strings.ReplaceAll(p, `\`, `/`)
+ }
+
+ // Put argument in canonical form, but preserve leading ./.
+ if strings.HasPrefix(p, "./") {
+ p = "./" + path.Clean(p)
+ if p == "./." {
+ p = "."
+ }
+ } else {
+ p = path.Clean(p)
+ }
+ }
+
+ out = append(out, p+v)
+ }
+ return out
+}
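As a concrete illustration of the three canonicalization cases handled above, here is a minimal table-style test sketch. It assumes these functions belong to a package named search (the package this hunk appears to come from); the test name and the inputs are hypothetical, chosen only to exercise the local-path, version-query, and plain-import-path branches on a slash-separated filesystem.

    package search

    import "testing"

    // TestCleanPatternsSketch (hypothetical) illustrates the canonical forms:
    // local paths keep their leading "./", text after '@' is preserved for
    // version queries, and plain import paths are cleaned with path.Clean.
    func TestCleanPatternsSketch(t *testing.T) {
        got := CleanPatterns([]string{"./foo//bar", "example.com/m@v1.2.3", "a/b/../c"})
        want := []string{"./foo/bar", "example.com/m@v1.2.3", "a/c"}
        for i := range want {
            if got[i] != want[i] {
                t.Errorf("CleanPatterns[%d] = %q, want %q", i, got[i], want[i])
            }
        }
    }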
+
+// IsStandardImportPath reports whether $GOROOT/src/path should be considered
+// part of the standard distribution. For historical reasons we allow people to add
+// their own code to $GOROOT instead of using $GOPATH, but we assume that
+// code will start with a domain name (dot in the first element).
+//
+// Note that this function is meant to evaluate whether a directory found in GOROOT
+// should be treated as part of the standard library. It should not be used to decide
+// that a directory found in GOPATH should be rejected: directories in GOPATH
+// need not have dots in the first element, and they just take their chances
+// with future collisions in the standard library.
+func IsStandardImportPath(path string) bool {
+ i := strings.Index(path, "/")
+ if i < 0 {
+ i = len(path)
+ }
+ elem := path[:i]
+ return !strings.Contains(elem, ".")
+}
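A tiny sketch of the dot-in-the-first-element rule, again assuming this is package search; the example name is hypothetical.

    package search

    import "fmt"

    // ExampleIsStandardImportPath_sketch (hypothetical): the first path element
    // decides the answer, and a dot in it marks non-standard code.
    func ExampleIsStandardImportPath_sketch() {
        fmt.Println(IsStandardImportPath("net/http"))           // "net" has no dot
        fmt.Println(IsStandardImportPath("github.com/foo/bar")) // "github.com" has a dot
        // Output:
        // true
        // false
    }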
+
+// IsRelativePath reports whether pattern should be interpreted as a directory
+// path relative to the current directory, as opposed to a pattern matching
+// import paths.
+func IsRelativePath(pattern string) bool {
+ return strings.HasPrefix(pattern, "./") || strings.HasPrefix(pattern, "../") || pattern == "." || pattern == ".."
+}
+
+// InDir checks whether path is in the file tree rooted at dir.
+// If so, InDir returns an equivalent path relative to dir.
+// If not, InDir returns an empty string.
+// InDir makes some effort to succeed even in the presence of symbolic links.
+func InDir(path, dir string) string {
+ // inDirLex reports whether path is lexically in dir,
+ // without considering symbolic or hard links.
+ inDirLex := func(path, dir string) (string, bool) {
+ if dir == "" {
+ return path, true
+ }
+ rel := str.TrimFilePathPrefix(path, dir)
+ if rel == path {
+ return "", false
+ }
+ if rel == "" {
+ return ".", true
+ }
+ return rel, true
+ }
+
+ if rel, ok := inDirLex(path, dir); ok {
+ return rel
+ }
+ xpath, err := filepath.EvalSymlinks(path)
+ if err != nil || xpath == path {
+ xpath = ""
+ } else {
+ if rel, ok := inDirLex(xpath, dir); ok {
+ return rel
+ }
+ }
+
+ xdir, err := filepath.EvalSymlinks(dir)
+ if err == nil && xdir != dir {
+ if rel, ok := inDirLex(path, xdir); ok {
+ return rel
+ }
+ if xpath != "" {
+ if rel, ok := inDirLex(xpath, xdir); ok {
+ return rel
+ }
+ }
+ }
+ return ""
+}
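A small sketch of InDir's behavior, assuming package search, a slash-separated filesystem, and that no symlinks relate the (made-up) paths involved. The first two calls are resolved lexically, so no filesystem access is needed.

    package search

    import "fmt"

    // ExampleInDir_sketch (hypothetical): lexical containment first, "" when
    // path is outside dir and symlink resolution does not help.
    func ExampleInDir_sketch() {
        fmt.Printf("%q\n", InDir("/home/user/go/src/m/pkg", "/home/user/go/src/m"))
        fmt.Printf("%q\n", InDir("/home/user/go/src/m", "/home/user/go/src/m"))
        fmt.Printf("%q\n", InDir("/tmp/elsewhere", "/home/user/go/src/m"))
        // Output:
        // "pkg"
        // "."
        // ""
    }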
diff --git a/src/cmd/go/internal/str/path.go b/src/cmd/go/internal/str/path.go
new file mode 100644
index 0000000..83a3d0e
--- /dev/null
+++ b/src/cmd/go/internal/str/path.go
@@ -0,0 +1,133 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package str
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+// HasPathPrefix reports whether the slash-separated path s
+// begins with the elements in prefix.
+func HasPathPrefix(s, prefix string) bool {
+ if len(s) == len(prefix) {
+ return s == prefix
+ }
+ if prefix == "" {
+ return true
+ }
+ if len(s) > len(prefix) {
+ if prefix[len(prefix)-1] == '/' || s[len(prefix)] == '/' {
+ return s[:len(prefix)] == prefix
+ }
+ }
+ return false
+}
+
+// HasFilePathPrefix reports whether the filesystem path s
+// begins with the elements in prefix.
+//
+// HasFilePathPrefix is case-sensitive (except for volume names) even if the
+// filesystem is not, does not apply Unicode normalization even if the
+// filesystem does, and assumes that all path separators are canonicalized to
+// filepath.Separator (as returned by filepath.Clean).
+func HasFilePathPrefix(s, prefix string) bool {
+ sv := filepath.VolumeName(s)
+ pv := filepath.VolumeName(prefix)
+
+ // Strip the volume from both paths before canonicalizing sv and pv:
+ // it's unlikely that strings.ToUpper will change the length of the string,
+ // but doesn't seem impossible.
+ s = s[len(sv):]
+ prefix = prefix[len(pv):]
+
+ // Always treat Windows volume names as case-insensitive, even though
+ // we don't treat the rest of the path as such.
+ //
+ // TODO(bcmills): Why do we care about case only for the volume name? It's
+ // been this way since https://go.dev/cl/11316, but I don't understand why
+ // that problem doesn't apply to case differences in the entire path.
+ if sv != pv {
+ sv = strings.ToUpper(sv)
+ pv = strings.ToUpper(pv)
+ }
+
+ switch {
+ default:
+ return false
+ case sv != pv:
+ return false
+ case len(s) == len(prefix):
+ return s == prefix
+ case prefix == "":
+ return true
+ case len(s) > len(prefix):
+ if prefix[len(prefix)-1] == filepath.Separator {
+ return strings.HasPrefix(s, prefix)
+ }
+ return s[len(prefix)] == filepath.Separator && s[:len(prefix)] == prefix
+ }
+}
+
+// TrimFilePathPrefix returns s without the leading path elements in prefix,
+// such that joining the string to prefix produces s.
+//
+// If s does not start with prefix (HasFilePathPrefix with the same arguments
+// returns false), TrimFilePathPrefix returns s. If s equals prefix,
+// TrimFilePathPrefix returns "".
+func TrimFilePathPrefix(s, prefix string) string {
+ if prefix == "" {
+ // Trimming the empty string from a path should join to produce that path.
+ // (Trim("/tmp/foo", "") should give "/tmp/foo", not "tmp/foo".)
+ return s
+ }
+ if !HasFilePathPrefix(s, prefix) {
+ return s
+ }
+
+ trimmed := s[len(prefix):]
+ if len(trimmed) > 0 && os.IsPathSeparator(trimmed[0]) {
+ if runtime.GOOS == "windows" && prefix == filepath.VolumeName(prefix) && len(prefix) == 2 && prefix[1] == ':' {
+ // Joining a relative path to a bare Windows drive letter produces a path
+ // relative to the working directory on that drive, but the original path
+ // was absolute, not relative. Keep the leading path separator so that it
+ // remains absolute when joined to prefix.
+ } else {
+ // Prefix ends in a regular path element, so strip the path separator that
+ // follows it.
+ trimmed = trimmed[1:]
+ }
+ }
+ return trimmed
+}
+
+// WithFilePathSeparator returns s with a trailing path separator, or the empty
+// string if s is empty.
+func WithFilePathSeparator(s string) string {
+ if s == "" || os.IsPathSeparator(s[len(s)-1]) {
+ return s
+ }
+ return s + string(filepath.Separator)
+}
+
+// QuoteGlob returns s with all Glob metacharacters quoted.
+// We don't try to handle backslash here, as that can appear in a
+// file path on Windows.
+func QuoteGlob(s string) string {
+ if !strings.ContainsAny(s, `*?[]`) {
+ return s
+ }
+ var sb strings.Builder
+ for _, c := range s {
+ switch c {
+ case '*', '?', '[', ']':
+ sb.WriteByte('\\')
+ }
+ sb.WriteRune(c)
+ }
+ return sb.String()
+}
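A brief sketch of QuoteGlob's quoting, using hypothetical file names; only the Glob metacharacters are escaped, and strings without them are returned unchanged.

    package str

    import "fmt"

    func ExampleQuoteGlob_sketch() {
        fmt.Println(QuoteGlob("dir/*.go"))      // metacharacter quoted
        fmt.Println(QuoteGlob("plain/file.go")) // returned unchanged
        // Output:
        // dir/\*.go
        // plain/file.go
    }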
diff --git a/src/cmd/go/internal/str/str.go b/src/cmd/go/internal/str/str.go
new file mode 100644
index 0000000..af7c699
--- /dev/null
+++ b/src/cmd/go/internal/str/str.go
@@ -0,0 +1,113 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package str provides string manipulation utilities.
+package str
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// StringList flattens its arguments into a single []string.
+// Each argument in args must have type string or []string.
+func StringList(args ...any) []string {
+ var x []string
+ for _, arg := range args {
+ switch arg := arg.(type) {
+ case []string:
+ x = append(x, arg...)
+ case string:
+ x = append(x, arg)
+ default:
+ panic("stringList: invalid argument of type " + fmt.Sprintf("%T", arg))
+ }
+ }
+ return x
+}
+
+// ToFold returns a string with the property that
+//
+// strings.EqualFold(s, t) iff ToFold(s) == ToFold(t)
+//
+// This lets us test a large set of strings for fold-equivalent
+// duplicates without making a quadratic number of calls
+// to EqualFold. Note that strings.ToUpper and strings.ToLower
+// do not have the desired property in some corner cases.
+func ToFold(s string) string {
+ // Fast path: all ASCII, no upper case.
+ // Most paths look like this already.
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= utf8.RuneSelf || 'A' <= c && c <= 'Z' {
+ goto Slow
+ }
+ }
+ return s
+
+Slow:
+ var b strings.Builder
+ for _, r := range s {
+ // SimpleFold(x) cycles to the next equivalent rune > x
+ // or wraps around to smaller values. Iterate until it wraps,
+ // and we've found the minimum value.
+ for {
+ r0 := r
+ r = unicode.SimpleFold(r0)
+ if r <= r0 {
+ break
+ }
+ }
+ // Exception to allow fast path above: A-Z => a-z
+ if 'A' <= r && r <= 'Z' {
+ r += 'a' - 'A'
+ }
+ b.WriteRune(r)
+ }
+ return b.String()
+}
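A short sketch of the fold-key property ToFold provides: two strings map to the same key exactly when strings.EqualFold treats them as equal. The example name and inputs are hypothetical.

    package str

    import "fmt"

    func ExampleToFold_sketch() {
        fmt.Println(ToFold("Path"), ToFold("pATH")) // same key for EqualFold-equal strings
        fmt.Println(ToFold("Σ") == ToFold("σ"))     // non-ASCII runes fold as well
        // Output:
        // path path
        // true
    }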
+
+// FoldDup reports a pair of strings from the list that are
+// equal according to strings.EqualFold.
+// It returns "", "" if there are no such strings.
+func FoldDup(list []string) (string, string) {
+ clash := map[string]string{}
+ for _, s := range list {
+ fold := ToFold(s)
+ if t := clash[fold]; t != "" {
+ if s > t {
+ s, t = t, s
+ }
+ return s, t
+ }
+ clash[fold] = s
+ }
+ return "", ""
+}
+
+// Contains reports whether x contains s.
+func Contains(x []string, s string) bool {
+ for _, t := range x {
+ if t == s {
+ return true
+ }
+ }
+ return false
+}
+
+// Uniq removes consecutive duplicate strings from ss.
+func Uniq(ss *[]string) {
+ if len(*ss) <= 1 {
+ return
+ }
+ uniq := (*ss)[:1]
+ for _, s := range *ss {
+ if s != uniq[len(uniq)-1] {
+ uniq = append(uniq, s)
+ }
+ }
+ *ss = uniq
+}
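Because Uniq only collapses adjacent duplicates, callers that want global uniqueness must sort first. A hypothetical sketch:

    package str

    import (
        "fmt"
        "sort"
    )

    func ExampleUniq_sketch() {
        ss := []string{"b", "a", "b", "b", "a"}
        Uniq(&ss)
        fmt.Println(ss) // only the adjacent "b", "b" pair collapses

        ss = []string{"b", "a", "b", "b", "a"}
        sort.Strings(ss)
        Uniq(&ss)
        fmt.Println(ss) // sorted input yields globally unique output
        // Output:
        // [b a b a]
        // [a b]
    }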
diff --git a/src/cmd/go/internal/str/str_test.go b/src/cmd/go/internal/str/str_test.go
new file mode 100644
index 0000000..7c19877
--- /dev/null
+++ b/src/cmd/go/internal/str/str_test.go
@@ -0,0 +1,185 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package str
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+var foldDupTests = []struct {
+ list []string
+ f1, f2 string
+}{
+ {StringList("math/rand", "math/big"), "", ""},
+ {StringList("math", "strings"), "", ""},
+ {StringList("strings"), "", ""},
+ {StringList("strings", "strings"), "strings", "strings"},
+ {StringList("Rand", "rand", "math", "math/rand", "math/Rand"), "Rand", "rand"},
+}
+
+func TestFoldDup(t *testing.T) {
+ for _, tt := range foldDupTests {
+ f1, f2 := FoldDup(tt.list)
+ if f1 != tt.f1 || f2 != tt.f2 {
+ t.Errorf("foldDup(%q) = %q, %q, want %q, %q", tt.list, f1, f2, tt.f1, tt.f2)
+ }
+ }
+}
+
+func TestHasPathPrefix(t *testing.T) {
+ type testCase struct {
+ s, prefix string
+ want bool
+ }
+ for _, tt := range []testCase{
+ {"", "", true},
+ {"", "/", false},
+ {"foo", "", true},
+ {"foo", "/", false},
+ {"foo", "foo", true},
+ {"foo", "foo/", false},
+ {"foo", "/foo", false},
+ {"foo/bar", "", true},
+ {"foo/bar", "foo", true},
+ {"foo/bar", "foo/", true},
+ {"foo/bar", "/foo", false},
+ {"foo/bar", "foo/bar", true},
+ {"foo/bar", "foo/bar/", false},
+ {"foo/bar", "/foo/bar", false},
+ } {
+ got := HasPathPrefix(tt.s, tt.prefix)
+ if got != tt.want {
+ t.Errorf("HasPathPrefix(%q, %q) = %v; want %v", tt.s, tt.prefix, got, tt.want)
+ }
+ }
+}
+
+func TestTrimFilePathPrefixSlash(t *testing.T) {
+ if os.PathSeparator != '/' {
+ t.Skipf("test requires slash-separated file paths")
+ }
+
+ type testCase struct {
+ s, prefix, want string
+ }
+ for _, tt := range []testCase{
+ {"/", "", "/"},
+ {"/", "/", ""},
+ {"/foo", "", "/foo"},
+ {"/foo", "/", "foo"},
+ {"/foo", "/foo", ""},
+ {"/foo/bar", "/foo", "bar"},
+ {"/foo/bar", "/foo/", "bar"},
+ {"/foo/", "/", "foo/"},
+ {"/foo/", "/foo", ""},
+ {"/foo/", "/foo/", ""},
+
+ // if prefix is not s's prefix, return s
+ {"", "/", ""},
+ {"/foo", "/bar", "/foo"},
+ {"/foo", "/foo/bar", "/foo"},
+ {"foo", "/foo", "foo"},
+ {"/foo", "foo", "/foo"},
+ {"/foo", "/foo/", "/foo"},
+ } {
+ got := TrimFilePathPrefix(tt.s, tt.prefix)
+ if got == tt.want {
+ t.Logf("TrimFilePathPrefix(%q, %q) = %q", tt.s, tt.prefix, got)
+ } else {
+ t.Errorf("TrimFilePathPrefix(%q, %q) = %q, want %q", tt.s, tt.prefix, got, tt.want)
+ }
+
+ if HasFilePathPrefix(tt.s, tt.prefix) {
+ joined := filepath.Join(tt.prefix, got)
+ if clean := filepath.Clean(tt.s); joined != clean {
+ t.Errorf("filepath.Join(%q, %q) = %q, want %q", tt.prefix, got, joined, clean)
+ }
+ }
+ }
+}
+
+func TestTrimFilePathPrefixWindows(t *testing.T) {
+ if runtime.GOOS != "windows" {
+ t.Skipf("test requires Windows file paths")
+ }
+ type testCase struct {
+ s, prefix, want string
+ }
+ for _, tt := range []testCase{
+ {`\`, ``, `\`},
+ {`\`, `\`, ``},
+ {`C:`, `C:`, ``},
+ {`C:\`, `C:`, `\`},
+ {`C:\`, `C:\`, ``},
+ {`C:\foo`, ``, `C:\foo`},
+ {`C:\foo`, `C:`, `\foo`},
+ {`C:\foo`, `C:\`, `foo`},
+ {`C:\foo`, `C:\foo`, ``},
+ {`C:\foo\`, `C:\foo`, ``},
+ {`C:\foo\bar`, `C:\foo`, `bar`},
+ {`C:\foo\bar`, `C:\foo\`, `bar`},
+ // if prefix is not s's prefix, return s
+ {`C:\foo`, `C:\bar`, `C:\foo`},
+ {`C:\foo`, `C:\foo\bar`, `C:\foo`},
+ {`C:`, `C:\`, `C:`},
+ // if volumes are different, return s
+ {`C:`, ``, `C:`},
+ {`C:\`, ``, `C:\`},
+ {`C:\foo`, ``, `C:\foo`},
+ {`C:\foo`, `\foo`, `C:\foo`},
+ {`C:\foo`, `D:\foo`, `C:\foo`},
+
+ // UNC path
+ {`\\host\share\foo`, `\\host\share`, `foo`},
+ {`\\host\share\foo`, `\\host\share\`, `foo`},
+ {`\\host\share\foo`, `\\host\share\foo`, ``},
+ {`\\host\share\foo\bar`, `\\host\share\foo`, `bar`},
+ {`\\host\share\foo\bar`, `\\host\share\foo\`, `bar`},
+ // if prefix is not s's prefix, return s
+ {`\\host\share\foo`, `\\host\share\bar`, `\\host\share\foo`},
+ {`\\host\share\foo`, `\\host\share\foo\bar`, `\\host\share\foo`},
+ // if either host or share name is different, return s
+ {`\\host\share\foo`, ``, `\\host\share\foo`},
+ {`\\host\share\foo`, `\foo`, `\\host\share\foo`},
+ {`\\host\share\foo`, `\\host\other\`, `\\host\share\foo`},
+ {`\\host\share\foo`, `\\other\share\`, `\\host\share\foo`},
+ {`\\host\share\foo`, `\\host\`, `\\host\share\foo`},
+ {`\\host\share\foo`, `\share\`, `\\host\share\foo`},
+
+ // only volume names are case-insensitive
+ {`C:\foo`, `c:`, `\foo`},
+ {`C:\foo`, `c:\foo`, ``},
+ {`c:\foo`, `C:`, `\foo`},
+ {`c:\foo`, `C:\foo`, ``},
+ {`C:\foo`, `C:\Foo`, `C:\foo`},
+ {`\\Host\Share\foo`, `\\host\share`, `foo`},
+ {`\\Host\Share\foo`, `\\host\share\foo`, ``},
+ {`\\host\share\foo`, `\\Host\Share`, `foo`},
+ {`\\host\share\foo`, `\\Host\Share\foo`, ``},
+ {`\\Host\Share\foo`, `\\Host\Share\Foo`, `\\Host\Share\foo`},
+ } {
+ got := TrimFilePathPrefix(tt.s, tt.prefix)
+ if got == tt.want {
+ t.Logf("TrimFilePathPrefix(%#q, %#q) = %#q", tt.s, tt.prefix, got)
+ } else {
+ t.Errorf("TrimFilePathPrefix(%#q, %#q) = %#q, want %#q", tt.s, tt.prefix, got, tt.want)
+ }
+
+ if HasFilePathPrefix(tt.s, tt.prefix) {
+ // Although TrimFilePathPrefix is only case-insensitive in the volume name,
+ // what we care about in testing Join is that absolute paths remain
+ // absolute and relative paths remain relative; there is no harm in
+ // over-normalizing letters in the comparison, so we use EqualFold.
+ joined := filepath.Join(tt.prefix, got)
+ if clean := filepath.Clean(tt.s); !strings.EqualFold(joined, clean) {
+ t.Errorf("filepath.Join(%#q, %#q) = %#q, want %#q", tt.prefix, got, joined, clean)
+ }
+ }
+ }
+}
diff --git a/src/cmd/go/internal/test/cover.go b/src/cmd/go/internal/test/cover.go
new file mode 100644
index 0000000..f614458
--- /dev/null
+++ b/src/cmd/go/internal/test/cover.go
@@ -0,0 +1,85 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+)
+
+var coverMerge struct {
+ f *os.File
+ sync.Mutex // for f.Write
+}
+
+// initCoverProfile initializes the test coverage profile.
+// It must be run before any calls to mergeCoverProfile or closeCoverProfile.
+// Using this function clears the profile in case it existed from a previous run,
+// or in case it doesn't exist and the test is going to fail to create it (or not run).
+func initCoverProfile() {
+ if testCoverProfile == "" || testC {
+ return
+ }
+ if !filepath.IsAbs(testCoverProfile) {
+ testCoverProfile = filepath.Join(testOutputDir.getAbs(), testCoverProfile)
+ }
+
+ // No mutex - caller's responsibility to call with no racing goroutines.
+ f, err := os.Create(testCoverProfile)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ _, err = fmt.Fprintf(f, "mode: %s\n", cfg.BuildCoverMode)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ coverMerge.f = f
+}
+
+// mergeCoverProfile merges file into the profile stored in testCoverProfile.
+// It prints any errors it encounters to ew.
+func mergeCoverProfile(ew io.Writer, file string) {
+ if coverMerge.f == nil {
+ return
+ }
+ coverMerge.Lock()
+ defer coverMerge.Unlock()
+
+ expect := fmt.Sprintf("mode: %s\n", cfg.BuildCoverMode)
+ buf := make([]byte, len(expect))
+ r, err := os.Open(file)
+ if err != nil {
+ // Test did not create profile, which is OK.
+ return
+ }
+ defer r.Close()
+
+ n, err := io.ReadFull(r, buf)
+ if n == 0 {
+ return
+ }
+ if err != nil || string(buf) != expect {
+ fmt.Fprintf(ew, "error: test wrote malformed coverage profile %s.\n", file)
+ return
+ }
+ _, err = io.Copy(coverMerge.f, r)
+ if err != nil {
+ fmt.Fprintf(ew, "error: saving coverage profile: %v\n", err)
+ }
+}
+
+func closeCoverProfile() {
+ if coverMerge.f == nil {
+ return
+ }
+ if err := coverMerge.f.Close(); err != nil {
+ base.Errorf("closing coverage profile: %v", err)
+ }
+}
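A hypothetical sketch of how these three helpers are intended to be sequenced. It assumes the package-level flags (testCoverProfile, testOutputDir) were already set by flag parsing, and the fragment file names are made up.

    package test

    import "os"

    // coverSketch (hypothetical): open the profile once, merge each per-test
    // fragment as its test finishes, and close after the last test.
    func coverSketch(fragments []string) {
        initCoverProfile() // truncates/creates the profile and writes the "mode:" header
        for _, frag := range fragments {
            mergeCoverProfile(os.Stderr, frag) // appends the fragment after checking its header
        }
        closeCoverProfile()
    }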
diff --git a/src/cmd/go/internal/test/flagdefs.go b/src/cmd/go/internal/test/flagdefs.go
new file mode 100644
index 0000000..947c27e
--- /dev/null
+++ b/src/cmd/go/internal/test/flagdefs.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by genflags.go — DO NOT EDIT.
+
+package test
+
+// passFlagToTest contains the flags that should be forwarded to
+// the test binary with the prefix "test.".
+var passFlagToTest = map[string]bool{
+ "bench": true,
+ "benchmem": true,
+ "benchtime": true,
+ "blockprofile": true,
+ "blockprofilerate": true,
+ "count": true,
+ "coverprofile": true,
+ "cpu": true,
+ "cpuprofile": true,
+ "failfast": true,
+ "fullpath": true,
+ "fuzz": true,
+ "fuzzminimizetime": true,
+ "fuzztime": true,
+ "list": true,
+ "memprofile": true,
+ "memprofilerate": true,
+ "mutexprofile": true,
+ "mutexprofilefraction": true,
+ "outputdir": true,
+ "parallel": true,
+ "run": true,
+ "short": true,
+ "shuffle": true,
+ "skip": true,
+ "timeout": true,
+ "trace": true,
+ "v": true,
+}
+
+var passAnalyzersToVet = map[string]bool{
+ "asmdecl": true,
+ "assign": true,
+ "atomic": true,
+ "bool": true,
+ "bools": true,
+ "buildtag": true,
+ "buildtags": true,
+ "cgocall": true,
+ "composites": true,
+ "copylocks": true,
+ "directive": true,
+ "errorsas": true,
+ "framepointer": true,
+ "httpresponse": true,
+ "ifaceassert": true,
+ "loopclosure": true,
+ "lostcancel": true,
+ "methods": true,
+ "nilfunc": true,
+ "printf": true,
+ "rangeloops": true,
+ "shift": true,
+ "sigchanyzer": true,
+ "slog": true,
+ "stdmethods": true,
+ "stringintconv": true,
+ "structtag": true,
+ "testinggoroutine": true,
+ "tests": true,
+ "timeformat": true,
+ "unmarshal": true,
+ "unreachable": true,
+ "unsafeptr": true,
+ "unusedresult": true,
+}
diff --git a/src/cmd/go/internal/test/flagdefs_test.go b/src/cmd/go/internal/test/flagdefs_test.go
new file mode 100644
index 0000000..5461b2d
--- /dev/null
+++ b/src/cmd/go/internal/test/flagdefs_test.go
@@ -0,0 +1,76 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/test/internal/genflags"
+ "internal/testenv"
+ "maps"
+ "os"
+ "testing"
+)
+
+func TestMain(m *testing.M) {
+ cfg.SetGOROOT(testenv.GOROOT(nil), false)
+ os.Exit(m.Run())
+}
+
+func TestPassFlagToTest(t *testing.T) {
+ wantNames := genflags.ShortTestFlags()
+
+ missing := map[string]bool{}
+ for _, name := range wantNames {
+ if !passFlagToTest[name] {
+ missing[name] = true
+ }
+ }
+ if len(missing) > 0 {
+ t.Errorf("passFlagToTest is missing entries: %v", missing)
+ }
+
+ extra := maps.Clone(passFlagToTest)
+ for _, name := range wantNames {
+ delete(extra, name)
+ }
+ if len(extra) > 0 {
+ t.Errorf("passFlagToTest contains extra entries: %v", extra)
+ }
+
+ if t.Failed() {
+ t.Logf("To regenerate:\n\tgo generate cmd/go/internal/test")
+ }
+}
+
+func TestPassAnalyzersToVet(t *testing.T) {
+ testenv.MustHaveGoBuild(t) // runs 'go tool vet -flags'
+
+ wantNames, err := genflags.VetAnalyzers()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ missing := map[string]bool{}
+ for _, name := range wantNames {
+ if !passAnalyzersToVet[name] {
+ missing[name] = true
+ }
+ }
+ if len(missing) > 0 {
+ t.Errorf("passAnalyzersToVet is missing entries: %v", missing)
+ }
+
+ extra := maps.Clone(passAnalyzersToVet)
+ for _, name := range wantNames {
+ delete(extra, name)
+ }
+ if len(extra) > 0 {
+ t.Errorf("passFlagToTest contains extra entries: %v", extra)
+ }
+
+ if t.Failed() {
+ t.Logf("To regenerate:\n\tgo generate cmd/go/internal/test")
+ }
+}
diff --git a/src/cmd/go/internal/test/genflags.go b/src/cmd/go/internal/test/genflags.go
new file mode 100644
index 0000000..bb5ceb6
--- /dev/null
+++ b/src/cmd/go/internal/test/genflags.go
@@ -0,0 +1,84 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+import (
+ "bytes"
+ "log"
+ "os"
+ "os/exec"
+ "text/template"
+
+ "cmd/go/internal/test/internal/genflags"
+)
+
+func main() {
+ if err := regenerate(); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func regenerate() error {
+ vetAnalyzers, err := genflags.VetAnalyzers()
+ if err != nil {
+ return err
+ }
+
+ t := template.Must(template.New("fileTemplate").Parse(fileTemplate))
+ tData := map[string][]string{
+ "testFlags": genflags.ShortTestFlags(),
+ "vetAnalyzers": vetAnalyzers,
+ }
+ buf := bytes.NewBuffer(nil)
+ if err := t.Execute(buf, tData); err != nil {
+ return err
+ }
+
+ f, err := os.Create("flagdefs.go")
+ if err != nil {
+ return err
+ }
+
+ cmd := exec.Command("gofmt")
+ cmd.Stdin = buf
+ cmd.Stdout = f
+ cmd.Stderr = os.Stderr
+ cmdErr := cmd.Run()
+
+ if err := f.Close(); err != nil {
+ return err
+ }
+ if cmdErr != nil {
+ os.Remove(f.Name())
+ return cmdErr
+ }
+
+ return nil
+}
+
+const fileTemplate = `// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by genflags.go — DO NOT EDIT.
+
+package test
+
+// passFlagToTest contains the flags that should be forwarded to
+// the test binary with the prefix "test.".
+var passFlagToTest = map[string]bool {
+{{- range .testFlags}}
+ "{{.}}": true,
+{{- end }}
+}
+
+var passAnalyzersToVet = map[string]bool {
+{{- range .vetAnalyzers}}
+ "{{.}}": true,
+{{- end }}
+}
+`
diff --git a/src/cmd/go/internal/test/internal/genflags/testflag.go b/src/cmd/go/internal/test/internal/genflags/testflag.go
new file mode 100644
index 0000000..712428d
--- /dev/null
+++ b/src/cmd/go/internal/test/internal/genflags/testflag.go
@@ -0,0 +1,35 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genflags
+
+import (
+ "flag"
+ "strings"
+ "testing"
+)
+
+// ShortTestFlags returns the set of "-test." flag shorthand names that end
+// users may pass to 'go test'.
+func ShortTestFlags() []string {
+ testing.Init()
+
+ var names []string
+ flag.VisitAll(func(f *flag.Flag) {
+ name, found := strings.CutPrefix(f.Name, "test.")
+ if !found {
+ return
+ }
+
+ switch name {
+ case "testlogfile", "paniconexit0", "fuzzcachedir", "fuzzworker", "gocoverdir":
+ // These flags are only for use by cmd/go.
+ default:
+ names = append(names, name)
+ }
+ })
+
+ return names
+}
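A hypothetical caller of ShortTestFlags; the exact set of names it prints depends on which "-test.*" flags the testing package registers in the Go version at hand.

    package genflags

    import "fmt"

    // printShortTestFlags (hypothetical): ShortTestFlags calls testing.Init, so
    // the names come from the testing package's registered flags, minus the
    // cmd/go-only ones filtered above.
    func printShortTestFlags() {
        for _, name := range ShortTestFlags() {
            fmt.Println(name) // e.g. "bench", "run", "v"
        }
    }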
diff --git a/src/cmd/go/internal/test/internal/genflags/vetflag.go b/src/cmd/go/internal/test/internal/genflags/vetflag.go
new file mode 100644
index 0000000..1448811
--- /dev/null
+++ b/src/cmd/go/internal/test/internal/genflags/vetflag.go
@@ -0,0 +1,68 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genflags
+
+import (
+ "bytes"
+ "cmd/go/internal/base"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "regexp"
+ "sort"
+)
+
+// VetAnalyzers computes analyzers and their aliases supported by vet.
+func VetAnalyzers() ([]string, error) {
+ // get supported vet flag information
+ tool := base.Tool("vet")
+ vetcmd := exec.Command(tool, "-flags")
+ out := new(bytes.Buffer)
+ vetcmd.Stdout = out
+ if err := vetcmd.Run(); err != nil {
+ return nil, fmt.Errorf("go vet: can't execute %s -flags: %v\n", tool, err)
+ }
+ var analysisFlags []struct {
+ Name string
+ Bool bool
+ Usage string
+ }
+ if err := json.Unmarshal(out.Bytes(), &analysisFlags); err != nil {
+ return nil, fmt.Errorf("go vet: can't unmarshal JSON from %s -flags: %v", tool, err)
+ }
+
+ // parse the flags to figure out which ones stand for analyses
+ analyzerSet := make(map[string]bool)
+ rEnable := regexp.MustCompile("^enable .+ analysis$")
+ for _, flag := range analysisFlags {
+ if rEnable.MatchString(flag.Usage) {
+ analyzerSet[flag.Name] = true
+ }
+ }
+
+ rDeprecated := regexp.MustCompile("^deprecated alias for -(?P<analyzer>(.+))$")
+ // Returns the original value matched by rDeprecated on input value.
+ // If there is no match, "" is returned.
+ originalValue := func(value string) string {
+ match := rDeprecated.FindStringSubmatch(value)
+ if len(match) < 2 {
+ return ""
+ }
+ return match[1]
+ }
+ // extract deprecated aliases for existing analyses
+ for _, flag := range analysisFlags {
+ if o := originalValue(flag.Usage); analyzerSet[o] {
+ analyzerSet[flag.Name] = true
+ }
+ }
+
+ var analyzers []string
+ for a := range analyzerSet {
+ analyzers = append(analyzers, a)
+ }
+ sort.Strings(analyzers)
+ return analyzers, nil
+}
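To make the alias handling concrete, a small illustrative sketch of the deprecated-alias regexp used above. The usage string here is invented, not captured from "go tool vet -flags".

    package genflags

    import (
        "fmt"
        "regexp"
    )

    // aliasSketch (hypothetical): extract the target analyzer name from a
    // flag's usage text; the alias is kept only if that target is itself an
    // analyzer flag.
    func aliasSketch() {
        rDeprecated := regexp.MustCompile("^deprecated alias for -(?P<analyzer>(.+))$")
        m := rDeprecated.FindStringSubmatch("deprecated alias for -bools")
        fmt.Println(m[1]) // "bools"
    }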
diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go
new file mode 100644
index 0000000..3bce026
--- /dev/null
+++ b/src/cmd/go/internal/test/test.go
@@ -0,0 +1,1942 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "internal/platform"
+ "io"
+ "io/fs"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "slices"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cache"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+ "cmd/go/internal/lockedfile"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/search"
+ "cmd/go/internal/str"
+ "cmd/go/internal/trace"
+ "cmd/go/internal/work"
+ "cmd/internal/test2json"
+
+ "golang.org/x/mod/module"
+)
+
+// Break init loop.
+func init() {
+ CmdTest.Run = runTest
+}
+
+const testUsage = "go test [build/test flags] [packages] [build/test flags & test binary flags]"
+
+var CmdTest = &base.Command{
+ CustomFlags: true,
+ UsageLine: testUsage,
+ Short: "test packages",
+ Long: `
+'Go test' automates testing the packages named by the import paths.
+It prints a summary of the test results in the format:
+
+ ok archive/tar 0.011s
+ FAIL archive/zip 0.022s
+ ok compress/gzip 0.033s
+ ...
+
+followed by detailed output for each failed package.
+
+'Go test' recompiles each package along with any files with names matching
+the file pattern "*_test.go".
+These additional files can contain test functions, benchmark functions, fuzz
+tests and example functions. See 'go help testfunc' for more.
+Each listed package causes the execution of a separate test binary.
+Files whose names begin with "_" (including "_test.go") or "." are ignored.
+
+Test files that declare a package with the suffix "_test" will be compiled as a
+separate package, and then linked and run with the main test binary.
+
+The go tool will ignore a directory named "testdata", making it available
+to hold ancillary data needed by the tests.
+
+As part of building a test binary, go test runs go vet on the package
+and its test source files to identify significant problems. If go vet
+finds any problems, go test reports those and does not run the test
+binary. Only a high-confidence subset of the default go vet checks is
+used. That subset is: atomic, bool, buildtags, directive, errorsas,
+ifaceassert, nilfunc, printf, and stringintconv. You can see
+the documentation for these and other vet tests via "go doc cmd/vet".
+To disable the running of go vet, use the -vet=off flag. To run all
+checks, use the -vet=all flag.
+
+All test output and summary lines are printed to the go command's
+standard output, even if the test printed them to its own standard
+error. (The go command's standard error is reserved for printing
+errors building the tests.)
+
+The go command places $GOROOT/bin at the beginning of $PATH
+in the test's environment, so that tests that execute
+'go' commands use the same 'go' as the parent 'go test' command.
+
+Go test runs in two different modes:
+
+The first, called local directory mode, occurs when go test is
+invoked with no package arguments (for example, 'go test' or 'go
+test -v'). In this mode, go test compiles the package sources and
+tests found in the current directory and then runs the resulting
+test binary. In this mode, caching (discussed below) is disabled.
+After the package test finishes, go test prints a summary line
+showing the test status ('ok' or 'FAIL'), package name, and elapsed
+time.
+
+The second, called package list mode, occurs when go test is invoked
+with explicit package arguments (for example 'go test math', 'go
+test ./...', and even 'go test .'). In this mode, go test compiles
+and tests each of the packages listed on the command line. If a
+package test passes, go test prints only the final 'ok' summary
+line. If a package test fails, go test prints the full test output.
+If invoked with the -bench or -v flag, go test prints the full
+output even for passing package tests, in order to display the
+requested benchmark results or verbose logging. After the package
+tests for all of the listed packages finish, and their output is
+printed, go test prints a final 'FAIL' status if any package test
+has failed.
+
+In package list mode only, go test caches successful package test
+results to avoid unnecessary repeated running of tests. When the
+result of a test can be recovered from the cache, go test will
+redisplay the previous output instead of running the test binary
+again. When this happens, go test prints '(cached)' in place of the
+elapsed time in the summary line.
+
+The rule for a match in the cache is that the run involves the same
+test binary and the flags on the command line come entirely from a
+restricted set of 'cacheable' test flags, defined as -benchtime, -cpu,
+-list, -parallel, -run, -short, -timeout, -failfast, and -v.
+If a run of go test has any test or non-test flags outside this set,
+the result is not cached. To disable test caching, use any test flag
+or argument other than the cacheable flags. The idiomatic way to disable
+test caching explicitly is to use -count=1. Tests that open files within
+the package's source root (usually $GOPATH) or that consult environment
+variables only match future runs in which the files and environment
+variables are unchanged. A cached test result is treated as executing
+in no time at all, so a successful package test result will be cached and
+reused regardless of -timeout setting.
+
+In addition to the build flags, the flags handled by 'go test' itself are:
+
+ -args
+ Pass the remainder of the command line (everything after -args)
+ to the test binary, uninterpreted and unchanged.
+ Because this flag consumes the remainder of the command line,
+ the package list (if present) must appear before this flag.
+
+ -c
+ Compile the test binary to pkg.test in the current directory but do not run it
+ (where pkg is the last element of the package's import path).
+ The file name or target directory can be changed with the -o flag.
+
+ -exec xprog
+ Run the test binary using xprog. The behavior is the same as
+ in 'go run'. See 'go help run' for details.
+
+ -json
+ Convert test output to JSON suitable for automated processing.
+ See 'go doc test2json' for the encoding details.
+
+ -o file
+ Compile the test binary to the named file.
+ The test still runs (unless -c or -i is specified).
+ If file ends in a slash or names an existing directory,
+ the test is written to pkg.test in that directory.
+
+The test binary also accepts flags that control execution of the test; these
+flags are also accessible by 'go test'. See 'go help testflag' for details.
+
+For more about build flags, see 'go help build'.
+For more about specifying packages, see 'go help packages'.
+
+See also: go build, go vet.
+`,
+}
+
+var HelpTestflag = &base.Command{
+ UsageLine: "testflag",
+ Short: "testing flags",
+ Long: `
+The 'go test' command takes both flags that apply to 'go test' itself
+and flags that apply to the resulting test binary.
+
+Several of the flags control profiling and write an execution profile
+suitable for "go tool pprof"; run "go tool pprof -h" for more
+information. The --alloc_space, --alloc_objects, and --show_bytes
+options of pprof control how the information is presented.
+
+The following flags are recognized by the 'go test' command and
+control the execution of any test:
+
+ -bench regexp
+ Run only those benchmarks matching a regular expression.
+ By default, no benchmarks are run.
+ To run all benchmarks, use '-bench .' or '-bench=.'.
+ The regular expression is split by unbracketed slash (/)
+ characters into a sequence of regular expressions, and each
+ part of a benchmark's identifier must match the corresponding
+ element in the sequence, if any. Possible parents of matches
+ are run with b.N=1 to identify sub-benchmarks. For example,
+ given -bench=X/Y, top-level benchmarks matching X are run
+ with b.N=1 to find any sub-benchmarks matching Y, which are
+ then run in full.
+
+ -benchtime t
+ Run enough iterations of each benchmark to take t, specified
+ as a time.Duration (for example, -benchtime 1h30s).
+ The default is 1 second (1s).
+ The special syntax Nx means to run the benchmark N times
+ (for example, -benchtime 100x).
+
+ -count n
+ Run each test, benchmark, and fuzz seed n times (default 1).
+ If -cpu is set, run n times for each GOMAXPROCS value.
+ Examples are always run once. -count does not apply to
+ fuzz tests matched by -fuzz.
+
+ -cover
+ Enable coverage analysis.
+ Note that because coverage works by annotating the source
+ code before compilation, compilation and test failures with
+ coverage enabled may report line numbers that don't correspond
+ to the original sources.
+
+ -covermode set,count,atomic
+ Set the mode for coverage analysis for the package[s]
+ being tested. The default is "set" unless -race is enabled,
+ in which case it is "atomic".
+ The values:
+ set: bool: does this statement run?
+ count: int: how many times does this statement run?
+ atomic: int: count, but correct in multithreaded tests;
+ significantly more expensive.
+ Sets -cover.
+
+ -coverpkg pattern1,pattern2,pattern3
+ Apply coverage analysis in each test to packages matching the patterns.
+ The default is for each test to analyze only the package being tested.
+ See 'go help packages' for a description of package patterns.
+ Sets -cover.
+
+ -cpu 1,2,4
+ Specify a list of GOMAXPROCS values for which the tests, benchmarks or
+ fuzz tests should be executed. The default is the current value
+ of GOMAXPROCS. -cpu does not apply to fuzz tests matched by -fuzz.
+
+ -failfast
+ Do not start new tests after the first test failure.
+
+ -fullpath
+ Show full file names in the error messages.
+
+ -fuzz regexp
+ Run the fuzz test matching the regular expression. When specified,
+ the command line argument must match exactly one package within the
+ main module, and regexp must match exactly one fuzz test within
+ that package. Fuzzing will occur after tests, benchmarks, seed corpora
+ of other fuzz tests, and examples have completed. See the Fuzzing
+ section of the testing package documentation for details.
+
+ -fuzztime t
+ Run enough iterations of the fuzz target during fuzzing to take t,
+ specified as a time.Duration (for example, -fuzztime 1h30s).
+ The default is to run forever.
+ The special syntax Nx means to run the fuzz target N times
+ (for example, -fuzztime 1000x).
+
+ -fuzzminimizetime t
+ Run enough iterations of the fuzz target during each minimization
+ attempt to take t, specified as a time.Duration (for example,
+ -fuzzminimizetime 30s).
+ The default is 60s.
+ The special syntax Nx means to run the fuzz target N times
+ (for example, -fuzzminimizetime 100x).
+
+ -json
+ Log verbose output and test results in JSON. This presents the
+ same information as the -v flag in a machine-readable format.
+
+ -list regexp
+ List tests, benchmarks, fuzz tests, or examples matching the regular
+ expression. No tests, benchmarks, fuzz tests, or examples will be run.
+ This will only list top-level tests. No subtest or subbenchmarks will be
+ shown.
+
+ -parallel n
+ Allow parallel execution of test functions that call t.Parallel, and
+ fuzz targets that call t.Parallel when running the seed corpus.
+ The value of this flag is the maximum number of tests to run
+ simultaneously.
+ While fuzzing, the value of this flag is the maximum number of
+ subprocesses that may call the fuzz function simultaneously, regardless of
+ whether T.Parallel is called.
+ By default, -parallel is set to the value of GOMAXPROCS.
+ Setting -parallel to values higher than GOMAXPROCS may cause degraded
+ performance due to CPU contention, especially when fuzzing.
+ Note that -parallel only applies within a single test binary.
+ The 'go test' command may run tests for different packages
+ in parallel as well, according to the setting of the -p flag
+ (see 'go help build').
+
+ -run regexp
+ Run only those tests, examples, and fuzz tests matching the regular
+ expression. For tests, the regular expression is split by unbracketed
+ slash (/) characters into a sequence of regular expressions, and each
+ part of a test's identifier must match the corresponding element in
+ the sequence, if any. Note that possible parents of matches are
+ run too, so that -run=X/Y matches and runs and reports the result
+ of all tests matching X, even those without sub-tests matching Y,
+ because it must run them to look for those sub-tests.
+ See also -skip.
+
+ -short
+ Tell long-running tests to shorten their run time.
+ It is off by default but set during all.bash so that installing
+ the Go tree can run a sanity check but not spend time running
+ exhaustive tests.
+
+ -shuffle off,on,N
+ Randomize the execution order of tests and benchmarks.
+ It is off by default. If -shuffle is set to on, then it will seed
+ the randomizer using the system clock. If -shuffle is set to an
+ integer N, then N will be used as the seed value. In both cases,
+ the seed will be reported for reproducibility.
+
+ -skip regexp
+ Run only those tests, examples, fuzz tests, and benchmarks that
+ do not match the regular expression. Like for -run and -bench,
+ for tests and benchmarks, the regular expression is split by unbracketed
+ slash (/) characters into a sequence of regular expressions, and each
+ part of a test's identifier must match the corresponding element in
+ the sequence, if any.
+
+ -timeout d
+ If a test binary runs longer than duration d, panic.
+ If d is 0, the timeout is disabled.
+ The default is 10 minutes (10m).
+
+ -v
+ Verbose output: log all tests as they are run. Also print all
+ text from Log and Logf calls even if the test succeeds.
+
+ -vet list
+ Configure the invocation of "go vet" during "go test"
+ to use the comma-separated list of vet checks.
+ If list is empty, "go test" runs "go vet" with a curated list of
+ checks believed to be always worth addressing.
+ If list is "off", "go test" does not run "go vet" at all.
+
+The following flags are also recognized by 'go test' and can be used to
+profile the tests during execution:
+
+ -benchmem
+ Print memory allocation statistics for benchmarks.
+
+ -blockprofile block.out
+ Write a goroutine blocking profile to the specified file
+ when all tests are complete.
+ Writes test binary as -c would.
+
+ -blockprofilerate n
+ Control the detail provided in goroutine blocking profiles by
+ calling runtime.SetBlockProfileRate with n.
+ See 'go doc runtime.SetBlockProfileRate'.
+ The profiler aims to sample, on average, one blocking event every
+ n nanoseconds the program spends blocked. By default,
+ if -test.blockprofile is set without this flag, all blocking events
+ are recorded, equivalent to -test.blockprofilerate=1.
+
+ -coverprofile cover.out
+ Write a coverage profile to the file after all tests have passed.
+ Sets -cover.
+
+ -cpuprofile cpu.out
+ Write a CPU profile to the specified file before exiting.
+ Writes test binary as -c would.
+
+ -memprofile mem.out
+ Write an allocation profile to the file after all tests have passed.
+ Writes test binary as -c would.
+
+ -memprofilerate n
+ Enable more precise (and expensive) memory allocation profiles by
+ setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'.
+ To profile all memory allocations, use -test.memprofilerate=1.
+
+ -mutexprofile mutex.out
+ Write a mutex contention profile to the specified file
+ when all tests are complete.
+ Writes test binary as -c would.
+
+ -mutexprofilefraction n
+ Sample 1 in n stack traces of goroutines holding a
+ contended mutex.
+
+ -outputdir directory
+ Place output files from profiling in the specified directory,
+ by default the directory in which "go test" is running.
+
+ -trace trace.out
+ Write an execution trace to the specified file before exiting.
+
+Each of these flags is also recognized with an optional 'test.' prefix,
+as in -test.v. When invoking the generated test binary (the result of
+'go test -c') directly, however, the prefix is mandatory.
+
+The 'go test' command rewrites or removes recognized flags,
+as appropriate, both before and after the optional package list,
+before invoking the test binary.
+
+For instance, the command
+
+ go test -v -myflag testdata -cpuprofile=prof.out -x
+
+will compile the test binary and then run it as
+
+ pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
+
+(The -x flag is removed because it applies only to the go command's
+execution, not to the test itself.)
+
+The test flags that generate profiles (other than for coverage) also
+leave the test binary in pkg.test for use when analyzing the profiles.
+
+When 'go test' runs a test binary, it does so from within the
+corresponding package's source code directory. Depending on the test,
+it may be necessary to do the same when invoking a generated test
+binary directly. Because that directory may be located within the
+module cache, which may be read-only and is verified by checksums, the
+test must not write to it or any other directory within the module
+unless explicitly requested by the user (such as with the -fuzz flag,
+which writes failures to testdata/fuzz).
+
+The command-line package list, if present, must appear before any
+flag not known to the go test command. Continuing the example above,
+the package list would have to appear before -myflag, but could appear
+on either side of -v.
+
+When 'go test' runs in package list mode, 'go test' caches successful
+package test results to avoid unnecessary repeated running of tests. To
+disable test caching, use any test flag or argument other than the
+cacheable flags. The idiomatic way to disable test caching explicitly
+is to use -count=1.
+
+To keep an argument for a test binary from being interpreted as a
+known flag or a package name, use -args (see 'go help test') which
+passes the remainder of the command line through to the test binary
+uninterpreted and unaltered.
+
+For instance, the command
+
+ go test -v -args -x -v
+
+will compile the test binary and then run it as
+
+ pkg.test -test.v -x -v
+
+Similarly,
+
+ go test -args math
+
+will compile the test binary and then run it as
+
+ pkg.test math
+
+In the first example, the -x and the second -v are passed through to the
+test binary unchanged and with no effect on the go command itself.
+In the second example, the argument math is passed through to the test
+binary, instead of being interpreted as the package list.
+`,
+}
+
+var HelpTestfunc = &base.Command{
+ UsageLine: "testfunc",
+ Short: "testing functions",
+ Long: `
+The 'go test' command expects to find test, benchmark, and example functions
+in the "*_test.go" files corresponding to the package under test.
+
+A test function is one named TestXxx (where Xxx does not start with a
+lower case letter) and should have the signature,
+
+ func TestXxx(t *testing.T) { ... }
+
+A benchmark function is one named BenchmarkXxx and should have the signature,
+
+ func BenchmarkXxx(b *testing.B) { ... }
+
+A fuzz test is one named FuzzXxx and should have the signature,
+
+ func FuzzXxx(f *testing.F) { ... }
+
+An example function is similar to a test function but, instead of using
+*testing.T to report success or failure, prints output to os.Stdout.
+If the last comment in the function starts with "Output:" then the output
+is compared exactly against the comment (see examples below). If the last
+comment begins with "Unordered output:" then the output is compared to the
+comment, but the order of the lines is ignored. An example with no such
+comment is compiled but not executed. An example with no text after
+"Output:" is compiled, executed, and expected to produce no output.
+
+Godoc displays the body of ExampleXxx to demonstrate the use
+of the function, constant, or variable Xxx. An example of a method M with
+receiver type T or *T is named ExampleT_M. There may be multiple examples
+for a given function, constant, or variable, distinguished by a trailing _xxx,
+where xxx is a suffix not beginning with an upper case letter.
+
+Here is an example of an example:
+
+ func ExamplePrintln() {
+ Println("The output of\nthis example.")
+ // Output: The output of
+ // this example.
+ }
+
+Here is another example where the ordering of the output is ignored:
+
+ func ExamplePerm() {
+ for _, value := range Perm(4) {
+ fmt.Println(value)
+ }
+
+ // Unordered output: 4
+ // 2
+ // 1
+ // 3
+ // 0
+ }
+
+The entire test file is presented as the example when it contains a single
+example function, at least one other function, type, variable, or constant
+declaration, and no tests, benchmarks, or fuzz tests.
+
+See the documentation of the testing package for more information.
+`,
+}
+
+var (
+ testBench string // -bench flag
+ testC bool // -c flag
+ testCoverPkgs []*load.Package // -coverpkg flag
+ testCoverProfile string // -coverprofile flag
+ testFuzz string // -fuzz flag
+ testJSON bool // -json flag
+ testList string // -list flag
+ testO string // -o flag
+ testOutputDir outputdirFlag // -outputdir flag
+ testShuffle shuffleFlag // -shuffle flag
+ testTimeout time.Duration // -timeout flag
+ testV testVFlag // -v flag
+ testVet = vetFlag{flags: defaultVetFlags} // -vet flag
+)
+
+type testVFlag struct {
+ on bool // -v is set in some form
+ json bool // -v=test2json is set, to make output better for test2json
+}
+
+func (*testVFlag) IsBoolFlag() bool { return true }
+
+func (f *testVFlag) Set(arg string) error {
+ if v, err := strconv.ParseBool(arg); err == nil {
+ f.on = v
+ f.json = false
+ return nil
+ }
+ if arg == "test2json" {
+ f.on = true
+ f.json = true
+ return nil
+ }
+ return fmt.Errorf("invalid flag -test.v=%s", arg)
+}
+
+func (f *testVFlag) String() string {
+ if f.json {
+ return "test2json"
+ }
+ if f.on {
+ return "true"
+ }
+ return "false"
+}
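A hypothetical sketch of how testVFlag.Set handles the accepted -v values; the function name is illustrative only.

    package test

    func vFlagSketch() {
        var f testVFlag
        _ = f.Set("true")      // f.on == true, f.json == false
        _ = f.Set("test2json") // f.on == true, f.json == true
        err := f.Set("maybe")  // rejected: "invalid flag -test.v=maybe"
        _ = err
    }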
+
+var (
+ testArgs []string
+ pkgArgs []string
+ pkgs []*load.Package
+
+ testHelp bool // -help option passed to test via -args
+
+ testKillTimeout = 100 * 365 * 24 * time.Hour // backup alarm; defaults to about a century if no timeout is set
+ testWaitDelay time.Duration // how long to wait for output to close after a test binary exits; zero means unlimited
+ testCacheExpire time.Time // ignore cached test results before this time
+
+ testBlockProfile, testCPUProfile, testMemProfile, testMutexProfile, testTrace string // profiling flag that limits test to one package
+
+ testODir = false
+)
+
+// testProfile returns the name of an arbitrary single-package profiling flag
+// that is set, if any.
+func testProfile() string {
+ switch {
+ case testBlockProfile != "":
+ return "-blockprofile"
+ case testCPUProfile != "":
+ return "-cpuprofile"
+ case testMemProfile != "":
+ return "-memprofile"
+ case testMutexProfile != "":
+ return "-mutexprofile"
+ case testTrace != "":
+ return "-trace"
+ default:
+ return ""
+ }
+}
+
+// testNeedBinary reports whether the test needs to keep the binary around.
+func testNeedBinary() bool {
+ switch {
+ case testBlockProfile != "":
+ return true
+ case testCPUProfile != "":
+ return true
+ case testMemProfile != "":
+ return true
+ case testMutexProfile != "":
+ return true
+ case testO != "":
+ return true
+ default:
+ return false
+ }
+}
+
+// testShowPass reports whether the output for a passing test should be shown.
+func testShowPass() bool {
+ return testV.on || testList != "" || testHelp
+}
+
+var defaultVetFlags = []string{
+ // TODO(rsc): Decide which tests are enabled by default.
+ // See golang.org/issue/18085.
+ // "-asmdecl",
+ // "-assign",
+ "-atomic",
+ "-bool",
+ "-buildtags",
+ // "-cgocall",
+ // "-composites",
+ // "-copylocks",
+ "-directive",
+ "-errorsas",
+ // "-httpresponse",
+ "-ifaceassert",
+ // "-lostcancel",
+ // "-methods",
+ "-nilfunc",
+ "-printf",
+ // "-rangeloops",
+ // "-shift",
+ "-slog",
+ "-stringintconv",
+ // "-structtags",
+ // "-tests",
+ // "-unreachable",
+ // "-unsafeptr",
+ // "-unusedresult",
+}
+
+func runTest(ctx context.Context, cmd *base.Command, args []string) {
+ pkgArgs, testArgs = testFlags(args)
+ modload.InitWorkfile() // The test command does custom flag processing; initialize workspaces after that.
+
+ if cfg.DebugTrace != "" {
+ var close func() error
+ var err error
+ ctx, close, err = trace.Start(ctx, cfg.DebugTrace)
+ if err != nil {
+ base.Fatalf("failed to start trace: %v", err)
+ }
+ defer func() {
+ if err := close(); err != nil {
+ base.Fatalf("failed to stop trace: %v", err)
+ }
+ }()
+ }
+
+ ctx, span := trace.StartSpan(ctx, fmt.Sprint("Running ", cmd.Name(), " command"))
+ defer span.Done()
+
+ work.FindExecCmd() // initialize cached result
+
+ work.BuildInit()
+ work.VetFlags = testVet.flags
+ work.VetExplicit = testVet.explicit
+
+ pkgOpts := load.PackageOpts{ModResolveTests: true}
+ pkgs = load.PackagesAndErrors(ctx, pkgOpts, pkgArgs)
+ load.CheckPackageErrors(pkgs)
+ if len(pkgs) == 0 {
+ base.Fatalf("no packages to test")
+ }
+
+ if testFuzz != "" {
+ if !platform.FuzzSupported(cfg.Goos, cfg.Goarch) {
+ base.Fatalf("-fuzz flag is not supported on %s/%s", cfg.Goos, cfg.Goarch)
+ }
+ if len(pkgs) != 1 {
+ base.Fatalf("cannot use -fuzz flag with multiple packages")
+ }
+ if testCoverProfile != "" {
+ base.Fatalf("cannot use -coverprofile flag with -fuzz flag")
+ }
+ if profileFlag := testProfile(); profileFlag != "" {
+ base.Fatalf("cannot use %s flag with -fuzz flag", profileFlag)
+ }
+
+ // Reject the '-fuzz' flag if the package is outside the main module.
+ // Otherwise, if fuzzing identifies a failure it could corrupt checksums in
+ // the module cache (or permanently alter the behavior of std tests for all
+ // users) by writing the failing input to the package's testdata directory.
+ // (See https://golang.org/issue/48495 and test_fuzz_modcache.txt.)
+ mainMods := modload.MainModules
+ if m := pkgs[0].Module; m != nil && m.Path != "" {
+ if !mainMods.Contains(m.Path) {
+ base.Fatalf("cannot use -fuzz flag on package outside the main module")
+ }
+ } else if pkgs[0].Standard && modload.Enabled() {
+ // Because packages in 'std' and 'cmd' are part of the standard library,
+ // they are only treated as part of a module in 'go mod' subcommands and
+ // 'go get'. However, we still don't want to accidentally corrupt their
+ // testdata during fuzzing, nor do we want to fail with surprising errors
+ // if GOROOT isn't writable (as is often the case for Go toolchains
+ // installed through package managers).
+ //
+ // If the user is requesting to fuzz a standard-library package, ensure
+ // that they are in the same module as that package (just like when
+ // fuzzing any other package).
+ if strings.HasPrefix(pkgs[0].ImportPath, "cmd/") {
+ if !mainMods.Contains("cmd") || !mainMods.InGorootSrc(module.Version{Path: "cmd"}) {
+ base.Fatalf("cannot use -fuzz flag on package outside the main module")
+ }
+ } else {
+ if !mainMods.Contains("std") || !mainMods.InGorootSrc(module.Version{Path: "std"}) {
+ base.Fatalf("cannot use -fuzz flag on package outside the main module")
+ }
+ }
+ }
+ }
+ if testProfile() != "" && len(pkgs) != 1 {
+ base.Fatalf("cannot use %s flag with multiple packages", testProfile())
+ }
+
+ if testO != "" {
+ if strings.HasSuffix(testO, "/") || strings.HasSuffix(testO, string(os.PathSeparator)) {
+ testODir = true
+ } else if fi, err := os.Stat(testO); err == nil && fi.IsDir() {
+ testODir = true
+ }
+ }
+
+ if len(pkgs) > 1 && (testC || testO != "") && !base.IsNull(testO) {
+ if testO != "" && !testODir {
+ base.Fatalf("with multiple packages, -o must refer to a directory or %s", os.DevNull)
+ }
+
+ pkgsForBinary := map[string][]*load.Package{}
+
+ for _, p := range pkgs {
+ testBinary := testBinaryName(p)
+ pkgsForBinary[testBinary] = append(pkgsForBinary[testBinary], p)
+ }
+
+ for testBinary, pkgs := range pkgsForBinary {
+ if len(pkgs) > 1 {
+ var buf strings.Builder
+ for _, pkg := range pkgs {
+ buf.WriteString(pkg.ImportPath)
+ buf.WriteString("\n")
+ }
+
+ base.Errorf("cannot write test binary %s for multiple packages:\n%s", testBinary, buf.String())
+ }
+ }
+
+ base.ExitIfErrors()
+ }
+
+ initCoverProfile()
+ defer closeCoverProfile()
+
+ // If a test timeout is finite, set our kill timeout
+ // to that timeout plus one minute. This is a backup alarm in case
+ // the test wedges with a goroutine spinning and its background
+ // timer does not get a chance to fire.
+ // Don't set this if fuzzing, since it should be able to run
+ // indefinitely.
+ if testTimeout > 0 && testFuzz == "" {
+ // The WaitDelay for the test process depends on both the OS I/O and
+ // scheduling overhead and the amount of I/O generated by the test just
+ // before it exits. We set the minimum at 5 seconds to account for the OS
+ // overhead, and scale it up from there proportional to the overall test
+ // timeout on the assumption that the time to write and read a goroutine
+ // dump from a timed-out test process scales roughly with the overall
+ // running time of the test.
+ //
+ // This is probably too generous when the timeout is very long, but it seems
+ // better to hard-code a scale factor than to hard-code a constant delay.
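+ //
+ // For example, with the default -timeout of 10m, testTimeout/10 is 60s,
+ // so testWaitDelay becomes 60s.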
+ if wd := testTimeout / 10; wd < 5*time.Second {
+ testWaitDelay = 5 * time.Second
+ } else {
+ testWaitDelay = wd
+ }
+
+ // We expect the test binary to terminate itself (and dump stacks) after
+ // exactly testTimeout. We give it up to one WaitDelay or one minute,
+ // whichever is longer, to finish dumping stacks before we send it an
+ // external signal: if the process has a lot of goroutines, dumping stacks
+ // after the timeout can take a while.
+ //
+ // After the signal is delivered, the test process may have up to one
+ // additional WaitDelay to finish writing its output streams.
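+ //
+ // With the default 10m timeout, for example, testKillTimeout works out
+ // to 11m (10m plus the 60s WaitDelay computed above).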
+ if testWaitDelay < 1*time.Minute {
+ testKillTimeout = testTimeout + 1*time.Minute
+ } else {
+ testKillTimeout = testTimeout + testWaitDelay
+ }
+ }
+
+ // Read testcache expiration time, if present.
+ // (We implement go clean -testcache by writing an expiration date
+ // instead of searching out and deleting test result cache entries.)
+ if dir := cache.DefaultDir(); dir != "off" {
+ if data, _ := lockedfile.Read(filepath.Join(dir, "testexpire.txt")); len(data) > 0 && data[len(data)-1] == '\n' {
+ if t, err := strconv.ParseInt(string(data[:len(data)-1]), 10, 64); err == nil {
+ testCacheExpire = time.Unix(0, t)
+ }
+ }
+ }
+
+ b := work.NewBuilder("")
+ defer func() {
+ if err := b.Close(); err != nil {
+ base.Fatal(err)
+ }
+ }()
+
+ var builds, runs, prints []*work.Action
+
+ if cfg.BuildCoverPkg != nil {
+ match := make([]func(*load.Package) bool, len(cfg.BuildCoverPkg))
+ for i := range cfg.BuildCoverPkg {
+ match[i] = load.MatchPackage(cfg.BuildCoverPkg[i], base.Cwd())
+ }
+
+ // Select for coverage all dependencies matching the -coverpkg
+ // patterns.
+ plist := load.TestPackageList(ctx, pkgOpts, pkgs)
+ testCoverPkgs = load.SelectCoverPackages(plist, match, "test")
+ }
+
+ // Inform the compiler that it should instrument the binary at
+ // build-time when fuzzing is enabled.
+ if testFuzz != "" {
+ // Don't instrument packages which may affect coverage guidance but are
+ // unlikely to be useful. Most of these are used by the testing or
+ // internal/fuzz packages concurrently with fuzzing.
+ var skipInstrumentation = map[string]bool{
+ "context": true,
+ "internal/fuzz": true,
+ "reflect": true,
+ "runtime": true,
+ "sync": true,
+ "sync/atomic": true,
+ "syscall": true,
+ "testing": true,
+ "time": true,
+ }
+ for _, p := range load.TestPackageList(ctx, pkgOpts, pkgs) {
+ if !skipInstrumentation[p.ImportPath] {
+ p.Internal.FuzzInstrument = true
+ }
+ }
+ }
+
+ // Collect all the packages imported by the packages being tested.
+ allImports := make(map[*load.Package]bool)
+ for _, p := range pkgs {
+ if p.Error != nil && p.Error.IsImportCycle {
+ continue
+ }
+ for _, p1 := range p.Internal.Imports {
+ allImports[p1] = true
+ }
+ }
+
+ // Prepare build + run + print actions for all packages being tested.
+ for _, p := range pkgs {
+ // sync/atomic import is inserted by the cover tool if we're
+ // using atomic mode (and not compiling sync/atomic package itself).
+ // See #18486 and #57445.
+ if cfg.BuildCover && cfg.BuildCoverMode == "atomic" &&
+ p.ImportPath != "sync/atomic" {
+ load.EnsureImport(p, "sync/atomic")
+ }
+
+ buildTest, runTest, printTest, err := builderTest(b, ctx, pkgOpts, p, allImports[p])
+ if err != nil {
+ str := err.Error()
+ str = strings.TrimPrefix(str, "\n")
+ if p.ImportPath != "" {
+ base.Errorf("# %s\n%s", p.ImportPath, str)
+ } else {
+ base.Errorf("%s", str)
+ }
+ fmt.Printf("FAIL\t%s [setup failed]\n", p.ImportPath)
+ continue
+ }
+ builds = append(builds, buildTest)
+ runs = append(runs, runTest)
+ prints = append(prints, printTest)
+ }
+
+ // Order runs for coordinating start JSON prints.
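+ // Each runTestActor waits on prev, which is closed once the previous
+ // test has emitted its start event, and closes next to release the test
+ // after it. The initial channel is closed immediately so that the first
+ // test can start without waiting.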
+ ch := make(chan struct{})
+ close(ch)
+ for _, a := range runs {
+ if r, ok := a.Actor.(*runTestActor); ok {
+ r.prev = ch
+ ch = make(chan struct{})
+ r.next = ch
+ }
+ }
+
+ // Ultimately the goal is to print the output.
+ root := &work.Action{Mode: "go test", Actor: work.ActorFunc(printExitStatus), Deps: prints}
+
+ // Force the printing of results to happen in order,
+ // one at a time.
+ for i, a := range prints {
+ if i > 0 {
+ a.Deps = append(a.Deps, prints[i-1])
+ }
+ }
+
+ // Force benchmarks to run in serial.
+ if !testC && (testBench != "") {
+ // The first run must wait for all builds.
+ // Later runs must wait for the previous run's print.
+ for i, run := range runs {
+ if i == 0 {
+ run.Deps = append(run.Deps, builds...)
+ } else {
+ run.Deps = append(run.Deps, prints[i-1])
+ }
+ }
+ }
+
+ b.Do(ctx, root)
+}
+
+var windowsBadWords = []string{
+ "install",
+ "patch",
+ "setup",
+ "update",
+}
+
+func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package, imported bool) (buildAction, runAction, printAction *work.Action, err error) {
+ if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
+ build := b.CompileAction(work.ModeBuild, work.ModeBuild, p)
+ run := &work.Action{
+ Mode: "test run",
+ Actor: new(runTestActor),
+ Deps: []*work.Action{build},
+ Package: p,
+ IgnoreFail: true, // run (prepare output) even if build failed
+ }
+ addTestVet(b, p, run, nil)
+ print := &work.Action{
+ Mode: "test print",
+ Actor: work.ActorFunc(builderPrintTest),
+ Deps: []*work.Action{run},
+ Package: p,
+ IgnoreFail: true, // print even if test failed
+ }
+ return build, run, print, nil
+ }
+
+ // Build Package structs describing:
+ // pmain - pkg.test binary
+ // ptest - package + test files
+ // pxtest - package of external test files
+ var cover *load.TestCover
+ if cfg.BuildCover {
+ cover = &load.TestCover{
+ Mode: cfg.BuildCoverMode,
+ Local: cfg.BuildCoverPkg == nil,
+ Pkgs: testCoverPkgs,
+ Paths: cfg.BuildCoverPkg,
+ }
+ }
+ pmain, ptest, pxtest, err := load.TestPackagesFor(ctx, pkgOpts, p, cover)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ // If imported is true then this package is imported by some
+ // package being tested. Make building the test version of the
+ // package depend on building the non-test version, so that we
+ // only report build errors once. Issue #44624.
+ if imported && ptest != p {
+ buildTest := b.CompileAction(work.ModeBuild, work.ModeBuild, ptest)
+ buildP := b.CompileAction(work.ModeBuild, work.ModeBuild, p)
+ buildTest.Deps = append(buildTest.Deps, buildP)
+ }
+
+ testBinary := testBinaryName(p)
+
+ testDir := b.NewObjdir()
+ if err := b.Mkdir(testDir); err != nil {
+ return nil, nil, nil, err
+ }
+
+ pmain.Dir = testDir
+ pmain.Internal.OmitDebug = !testC && !testNeedBinary()
+
+ if !cfg.BuildN {
+ // Write the generated _testmain.go file for the test main package
+ // (its source was produced by load.TestPackagesFor above).
+ if err := os.WriteFile(testDir+"_testmain.go", *pmain.Internal.TestmainGo, 0666); err != nil {
+ return nil, nil, nil, err
+ }
+ }
+
+ // Set compile objdir to testDir we've already created,
+ // so that the default file path stripping applies to _testmain.go.
+ b.CompileAction(work.ModeBuild, work.ModeBuild, pmain).Objdir = testDir
+
+ a := b.LinkAction(work.ModeBuild, work.ModeBuild, pmain)
+ a.Target = testDir + testBinary + cfg.ExeSuffix
+ if cfg.Goos == "windows" {
+ // There are many reserved words on Windows that,
+ // if used in the name of an executable, cause Windows
+ // to try to ask for extra permissions.
+ // The word list includes setup, install, update, and patch,
+ // but it does not appear to be defined anywhere.
+ // We have run into this trying to run the
+ // go.codereview/patch tests.
+ // For package names containing those words, use test.test.exe
+ // instead of pkgname.test.exe.
+ // Note that this file name is only used in the Go command's
+ // temporary directory. If the -c or other flags are
+ // given, the code below will still use pkgname.test.exe.
+ // There are two user-visible effects of this change.
+ // First, you can actually run 'go test' in directories that
+ // have names that Windows thinks are installer-like,
+ // without getting a dialog box asking for more permissions.
+ // Second, in the Windows process listing during go test,
+ // the test shows up as test.test.exe, not pkgname.test.exe.
+ // That second one is a drawback, but it seems a small
+ // price to pay for the test running at all.
+ // If maintaining the list of bad words is too onerous,
+ // we could just do this always on Windows.
+ for _, bad := range windowsBadWords {
+ if strings.Contains(testBinary, bad) {
+ a.Target = testDir + "test.test" + cfg.ExeSuffix
+ break
+ }
+ }
+ }
+ buildAction = a
+ var installAction, cleanAction *work.Action
+ if testC || testNeedBinary() {
+ // -c or profiling flag: create action to copy binary to ./test.out.
+ target := filepath.Join(base.Cwd(), testBinary+cfg.ExeSuffix)
+ isNull := false
+
+ if testO != "" {
+ target = testO
+
+ if testODir {
+ if filepath.IsAbs(target) {
+ target = filepath.Join(target, testBinary+cfg.ExeSuffix)
+ } else {
+ target = filepath.Join(base.Cwd(), target, testBinary+cfg.ExeSuffix)
+ }
+ } else {
+ if base.IsNull(target) {
+ isNull = true
+ } else if !filepath.IsAbs(target) {
+ target = filepath.Join(base.Cwd(), target)
+ }
+ }
+ }
+
+ if isNull {
+ runAction = buildAction
+ } else {
+ pmain.Target = target
+ installAction = &work.Action{
+ Mode: "test build",
+ Actor: work.ActorFunc(work.BuildInstallFunc),
+ Deps: []*work.Action{buildAction},
+ Package: pmain,
+ Target: target,
+ }
+ runAction = installAction // make sure runAction != nil even if not running test
+ }
+ }
+ var vetRunAction *work.Action
+ if testC {
+ printAction = &work.Action{Mode: "test print (nop)", Package: p, Deps: []*work.Action{runAction}} // nop
+ vetRunAction = printAction
+ } else {
+ // run test
+ r := new(runTestActor)
+ runAction = &work.Action{
+ Mode: "test run",
+ Actor: r,
+ Deps: []*work.Action{buildAction},
+ Package: p,
+ IgnoreFail: true, // run (prepare output) even if build failed
+ TryCache: r.c.tryCache,
+ Objdir: testDir,
+ }
+ vetRunAction = runAction
+ cleanAction = &work.Action{
+ Mode: "test clean",
+ Actor: work.ActorFunc(builderCleanTest),
+ Deps: []*work.Action{runAction},
+ Package: p,
+ IgnoreFail: true, // clean even if test failed
+ Objdir: testDir,
+ }
+ printAction = &work.Action{
+ Mode: "test print",
+ Actor: work.ActorFunc(builderPrintTest),
+ Deps: []*work.Action{cleanAction},
+ Package: p,
+ IgnoreFail: true, // print even if test failed
+ }
+ }
+
+ if len(ptest.GoFiles)+len(ptest.CgoFiles) > 0 {
+ addTestVet(b, ptest, vetRunAction, installAction)
+ }
+ if pxtest != nil {
+ addTestVet(b, pxtest, vetRunAction, installAction)
+ }
+
+ if installAction != nil {
+ if runAction != installAction {
+ installAction.Deps = append(installAction.Deps, runAction)
+ }
+ if cleanAction != nil {
+ cleanAction.Deps = append(cleanAction.Deps, installAction)
+ }
+ }
+
+ return buildAction, runAction, printAction, nil
+}
+
+func addTestVet(b *work.Builder, p *load.Package, runAction, installAction *work.Action) {
+ if testVet.off {
+ return
+ }
+
+ vet := b.VetAction(work.ModeBuild, work.ModeBuild, p)
+ runAction.Deps = append(runAction.Deps, vet)
+ // Install will clean the build directory.
+ // Make sure vet runs first.
+ // The install ordering in b.VetAction does not apply here
+ // because we are using a custom installAction (created above).
+ if installAction != nil {
+ installAction.Deps = append(installAction.Deps, vet)
+ }
+}
+
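+ // Each message begins with a newline so that bytes.Contains matches it
+ // only at the start of a line; the [1:] slices used in runTestActor.Act
+ // match the same text when it appears at the very beginning of the output.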
+var noTestsToRun = []byte("\ntesting: warning: no tests to run\n")
+var noFuzzTestsToFuzz = []byte("\ntesting: warning: no fuzz tests to fuzz\n")
+var tooManyFuzzTestsToFuzz = []byte("\ntesting: warning: -fuzz matches more than one fuzz test, won't fuzz\n")
+
+// runTestActor is the actor for running a test.
+type runTestActor struct {
+ c runCache
+
+ // sequencing of json start messages, to preserve test order
+ prev <-chan struct{} // wait to start until prev is closed
+ next chan<- struct{} // close next once the next test can start.
+}
+
+// runCache is the cache for running a single test.
+type runCache struct {
+ disableCache bool // cache should be disabled for this run
+
+ buf *bytes.Buffer
+ id1 cache.ActionID
+ id2 cache.ActionID
+}
+
+// stdoutMu and lockedStdout provide a locked standard output
+// that guarantees never to interlace writes from multiple
+// goroutines, so that we can have multiple JSON streams writing
+// to a lockedStdout simultaneously and know that events will
+// still be intelligible.
+var stdoutMu sync.Mutex
+
+type lockedStdout struct{}
+
+func (lockedStdout) Write(b []byte) (int, error) {
+ stdoutMu.Lock()
+ defer stdoutMu.Unlock()
+ return os.Stdout.Write(b)
+}
+
+func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action) error {
+ // Wait for previous test to get started and print its first json line.
+ select {
+ case <-r.prev:
+ case <-base.Interrupted:
+ // We can't wait for the previous test action to complete: we don't start
+ // new actions after an interrupt, so if that action wasn't already running
+ // it might never happen. Instead, just don't log anything for this action.
+ base.SetExitStatus(1)
+ return nil
+ }
+
+ if a.Failed {
+ // We were unable to build the binary.
+ a.Failed = false
+ a.TestOutput = new(bytes.Buffer)
+ fmt.Fprintf(a.TestOutput, "FAIL\t%s [build failed]\n", a.Package.ImportPath)
+ base.SetExitStatus(1)
+
+ // release next test to start
+ close(r.next)
+ return nil
+ }
+
+ var stdout io.Writer = os.Stdout
+ var err error
+ if testJSON {
+ json := test2json.NewConverter(lockedStdout{}, a.Package.ImportPath, test2json.Timestamp)
+ defer func() {
+ json.Exited(err)
+ json.Close()
+ }()
+ stdout = json
+ }
+
+ // Release next test to start (test2json.NewConverter writes the start event).
+ close(r.next)
+
+ if p := a.Package; len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
+ fmt.Fprintf(stdout, "? \t%s\t[no test files]\n", p.ImportPath)
+ return nil
+ }
+
+ var buf bytes.Buffer
+ if len(pkgArgs) == 0 || testBench != "" || testFuzz != "" {
+ // Stream test output (no buffering) when no package has
+ // been given on the command line (implicit current directory)
+ // or when benchmarking or fuzzing.
+ // No change to stdout.
+ } else {
+ // If we're only running a single package under test or if parallelism is
+ // set to 1, and if we're displaying all output (testShowPass), we can
+ // hurry the output along, echoing it as soon as it comes in.
+ // We still have to copy to &buf for caching the result. This special
+ // case was introduced in Go 1.5 and is intentionally undocumented:
+ // the exact details of output buffering are up to the go command and
+ // subject to change. It would be nice to remove this special case
+ // entirely, but it is surely very helpful to see progress being made
+ // when tests are run on slow single-CPU ARM systems.
+ //
+ // If we're showing JSON output, then display output as soon as
+ // possible even when multiple tests are being run: the JSON output
+ // events are attributed to specific package tests, so interlacing them
+ // is OK.
+ if testShowPass() && (len(pkgs) == 1 || cfg.BuildP == 1) || testJSON {
+ // Write both to stdout and buf, for possible saving
+ // to cache, and for looking for the "no tests to run" message.
+ stdout = io.MultiWriter(stdout, &buf)
+ } else {
+ stdout = &buf
+ }
+ }
+
+ if r.c.buf == nil {
+ // We did not find a cached result using the link step action ID,
+ // so we ran the link step. Try again now with the link output
+ // content ID. The attempt using the action ID makes sure that
+ // if the link inputs don't change, we reuse the cached test
+ // result without even rerunning the linker. The attempt using
+ // the link output (test binary) content ID makes sure that if
+ // we have different link inputs but the same final binary,
+ // we still reuse the cached test result.
+ // c.saveOutput will store the result under both IDs.
+ r.c.tryCacheWithID(b, a, a.Deps[0].BuildContentID())
+ }
+ if r.c.buf != nil {
+ if stdout != &buf {
+ stdout.Write(r.c.buf.Bytes())
+ r.c.buf.Reset()
+ }
+ a.TestOutput = r.c.buf
+ return nil
+ }
+
+ execCmd := work.FindExecCmd()
+ testlogArg := []string{}
+ if !r.c.disableCache && len(execCmd) == 0 {
+ testlogArg = []string{"-test.testlogfile=" + a.Objdir + "testlog.txt"}
+ }
+ panicArg := "-test.paniconexit0"
+ fuzzArg := []string{}
+ if testFuzz != "" {
+ fuzzCacheDir := filepath.Join(cache.Default().FuzzDir(), a.Package.ImportPath)
+ fuzzArg = []string{"-test.fuzzcachedir=" + fuzzCacheDir}
+ }
+ coverdirArg := []string{}
+ addToEnv := ""
+ if cfg.BuildCover {
+ gcd := filepath.Join(a.Objdir, "gocoverdir")
+ if err := b.Mkdir(gcd); err != nil {
+ // If we can't create a temp dir, terminate immediately
+ // with an error as opposed to returning an error to the
+ // caller; failed MkDir most likely indicates that we're
+ // out of disk space or there is some other systemic error
+ // that will make forward progress unlikely.
+ base.Fatalf("failed to create temporary dir: %v", err)
+ }
+ coverdirArg = append(coverdirArg, "-test.gocoverdir="+gcd)
+ // Even though we are passing the -test.gocoverdir option to
+ // the test binary, also set GOCOVERDIR as well. This is
+ // intended to help with tests that run "go build" to build
+ // fresh copies of tools to test as part of the testing.
+ addToEnv = "GOCOVERDIR=" + gcd
+ }
+ args := str.StringList(execCmd, a.Deps[0].BuiltTarget(), testlogArg, panicArg, fuzzArg, coverdirArg, testArgs)
+
+ if testCoverProfile != "" {
+ // Write coverage to temporary profile, for merging later.
+ for i, arg := range args {
+ if strings.HasPrefix(arg, "-test.coverprofile=") {
+ args[i] = "-test.coverprofile=" + a.Objdir + "_cover_.out"
+ }
+ }
+ }
+
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "%s", strings.Join(args, " "))
+ if cfg.BuildN {
+ return nil
+ }
+ }
+
+ // Normally, the test will terminate itself when the timeout expires,
+ // but add a last-ditch deadline to detect and stop wedged binaries.
+ ctx, cancel := context.WithTimeout(ctx, testKillTimeout)
+ defer cancel()
+
+ // Now we're ready to actually run the command.
+ //
+ // If the -o flag is set, or if at some point we change cmd/go to start
+ // copying test executables into the build cache, we may run into spurious
+ // ETXTBSY errors on Unix platforms (see https://go.dev/issue/22315).
+ //
+ // Since we know what causes those, and we know that they should resolve
+ // quickly (the ETXTBSY error will resolve as soon as the subprocess
+ // holding the descriptor open reaches its 'exec' call), we retry them
+ // in a loop.
+
+ var (
+ cmd *exec.Cmd
+ t0 time.Time
+ cancelKilled = false
+ cancelSignaled = false
+ )
+ for {
+ cmd = exec.CommandContext(ctx, args[0], args[1:]...)
+ cmd.Dir = a.Package.Dir
+
+ env := slices.Clip(cfg.OrigEnv)
+ env = base.AppendPATH(env)
+ env = base.AppendPWD(env, cmd.Dir)
+ cmd.Env = env
+ if addToEnv != "" {
+ cmd.Env = append(cmd.Env, addToEnv)
+ }
+
+ cmd.Stdout = stdout
+ cmd.Stderr = stdout
+
+ // If there are any local SWIG dependencies, we want to load
+ // the shared library from the build directory.
+ if a.Package.UsesSwig() {
+ env := cmd.Env
+ found := false
+ prefix := "LD_LIBRARY_PATH="
+ for i, v := range env {
+ if strings.HasPrefix(v, prefix) {
+ env[i] = v + ":."
+ found = true
+ break
+ }
+ }
+ if !found {
+ env = append(env, "LD_LIBRARY_PATH=.")
+ }
+ cmd.Env = env
+ }
+
+ cmd.Cancel = func() error {
+ if base.SignalTrace == nil {
+ err := cmd.Process.Kill()
+ if err == nil {
+ cancelKilled = true
+ }
+ return err
+ }
+
+ // Send a quit signal in the hope that the program will print
+ // a stack trace and exit.
+ err := cmd.Process.Signal(base.SignalTrace)
+ if err == nil {
+ cancelSignaled = true
+ }
+ return err
+ }
+ cmd.WaitDelay = testWaitDelay
+
+ base.StartSigHandlers()
+ t0 = time.Now()
+ err = cmd.Run()
+
+ if !isETXTBSY(err) {
+ // We didn't hit the race in #22315, so there is no reason to retry the
+ // command.
+ break
+ }
+ }
+
+ out := buf.Bytes()
+ a.TestOutput = &buf
+ t := fmt.Sprintf("%.3fs", time.Since(t0).Seconds())
+
+ mergeCoverProfile(cmd.Stdout, a.Objdir+"_cover_.out")
+
+ if err == nil {
+ norun := ""
+ if !testShowPass() && !testJSON {
+ buf.Reset()
+ }
+ if bytes.HasPrefix(out, noTestsToRun[1:]) || bytes.Contains(out, noTestsToRun) {
+ norun = " [no tests to run]"
+ }
+ if bytes.HasPrefix(out, noFuzzTestsToFuzz[1:]) || bytes.Contains(out, noFuzzTestsToFuzz) {
+ norun = " [no fuzz tests to fuzz]"
+ }
+ if bytes.HasPrefix(out, tooManyFuzzTestsToFuzz[1:]) || bytes.Contains(out, tooManyFuzzTestsToFuzz) {
+ norun = " [-fuzz matches more than one fuzz test, won't fuzz]"
+ }
+ if len(out) > 0 && !bytes.HasSuffix(out, []byte("\n")) {
+ // Ensure that the output ends with a newline before the "ok"
+ // line we're about to print (https://golang.org/issue/49317).
+ cmd.Stdout.Write([]byte("\n"))
+ }
+ fmt.Fprintf(cmd.Stdout, "ok \t%s\t%s%s%s\n", a.Package.ImportPath, t, coveragePercentage(out), norun)
+ r.c.saveOutput(a)
+ } else {
+ base.SetExitStatus(1)
+ if cancelSignaled {
+ fmt.Fprintf(cmd.Stdout, "*** Test killed with %v: ran too long (%v).\n", base.SignalTrace, testKillTimeout)
+ } else if cancelKilled {
+ fmt.Fprintf(cmd.Stdout, "*** Test killed: ran too long (%v).\n", testKillTimeout)
+ } else if errors.Is(err, exec.ErrWaitDelay) {
+ fmt.Fprintf(cmd.Stdout, "*** Test I/O incomplete %v after exiting.\n", cmd.WaitDelay)
+ }
+ var ee *exec.ExitError
+ if len(out) == 0 || !errors.As(err, &ee) || !ee.Exited() {
+ // If there was no test output, print the exit status so that the reason
+ // for failure is clear.
+ fmt.Fprintf(cmd.Stdout, "%s\n", err)
+ } else if !bytes.HasSuffix(out, []byte("\n")) {
+ // Otherwise, ensure that the output ends with a newline before the FAIL
+ // line we're about to print (https://golang.org/issue/49317).
+ cmd.Stdout.Write([]byte("\n"))
+ }
+
+ // NOTE(golang.org/issue/37555): test2json reports that a test passes
+ // unless "FAIL" is printed at the beginning of a line. The test may not
+ // actually print that if it panics, exits, or terminates abnormally,
+ // so we print it here. We can't always check whether it was printed
+ // because some tests need stdout to be a terminal (golang.org/issue/34791),
+ // not a pipe.
+ // TODO(golang.org/issue/29062): tests that exit with status 0 without
+ // printing a final result should fail.
+ prefix := ""
+ if testJSON || testV.json {
+ prefix = "\x16"
+ }
+ fmt.Fprintf(cmd.Stdout, "%sFAIL\t%s\t%s\n", prefix, a.Package.ImportPath, t)
+ }
+
+ if cmd.Stdout != &buf {
+ buf.Reset() // cmd.Stdout was going to os.Stdout already
+ }
+ return nil
+}
+
+// tryCache is called just before the link attempt,
+// to see if the test result is cached and therefore the link is unneeded.
+// It reports whether the result can be satisfied from cache.
+func (c *runCache) tryCache(b *work.Builder, a *work.Action) bool {
+ return c.tryCacheWithID(b, a, a.Deps[0].BuildActionID())
+}
+
+func (c *runCache) tryCacheWithID(b *work.Builder, a *work.Action, id string) bool {
+ if len(pkgArgs) == 0 {
+ // Caching does not apply to "go test",
+ // only to "go test foo" (including "go test .").
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: caching disabled in local directory mode\n")
+ }
+ c.disableCache = true
+ return false
+ }
+
+ if a.Package.Root == "" {
+ // Caching does not apply to tests outside of any module, GOPATH, or GOROOT.
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: caching disabled for package outside of module root, GOPATH, or GOROOT: %s\n", a.Package.ImportPath)
+ }
+ c.disableCache = true
+ return false
+ }
+
+ var cacheArgs []string
+ for _, arg := range testArgs {
+ i := strings.Index(arg, "=")
+ if i < 0 || !strings.HasPrefix(arg, "-test.") {
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: caching disabled for test argument: %s\n", arg)
+ }
+ c.disableCache = true
+ return false
+ }
+ switch arg[:i] {
+ case "-test.benchtime",
+ "-test.cpu",
+ "-test.list",
+ "-test.parallel",
+ "-test.run",
+ "-test.short",
+ "-test.timeout",
+ "-test.failfast",
+ "-test.v":
+ // These are cacheable.
+ // Note that this list is documented above,
+ // so if you add to this list, update the docs too.
+ cacheArgs = append(cacheArgs, arg)
+
+ default:
+ // nothing else is cacheable
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: caching disabled for test argument: %s\n", arg)
+ }
+ c.disableCache = true
+ return false
+ }
+ }
+
+ // The test cache result fetch is a two-level lookup.
+ //
+ // First, we use the content hash of the test binary
+ // and its command-line arguments to find the
+ // list of environment variables and files consulted
+ // the last time the test was run with those arguments.
+ // (To avoid unnecessary links, we store this entry
+ // under two hashes: id1 uses the linker inputs as a
+ // proxy for the test binary, and id2 uses the actual
+ // test binary. If the linker inputs are unchanged,
+ // this way we avoid the link step, even though we
+ // do not cache link outputs.)
+ //
+ // Second, we compute a hash of the values of the
+ // environment variables and the content of the files
+ // listed in the log from the previous run.
+ // Then we look up test output using a combination of
+ // the hash from the first part (testID) and the hash of the
+ // test inputs (testInputsID).
+ //
+ // In order to store a new test result, we must redo the
+ // testInputsID computation using the log from the run
+ // we want to cache, and then we store that new log and
+ // the new outputs.
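+ //
+ // Roughly:
+ //
+ // testID = hash(test binary or its link inputs, cacheable args, exec cmd)
+ // testInputsID = hash(env vars and files recorded in the previous run's log)
+ //
+ // and the cached output is stored under testAndInputKey(testID, testInputsID).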
+
+ h := cache.NewHash("testResult")
+ fmt.Fprintf(h, "test binary %s args %q execcmd %q", id, cacheArgs, work.ExecCmd)
+ testID := h.Sum()
+ if c.id1 == (cache.ActionID{}) {
+ c.id1 = testID
+ } else {
+ c.id2 = testID
+ }
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: %s: test ID %x => %x\n", a.Package.ImportPath, id, testID)
+ }
+
+ // Load list of referenced environment variables and files
+ // from last run of testID, and compute hash of that content.
+ data, entry, err := cache.GetBytes(cache.Default(), testID)
+ if !bytes.HasPrefix(data, testlogMagic) || data[len(data)-1] != '\n' {
+ if cache.DebugTest {
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testcache: %s: input list not found: %v\n", a.Package.ImportPath, err)
+ } else {
+ fmt.Fprintf(os.Stderr, "testcache: %s: input list malformed\n", a.Package.ImportPath)
+ }
+ }
+ return false
+ }
+ testInputsID, err := computeTestInputsID(a, data)
+ if err != nil {
+ return false
+ }
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: %s: test ID %x => input ID %x => %x\n", a.Package.ImportPath, testID, testInputsID, testAndInputKey(testID, testInputsID))
+ }
+
+ // Parse cached result in preparation for changing run time to "(cached)".
+ // If we can't parse the cached result, don't use it.
+ data, entry, err = cache.GetBytes(cache.Default(), testAndInputKey(testID, testInputsID))
+ if len(data) == 0 || data[len(data)-1] != '\n' {
+ if cache.DebugTest {
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testcache: %s: test output not found: %v\n", a.Package.ImportPath, err)
+ } else {
+ fmt.Fprintf(os.Stderr, "testcache: %s: test output malformed\n", a.Package.ImportPath)
+ }
+ }
+ return false
+ }
+ if entry.Time.Before(testCacheExpire) {
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: %s: test output expired due to go clean -testcache\n", a.Package.ImportPath)
+ }
+ return false
+ }
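+
+ // The cached data ends with a line of the form
+ // ok \t<import path>\t<elapsed>...
+ // Find the start of that line (i) and of the elapsed time (j) so that the
+ // time can be replaced with "(cached)" below; anything after the elapsed
+ // time is preserved as-is.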
+ i := bytes.LastIndexByte(data[:len(data)-1], '\n') + 1
+ if !bytes.HasPrefix(data[i:], []byte("ok \t")) {
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: %s: test output malformed\n", a.Package.ImportPath)
+ }
+ return false
+ }
+ j := bytes.IndexByte(data[i+len("ok \t"):], '\t')
+ if j < 0 {
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: %s: test output malformed\n", a.Package.ImportPath)
+ }
+ return false
+ }
+ j += i + len("ok \t") + 1
+
+ // Committed to printing.
+ c.buf = new(bytes.Buffer)
+ c.buf.Write(data[:j])
+ c.buf.WriteString("(cached)")
+ for j < len(data) && ('0' <= data[j] && data[j] <= '9' || data[j] == '.' || data[j] == 's') {
+ j++
+ }
+ c.buf.Write(data[j:])
+ return true
+}
+
+var errBadTestInputs = errors.New("error parsing test inputs")
+var testlogMagic = []byte("# test log\n") // known to testing/internal/testdeps/deps.go
+
+// computeTestInputsID computes the "test inputs ID"
+// (see comment in tryCacheWithID above) for the
+// test log.
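+//
+// A test log begins with the "# test log" magic line and is followed by
+// entries of the form "<op> <name>", where op is one of getenv, chdir,
+// stat, or open; for example, "getenv GOCACHE" or "open testdata/input.txt"
+// (illustrative entries, not taken from a real log).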
+func computeTestInputsID(a *work.Action, testlog []byte) (cache.ActionID, error) {
+ testlog = bytes.TrimPrefix(testlog, testlogMagic)
+ h := cache.NewHash("testInputs")
+ pwd := a.Package.Dir
+ for _, line := range bytes.Split(testlog, []byte("\n")) {
+ if len(line) == 0 {
+ continue
+ }
+ s := string(line)
+ op, name, found := strings.Cut(s, " ")
+ if !found {
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: %s: input list malformed (%q)\n", a.Package.ImportPath, line)
+ }
+ return cache.ActionID{}, errBadTestInputs
+ }
+ switch op {
+ default:
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: %s: input list malformed (%q)\n", a.Package.ImportPath, line)
+ }
+ return cache.ActionID{}, errBadTestInputs
+ case "getenv":
+ fmt.Fprintf(h, "env %s %x\n", name, hashGetenv(name))
+ case "chdir":
+ pwd = name // always absolute
+ fmt.Fprintf(h, "chdir %s %x\n", name, hashStat(name))
+ case "stat":
+ if !filepath.IsAbs(name) {
+ name = filepath.Join(pwd, name)
+ }
+ if a.Package.Root == "" || search.InDir(name, a.Package.Root) == "" {
+ // Do not recheck files outside the module, GOPATH, or GOROOT root.
+ break
+ }
+ fmt.Fprintf(h, "stat %s %x\n", name, hashStat(name))
+ case "open":
+ if !filepath.IsAbs(name) {
+ name = filepath.Join(pwd, name)
+ }
+ if a.Package.Root == "" || search.InDir(name, a.Package.Root) == "" {
+ // Do not recheck files outside the module, GOPATH, or GOROOT root.
+ break
+ }
+ fh, err := hashOpen(name)
+ if err != nil {
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: %s: input file %s: %s\n", a.Package.ImportPath, name, err)
+ }
+ return cache.ActionID{}, err
+ }
+ fmt.Fprintf(h, "open %s %x\n", name, fh)
+ }
+ }
+ sum := h.Sum()
+ return sum, nil
+}
+
+func hashGetenv(name string) cache.ActionID {
+ h := cache.NewHash("getenv")
+ v, ok := os.LookupEnv(name)
+ if !ok {
+ h.Write([]byte{0})
+ } else {
+ h.Write([]byte{1})
+ h.Write([]byte(v))
+ }
+ return h.Sum()
+}
+
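+// modTimeCutoff is the minimum age a regular file must have for its size and
+// mtime to be trusted as a proxy for its content in hashOpen; files modified
+// more recently than this disable caching for the run (see errFileTooNew).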
+const modTimeCutoff = 2 * time.Second
+
+var errFileTooNew = errors.New("file used as input is too new")
+
+func hashOpen(name string) (cache.ActionID, error) {
+ h := cache.NewHash("open")
+ info, err := os.Stat(name)
+ if err != nil {
+ fmt.Fprintf(h, "err %v\n", err)
+ return h.Sum(), nil
+ }
+ hashWriteStat(h, info)
+ if info.IsDir() {
+ files, err := os.ReadDir(name)
+ if err != nil {
+ fmt.Fprintf(h, "err %v\n", err)
+ }
+ for _, f := range files {
+ fmt.Fprintf(h, "file %s ", f.Name())
+ finfo, err := f.Info()
+ if err != nil {
+ fmt.Fprintf(h, "err %v\n", err)
+ } else {
+ hashWriteStat(h, finfo)
+ }
+ }
+ } else if info.Mode().IsRegular() {
+ // Because files might be very large, do not attempt
+ // to hash the entirety of their content. Instead assume
+ // the mtime and size recorded in hashWriteStat above
+ // are good enough.
+ //
+ // To avoid problems for very recent files where a new
+ // write might not change the mtime due to file system
+ // mtime precision, reject caching if a file was read that
+ // is less than modTimeCutoff old.
+ if time.Since(info.ModTime()) < modTimeCutoff {
+ return cache.ActionID{}, errFileTooNew
+ }
+ }
+ return h.Sum(), nil
+}
+
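+// hashStat hashes both the os.Stat and os.Lstat results for name, so that a
+// change to either a symlink or the file it points to changes the hash.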
+func hashStat(name string) cache.ActionID {
+ h := cache.NewHash("stat")
+ if info, err := os.Stat(name); err != nil {
+ fmt.Fprintf(h, "err %v\n", err)
+ } else {
+ hashWriteStat(h, info)
+ }
+ if info, err := os.Lstat(name); err != nil {
+ fmt.Fprintf(h, "err %v\n", err)
+ } else {
+ hashWriteStat(h, info)
+ }
+ return h.Sum()
+}
+
+func hashWriteStat(h io.Writer, info fs.FileInfo) {
+ fmt.Fprintf(h, "stat %d %x %v %v\n", info.Size(), uint64(info.Mode()), info.ModTime(), info.IsDir())
+}
+
+// testAndInputKey returns the actual cache key for the pair (testID, testInputsID).
+func testAndInputKey(testID, testInputsID cache.ActionID) cache.ActionID {
+ return cache.Subkey(testID, fmt.Sprintf("inputs:%x", testInputsID))
+}
+
+func (c *runCache) saveOutput(a *work.Action) {
+ if c.id1 == (cache.ActionID{}) && c.id2 == (cache.ActionID{}) {
+ return
+ }
+
+ // See comment about two-level lookup in tryCacheWithID above.
+ testlog, err := os.ReadFile(a.Objdir + "testlog.txt")
+ if err != nil || !bytes.HasPrefix(testlog, testlogMagic) || testlog[len(testlog)-1] != '\n' {
+ if cache.DebugTest {
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testcache: %s: reading testlog: %v\n", a.Package.ImportPath, err)
+ } else {
+ fmt.Fprintf(os.Stderr, "testcache: %s: reading testlog: malformed\n", a.Package.ImportPath)
+ }
+ }
+ return
+ }
+ testInputsID, err := computeTestInputsID(a, testlog)
+ if err != nil {
+ return
+ }
+ if c.id1 != (cache.ActionID{}) {
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: %s: save test ID %x => input ID %x => %x\n", a.Package.ImportPath, c.id1, testInputsID, testAndInputKey(c.id1, testInputsID))
+ }
+ cache.PutNoVerify(cache.Default(), c.id1, bytes.NewReader(testlog))
+ cache.PutNoVerify(cache.Default(), testAndInputKey(c.id1, testInputsID), bytes.NewReader(a.TestOutput.Bytes()))
+ }
+ if c.id2 != (cache.ActionID{}) {
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: %s: save test ID %x => input ID %x => %x\n", a.Package.ImportPath, c.id2, testInputsID, testAndInputKey(c.id2, testInputsID))
+ }
+ cache.PutNoVerify(cache.Default(), c.id2, bytes.NewReader(testlog))
+ cache.PutNoVerify(cache.Default(), testAndInputKey(c.id2, testInputsID), bytes.NewReader(a.TestOutput.Bytes()))
+ }
+}
+
+// coveragePercentage returns the coverage results (if enabled) for the
+// test. It extracts the data by scanning the output from the test run.
+func coveragePercentage(out []byte) string {
+ if !cfg.BuildCover {
+ return ""
+ }
+ // The string looks like
+ // coverage: 79.9% of statements
+ // Extract the piece from the percentage to the end of the line.
+ re := regexp.MustCompile(`coverage: (.*)\n`)
+ matches := re.FindSubmatch(out)
+ if matches == nil {
+ // Probably running "go test -cover" not "go test -cover fmt".
+ // The coverage output will appear in the output directly.
+ return ""
+ }
+ return fmt.Sprintf("\tcoverage: %s", matches[1])
+}
+
+// builderCleanTest is the action for cleaning up after a test.
+func builderCleanTest(b *work.Builder, ctx context.Context, a *work.Action) error {
+ if cfg.BuildWork {
+ return nil
+ }
+ if cfg.BuildX {
+ b.Showcmd("", "rm -r %s", a.Objdir)
+ }
+ os.RemoveAll(a.Objdir)
+ return nil
+}
+
+// builderPrintTest is the action for printing a test result.
+func builderPrintTest(b *work.Builder, ctx context.Context, a *work.Action) error {
+ clean := a.Deps[0]
+ run := clean.Deps[0]
+ if run.TestOutput != nil {
+ os.Stdout.Write(run.TestOutput.Bytes())
+ run.TestOutput = nil
+ }
+ return nil
+}
+
+// printExitStatus is the action for printing the final exit status.
+// If we are running multiple test targets, print a final "FAIL"
+// in case a failure in an early package has already scrolled
+// off of the user's terminal.
+// (See https://golang.org/issue/30507#issuecomment-470593235.)
+//
+// In JSON mode, we need to maintain valid JSON output and
+// we assume that the test output is being parsed by a tool
+// anyway, so the failure will not be missed and would be
+// awkward to try to wedge into the JSON stream.
+//
+// In fuzz mode, we only allow a single package for now
+// (see CL 350156 and https://golang.org/issue/46312),
+// so there is no possibility of scrolling off and no need
+// to print the final status.
+func printExitStatus(b *work.Builder, ctx context.Context, a *work.Action) error {
+ if !testJSON && testFuzz == "" && len(pkgArgs) != 0 {
+ if base.GetExitStatus() != 0 {
+ fmt.Println("FAIL")
+ return nil
+ }
+ }
+ return nil
+}
+
+// testBinaryName returns the name to use for the test binary executable.
+// It uses the last element of the import path rather than the package name,
+// since the two differ when the package name is "main". But if the import
+// path is "command-line-arguments", as it is during 'go run', it uses the
+// package name.
+func testBinaryName(p *load.Package) string {
+ var elem string
+ if p.ImportPath == "command-line-arguments" {
+ elem = p.Name
+ } else {
+ elem = p.DefaultExecName()
+ }
+
+ return elem + ".test"
+}
diff --git a/src/cmd/go/internal/test/test_nonunix.go b/src/cmd/go/internal/test/test_nonunix.go
new file mode 100644
index 0000000..df84487
--- /dev/null
+++ b/src/cmd/go/internal/test/test_nonunix.go
@@ -0,0 +1,12 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix
+
+package test
+
+func isETXTBSY(err error) bool {
+ // syscall.ETXTBSY is only meaningful on Unix platforms.
+ return false
+}
diff --git a/src/cmd/go/internal/test/test_unix.go b/src/cmd/go/internal/test/test_unix.go
new file mode 100644
index 0000000..f50ef98
--- /dev/null
+++ b/src/cmd/go/internal/test/test_unix.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package test
+
+import (
+ "errors"
+ "syscall"
+)
+
+func isETXTBSY(err error) bool {
+ return errors.Is(err, syscall.ETXTBSY)
+}
diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go
new file mode 100644
index 0000000..4253788
--- /dev/null
+++ b/src/cmd/go/internal/test/testflag.go
@@ -0,0 +1,416 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/cmdflag"
+ "cmd/go/internal/work"
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+)
+
+//go:generate go run ./genflags.go
+
+// The flag handling part of go test is large and distracting.
+// We can't use (*flag.FlagSet).Parse because some of the flags from
+// our command line are for us, and some are for the test binary, and
+// some are for both.
+
+func init() {
+ work.AddBuildFlags(CmdTest, work.OmitVFlag)
+
+ cf := CmdTest.Flag
+ cf.BoolVar(&testC, "c", false, "")
+ cf.StringVar(&testO, "o", "", "")
+ work.AddCoverFlags(CmdTest, &testCoverProfile)
+ cf.Var((*base.StringsFlag)(&work.ExecCmd), "exec", "")
+ cf.BoolVar(&testJSON, "json", false, "")
+ cf.Var(&testVet, "vet", "")
+
+ // Register flags to be forwarded to the test binary. We retain variables for
+ // some of them so that cmd/go knows what to do with the test output, or knows
+ // to build the test in a way that supports the use of the flag.
+
+ cf.StringVar(&testBench, "bench", "", "")
+ cf.Bool("benchmem", false, "")
+ cf.String("benchtime", "", "")
+ cf.StringVar(&testBlockProfile, "blockprofile", "", "")
+ cf.String("blockprofilerate", "", "")
+ cf.Int("count", 0, "")
+ cf.String("cpu", "", "")
+ cf.StringVar(&testCPUProfile, "cpuprofile", "", "")
+ cf.Bool("failfast", false, "")
+ cf.StringVar(&testFuzz, "fuzz", "", "")
+ cf.Bool("fullpath", false, "")
+ cf.StringVar(&testList, "list", "", "")
+ cf.StringVar(&testMemProfile, "memprofile", "", "")
+ cf.String("memprofilerate", "", "")
+ cf.StringVar(&testMutexProfile, "mutexprofile", "", "")
+ cf.String("mutexprofilefraction", "", "")
+ cf.Var(&testOutputDir, "outputdir", "")
+ cf.Int("parallel", 0, "")
+ cf.String("run", "", "")
+ cf.Bool("short", false, "")
+ cf.String("skip", "", "")
+ cf.DurationVar(&testTimeout, "timeout", 10*time.Minute, "") // known to cmd/dist
+ cf.String("fuzztime", "", "")
+ cf.String("fuzzminimizetime", "", "")
+ cf.StringVar(&testTrace, "trace", "", "")
+ cf.Var(&testV, "v", "")
+ cf.Var(&testShuffle, "shuffle", "")
+
+ for name, ok := range passFlagToTest {
+ if ok {
+ cf.Var(cf.Lookup(name).Value, "test."+name, "")
+ }
+ }
+}
+
+// outputdirFlag implements the -outputdir flag.
+// It interprets an empty value as the working directory of the 'go' command.
+type outputdirFlag struct {
+ abs string
+}
+
+func (f *outputdirFlag) String() string {
+ return f.abs
+}
+
+func (f *outputdirFlag) Set(value string) (err error) {
+ if value == "" {
+ f.abs = ""
+ } else {
+ f.abs, err = filepath.Abs(value)
+ }
+ return err
+}
+
+func (f *outputdirFlag) getAbs() string {
+ if f.abs == "" {
+ return base.Cwd()
+ }
+ return f.abs
+}
+
+// vetFlag implements the special parsing logic for the -vet flag:
+// a comma-separated list, with distinguished values "all" and
+// "off", plus a boolean tracking whether it was set explicitly.
+//
+// "all" is encoded as vetFlag{true, false, nil}, since it will
+// pass no flags to the vet binary, and by default, it runs all
+// analyzers.
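+//
+// For example (hypothetical invocation), -vet=atomic,nilfunc is parsed as
+// vetFlag{explicit: true, flags: []string{"-atomic", "-nilfunc"}}.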
+type vetFlag struct {
+ explicit bool
+ off bool
+ flags []string // passed to vet when invoked automatically during 'go test'
+}
+
+func (f *vetFlag) String() string {
+ switch {
+ case !f.off && !f.explicit && len(f.flags) == 0:
+ return "all"
+ case f.off:
+ return "off"
+ }
+
+ var buf strings.Builder
+ for i, f := range f.flags {
+ if i > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(f)
+ }
+ return buf.String()
+}
+
+func (f *vetFlag) Set(value string) error {
+ switch {
+ case value == "":
+ *f = vetFlag{flags: defaultVetFlags}
+ return nil
+ case strings.Contains(value, "="):
+ return fmt.Errorf("-vet argument cannot contain equal signs")
+ case strings.Contains(value, " "):
+ return fmt.Errorf("-vet argument is comma-separated list, cannot contain spaces")
+ }
+
+ *f = vetFlag{explicit: true}
+ var single string
+ for _, arg := range strings.Split(value, ",") {
+ switch arg {
+ case "":
+ return fmt.Errorf("-vet argument contains empty list element")
+ case "all":
+ single = arg
+ *f = vetFlag{explicit: true}
+ continue
+ case "off":
+ single = arg
+ *f = vetFlag{
+ explicit: true,
+ off: true,
+ }
+ continue
+ default:
+ if _, ok := passAnalyzersToVet[arg]; !ok {
+ return fmt.Errorf("-vet argument must be a supported analyzer or a distinguished value; found %s", arg)
+ }
+ f.flags = append(f.flags, "-"+arg)
+ }
+ }
+ if len(f.flags) > 1 && single != "" {
+ return fmt.Errorf("-vet does not accept %q in a list with other analyzers", single)
+ }
+ return nil
+}
+
+type shuffleFlag struct {
+ on bool
+ seed *int64
+}
+
+func (f *shuffleFlag) String() string {
+ if !f.on {
+ return "off"
+ }
+ if f.seed == nil {
+ return "on"
+ }
+ return fmt.Sprintf("%d", *f.seed)
+}
+
+func (f *shuffleFlag) Set(value string) error {
+ if value == "off" {
+ *f = shuffleFlag{on: false}
+ return nil
+ }
+
+ if value == "on" {
+ *f = shuffleFlag{on: true}
+ return nil
+ }
+
+ seed, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return fmt.Errorf(`-shuffle argument must be "on", "off", or an int64: %v`, err)
+ }
+
+ *f = shuffleFlag{on: true, seed: &seed}
+ return nil
+}
+
+// testFlags processes the command line, grabbing -x and -c, rewriting known flags
+// to have "test" before them, and reading the command line for the test binary.
+// Unfortunately for us, we need to do our own flag processing because go test
+// grabs some flags but otherwise its command line is just a holding place for
+// pkg.test's arguments.
+// We allow known flags both before and after the package name list,
+// to allow both
+//
+// go test fmt -custom-flag-for-fmt-test
+// go test -x math
+func testFlags(args []string) (packageNames, passToTest []string) {
+ base.SetFromGOFLAGS(&CmdTest.Flag)
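+ // Record which test-binary flags were set via GOFLAGS so that they can be
+ // forwarded to the test binary (as injectedFlags below) unless an explicit
+ // command-line flag overrides them.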
+ addFromGOFLAGS := map[string]bool{}
+ CmdTest.Flag.Visit(func(f *flag.Flag) {
+ if short := strings.TrimPrefix(f.Name, "test."); passFlagToTest[short] {
+ addFromGOFLAGS[f.Name] = true
+ }
+ })
+
+ // firstUnknownFlag helps us report an error when flags not known to 'go
+ // test' are used along with -c.
+ firstUnknownFlag := ""
+
+ explicitArgs := make([]string, 0, len(args))
+ inPkgList := false
+ afterFlagWithoutValue := false
+ for len(args) > 0 {
+ f, remainingArgs, err := cmdflag.ParseOne(&CmdTest.Flag, args)
+
+ wasAfterFlagWithoutValue := afterFlagWithoutValue
+ afterFlagWithoutValue = false // provisionally
+
+ if errors.Is(err, flag.ErrHelp) {
+ exitWithUsage()
+ }
+
+ if errors.Is(err, cmdflag.ErrFlagTerminator) {
+ // 'go list' allows package arguments to be named either before or after
+ // the terminator, but 'go test' has historically allowed them only
+ // before. Preserve that behavior and treat all remaining arguments —
+ // including the terminator itself! — as arguments to the test.
+ explicitArgs = append(explicitArgs, args...)
+ break
+ }
+
+ if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) {
+ if !inPkgList && packageNames != nil {
+ // We already saw the package list previously, and this argument is not
+ // a flag, so it — and everything after it — must be either a value for
+ // a preceding flag or a literal argument to the test binary.
+ if wasAfterFlagWithoutValue {
+ // This argument could syntactically be a flag value, so
+ // optimistically assume that it is and keep looking for go command
+ // flags after it.
+ //
+ // (If we're wrong, we'll at least be consistent with historical
+ // behavior; see https://golang.org/issue/40763.)
+ explicitArgs = append(explicitArgs, nf.RawArg)
+ args = remainingArgs
+ continue
+ } else {
+ // This argument syntactically cannot be a flag value, so it must be a
+ // positional argument, and so must everything after it.
+ explicitArgs = append(explicitArgs, args...)
+ break
+ }
+ }
+
+ inPkgList = true
+ packageNames = append(packageNames, nf.RawArg)
+ args = remainingArgs // Consume the package name.
+ continue
+ }
+
+ if inPkgList {
+ // This argument is syntactically a flag, so if we were in the package
+ // list we're not anymore.
+ inPkgList = false
+ }
+
+ if nd := (cmdflag.FlagNotDefinedError{}); errors.As(err, &nd) {
+ // This is a flag we do not know. We must assume that any args we see
+ // after this might be flag arguments, not package names, so make
+ // packageNames non-nil to indicate that the package list is complete.
+ //
+ // (Actually, we only strictly need to assume that if the flag is not of
+ // the form -x=value, but making this more precise would be a breaking
+ // change in the command line API.)
+ if packageNames == nil {
+ packageNames = []string{}
+ }
+
+ if nd.RawArg == "-args" || nd.RawArg == "--args" {
+ // -args or --args signals that everything that follows
+ // should be passed to the test.
+ explicitArgs = append(explicitArgs, remainingArgs...)
+ break
+ }
+
+ if firstUnknownFlag == "" {
+ firstUnknownFlag = nd.RawArg
+ }
+
+ explicitArgs = append(explicitArgs, nd.RawArg)
+ args = remainingArgs
+ if !nd.HasValue {
+ afterFlagWithoutValue = true
+ }
+ continue
+ }
+
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ exitWithUsage()
+ }
+
+ if short := strings.TrimPrefix(f.Name, "test."); passFlagToTest[short] {
+ explicitArgs = append(explicitArgs, fmt.Sprintf("-test.%s=%v", short, f.Value))
+
+ // This flag has been overridden explicitly, so don't forward its implicit
+ // value from GOFLAGS.
+ delete(addFromGOFLAGS, short)
+ delete(addFromGOFLAGS, "test."+short)
+ }
+
+ args = remainingArgs
+ }
+ if firstUnknownFlag != "" && testC {
+ fmt.Fprintf(os.Stderr, "go: unknown flag %s cannot be used with -c\n", firstUnknownFlag)
+ exitWithUsage()
+ }
+
+ var injectedFlags []string
+ if testJSON {
+ // If converting to JSON, we need the full output in order to pipe it to test2json.
+ // The -test.v=test2json flag is like -test.v=true but causes the test to add
+ // extra ^V characters before testing output lines and other framing,
+ // which helps test2json do a better job creating the JSON events.
+ injectedFlags = append(injectedFlags, "-test.v=test2json")
+ delete(addFromGOFLAGS, "v")
+ delete(addFromGOFLAGS, "test.v")
+ }
+
+ // Inject flags from GOFLAGS before the explicit command-line arguments.
+ // (They must appear before the flag terminator or first non-flag argument.)
+ // Also determine whether flags with awkward defaults have already been set.
+ var timeoutSet, outputDirSet bool
+ CmdTest.Flag.Visit(func(f *flag.Flag) {
+ short := strings.TrimPrefix(f.Name, "test.")
+ if addFromGOFLAGS[f.Name] {
+ injectedFlags = append(injectedFlags, fmt.Sprintf("-test.%s=%v", short, f.Value))
+ }
+ switch short {
+ case "timeout":
+ timeoutSet = true
+ case "outputdir":
+ outputDirSet = true
+ }
+ })
+
+ // 'go test' has a default timeout, but the test binary itself does not.
+ // If the timeout wasn't set (and forwarded) explicitly, add the default
+ // timeout to the command line.
+ if testTimeout > 0 && !timeoutSet {
+ injectedFlags = append(injectedFlags, fmt.Sprintf("-test.timeout=%v", testTimeout))
+ }
+
+ // Similarly, the test binary defaults -test.outputdir to its own working
+ // directory, but 'go test' defaults it to the working directory of the 'go'
+ // command. Set it explicitly if it is needed due to some other flag that
+ // requests output.
+ if testProfile() != "" && !outputDirSet {
+ injectedFlags = append(injectedFlags, "-test.outputdir="+testOutputDir.getAbs())
+ }
+
+ // If the user is explicitly passing -help or -h, show output
+ // of the test binary so that the help output is displayed
+ // even though the test will exit with success.
+ // This loop is imperfect: it will do the wrong thing for a case
+ // like -args -test.outputdir -help. Such cases are probably rare,
+ // and getting this wrong doesn't do too much harm.
+helpLoop:
+ for _, arg := range explicitArgs {
+ switch arg {
+ case "--":
+ break helpLoop
+ case "-h", "-help", "--help":
+ testHelp = true
+ break helpLoop
+ }
+ }
+
+ // Forward any unparsed arguments (following --args) to the test binary.
+ return packageNames, append(injectedFlags, explicitArgs...)
+}
+
+func exitWithUsage() {
+ fmt.Fprintf(os.Stderr, "usage: %s\n", CmdTest.UsageLine)
+ fmt.Fprintf(os.Stderr, "Run 'go help %s' and 'go help %s' for details.\n", CmdTest.LongName(), HelpTestflag.LongName())
+
+ base.SetExitStatus(2)
+ base.Exit()
+}
diff --git a/src/cmd/go/internal/tool/tool.go b/src/cmd/go/internal/tool/tool.go
new file mode 100644
index 0000000..ebe189b
--- /dev/null
+++ b/src/cmd/go/internal/tool/tool.go
@@ -0,0 +1,224 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tool implements the “go tool” command.
+package tool
+
+import (
+ "context"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "go/build"
+ "internal/platform"
+ "os"
+ "os/exec"
+ "os/signal"
+ "sort"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+)
+
+var CmdTool = &base.Command{
+ Run: runTool,
+ UsageLine: "go tool [-n] command [args...]",
+ Short: "run specified go tool",
+ Long: `
+Tool runs the go tool command identified by the arguments.
+With no arguments it prints the list of known tools.
+
+The -n flag causes tool to print the command that would be
+executed but not execute it.
+
+For more about each tool command, see 'go doc cmd/<command>'.
+`,
+}
+
+var toolN bool
+
+// isGccgoTool reports whether tool can be expected in the gccgo tool directory.
+// Other binaries could be in the same directory, so don't
+// show those with the 'go tool' command.
+func isGccgoTool(tool string) bool {
+ switch tool {
+ case "cgo", "fix", "cover", "godoc", "vet":
+ return true
+ }
+ return false
+}
+
+func init() {
+ base.AddChdirFlag(&CmdTool.Flag)
+ CmdTool.Flag.BoolVar(&toolN, "n", false, "")
+}
+
+func runTool(ctx context.Context, cmd *base.Command, args []string) {
+ if len(args) == 0 {
+ listTools()
+ return
+ }
+ toolName := args[0]
+ // The tool name must be lower-case letters, numbers or underscores.
+ for _, c := range toolName {
+ switch {
+ case 'a' <= c && c <= 'z', '0' <= c && c <= '9', c == '_':
+ default:
+ fmt.Fprintf(os.Stderr, "go: bad tool name %q\n", toolName)
+ base.SetExitStatus(2)
+ return
+ }
+ }
+
+ toolPath, err := base.ToolPath(toolName)
+ if err != nil {
+ if toolName == "dist" && len(args) > 1 && args[1] == "list" {
+ // cmd/distpack removes the 'dist' tool from the toolchain to save space,
+ // since it is normally only used for building the toolchain in the first
+ // place. However, 'go tool dist list' is useful for listing all supported
+ // platforms.
+ //
+ // If the dist tool does not exist, impersonate this command.
+ if impersonateDistList(args[2:]) {
+ return
+ }
+ }
+
+ // Emit the usual error for the missing tool.
+ _ = base.Tool(toolName)
+ }
+
+ if toolN {
+ cmd := toolPath
+ if len(args) > 1 {
+ cmd += " " + strings.Join(args[1:], " ")
+ }
+ fmt.Printf("%s\n", cmd)
+ return
+ }
+ args[0] = toolPath // in case the tool wants to re-exec itself, e.g. cmd/dist
+ toolCmd := &exec.Cmd{
+ Path: toolPath,
+ Args: args,
+ Stdin: os.Stdin,
+ Stdout: os.Stdout,
+ Stderr: os.Stderr,
+ }
+ err = toolCmd.Start()
+ if err == nil {
+ c := make(chan os.Signal, 100)
+ signal.Notify(c)
+ go func() {
+ for sig := range c {
+ toolCmd.Process.Signal(sig)
+ }
+ }()
+ err = toolCmd.Wait()
+ signal.Stop(c)
+ close(c)
+ }
+ if err != nil {
+ // Only print about the exit status if the command
+ // didn't even run (not an ExitError) or it didn't exit cleanly
+ // or we're printing command lines too (-x mode).
+ // Assume if command exited cleanly (even with non-zero status)
+ // it printed any messages it wanted to print.
+ if e, ok := err.(*exec.ExitError); !ok || !e.Exited() || cfg.BuildX {
+ fmt.Fprintf(os.Stderr, "go tool %s: %s\n", toolName, err)
+ }
+ base.SetExitStatus(1)
+ return
+ }
+}
+
+// listTools prints a list of the available tools in the tools directory.
+func listTools() {
+ f, err := os.Open(build.ToolDir)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "go: no tool directory: %s\n", err)
+ base.SetExitStatus(2)
+ return
+ }
+ defer f.Close()
+ names, err := f.Readdirnames(-1)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "go: can't read tool directory: %s\n", err)
+ base.SetExitStatus(2)
+ return
+ }
+
+ sort.Strings(names)
+ for _, name := range names {
+ // Unify presentation by going to lower case.
+		// On Windows, don't show the .exe suffix.
+ name = strings.TrimSuffix(strings.ToLower(name), cfg.ToolExeSuffix())
+
+ // The tool directory used by gccgo will have other binaries
+ // in addition to go tools. Only display go tools here.
+ if cfg.BuildToolchainName == "gccgo" && !isGccgoTool(name) {
+ continue
+ }
+ fmt.Println(name)
+ }
+}
+
+func impersonateDistList(args []string) (handled bool) {
+ fs := flag.NewFlagSet("go tool dist list", flag.ContinueOnError)
+ jsonFlag := fs.Bool("json", false, "produce JSON output")
+ brokenFlag := fs.Bool("broken", false, "include broken ports")
+
+ // The usage for 'go tool dist' claims that
+ // “All commands take -v flags to emit extra information”,
+ // but list -v appears not to have any effect.
+ _ = fs.Bool("v", false, "emit extra information")
+
+ if err := fs.Parse(args); err != nil || len(fs.Args()) > 0 {
+ // Unrecognized flag or argument.
+ // Force fallback to the real 'go tool dist'.
+ return false
+ }
+
+ if !*jsonFlag {
+ for _, p := range platform.List {
+ if !*brokenFlag && platform.Broken(p.GOOS, p.GOARCH) {
+ continue
+ }
+ fmt.Println(p)
+ }
+ return true
+ }
+
+ type jsonResult struct {
+ GOOS string
+ GOARCH string
+ CgoSupported bool
+ FirstClass bool
+ Broken bool `json:",omitempty"`
+ }
+
+ var results []jsonResult
+ for _, p := range platform.List {
+ broken := platform.Broken(p.GOOS, p.GOARCH)
+ if broken && !*brokenFlag {
+ continue
+ }
+ if *jsonFlag {
+ results = append(results, jsonResult{
+ GOOS: p.GOOS,
+ GOARCH: p.GOARCH,
+ CgoSupported: platform.CgoSupported(p.GOOS, p.GOARCH),
+ FirstClass: platform.FirstClass(p.GOOS, p.GOARCH),
+ Broken: broken,
+ })
+ }
+ }
+ out, err := json.MarshalIndent(results, "", "\t")
+ if err != nil {
+ return false
+ }
+
+ os.Stdout.Write(out)
+ return true
+}
diff --git a/src/cmd/go/internal/toolchain/exec.go b/src/cmd/go/internal/toolchain/exec.go
new file mode 100644
index 0000000..820fe93
--- /dev/null
+++ b/src/cmd/go/internal/toolchain/exec.go
@@ -0,0 +1,55 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !js && !wasip1
+
+package toolchain
+
+import (
+ "cmd/go/internal/base"
+ "internal/godebug"
+ "os"
+ "os/exec"
+ "runtime"
+ "syscall"
+)
+
+// execGoToolchain execs the Go toolchain with the given name (gotoolchain),
+// GOROOT directory, and go command executable.
+// The GOROOT directory is empty if we are invoking a command named
+// gotoolchain found in $PATH.
+func execGoToolchain(gotoolchain, dir, exe string) {
+ os.Setenv(targetEnv, gotoolchain)
+ if dir == "" {
+ os.Unsetenv("GOROOT")
+ } else {
+ os.Setenv("GOROOT", dir)
+ }
+
+ // On Windows, there is no syscall.Exec, so the best we can do
+ // is run a subprocess and exit with the same status.
+ // Doing the same on Unix would be a problem because it wouldn't
+ // propagate signals and such, but there are no signals on Windows.
+ // We also use the exec case when GODEBUG=gotoolchainexec=0,
+ // to allow testing this code even when not on Windows.
+ if godebug.New("#gotoolchainexec").Value() == "0" || runtime.GOOS == "windows" {
+ cmd := exec.Command(exe, os.Args[1:]...)
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ err := cmd.Run()
+ if err != nil {
+ if e, ok := err.(*exec.ExitError); ok && e.ProcessState != nil {
+ if e.ProcessState.Exited() {
+ os.Exit(e.ProcessState.ExitCode())
+ }
+ base.Fatalf("exec %s: %s", gotoolchain, e.ProcessState)
+ }
+ base.Fatalf("exec %s: %s", exe, err)
+ }
+ os.Exit(0)
+ }
+ err := syscall.Exec(exe, os.Args, os.Environ())
+ base.Fatalf("exec %s: %v", gotoolchain, err)
+}
diff --git a/src/cmd/go/internal/toolchain/exec_stub.go b/src/cmd/go/internal/toolchain/exec_stub.go
new file mode 100644
index 0000000..e212379
--- /dev/null
+++ b/src/cmd/go/internal/toolchain/exec_stub.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js || wasip1
+
+package toolchain
+
+import "cmd/go/internal/base"
+
+func execGoToolchain(gotoolchain, dir, exe string) {
+ base.Fatalf("execGoToolchain unsupported")
+}
diff --git a/src/cmd/go/internal/toolchain/path_none.go b/src/cmd/go/internal/toolchain/path_none.go
new file mode 100644
index 0000000..8fdf71a
--- /dev/null
+++ b/src/cmd/go/internal/toolchain/path_none.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix && !plan9 && !windows
+
+package toolchain
+
+import "io/fs"
+
+// pathDirs returns the directories in the system search path.
+func pathDirs() []string {
+ return nil
+}
+
+// pathVersion returns the Go version implemented by the file
+// described by de and info in directory dir.
+// The analysis only uses the name itself; it does not run the program.
+func pathVersion(dir string, de fs.DirEntry, info fs.FileInfo) (string, bool) {
+ return "", false
+}
diff --git a/src/cmd/go/internal/toolchain/path_plan9.go b/src/cmd/go/internal/toolchain/path_plan9.go
new file mode 100644
index 0000000..3f836a0
--- /dev/null
+++ b/src/cmd/go/internal/toolchain/path_plan9.go
@@ -0,0 +1,29 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package toolchain
+
+import (
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "cmd/go/internal/gover"
+)
+
+// pathDirs returns the directories in the system search path.
+func pathDirs() []string {
+ return filepath.SplitList(os.Getenv("path"))
+}
+
+// pathVersion returns the Go version implemented by the file
+// described by de and info in directory dir.
+// The analysis only uses the name itself; it does not run the program.
+func pathVersion(dir string, de fs.DirEntry, info fs.FileInfo) (string, bool) {
+ v := gover.FromToolchain(de.Name())
+ if v == "" || info.Mode()&0111 == 0 {
+ return "", false
+ }
+ return v, true
+}
diff --git a/src/cmd/go/internal/toolchain/path_unix.go b/src/cmd/go/internal/toolchain/path_unix.go
new file mode 100644
index 0000000..519c53e
--- /dev/null
+++ b/src/cmd/go/internal/toolchain/path_unix.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package toolchain
+
+import (
+ "internal/syscall/unix"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "cmd/go/internal/gover"
+)
+
+// pathDirs returns the directories in the system search path.
+func pathDirs() []string {
+ return filepath.SplitList(os.Getenv("PATH"))
+}
+
+// pathVersion returns the Go version implemented by the file
+// described by de and info in directory dir.
+// The analysis only uses the name itself; it does not run the program.
+func pathVersion(dir string, de fs.DirEntry, info fs.FileInfo) (string, bool) {
+ v := gover.FromToolchain(de.Name())
+ if v == "" {
+ return "", false
+ }
+
+ // Mimicking exec.findExecutable here.
+ // ENOSYS means Eaccess is not available or not implemented.
+ // EPERM can be returned by Linux containers employing seccomp.
+ // In both cases, fall back to checking the permission bits.
+ err := unix.Eaccess(filepath.Join(dir, de.Name()), unix.X_OK)
+ if (err == syscall.ENOSYS || err == syscall.EPERM) && info.Mode()&0111 != 0 {
+ err = nil
+ }
+ if err != nil {
+ return "", false
+ }
+
+ return v, true
+}
diff --git a/src/cmd/go/internal/toolchain/path_windows.go b/src/cmd/go/internal/toolchain/path_windows.go
new file mode 100644
index 0000000..086c591
--- /dev/null
+++ b/src/cmd/go/internal/toolchain/path_windows.go
@@ -0,0 +1,78 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package toolchain
+
+import (
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "cmd/go/internal/gover"
+)
+
+// pathExts is a cached PATHEXT list.
+var pathExts struct {
+ once sync.Once
+ list []string
+}
+
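+// initPathExts parses %PATHEXT% into a lower-case list of extensions,
+// adding a leading dot to entries that lack one and falling back to
+// .com/.exe/.bat/.cmd when the variable is unset. For example (illustrative),
+// PATHEXT="COM;.EXE" yields [".com", ".exe"].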
+func initPathExts() {
+ var exts []string
+ x := os.Getenv(`PATHEXT`)
+ if x != "" {
+ for _, e := range strings.Split(strings.ToLower(x), `;`) {
+ if e == "" {
+ continue
+ }
+ if e[0] != '.' {
+ e = "." + e
+ }
+ exts = append(exts, e)
+ }
+ } else {
+ exts = []string{".com", ".exe", ".bat", ".cmd"}
+ }
+ pathExts.list = exts
+}
+
+// pathDirs returns the directories in the system search path.
+func pathDirs() []string {
+ return filepath.SplitList(os.Getenv("PATH"))
+}
+
+// pathVersion returns the Go version implemented by the file
+// described by de and info in directory dir.
+// The analysis only uses the name itself; it does not run the program.
+func pathVersion(dir string, de fs.DirEntry, info fs.FileInfo) (string, bool) {
+ pathExts.once.Do(initPathExts)
+ name, _, ok := cutExt(de.Name(), pathExts.list)
+ if !ok {
+ return "", false
+ }
+ v := gover.FromToolchain(name)
+ if v == "" {
+ return "", false
+ }
+ return v, true
+}
+
+// cutExt looks for any of the known extensions at the end of file.
+// If one is found, cutExt returns the file name with the extension trimmed,
+// the extension itself, and true to signal that an extension was found.
+// Otherwise cutExt returns file, "", false.
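+// For example, cutExt("go1.21.0.exe", []string{".exe"}) returns ("go1.21.0", ".exe", true).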
+func cutExt(file string, exts []string) (name, ext string, found bool) {
+ i := strings.LastIndex(file, ".")
+ if i < 0 {
+ return file, "", false
+ }
+ for _, x := range exts {
+ if strings.EqualFold(file[i:], x) {
+ return file[:i], file[i:], true
+ }
+ }
+ return file, "", false
+}
diff --git a/src/cmd/go/internal/toolchain/select.go b/src/cmd/go/internal/toolchain/select.go
new file mode 100644
index 0000000..3446a48
--- /dev/null
+++ b/src/cmd/go/internal/toolchain/select.go
@@ -0,0 +1,649 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package toolchain implements dynamic switching of Go toolchains.
+package toolchain
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "go/build"
+ "io/fs"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modfetch"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/run"
+ "cmd/go/internal/work"
+
+ "golang.org/x/mod/module"
+)
+
+const (
+ // We download golang.org/toolchain version v0.0.1-<gotoolchain>.<goos>-<goarch>.
+	// If the 0.0.1 indicates anything at all, it's the version of the toolchain packaging:
+ // if for some reason we needed to change the way toolchains are packaged into
+ // module zip files in a future version of Go, we could switch to v0.0.2 and then
+ // older versions expecting the old format could use v0.0.1 and newer versions
+ // would use v0.0.2. Of course, then we'd also have to publish two of each
+ // module zip file. It's not likely we'll ever need to change this.
+ gotoolchainModule = "golang.org/toolchain"
+ gotoolchainVersion = "v0.0.1"
+
+ // targetEnv is a special environment variable set to the expected
+ // toolchain version during the toolchain switch by the parent
+ // process and cleared in the child process. When set, that indicates
+ // to the child to confirm that it provides the expected toolchain version.
+ targetEnv = "GOTOOLCHAIN_INTERNAL_SWITCH_VERSION"
+
+ // countEnv is a special environment variable
+ // that is incremented during each toolchain switch, to detect loops.
+ // It is cleared before invoking programs in 'go run', 'go test', 'go generate', and 'go tool'
+ // by invoking them in an environment filtered with FilterEnv,
+ // so user programs should not see this in their environment.
+ countEnv = "GOTOOLCHAIN_INTERNAL_SWITCH_COUNT"
+
+ // maxSwitch is the maximum toolchain switching depth.
+ // Most uses should never see more than three.
+ // (Perhaps one for the initial GOTOOLCHAIN dispatch,
+ // a second for go get doing an upgrade, and a third if
+ // for some reason the chosen upgrade version is too small
+ // by a little.)
+ // When the count reaches maxSwitch - 10, we start logging
+ // the switched versions for debugging before crashing with
+ // a fatal error upon reaching maxSwitch.
+ // That should be enough to see the repetition.
+ maxSwitch = 100
+)
+
+// FilterEnv returns a copy of env with internal GOTOOLCHAIN environment
+// variables filtered out.
+func FilterEnv(env []string) []string {
+ // Note: Don't need to filter out targetEnv because Switch does that.
+ var out []string
+ for _, e := range env {
+ if strings.HasPrefix(e, countEnv+"=") {
+ continue
+ }
+ out = append(out, e)
+ }
+ return out
+}
+
+// Select invokes a different Go toolchain if directed by
+// the GOTOOLCHAIN environment variable or the user's configuration
+// or go.mod file.
+// It must be called early in startup.
+// See https://go.dev/doc/toolchain#select.
+func Select() {
+ log.SetPrefix("go: ")
+ defer log.SetPrefix("")
+
+ if !modload.WillBeEnabled() {
+ return
+ }
+
+ // As a special case, let "go env GOTOOLCHAIN" and "go env -w GOTOOLCHAIN=..."
+ // be handled by the local toolchain, since an older toolchain may not understand it.
+ // This provides an easy way out of "go env -w GOTOOLCHAIN=go1.19" and makes
+ // sure that "go env GOTOOLCHAIN" always prints the local go command's interpretation of it.
+ // We look for these specific command lines in order to avoid mishandling
+ //
+ // GOTOOLCHAIN=go1.999 go env -newflag GOTOOLCHAIN
+ //
+ // where -newflag is a flag known to Go 1.999 but not known to us.
+ if (len(os.Args) == 3 && os.Args[1] == "env" && os.Args[2] == "GOTOOLCHAIN") ||
+ (len(os.Args) == 4 && os.Args[1] == "env" && os.Args[2] == "-w" && strings.HasPrefix(os.Args[3], "GOTOOLCHAIN=")) {
+ return
+ }
+
+ // Interpret GOTOOLCHAIN to select the Go toolchain to run.
+ gotoolchain := cfg.Getenv("GOTOOLCHAIN")
+ gover.Startup.GOTOOLCHAIN = gotoolchain
+ if gotoolchain == "" {
+ // cfg.Getenv should fall back to $GOROOT/go.env,
+ // so this should not happen, unless a packager
+ // has deleted the GOTOOLCHAIN line from go.env.
+ // It can also happen if GOROOT is missing or broken,
+ // in which case best to let the go command keep running
+ // and diagnose the problem.
+ return
+ }
+
+ // Note: minToolchain is what https://go.dev/doc/toolchain#select calls the default toolchain.
+ minToolchain := gover.LocalToolchain()
+ minVers := gover.Local()
+ var mode string
+ if gotoolchain == "auto" {
+ mode = "auto"
+ } else if gotoolchain == "path" {
+ mode = "path"
+ } else {
+ min, suffix, plus := strings.Cut(gotoolchain, "+") // go1.2.3+auto
+ if min != "local" {
+ v := gover.FromToolchain(min)
+ if v == "" {
+ if plus {
+ base.Fatalf("invalid GOTOOLCHAIN %q: invalid minimum toolchain %q", gotoolchain, min)
+ }
+ base.Fatalf("invalid GOTOOLCHAIN %q", gotoolchain)
+ }
+ minToolchain = min
+ minVers = v
+ }
+ if plus && suffix != "auto" && suffix != "path" {
+ base.Fatalf("invalid GOTOOLCHAIN %q: only version suffixes are +auto and +path", gotoolchain)
+ }
+ mode = suffix
+ }
+
+ gotoolchain = minToolchain
+ if (mode == "auto" || mode == "path") && !goInstallVersion() {
+ // Read go.mod to find new minimum and suggested toolchain.
+ file, goVers, toolchain := modGoToolchain()
+ gover.Startup.AutoFile = file
+ if toolchain == "default" {
+ // "default" means always use the default toolchain,
+ // which is already set, so nothing to do here.
+ // Note that if we have Go 1.21 installed originally,
+ // GOTOOLCHAIN=go1.30.0+auto or GOTOOLCHAIN=go1.30.0,
+ // and the go.mod says "toolchain default", we use Go 1.30, not Go 1.21.
+ // That is, default overrides the "auto" part of the calculation
+ // but not the minimum that the user has set.
+ // Of course, if the go.mod also says "go 1.35", using Go 1.30
+ // will provoke an error about the toolchain being too old.
+ // That's what people who use toolchain default want:
+ // only ever use the toolchain configured by the user
+ // (including its environment and go env -w file).
+ gover.Startup.AutoToolchain = toolchain
+ } else {
+ if toolchain != "" {
+ // Accept toolchain only if it is >= our min.
+ toolVers := gover.FromToolchain(toolchain)
+ if toolVers == "" || (!strings.HasPrefix(toolchain, "go") && !strings.Contains(toolchain, "-go")) {
+ base.Fatalf("invalid toolchain %q in %s", toolchain, base.ShortPath(file))
+ }
+ if gover.Compare(toolVers, minVers) >= 0 {
+ gotoolchain = toolchain
+ minVers = toolVers
+ gover.Startup.AutoToolchain = toolchain
+ }
+ }
+ if gover.Compare(goVers, minVers) > 0 {
+ gotoolchain = "go" + goVers
+ gover.Startup.AutoGoVersion = goVers
+ gover.Startup.AutoToolchain = "" // in case we are overriding it for being too old
+ }
+ }
+ }
+
+ // If we are invoked as a target toolchain, confirm that
+ // we provide the expected version and then run.
+ // This check is delayed until after the handling of auto and path
+ // so that we have initialized gover.Startup for use in error messages.
+ if target := os.Getenv(targetEnv); target != "" && TestVersionSwitch != "loop" {
+ if gover.LocalToolchain() != target {
+ base.Fatalf("toolchain %v invoked to provide %v", gover.LocalToolchain(), target)
+ }
+ os.Unsetenv(targetEnv)
+
+ // Note: It is tempting to check that if gotoolchain != "local"
+ // then target == gotoolchain here, as a sanity check that
+ // the child has made the same version determination as the parent.
+ // This turns out not always to be the case. Specifically, if we are
+ // running Go 1.21 with GOTOOLCHAIN=go1.22+auto, which invokes
+ // Go 1.22, then 'go get go@1.23.0' or 'go get needs_go_1_23'
+ // will invoke Go 1.23, but as the Go 1.23 child the reason for that
+ // will not be apparent here: it will look like we should be using Go 1.22.
+ // We rely on the targetEnv being set to know not to downgrade.
+ // A longer term problem with the sanity check is that the exact details
+ // may change over time: there may be other reasons that a future Go
+ // version might invoke an older one, and the older one won't know why.
+ // Best to just accept that we were invoked to provide a specific toolchain
+ // (which we just checked) and leave it at that.
+ return
+ }
+
+ if gotoolchain == "local" || gotoolchain == gover.LocalToolchain() {
+ // Let the current binary handle the command.
+ return
+ }
+
+ // Minimal sanity check of GOTOOLCHAIN setting before search.
+ // We want to allow things like go1.20.3 but also gccgo-go1.20.3.
+ // We want to disallow mistakes / bad ideas like GOTOOLCHAIN=bash,
+ // since we will find that in the path lookup.
+ if !strings.HasPrefix(gotoolchain, "go1") && !strings.Contains(gotoolchain, "-go1") {
+ base.Fatalf("invalid GOTOOLCHAIN %q", gotoolchain)
+ }
+
+ Exec(gotoolchain)
+}
+
+// TestVersionSwitch is set in the test go binary to the value in $TESTGO_VERSION_SWITCH.
+// Valid settings are:
+//
+// "switch" - simulate version switches by reinvoking the test go binary with a different TESTGO_VERSION.
+// "mismatch" - like "switch" but forget to set TESTGO_VERSION, so it looks like we invoked a mismatched toolchain
+// "loop" - like "mismatch" but forget the target check, causing a toolchain switching loop
+var TestVersionSwitch string
+
+// Exec invokes the specified Go toolchain or else prints an error and exits the process.
+// If $GOTOOLCHAIN is set to path or min+path, Exec only considers the PATH
+// as a source of Go toolchains. Otherwise Exec tries the PATH but then downloads
+// a toolchain if necessary.
+func Exec(gotoolchain string) {
+ log.SetPrefix("go: ")
+
+ writeBits = sysWriteBits()
+
+ count, _ := strconv.Atoi(os.Getenv(countEnv))
+ if count >= maxSwitch-10 {
+ fmt.Fprintf(os.Stderr, "go: switching from go%v to %v [depth %d]\n", gover.Local(), gotoolchain, count)
+ }
+ if count >= maxSwitch {
+ base.Fatalf("too many toolchain switches")
+ }
+ os.Setenv(countEnv, fmt.Sprint(count+1))
+
+ env := cfg.Getenv("GOTOOLCHAIN")
+ pathOnly := env == "path" || strings.HasSuffix(env, "+path")
+
+ // For testing, if TESTGO_VERSION is already in use
+ // (only happens in the cmd/go test binary)
+ // and TESTGO_VERSION_SWITCH=switch is set,
+ // "switch" toolchains by changing TESTGO_VERSION
+ // and reinvoking the current binary.
+ // The special cases =loop and =mismatch skip the
+ // setting of TESTGO_VERSION so that it looks like we
+ // accidentally invoked the wrong toolchain,
+ // to test detection of that failure mode.
+ switch TestVersionSwitch {
+ case "switch":
+ os.Setenv("TESTGO_VERSION", gotoolchain)
+ fallthrough
+ case "loop", "mismatch":
+ exe, err := os.Executable()
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ execGoToolchain(gotoolchain, os.Getenv("GOROOT"), exe)
+ }
+
+ // Look in PATH for the toolchain before we download one.
+ // This allows custom toolchains as well as reuse of toolchains
+ // already installed using go install golang.org/dl/go1.2.3@latest.
+ if exe, err := exec.LookPath(gotoolchain); err == nil {
+ execGoToolchain(gotoolchain, "", exe)
+ }
+
+ // GOTOOLCHAIN=auto looks in PATH and then falls back to download.
+ // GOTOOLCHAIN=path only looks in PATH.
+ if pathOnly {
+ base.Fatalf("cannot find %q in PATH", gotoolchain)
+ }
+
+ // Set up modules without an explicit go.mod, to download distribution.
+ modload.Reset()
+ modload.ForceUseModules = true
+ modload.RootMode = modload.NoRoot
+ modload.Init()
+
+ // Download and unpack toolchain module into module cache.
+ // Note that multiple go commands might be doing this at the same time,
+ // and that's OK: the module cache handles that case correctly.
+ m := module.Version{
+ Path: gotoolchainModule,
+ Version: gotoolchainVersion + "-" + gotoolchain + "." + runtime.GOOS + "-" + runtime.GOARCH,
+ }
+ dir, err := modfetch.Download(context.Background(), m)
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ base.Fatalf("download %s for %s/%s: toolchain not available", gotoolchain, runtime.GOOS, runtime.GOARCH)
+ }
+ base.Fatalf("download %s: %v", gotoolchain, err)
+ }
+
+ // On first use after download, set the execute bits on the commands
+ // so that we can run them. Note that multiple go commands might be
+ // doing this at the same time, but if so no harm done.
+ if runtime.GOOS != "windows" {
+ info, err := os.Stat(filepath.Join(dir, "bin/go"))
+ if err != nil {
+ base.Fatalf("download %s: %v", gotoolchain, err)
+ }
+ if info.Mode()&0111 == 0 {
+ // allowExec sets the exec permission bits on all files found in dir.
+ allowExec := func(dir string) {
+ err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if !d.IsDir() {
+ info, err := os.Stat(path)
+ if err != nil {
+ return err
+ }
+ if err := os.Chmod(path, info.Mode()&0777|0111); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ base.Fatalf("download %s: %v", gotoolchain, err)
+ }
+ }
+
+ // Set the bits in pkg/tool before bin/go.
+ // If we are racing with another go command and do bin/go first,
+ // then the check of bin/go above might succeed, the other go command
+ // would skip its own mode-setting, and then the go command might
+ // try to run a tool before we get to setting the bits on pkg/tool.
+ // Setting pkg/tool before bin/go avoids that ordering problem.
+ // The only other tool the go command invokes is gofmt,
+ // so we set that one explicitly before handling bin (which will include bin/go).
+ allowExec(filepath.Join(dir, "pkg/tool"))
+ allowExec(filepath.Join(dir, "bin/gofmt"))
+ allowExec(filepath.Join(dir, "bin"))
+ }
+ }
+
+ srcUGoMod := filepath.Join(dir, "src/_go.mod")
+ srcGoMod := filepath.Join(dir, "src/go.mod")
+ if size(srcGoMod) != size(srcUGoMod) {
+ err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if path == srcUGoMod {
+ // Leave for last, in case we are racing with another go command.
+ return nil
+ }
+ if pdir, name := filepath.Split(path); name == "_go.mod" {
+ if err := raceSafeCopy(path, pdir+"go.mod"); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ // Handle src/go.mod; this is the signal to other racing go commands
+ // that everything is okay and they can skip this step.
+ if err == nil {
+ err = raceSafeCopy(srcUGoMod, srcGoMod)
+ }
+ if err != nil {
+ base.Fatalf("download %s: %v", gotoolchain, err)
+ }
+ }
+
+ // Reinvoke the go command.
+ execGoToolchain(gotoolchain, dir, filepath.Join(dir, "bin/go"))
+}
+
+func size(path string) int64 {
+ info, err := os.Stat(path)
+ if err != nil {
+ return -1
+ }
+ return info.Size()
+}
+
+var writeBits fs.FileMode
+
+// raceSafeCopy copies the file old to the file new, being careful to ensure
+// that if multiple go commands call raceSafeCopy(old, new) at the same time,
+// they don't interfere with each other: both will succeed and return and
+// later observe the correct content in new. Like in the build cache, we arrange
+// this by opening new without truncation and then writing the content.
+// Both go commands can do this simultaneously and will write the same thing
+// (old never changes content).
+func raceSafeCopy(old, new string) error {
+ oldInfo, err := os.Stat(old)
+ if err != nil {
+ return err
+ }
+ newInfo, err := os.Stat(new)
+ if err == nil && newInfo.Size() == oldInfo.Size() {
+ return nil
+ }
+ data, err := os.ReadFile(old)
+ if err != nil {
+ return err
+ }
+ // The module cache has unwritable directories by default.
+ // Restore the user write bit in the directory so we can create
+ // the new go.mod file. We clear it again at the end on a
+ // best-effort basis (ignoring failures).
+ dir := filepath.Dir(old)
+ info, err := os.Stat(dir)
+ if err != nil {
+ return err
+ }
+ if err := os.Chmod(dir, info.Mode()|writeBits); err != nil {
+ return err
+ }
+ defer os.Chmod(dir, info.Mode())
+ // Note: create the file writable, so that a racing go command
+ // doesn't get an error before we store the actual data.
+ f, err := os.OpenFile(new, os.O_CREATE|os.O_WRONLY, writeBits&^0o111)
+ if err != nil {
+ // If OpenFile failed because a racing go command completed our work
+ // (and then OpenFile failed because the directory or file is now read-only),
+ // count that as a success.
+ if size(old) == size(new) {
+ return nil
+ }
+ return err
+ }
+ defer os.Chmod(new, oldInfo.Mode())
+ if _, err := f.Write(data); err != nil {
+ f.Close()
+ return err
+ }
+ return f.Close()
+}
+
+// modGoToolchain finds the enclosing go.work or go.mod file
+// and returns the go version and toolchain lines from the file.
+// The toolchain line overrides the version line.
+func modGoToolchain() (file, goVers, toolchain string) {
+ wd := base.UncachedCwd()
+ file = modload.FindGoWork(wd)
+ // $GOWORK can be set to a file that does not yet exist, if we are running 'go work init'.
+	// Do not try to load the file in that case.
+ if _, err := os.Stat(file); err != nil {
+ file = ""
+ }
+ if file == "" {
+ file = modload.FindGoMod(wd)
+ }
+ if file == "" {
+ return "", "", ""
+ }
+
+ data, err := os.ReadFile(file)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ return file, gover.GoModLookup(data, "go"), gover.GoModLookup(data, "toolchain")
+}
+
+// goInstallVersion reports whether the command line is go install m@v or go run m@v.
+// If so, Select must not read the go.mod or go.work file in "auto" or "path" mode.
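+// For example (module path purely illustrative), both
+// 'go install example.com/cmd/tool@v1.2.3' and 'go run example.com/cmd/tool@latest' qualify.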
+func goInstallVersion() bool {
+ // Note: We assume there are no flags between 'go' and 'install' or 'run'.
+ // During testing there are some debugging flags that are accepted
+ // in that position, but in production go binaries there are not.
+ if len(os.Args) < 3 {
+ return false
+ }
+
+ var cmdFlags *flag.FlagSet
+ switch os.Args[1] {
+ default:
+ // Command doesn't support a pkg@version as the main module.
+ return false
+ case "install":
+ cmdFlags = &work.CmdInstall.Flag
+ case "run":
+ cmdFlags = &run.CmdRun.Flag
+ }
+
+	// The modcacherw flag is unique in that it affects how we fetch the
+ // requested module to even figure out what toolchain it needs.
+ // We need to actually set it before we check the toolchain version.
+ // (See https://go.dev/issue/64282.)
+ modcacherwFlag := cmdFlags.Lookup("modcacherw")
+ if modcacherwFlag == nil {
+ base.Fatalf("internal error: modcacherw flag not registered for command")
+ }
+ modcacherwVal, ok := modcacherwFlag.Value.(interface {
+ IsBoolFlag() bool
+ flag.Value
+ })
+ if !ok || !modcacherwVal.IsBoolFlag() {
+ base.Fatalf("internal error: modcacherw is not a boolean flag")
+ }
+
+ // Make a best effort to parse the command's args to find the pkg@version
+ // argument and the -modcacherw flag.
+ var (
+ pkgArg string
+ modcacherwSeen bool
+ )
+ for args := os.Args[2:]; len(args) > 0; {
+ a := args[0]
+ args = args[1:]
+ if a == "--" {
+ if len(args) == 0 {
+ return false
+ }
+ pkgArg = args[0]
+ break
+ }
+
+ a, ok := strings.CutPrefix(a, "-")
+ if !ok {
+ // Not a flag argument. Must be a package.
+ pkgArg = a
+ break
+ }
+ a = strings.TrimPrefix(a, "-") // Treat --flag as -flag.
+
+ name, val, hasEq := strings.Cut(a, "=")
+
+ if name == "modcacherw" {
+ if !hasEq {
+ val = "true"
+ }
+ if err := modcacherwVal.Set(val); err != nil {
+ return false
+ }
+ modcacherwSeen = true
+ continue
+ }
+
+ if hasEq {
+ // Already has a value; don't bother parsing it.
+ continue
+ }
+
+ f := run.CmdRun.Flag.Lookup(a)
+ if f == nil {
+ // We don't know whether this flag is a boolean.
+ if os.Args[1] == "run" {
+ // We don't know where to find the pkg@version argument.
+ // For run, the pkg@version can be anywhere on the command line,
+ // because it is preceded by run flags and followed by arguments to the
+ // program being run. Since we don't know whether this flag takes
+ // an argument, we can't reliably identify the end of the run flags.
+				// Just give up and let the user clarify using the "=" form.
+ return false
+ }
+
+ // We would like to let 'go install -newflag pkg@version' work even
+ // across a toolchain switch. To make that work, assume by default that
+ // the pkg@version is the last argument and skip the remaining args unless
+ // we spot a plausible "-modcacherw" flag.
+ for len(args) > 0 {
+ a := args[0]
+ name, _, _ := strings.Cut(a, "=")
+ if name == "-modcacherw" || name == "--modcacherw" {
+ break
+ }
+ if len(args) == 1 && !strings.HasPrefix(a, "-") {
+ pkgArg = a
+ }
+ args = args[1:]
+ }
+ continue
+ }
+
+ if bf, ok := f.Value.(interface{ IsBoolFlag() bool }); !ok || !bf.IsBoolFlag() {
+ // The next arg is the value for this flag. Skip it.
+ args = args[1:]
+ continue
+ }
+ }
+
+ if !strings.Contains(pkgArg, "@") || build.IsLocalImport(pkgArg) || filepath.IsAbs(pkgArg) {
+ return false
+ }
+ path, version, _ := strings.Cut(pkgArg, "@")
+ if path == "" || version == "" || gover.IsToolchain(path) {
+ return false
+ }
+
+ if !modcacherwSeen && base.InGOFLAGS("-modcacherw") {
+ fs := flag.NewFlagSet("goInstallVersion", flag.ExitOnError)
+ fs.Var(modcacherwVal, "modcacherw", modcacherwFlag.Usage)
+ base.SetFromGOFLAGS(fs)
+ }
+
+ // It would be correct to simply return true here, bypassing use
+ // of the current go.mod or go.work, and let "go run" or "go install"
+ // do the rest, including a toolchain switch.
+ // Our goal instead is, since we have gone to the trouble of handling
+ // unknown flags to some degree, to run the switch now, so that
+ // these commands can switch to a newer toolchain directed by the
+ // go.mod which may actually understand the flag.
+ // This was brought up during the go.dev/issue/57001 proposal discussion
+ // and may end up being common in self-contained "go install" or "go run"
+ // command lines if we add new flags in the future.
+
+ // Set up modules without an explicit go.mod, to download go.mod.
+ modload.ForceUseModules = true
+ modload.RootMode = modload.NoRoot
+ modload.Init()
+ defer modload.Reset()
+
+ // See internal/load.PackagesAndErrorsOutsideModule
+ ctx := context.Background()
+ allowed := modload.CheckAllowed
+ if modload.IsRevisionQuery(path, version) {
+ // Don't check for retractions if a specific revision is requested.
+ allowed = nil
+ }
+ noneSelected := func(path string) (version string) { return "none" }
+ _, err := modload.QueryPackages(ctx, path, version, noneSelected, allowed)
+ if errors.Is(err, gover.ErrTooNew) {
+ // Run early switch, same one go install or go run would eventually do,
+ // if it understood all the command-line flags.
+ SwitchOrFatal(ctx, err)
+ }
+
+ return true // pkg@version found
+}
diff --git a/src/cmd/go/internal/toolchain/switch.go b/src/cmd/go/internal/toolchain/switch.go
new file mode 100644
index 0000000..2c6a2b8
--- /dev/null
+++ b/src/cmd/go/internal/toolchain/switch.go
@@ -0,0 +1,231 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package toolchain
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modfetch"
+)
+
+// A Switcher collects errors to be reported and then decides
+// between reporting the errors or switching to a new toolchain
+// to resolve them.
+//
+// The client calls [Switcher.Error] repeatedly with errors encountered
+// and then calls [Switcher.Switch]. If the errors included any
+// *gover.TooNewErrors (potentially wrapped) and switching is
+// permitted by GOTOOLCHAIN, Switch switches to a new toolchain.
+// Otherwise Switch prints all the errors using base.Error.
+//
+// See https://go.dev/doc/toolchain#switch.
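+//
+// A typical use, sketched (loadErrors stands in for whatever errors the caller
+// has collected; it is not a real variable in this package):
+//
+//	var sw Switcher
+//	for _, err := range loadErrors {
+//		sw.Error(err)
+//	}
+//	sw.Switch(ctx) // either reports the errors or switches toolchains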
+type Switcher struct {
+ TooNew *gover.TooNewError // max go requirement observed
+ Errors []error // errors collected so far
+}
+
+// Error reports the error to the Switcher,
+// which saves it for processing during Switch.
+func (s *Switcher) Error(err error) {
+ s.Errors = append(s.Errors, err)
+ s.addTooNew(err)
+}
+
+// addTooNew adds any TooNew errors that can be found in err.
+func (s *Switcher) addTooNew(err error) {
+ switch err := err.(type) {
+ case interface{ Unwrap() []error }:
+ for _, e := range err.Unwrap() {
+ s.addTooNew(e)
+ }
+
+ case interface{ Unwrap() error }:
+ s.addTooNew(err.Unwrap())
+
+ case *gover.TooNewError:
+ if s.TooNew == nil ||
+ gover.Compare(err.GoVersion, s.TooNew.GoVersion) > 0 ||
+ gover.Compare(err.GoVersion, s.TooNew.GoVersion) == 0 && err.What < s.TooNew.What {
+ s.TooNew = err
+ }
+ }
+}
+
+// NeedSwitch reports whether Switch would attempt to switch toolchains.
+func (s *Switcher) NeedSwitch() bool {
+ return s.TooNew != nil && (HasAuto() || HasPath())
+}
+
+// Switch decides whether to switch to a newer toolchain
+// to resolve any of the saved errors.
+// It switches if toolchain switches are permitted and there is at least one TooNewError.
+//
+// If Switch decides not to switch toolchains, it prints the errors using base.Error and returns.
+//
+// If Switch decides to switch toolchains but cannot identify a toolchain to use,
+// it prints the errors along with one more about not being able to find the toolchain
+// and returns.
+//
+// Otherwise, Switch prints an informational message giving a reason for the
+// switch and the toolchain being invoked and then switches toolchains.
+// This operation never returns.
+func (s *Switcher) Switch(ctx context.Context) {
+ if !s.NeedSwitch() {
+ for _, err := range s.Errors {
+ base.Error(err)
+ }
+ return
+ }
+
+ // Switch to newer Go toolchain if necessary and possible.
+ tv, err := NewerToolchain(ctx, s.TooNew.GoVersion)
+ if err != nil {
+ for _, err := range s.Errors {
+ base.Error(err)
+ }
+ base.Error(fmt.Errorf("switching to go >= %v: %w", s.TooNew.GoVersion, err))
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "go: %v requires go >= %v; switching to %v\n", s.TooNew.What, s.TooNew.GoVersion, tv)
+ Exec(tv)
+ panic("unreachable")
+}
+
+// SwitchOrFatal attempts a toolchain switch based on the information in err
+// and otherwise falls back to base.Fatal(err).
+func SwitchOrFatal(ctx context.Context, err error) {
+ var s Switcher
+ s.Error(err)
+ s.Switch(ctx)
+ base.Exit()
+}
+
+// NewerToolchain returns the name of the toolchain to use when we need
+// to switch to a newer toolchain that must support at least the given Go version.
+// See https://go.dev/doc/toolchain#switch.
+//
+// If the latest major release is 1.N.0, we use the latest patch release of 1.(N-1) if that's >= version.
+// Otherwise we use the latest 1.N if that's allowed.
+// Otherwise we use the latest release.
+func NewerToolchain(ctx context.Context, version string) (string, error) {
+ fetch := autoToolchains
+ if !HasAuto() {
+ fetch = pathToolchains
+ }
+ list, err := fetch(ctx)
+ if err != nil {
+ return "", err
+ }
+ return newerToolchain(version, list)
+}
+
+// autoToolchains returns the list of toolchain versions available to GOTOOLCHAIN=auto or =min+auto mode.
+func autoToolchains(ctx context.Context) ([]string, error) {
+ var versions *modfetch.Versions
+ err := modfetch.TryProxies(func(proxy string) error {
+ v, err := modfetch.Lookup(ctx, proxy, "go").Versions(ctx, "")
+ if err != nil {
+ return err
+ }
+ versions = v
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return versions.List, nil
+}
+
+// pathToolchains returns the list of toolchain versions available to GOTOOLCHAIN=path or =min+path mode.
+func pathToolchains(ctx context.Context) ([]string, error) {
+ have := make(map[string]bool)
+ var list []string
+ for _, dir := range pathDirs() {
+ if dir == "" || !filepath.IsAbs(dir) {
+ // Refuse to use local directories in $PATH (hard-coding exec.ErrDot).
+ continue
+ }
+ entries, err := os.ReadDir(dir)
+ if err != nil {
+ continue
+ }
+ for _, de := range entries {
+ if de.IsDir() || !strings.HasPrefix(de.Name(), "go1.") {
+ continue
+ }
+ info, err := de.Info()
+ if err != nil {
+ continue
+ }
+ v, ok := pathVersion(dir, de, info)
+ if !ok || !strings.HasPrefix(v, "1.") || have[v] {
+ continue
+ }
+ have[v] = true
+ list = append(list, v)
+ }
+ }
+ sort.Slice(list, func(i, j int) bool {
+ return gover.Compare(list[i], list[j]) < 0
+ })
+ return list, nil
+}
+
+// newerToolchain implements NewerToolchain where the list of choices is known.
+// It is separated out for easier testing of this logic.
+func newerToolchain(need string, list []string) (string, error) {
+ // Consider each release in the list, from newest to oldest,
+ // considering only entries >= need and then only entries
+ // that are the latest in their language family
+ // (the latest 1.40, the latest 1.39, and so on).
+ // We prefer the latest patch release before the most recent release family,
+ // so if the latest release is 1.40.1 we'll take the latest 1.39.X.
+ // Failing that, we prefer the latest patch release before the most recent
+	// prerelease family, so if the latest release is 1.40rc1 but 1.39 is okay,
+ // we'll still take 1.39.X.
+ // Failing that we'll take the latest release.
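+	// For example, if need is "1.39.3" and the list runs 1.39.0 through 1.40.2,
+	// the newest 1.39 patch (1.39.2) is too old, so the result is "go1.40.2"
+	// (see newerToolchainTests in toolchain_test.go).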
+ latest := ""
+ for i := len(list) - 1; i >= 0; i-- {
+ v := list[i]
+ if gover.Compare(v, need) < 0 {
+ break
+ }
+ if gover.Lang(latest) == gover.Lang(v) {
+ continue
+ }
+ newer := latest
+ latest = v
+ if newer != "" && !gover.IsPrerelease(newer) {
+ // latest is the last patch release of Go 1.X, and we saw a non-prerelease of Go 1.(X+1),
+ // so latest is the one we want.
+ break
+ }
+ }
+ if latest == "" {
+ return "", fmt.Errorf("no releases found for go >= %v", need)
+ }
+ return "go" + latest, nil
+}
+
+// HasAuto reports whether the GOTOOLCHAIN setting allows "auto" upgrades.
+func HasAuto() bool {
+ env := cfg.Getenv("GOTOOLCHAIN")
+ return env == "auto" || strings.HasSuffix(env, "+auto")
+}
+
+// HasPath reports whether the GOTOOLCHAIN setting allows "path" upgrades.
+func HasPath() bool {
+ env := cfg.Getenv("GOTOOLCHAIN")
+ return env == "path" || strings.HasSuffix(env, "+path")
+}
diff --git a/src/cmd/go/internal/toolchain/toolchain_test.go b/src/cmd/go/internal/toolchain/toolchain_test.go
new file mode 100644
index 0000000..e8ed566
--- /dev/null
+++ b/src/cmd/go/internal/toolchain/toolchain_test.go
@@ -0,0 +1,66 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package toolchain
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestNewerToolchain(t *testing.T) {
+ for _, tt := range newerToolchainTests {
+ out, err := newerToolchain(tt.need, tt.list)
+ if (err != nil) != (out == "") {
+ t.Errorf("newerToolchain(%v, %v) = %v, %v, want error", tt.need, tt.list, out, err)
+ continue
+ }
+ if out != tt.out {
+ t.Errorf("newerToolchain(%v, %v) = %v, %v want %v, nil", tt.need, tt.list, out, err, tt.out)
+ }
+ }
+}
+
+var f = strings.Fields
+
+var relRC = []string{"1.39.0", "1.39.1", "1.39.2", "1.40.0", "1.40.1", "1.40.2", "1.41rc1"}
+var rel2 = []string{"1.39.0", "1.39.1", "1.39.2", "1.40.0", "1.40.1", "1.40.2"}
+var rel0 = []string{"1.39.0", "1.39.1", "1.39.2", "1.40.0"}
+var newerToolchainTests = []struct {
+ need string
+ list []string
+ out string
+}{
+ {"1.30", rel0, "go1.39.2"},
+ {"1.30", rel2, "go1.39.2"},
+ {"1.30", relRC, "go1.39.2"},
+ {"1.38", rel0, "go1.39.2"},
+ {"1.38", rel2, "go1.39.2"},
+ {"1.38", relRC, "go1.39.2"},
+ {"1.38.1", rel0, "go1.39.2"},
+ {"1.38.1", rel2, "go1.39.2"},
+ {"1.38.1", relRC, "go1.39.2"},
+ {"1.39", rel0, "go1.39.2"},
+ {"1.39", rel2, "go1.39.2"},
+ {"1.39", relRC, "go1.39.2"},
+ {"1.39.2", rel0, "go1.39.2"},
+ {"1.39.2", rel2, "go1.39.2"},
+ {"1.39.2", relRC, "go1.39.2"},
+ {"1.39.3", rel0, "go1.40.0"},
+ {"1.39.3", rel2, "go1.40.2"},
+ {"1.39.3", relRC, "go1.40.2"},
+ {"1.40", rel0, "go1.40.0"},
+ {"1.40", rel2, "go1.40.2"},
+ {"1.40", relRC, "go1.40.2"},
+ {"1.40.1", rel0, ""},
+ {"1.40.1", rel2, "go1.40.2"},
+ {"1.40.1", relRC, "go1.40.2"},
+ {"1.41", rel0, ""},
+ {"1.41", rel2, ""},
+ {"1.41", relRC, "go1.41rc1"},
+ {"1.41.0", rel0, ""},
+ {"1.41.0", rel2, ""},
+ {"1.41.0", relRC, ""},
+ {"1.40", nil, ""},
+}
diff --git a/src/cmd/go/internal/toolchain/umask_none.go b/src/cmd/go/internal/toolchain/umask_none.go
new file mode 100644
index 0000000..b092fe8
--- /dev/null
+++ b/src/cmd/go/internal/toolchain/umask_none.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(darwin || freebsd || linux || netbsd || openbsd)
+
+package toolchain
+
+import "io/fs"
+
+func sysWriteBits() fs.FileMode {
+ return 0700
+}
diff --git a/src/cmd/go/internal/toolchain/umask_unix.go b/src/cmd/go/internal/toolchain/umask_unix.go
new file mode 100644
index 0000000..cbe4307
--- /dev/null
+++ b/src/cmd/go/internal/toolchain/umask_unix.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || freebsd || linux || netbsd || openbsd
+
+package toolchain
+
+import (
+ "io/fs"
+ "syscall"
+)
+
+// sysWriteBits determines which bits to OR into the mode to make a directory writable.
+// It must be called when there are no other file system operations happening.
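+// For example, under the common umask 022 (group and world unwritable),
+// sysWriteBits returns 0o700.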
+func sysWriteBits() fs.FileMode {
+ // Read current umask. There's no way to read it without also setting it,
+ // so set it conservatively and then restore the original one.
+ m := syscall.Umask(0o777)
+ syscall.Umask(m) // restore bits
+ if m&0o22 == 0o22 { // group and world are unwritable by default
+ return 0o700
+ }
+ if m&0o2 == 0o2 { // group is writable by default, but not world
+ return 0o770
+ }
+ return 0o777 // everything is writable by default
+}
diff --git a/src/cmd/go/internal/trace/trace.go b/src/cmd/go/internal/trace/trace.go
new file mode 100644
index 0000000..d69dc4f
--- /dev/null
+++ b/src/cmd/go/internal/trace/trace.go
@@ -0,0 +1,206 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "cmd/internal/traceviewer"
+ "context"
+ "encoding/json"
+ "errors"
+ "os"
+ "strings"
+ "sync/atomic"
+ "time"
+)
+
+// Constants used in event fields.
+// See https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU
+// for more details.
+const (
+ phaseDurationBegin = "B"
+ phaseDurationEnd = "E"
+ phaseFlowStart = "s"
+ phaseFlowEnd = "f"
+
+ bindEnclosingSlice = "e"
+)
+
+var traceStarted atomic.Bool
+
+func getTraceContext(ctx context.Context) (traceContext, bool) {
+ if !traceStarted.Load() {
+ return traceContext{}, false
+ }
+ v := ctx.Value(traceKey{})
+ if v == nil {
+ return traceContext{}, false
+ }
+ return v.(traceContext), true
+}
+
+// StartSpan starts a trace event with the given name. The Span ends when its Done method is called.
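+//
+// A minimal use, sketched (the span name is illustrative):
+//
+//	ctx, span := StartSpan(ctx, "load packages")
+//	defer span.Done() // Done is a no-op on a nil *Span, so this is safe even when tracing is off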
+func StartSpan(ctx context.Context, name string) (context.Context, *Span) {
+ tc, ok := getTraceContext(ctx)
+ if !ok {
+ return ctx, nil
+ }
+ childSpan := &Span{t: tc.t, name: name, tid: tc.tid, start: time.Now()}
+ tc.t.writeEvent(&traceviewer.Event{
+ Name: childSpan.name,
+ Time: float64(childSpan.start.UnixNano()) / float64(time.Microsecond),
+ TID: childSpan.tid,
+ Phase: phaseDurationBegin,
+ })
+ ctx = context.WithValue(ctx, traceKey{}, traceContext{tc.t, tc.tid})
+ return ctx, childSpan
+}
+
+// StartGoroutine associates the context with a new Thread ID. The Chrome trace viewer associates each
+// trace event with a thread, and doesn't expect events with the same thread id to happen at the
+// same time.
+func StartGoroutine(ctx context.Context) context.Context {
+ tc, ok := getTraceContext(ctx)
+ if !ok {
+ return ctx
+ }
+ return context.WithValue(ctx, traceKey{}, traceContext{tc.t, tc.t.getNextTID()})
+}
+
+// Flow marks a flow indicating that the 'to' span depends on the 'from' span.
+// Flow should be called while the 'to' span is in progress.
+func Flow(ctx context.Context, from *Span, to *Span) {
+ tc, ok := getTraceContext(ctx)
+ if !ok || from == nil || to == nil {
+ return
+ }
+
+ id := tc.t.getNextFlowID()
+ tc.t.writeEvent(&traceviewer.Event{
+ Name: from.name + " -> " + to.name,
+ Category: "flow",
+ ID: id,
+ Time: float64(from.end.UnixNano()) / float64(time.Microsecond),
+ Phase: phaseFlowStart,
+ TID: from.tid,
+ })
+ tc.t.writeEvent(&traceviewer.Event{
+ Name: from.name + " -> " + to.name,
+ Category: "flow", // TODO(matloob): Add Category to Flow?
+ ID: id,
+ Time: float64(to.start.UnixNano()) / float64(time.Microsecond),
+ Phase: phaseFlowEnd,
+ TID: to.tid,
+ BindPoint: bindEnclosingSlice,
+ })
+}
+
+type Span struct {
+ t *tracer
+
+ name string
+ tid uint64
+ start time.Time
+ end time.Time
+}
+
+func (s *Span) Done() {
+ if s == nil {
+ return
+ }
+ s.end = time.Now()
+ s.t.writeEvent(&traceviewer.Event{
+ Name: s.name,
+ Time: float64(s.end.UnixNano()) / float64(time.Microsecond),
+ TID: s.tid,
+ Phase: phaseDurationEnd,
+ })
+}
+
+type tracer struct {
+ file chan traceFile // 1-buffered
+
+ nextTID uint64
+ nextFlowID uint64
+}
+
+func (t *tracer) writeEvent(ev *traceviewer.Event) error {
+ f := <-t.file
+ defer func() { t.file <- f }()
+ var err error
+ if f.entries == 0 {
+ _, err = f.sb.WriteString("[\n")
+ } else {
+ _, err = f.sb.WriteString(",")
+ }
+ f.entries++
+ if err != nil {
+		return err
+ }
+
+ if err := f.enc.Encode(ev); err != nil {
+ return err
+ }
+
+ // Write event string to output file.
+ _, err = f.f.WriteString(f.sb.String())
+ f.sb.Reset()
+ return err
+}
+
+func (t *tracer) Close() error {
+ f := <-t.file
+ defer func() { t.file <- f }()
+
+ _, firstErr := f.f.WriteString("]")
+ if err := f.f.Close(); firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+func (t *tracer) getNextTID() uint64 {
+ return atomic.AddUint64(&t.nextTID, 1)
+}
+
+func (t *tracer) getNextFlowID() uint64 {
+ return atomic.AddUint64(&t.nextFlowID, 1)
+}
+
+// traceKey is the context key for tracing information. It is unexported to prevent collisions with context keys defined in
+// other packages.
+type traceKey struct{}
+
+type traceContext struct {
+ t *tracer
+ tid uint64
+}
+
+// Start starts a trace which writes to the given file.
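+//
+// A minimal use, sketched (the file name is illustrative):
+//
+//	ctx, closeTrace, err := Start(ctx, "trace.json")
+//	if err != nil { ... }
+//	defer closeTrace()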
+func Start(ctx context.Context, file string) (context.Context, func() error, error) {
+ traceStarted.Store(true)
+ if file == "" {
+ return nil, nil, errors.New("no trace file supplied")
+ }
+ f, err := os.Create(file)
+ if err != nil {
+ return nil, nil, err
+ }
+ t := &tracer{file: make(chan traceFile, 1)}
+ sb := new(strings.Builder)
+ t.file <- traceFile{
+ f: f,
+ sb: sb,
+ enc: json.NewEncoder(sb),
+ }
+ ctx = context.WithValue(ctx, traceKey{}, traceContext{t: t})
+ return ctx, t.Close, nil
+}
+
+type traceFile struct {
+ f *os.File
+ sb *strings.Builder
+ enc *json.Encoder
+ entries int64
+}
diff --git a/src/cmd/go/internal/vcs/discovery.go b/src/cmd/go/internal/vcs/discovery.go
new file mode 100644
index 0000000..327b44c
--- /dev/null
+++ b/src/cmd/go/internal/vcs/discovery.go
@@ -0,0 +1,97 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// charsetReader returns a reader that converts from the given charset to UTF-8.
+// Currently it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
+// error, which is printed by go get, so the user can find out why the package
+// wasn't downloaded if the encoding is not supported. Note that, in
+// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters
+// greater than 0x7f are not rejected).
+func charsetReader(charset string, input io.Reader) (io.Reader, error) {
+ switch strings.ToLower(charset) {
+ case "utf-8", "ascii":
+ return input, nil
+ default:
+ return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
+ }
+}
+
+// parseMetaGoImports returns meta imports from the HTML in r.
+// Parsing ends at the end of the <head> section or the beginning of the <body>.
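+// A matching tag has the form (URL illustrative):
+//
+//	<meta name="go-import" content="example.org/pkg git https://example.org/repo">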
+func parseMetaGoImports(r io.Reader, mod ModuleMode) ([]metaImport, error) {
+ d := xml.NewDecoder(r)
+ d.CharsetReader = charsetReader
+ d.Strict = false
+ var imports []metaImport
+ for {
+ t, err := d.RawToken()
+ if err != nil {
+ if err != io.EOF && len(imports) == 0 {
+ return nil, err
+ }
+ break
+ }
+ if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
+ break
+ }
+ if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
+ break
+ }
+ e, ok := t.(xml.StartElement)
+ if !ok || !strings.EqualFold(e.Name.Local, "meta") {
+ continue
+ }
+ if attrValue(e.Attr, "name") != "go-import" {
+ continue
+ }
+ if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 {
+ imports = append(imports, metaImport{
+ Prefix: f[0],
+ VCS: f[1],
+ RepoRoot: f[2],
+ })
+ }
+ }
+
+ // Extract mod entries if we are paying attention to them.
+ var list []metaImport
+ var have map[string]bool
+ if mod == PreferMod {
+ have = make(map[string]bool)
+ for _, m := range imports {
+ if m.VCS == "mod" {
+ have[m.Prefix] = true
+ list = append(list, m)
+ }
+ }
+ }
+
+ // Append non-mod entries, ignoring those superseded by a mod entry.
+ for _, m := range imports {
+ if m.VCS != "mod" && !have[m.Prefix] {
+ list = append(list, m)
+ }
+ }
+ return list, nil
+}
+
+// attrValue returns the attribute value for the case-insensitive key
+// name, or the empty string if nothing is found.
+func attrValue(attrs []xml.Attr, name string) string {
+ for _, a := range attrs {
+ if strings.EqualFold(a.Name.Local, name) {
+ return a.Value
+ }
+ }
+ return ""
+}
diff --git a/src/cmd/go/internal/vcs/discovery_test.go b/src/cmd/go/internal/vcs/discovery_test.go
new file mode 100644
index 0000000..eb99fdf
--- /dev/null
+++ b/src/cmd/go/internal/vcs/discovery_test.go
@@ -0,0 +1,110 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+var parseMetaGoImportsTests = []struct {
+ in string
+ mod ModuleMode
+ out []metaImport
+}{
+ {
+ `<meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">`,
+ IgnoreMod,
+ []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}},
+ },
+ {
+ `<meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">
+ <meta name="go-import" content="baz/quux git http://github.com/rsc/baz/quux">`,
+ IgnoreMod,
+ []metaImport{
+ {"foo/bar", "git", "https://github.com/rsc/foo/bar"},
+ {"baz/quux", "git", "http://github.com/rsc/baz/quux"},
+ },
+ },
+ {
+ `<meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">
+ <meta name="go-import" content="foo/bar mod http://github.com/rsc/baz/quux">`,
+ IgnoreMod,
+ []metaImport{
+ {"foo/bar", "git", "https://github.com/rsc/foo/bar"},
+ },
+ },
+ {
+ `<meta name="go-import" content="foo/bar mod http://github.com/rsc/baz/quux">
+ <meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">`,
+ IgnoreMod,
+ []metaImport{
+ {"foo/bar", "git", "https://github.com/rsc/foo/bar"},
+ },
+ },
+ {
+ `<meta name="go-import" content="foo/bar mod http://github.com/rsc/baz/quux">
+ <meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">`,
+ PreferMod,
+ []metaImport{
+ {"foo/bar", "mod", "http://github.com/rsc/baz/quux"},
+ },
+ },
+ {
+ `<head>
+ <meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">
+ </head>`,
+ IgnoreMod,
+ []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}},
+ },
+ {
+ `<meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">
+ <body>`,
+ IgnoreMod,
+ []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}},
+ },
+ {
+ `<!doctype html><meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">`,
+ IgnoreMod,
+ []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}},
+ },
+ {
+ // XML doesn't like <div style=position:relative>.
+ `<!doctype html><title>Page Not Found</title><meta name=go-import content="chitin.io/chitin git https://github.com/chitin-io/chitin"><div style=position:relative>DRAFT</div>`,
+ IgnoreMod,
+ []metaImport{{"chitin.io/chitin", "git", "https://github.com/chitin-io/chitin"}},
+ },
+ {
+ `<meta name="go-import" content="myitcv.io git https://github.com/myitcv/x">
+ <meta name="go-import" content="myitcv.io/blah2 mod https://raw.githubusercontent.com/myitcv/pubx/master">
+ `,
+ IgnoreMod,
+ []metaImport{{"myitcv.io", "git", "https://github.com/myitcv/x"}},
+ },
+ {
+ `<meta name="go-import" content="myitcv.io git https://github.com/myitcv/x">
+ <meta name="go-import" content="myitcv.io/blah2 mod https://raw.githubusercontent.com/myitcv/pubx/master">
+ `,
+ PreferMod,
+ []metaImport{
+ {"myitcv.io/blah2", "mod", "https://raw.githubusercontent.com/myitcv/pubx/master"},
+ {"myitcv.io", "git", "https://github.com/myitcv/x"},
+ },
+ },
+}
+
+func TestParseMetaGoImports(t *testing.T) {
+ for i, tt := range parseMetaGoImportsTests {
+ out, err := parseMetaGoImports(strings.NewReader(tt.in), tt.mod)
+ if err != nil {
+ t.Errorf("test#%d: %v", i, err)
+ continue
+ }
+ if !reflect.DeepEqual(out, tt.out) {
+ t.Errorf("test#%d:\n\thave %q\n\twant %q", i, out, tt.out)
+ }
+ }
+}
diff --git a/src/cmd/go/internal/vcs/vcs.go b/src/cmd/go/internal/vcs/vcs.go
new file mode 100644
index 0000000..dbf16d1
--- /dev/null
+++ b/src/cmd/go/internal/vcs/vcs.go
@@ -0,0 +1,1688 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "internal/lazyregexp"
+ "internal/singleflight"
+ "io/fs"
+ "log"
+ urlpkg "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/search"
+ "cmd/go/internal/str"
+ "cmd/go/internal/web"
+
+ "golang.org/x/mod/module"
+)
+
+// A Cmd describes how to use a version control system
+// like Mercurial, Git, or Subversion.
+type Cmd struct {
+ Name string
+ Cmd string // name of the binary to invoke
+ RootNames []rootName // filename and mode indicating the root of a checkout directory
+
+ CreateCmd []string // commands to download a fresh copy of a repository
+ DownloadCmd []string // commands to download updates into an existing repository
+
+ TagCmd []tagCmd // commands to list tags
+ TagLookupCmd []tagCmd // commands to look up tags before running TagSyncCmd
+ TagSyncCmd []string // commands to sync to specific tag
+ TagSyncDefault []string // commands to sync to default tag
+
+ Scheme []string
+ PingCmd string
+
+ RemoteRepo func(v *Cmd, rootDir string) (remoteRepo string, err error)
+ ResolveRepo func(v *Cmd, rootDir, remoteRepo string) (realRepo string, err error)
+ Status func(v *Cmd, rootDir string) (Status, error)
+}
+
+// Status is the current state of a local repository.
+type Status struct {
+ Revision string // Optional.
+ CommitTime time.Time // Optional.
+ Uncommitted bool // Required.
+}
+
+var (
+ // VCSTestRepoURL is the URL of the HTTP server that serves the repos for
+ // vcs-test.golang.org.
+ //
+ // In tests, this is set to the URL of an httptest.Server hosting a
+ // cmd/go/internal/vcweb.Server.
+ VCSTestRepoURL string
+
+ // VCSTestHosts is the set of hosts supported by the vcs-test server.
+ VCSTestHosts []string
+
+ // VCSTestIsLocalHost reports whether the given URL refers to a local
+ // (loopback) host, such as "localhost" or "127.0.0.1:8080".
+ VCSTestIsLocalHost func(*urlpkg.URL) bool
+)
+
+var defaultSecureScheme = map[string]bool{
+ "https": true,
+ "git+ssh": true,
+ "bzr+ssh": true,
+ "svn+ssh": true,
+ "ssh": true,
+}
+
+func (v *Cmd) IsSecure(repo string) bool {
+ u, err := urlpkg.Parse(repo)
+ if err != nil {
+ // If repo is not a URL, it's not secure.
+ return false
+ }
+ if VCSTestRepoURL != "" && web.IsLocalHost(u) {
+ // If the vcstest server is in use, it may redirect to other local ports for
+ // other protocols (such as svn). Assume that all loopback addresses are
+ // secure during testing.
+ return true
+ }
+ return v.isSecureScheme(u.Scheme)
+}
+
+func (v *Cmd) isSecureScheme(scheme string) bool {
+ switch v.Cmd {
+ case "git":
+ // GIT_ALLOW_PROTOCOL is an environment variable defined by Git. It is a
+ // colon-separated list of schemes that are allowed to be used with git
+ // fetch/clone. Any scheme not mentioned will be considered insecure.
+ if allow := os.Getenv("GIT_ALLOW_PROTOCOL"); allow != "" {
+ for _, s := range strings.Split(allow, ":") {
+ if s == scheme {
+ return true
+ }
+ }
+ return false
+ }
+ }
+ return defaultSecureScheme[scheme]
+}
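+
+// Illustrative sketch of the GIT_ALLOW_PROTOCOL behavior above, matching the
+// cases in TestIsSecureGitAllowProtocol: with GIT_ALLOW_PROTOCOL=https:foo set,
+//
+//	vcsGit.IsSecure("foo://example.com/bar.git")      // true: "foo" is listed
+//	vcsGit.IsSecure("ssh://user@example.com/foo.git") // false: "ssh" is not listed
+//	vcsHg.IsSecure("foo://example.com/bar.hg")        // false: hg ignores GIT_ALLOW_PROTOCOL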
+
+// A tagCmd describes a command to list available tags
+// that can be passed to TagSyncCmd.
+type tagCmd struct {
+ cmd string // command to list tags
+ pattern string // regexp to extract tags from list
+}
+
+// vcsList lists the known version control systems
+var vcsList = []*Cmd{
+ vcsHg,
+ vcsGit,
+ vcsSvn,
+ vcsBzr,
+ vcsFossil,
+}
+
+// vcsMod is a stub for the "mod" scheme. It's returned by
+// repoRootForImportDynamic, but is otherwise not treated as a VCS command.
+var vcsMod = &Cmd{Name: "mod"}
+
+// vcsByCmd returns the version control system for the given
+// command name (hg, git, svn, bzr, fossil).
+func vcsByCmd(cmd string) *Cmd {
+ for _, vcs := range vcsList {
+ if vcs.Cmd == cmd {
+ return vcs
+ }
+ }
+ return nil
+}
+
+// vcsHg describes how to use Mercurial.
+var vcsHg = &Cmd{
+ Name: "Mercurial",
+ Cmd: "hg",
+ RootNames: []rootName{
+ {filename: ".hg", isDir: true},
+ },
+
+ CreateCmd: []string{"clone -U -- {repo} {dir}"},
+ DownloadCmd: []string{"pull"},
+
+ // We allow both tag and branch names as 'tags'
+ // for selecting a version. This lets people have
+ // a go.release.r60 branch and a go1 branch
+ // and make changes in both, without constantly
+ // editing .hgtags.
+ TagCmd: []tagCmd{
+ {"tags", `^(\S+)`},
+ {"branches", `^(\S+)`},
+ },
+ TagSyncCmd: []string{"update -r {tag}"},
+ TagSyncDefault: []string{"update default"},
+
+ Scheme: []string{"https", "http", "ssh"},
+ PingCmd: "identify -- {scheme}://{repo}",
+ RemoteRepo: hgRemoteRepo,
+ Status: hgStatus,
+}
+
+func hgRemoteRepo(vcsHg *Cmd, rootDir string) (remoteRepo string, err error) {
+ out, err := vcsHg.runOutput(rootDir, "paths default")
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(string(out)), nil
+}
+
+func hgStatus(vcsHg *Cmd, rootDir string) (Status, error) {
+ // Output changeset ID and seconds since epoch.
+ out, err := vcsHg.runOutputVerboseOnly(rootDir, `log -l1 -T {node}:{date|hgdate}`)
+ if err != nil {
+ return Status{}, err
+ }
+
+ // Successful execution without output indicates an empty repo (no commits).
+ var rev string
+ var commitTime time.Time
+ if len(out) > 0 {
+ // Strip trailing timezone offset.
+ if i := bytes.IndexByte(out, ' '); i > 0 {
+ out = out[:i]
+ }
+ rev, commitTime, err = parseRevTime(out)
+ if err != nil {
+ return Status{}, err
+ }
+ }
+
+ // Also look for untracked files.
+ out, err = vcsHg.runOutputVerboseOnly(rootDir, "status")
+ if err != nil {
+ return Status{}, err
+ }
+ uncommitted := len(out) > 0
+
+ return Status{
+ Revision: rev,
+ CommitTime: commitTime,
+ Uncommitted: uncommitted,
+ }, nil
+}
+
+// parseRevTime parses commit details in "revision:seconds" format.
+func parseRevTime(out []byte) (string, time.Time, error) {
+ buf := string(bytes.TrimSpace(out))
+
+ i := strings.IndexByte(buf, ':')
+ if i < 1 {
+ return "", time.Time{}, errors.New("unrecognized VCS tool output")
+ }
+ rev := buf[:i]
+
+ secs, err := strconv.ParseInt(string(buf[i+1:]), 10, 64)
+ if err != nil {
+ return "", time.Time{}, fmt.Errorf("unrecognized VCS tool output: %v", err)
+ }
+
+ return rev, time.Unix(secs, 0), nil
+}
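+
+// For example (hypothetical output), hg's {node}:{date|hgdate} template prints
+// something like "0123abcd:1632225600 -18000"; hgStatus strips the trailing
+// timezone offset, and
+//
+//	parseRevTime([]byte("0123abcd:1632225600"))
+//
+// returns revision "0123abcd" and time.Unix(1632225600, 0).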
+
+// vcsGit describes how to use Git.
+var vcsGit = &Cmd{
+ Name: "Git",
+ Cmd: "git",
+ RootNames: []rootName{
+ {filename: ".git", isDir: true},
+ },
+
+ CreateCmd: []string{"clone -- {repo} {dir}", "-go-internal-cd {dir} submodule update --init --recursive"},
+ DownloadCmd: []string{"pull --ff-only", "submodule update --init --recursive"},
+
+ TagCmd: []tagCmd{
+ // tags/xxx matches a git tag named xxx
+ // origin/xxx matches a git branch named xxx on the default remote repository
+ {"show-ref", `(?:tags|origin)/(\S+)$`},
+ },
+ TagLookupCmd: []tagCmd{
+ {"show-ref tags/{tag} origin/{tag}", `((?:tags|origin)/\S+)$`},
+ },
+ TagSyncCmd: []string{"checkout {tag}", "submodule update --init --recursive"},
+ // Both CreateCmd and DownloadCmd update the working dir.
+ // No need to do more here. We used to 'checkout master'
+ // but that doesn't work if the default branch is not named master.
+ // DO NOT add 'checkout master' here.
+ // See golang.org/issue/9032.
+ TagSyncDefault: []string{"submodule update --init --recursive"},
+
+ Scheme: []string{"git", "https", "http", "git+ssh", "ssh"},
+
+ // Leave out the '--' separator in the ls-remote command: git 2.7.4 does not
+ // support such a separator for that command, and this use should be safe
+ // without it because the {scheme} value comes from the predefined list above.
+ // See golang.org/issue/33836.
+ PingCmd: "ls-remote {scheme}://{repo}",
+
+ RemoteRepo: gitRemoteRepo,
+ Status: gitStatus,
+}
+
+// scpSyntaxRe matches the SCP-like addresses used by Git to access
+// repositories by SSH.
+var scpSyntaxRe = lazyregexp.New(`^(\w+)@([\w.-]+):(.*)$`)
+
+func gitRemoteRepo(vcsGit *Cmd, rootDir string) (remoteRepo string, err error) {
+ cmd := "config remote.origin.url"
+ errParse := errors.New("unable to parse output of git " + cmd)
+ errRemoteOriginNotFound := errors.New("remote origin not found")
+ outb, err := vcsGit.run1(rootDir, cmd, nil, false)
+ if err != nil {
+ // If git produced no output, the config key is well-formed but its
+ // value is not set, meaning no remote origin is configured.
+ if outb != nil && len(outb) == 0 {
+ return "", errRemoteOriginNotFound
+ }
+ return "", err
+ }
+ out := strings.TrimSpace(string(outb))
+
+ var repoURL *urlpkg.URL
+ if m := scpSyntaxRe.FindStringSubmatch(out); m != nil {
+ // Match SCP-like syntax and convert it to a URL.
+ // Eg, "git@github.com:user/repo" becomes
+ // "ssh://git@github.com/user/repo".
+ repoURL = &urlpkg.URL{
+ Scheme: "ssh",
+ User: urlpkg.User(m[1]),
+ Host: m[2],
+ Path: m[3],
+ }
+ } else {
+ repoURL, err = urlpkg.Parse(out)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ // Iterate over insecure schemes too, because this function simply
+ // reports the state of the repo. If we can't see insecure schemes then
+ // we can't report the actual repo URL.
+ for _, s := range vcsGit.Scheme {
+ if repoURL.Scheme == s {
+ return repoURL.String(), nil
+ }
+ }
+ return "", errParse
+}
+
+func gitStatus(vcsGit *Cmd, rootDir string) (Status, error) {
+ out, err := vcsGit.runOutputVerboseOnly(rootDir, "status --porcelain")
+ if err != nil {
+ return Status{}, err
+ }
+ uncommitted := len(out) > 0
+
+ // "git status" works for empty repositories, but "git show" does not.
+ // If "git show" fails and there are uncommitted files, assume the
+ // repository has no commits yet and leave the revision and commit time empty.
+ var rev string
+ var commitTime time.Time
+ out, err = vcsGit.runOutputVerboseOnly(rootDir, "-c log.showsignature=false show -s --format=%H:%ct")
+ if err != nil && !uncommitted {
+ return Status{}, err
+ } else if err == nil {
+ rev, commitTime, err = parseRevTime(out)
+ if err != nil {
+ return Status{}, err
+ }
+ }
+
+ return Status{
+ Revision: rev,
+ CommitTime: commitTime,
+ Uncommitted: uncommitted,
+ }, nil
+}
+
+// vcsBzr describes how to use Bazaar.
+var vcsBzr = &Cmd{
+ Name: "Bazaar",
+ Cmd: "bzr",
+ RootNames: []rootName{
+ {filename: ".bzr", isDir: true},
+ },
+
+ CreateCmd: []string{"branch -- {repo} {dir}"},
+
+ // Without --overwrite bzr will not pull tags that changed.
+ // Replace by --overwrite-tags after http://pad.lv/681792 goes in.
+ DownloadCmd: []string{"pull --overwrite"},
+
+ TagCmd: []tagCmd{{"tags", `^(\S+)`}},
+ TagSyncCmd: []string{"update -r {tag}"},
+ TagSyncDefault: []string{"update -r revno:-1"},
+
+ Scheme: []string{"https", "http", "bzr", "bzr+ssh"},
+ PingCmd: "info -- {scheme}://{repo}",
+ RemoteRepo: bzrRemoteRepo,
+ ResolveRepo: bzrResolveRepo,
+ Status: bzrStatus,
+}
+
+func bzrRemoteRepo(vcsBzr *Cmd, rootDir string) (remoteRepo string, err error) {
+ outb, err := vcsBzr.runOutput(rootDir, "config parent_location")
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(string(outb)), nil
+}
+
+func bzrResolveRepo(vcsBzr *Cmd, rootDir, remoteRepo string) (realRepo string, err error) {
+ outb, err := vcsBzr.runOutput(rootDir, "info "+remoteRepo)
+ if err != nil {
+ return "", err
+ }
+ out := string(outb)
+
+ // Expect:
+ // ...
+ // (branch root|repository branch): <URL>
+ // ...
+
+ found := false
+ for _, prefix := range []string{"\n branch root: ", "\n repository branch: "} {
+ i := strings.Index(out, prefix)
+ if i >= 0 {
+ out = out[i+len(prefix):]
+ found = true
+ break
+ }
+ }
+ if !found {
+ return "", fmt.Errorf("unable to parse output of bzr info")
+ }
+
+ i := strings.Index(out, "\n")
+ if i < 0 {
+ return "", fmt.Errorf("unable to parse output of bzr info")
+ }
+ out = out[:i]
+ return strings.TrimSpace(out), nil
+}
+
+func bzrStatus(vcsBzr *Cmd, rootDir string) (Status, error) {
+ outb, err := vcsBzr.runOutputVerboseOnly(rootDir, "version-info")
+ if err != nil {
+ return Status{}, err
+ }
+ out := string(outb)
+
+ // Expect (non-empty repositories only):
+ //
+ // revision-id: gopher@gopher.net-20211021072330-qshok76wfypw9lpm
+ // date: 2021-09-21 12:00:00 +1000
+ // ...
+ var rev string
+ var commitTime time.Time
+
+ for _, line := range strings.Split(out, "\n") {
+ i := strings.IndexByte(line, ':')
+ if i < 0 {
+ continue
+ }
+ key := line[:i]
+ value := strings.TrimSpace(line[i+1:])
+
+ switch key {
+ case "revision-id":
+ rev = value
+ case "date":
+ var err error
+ commitTime, err = time.Parse("2006-01-02 15:04:05 -0700", value)
+ if err != nil {
+ return Status{}, errors.New("unable to parse output of bzr version-info")
+ }
+ }
+ }
+
+ outb, err = vcsBzr.runOutputVerboseOnly(rootDir, "status")
+ if err != nil {
+ return Status{}, err
+ }
+
+ // Skip warning when working directory is set to an older revision.
+ if bytes.HasPrefix(outb, []byte("working tree is out of date")) {
+ i := bytes.IndexByte(outb, '\n')
+ if i < 0 {
+ i = len(outb)
+ }
+ outb = outb[:i]
+ }
+ uncommitted := len(outb) > 0
+
+ return Status{
+ Revision: rev,
+ CommitTime: commitTime,
+ Uncommitted: uncommitted,
+ }, nil
+}
+
+// vcsSvn describes how to use Subversion.
+var vcsSvn = &Cmd{
+ Name: "Subversion",
+ Cmd: "svn",
+ RootNames: []rootName{
+ {filename: ".svn", isDir: true},
+ },
+
+ CreateCmd: []string{"checkout -- {repo} {dir}"},
+ DownloadCmd: []string{"update"},
+
+ // There is no tag command in subversion.
+ // The branch information is all in the path names.
+
+ Scheme: []string{"https", "http", "svn", "svn+ssh"},
+ PingCmd: "info -- {scheme}://{repo}",
+ RemoteRepo: svnRemoteRepo,
+}
+
+func svnRemoteRepo(vcsSvn *Cmd, rootDir string) (remoteRepo string, err error) {
+ outb, err := vcsSvn.runOutput(rootDir, "info")
+ if err != nil {
+ return "", err
+ }
+ out := string(outb)
+
+ // Expect:
+ //
+ // ...
+ // URL: <URL>
+ // ...
+ //
+ // Note that we're not using the Repository Root line,
+ // because svn allows checking out subtrees.
+ // The URL will be the URL of the subtree (what we used with 'svn co')
+ // while the Repository Root may be a much higher parent.
+ i := strings.Index(out, "\nURL: ")
+ if i < 0 {
+ return "", fmt.Errorf("unable to parse output of svn info")
+ }
+ out = out[i+len("\nURL: "):]
+ i = strings.Index(out, "\n")
+ if i < 0 {
+ return "", fmt.Errorf("unable to parse output of svn info")
+ }
+ out = out[:i]
+ return strings.TrimSpace(out), nil
+}
+
+// fossilRepoName is the name go get associates with a fossil repository. In the
+// real world the file can be named anything.
+const fossilRepoName = ".fossil"
+
+// vcsFossil describes how to use Fossil (fossil-scm.org)
+var vcsFossil = &Cmd{
+ Name: "Fossil",
+ Cmd: "fossil",
+ RootNames: []rootName{
+ {filename: ".fslckout", isDir: false},
+ {filename: "_FOSSIL_", isDir: false},
+ },
+
+ CreateCmd: []string{"-go-internal-mkdir {dir} clone -- {repo} " + filepath.Join("{dir}", fossilRepoName), "-go-internal-cd {dir} open .fossil"},
+ DownloadCmd: []string{"up"},
+
+ TagCmd: []tagCmd{{"tag ls", `(.*)`}},
+ TagSyncCmd: []string{"up tag:{tag}"},
+ TagSyncDefault: []string{"up trunk"},
+
+ Scheme: []string{"https", "http"},
+ RemoteRepo: fossilRemoteRepo,
+ Status: fossilStatus,
+}
+
+func fossilRemoteRepo(vcsFossil *Cmd, rootDir string) (remoteRepo string, err error) {
+ out, err := vcsFossil.runOutput(rootDir, "remote-url")
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(string(out)), nil
+}
+
+var errFossilInfo = errors.New("unable to parse output of fossil info")
+
+func fossilStatus(vcsFossil *Cmd, rootDir string) (Status, error) {
+ outb, err := vcsFossil.runOutputVerboseOnly(rootDir, "info")
+ if err != nil {
+ return Status{}, err
+ }
+ out := string(outb)
+
+ // Expect:
+ // ...
+ // checkout: 91ed71f22c77be0c3e250920f47bfd4e1f9024d2 2021-09-21 12:00:00 UTC
+ // ...
+
+ // Extract revision and commit time.
+ // Ensure line ends with UTC (known timezone offset).
+ const prefix = "\ncheckout:"
+ const suffix = " UTC"
+ i := strings.Index(out, prefix)
+ if i < 0 {
+ return Status{}, errFossilInfo
+ }
+ checkout := out[i+len(prefix):]
+ i = strings.Index(checkout, suffix)
+ if i < 0 {
+ return Status{}, errFossilInfo
+ }
+ checkout = strings.TrimSpace(checkout[:i])
+
+ i = strings.IndexByte(checkout, ' ')
+ if i < 0 {
+ return Status{}, errFossilInfo
+ }
+ rev := checkout[:i]
+
+ commitTime, err := time.ParseInLocation(time.DateTime, checkout[i+1:], time.UTC)
+ if err != nil {
+ return Status{}, fmt.Errorf("%v: %v", errFossilInfo, err)
+ }
+
+ // Also look for untracked changes.
+ outb, err = vcsFossil.runOutputVerboseOnly(rootDir, "changes --differ")
+ if err != nil {
+ return Status{}, err
+ }
+ uncommitted := len(outb) > 0
+
+ return Status{
+ Revision: rev,
+ CommitTime: commitTime,
+ Uncommitted: uncommitted,
+ }, nil
+}
+
+func (v *Cmd) String() string {
+ return v.Name
+}
+
+// run runs the command line cmd in the given directory.
+// keyval is a list of key, value pairs. run expands
+// instances of {key} in cmd into value, but only after
+// splitting cmd into individual arguments.
+// If an error occurs, run prints the command line and the
+// command's combined stdout+stderr to standard error.
+// Otherwise run discards the command's output.
+func (v *Cmd) run(dir string, cmd string, keyval ...string) error {
+ _, err := v.run1(dir, cmd, keyval, true)
+ return err
+}
+
+// runVerboseOnly is like run but only generates error output to standard error in verbose mode.
+func (v *Cmd) runVerboseOnly(dir string, cmd string, keyval ...string) error {
+ _, err := v.run1(dir, cmd, keyval, false)
+ return err
+}
+
+// runOutput is like run but returns the output of the command.
+func (v *Cmd) runOutput(dir string, cmd string, keyval ...string) ([]byte, error) {
+ return v.run1(dir, cmd, keyval, true)
+}
+
+// runOutputVerboseOnly is like runOutput but only generates error output to
+// standard error in verbose mode.
+func (v *Cmd) runOutputVerboseOnly(dir string, cmd string, keyval ...string) ([]byte, error) {
+ return v.run1(dir, cmd, keyval, false)
+}
+
+// run1 is the generalized implementation of run and runOutput.
+func (v *Cmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([]byte, error) {
+ m := make(map[string]string)
+ for i := 0; i < len(keyval); i += 2 {
+ m[keyval[i]] = keyval[i+1]
+ }
+ args := strings.Fields(cmdline)
+ for i, arg := range args {
+ args[i] = expand(m, arg)
+ }
+
+ if len(args) >= 2 && args[0] == "-go-internal-mkdir" {
+ var err error
+ if filepath.IsAbs(args[1]) {
+ err = os.Mkdir(args[1], fs.ModePerm)
+ } else {
+ err = os.Mkdir(filepath.Join(dir, args[1]), fs.ModePerm)
+ }
+ if err != nil {
+ return nil, err
+ }
+ args = args[2:]
+ }
+
+ if len(args) >= 2 && args[0] == "-go-internal-cd" {
+ if filepath.IsAbs(args[1]) {
+ dir = args[1]
+ } else {
+ dir = filepath.Join(dir, args[1])
+ }
+ args = args[2:]
+ }
+
+ _, err := exec.LookPath(v.Cmd)
+ if err != nil {
+ fmt.Fprintf(os.Stderr,
+ "go: missing %s command. See https://golang.org/s/gogetcmd\n",
+ v.Name)
+ return nil, err
+ }
+
+ cmd := exec.Command(v.Cmd, args...)
+ cmd.Dir = dir
+ if cfg.BuildX {
+ fmt.Fprintf(os.Stderr, "cd %s\n", dir)
+ fmt.Fprintf(os.Stderr, "%s %s\n", v.Cmd, strings.Join(args, " "))
+ }
+ out, err := cmd.Output()
+ if err != nil {
+ if verbose || cfg.BuildV {
+ fmt.Fprintf(os.Stderr, "# cd %s; %s %s\n", dir, v.Cmd, strings.Join(args, " "))
+ if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+ os.Stderr.Write(ee.Stderr)
+ } else {
+ fmt.Fprintln(os.Stderr, err.Error())
+ }
+ }
+ }
+ return out, err
+}
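+
+// As an illustrative sketch of the expansion above: vcsGit's CreateCmd entry
+// "clone -- {repo} {dir}", run by Create as
+//
+//	v.run(filepath.Dir(dir), "clone -- {repo} {dir}", "dir", dir, "repo", repo)
+//
+// is split into fields, has {repo} and {dir} expanded, and executes
+// "git clone -- <repo> <dir>" in the parent directory. The internal
+// "-go-internal-mkdir" and "-go-internal-cd" prefixes are consumed here
+// and never passed to the VCS binary.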
+
+// Ping pings to determine scheme to use.
+func (v *Cmd) Ping(scheme, repo string) error {
+ // Run the ping command in an arbitrary working directory,
+ // but don't let the current working directory pollute the results.
+ // In module mode, we expect GOMODCACHE to exist and be a safe place for
+ // commands; in GOPATH mode, we expect that to be true of GOPATH/src.
+ dir := cfg.GOMODCACHE
+ if !cfg.ModulesEnabled {
+ dir = filepath.Join(cfg.BuildContext.GOPATH, "src")
+ }
+ os.MkdirAll(dir, 0777) // Ignore errors — if unsuccessful, the command will likely fail.
+
+ release, err := base.AcquireNet()
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ return v.runVerboseOnly(dir, v.PingCmd, "scheme", scheme, "repo", repo)
+}
+
+// Create creates a new copy of repo in dir.
+// The parent of dir must exist; dir must not.
+func (v *Cmd) Create(dir, repo string) error {
+ release, err := base.AcquireNet()
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ for _, cmd := range v.CreateCmd {
+ if err := v.run(filepath.Dir(dir), cmd, "dir", dir, "repo", repo); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Download downloads any new changes for the repo in dir.
+func (v *Cmd) Download(dir string) error {
+ release, err := base.AcquireNet()
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ for _, cmd := range v.DownloadCmd {
+ if err := v.run(dir, cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tags returns the list of available tags for the repo in dir.
+func (v *Cmd) Tags(dir string) ([]string, error) {
+ var tags []string
+ for _, tc := range v.TagCmd {
+ out, err := v.runOutput(dir, tc.cmd)
+ if err != nil {
+ return nil, err
+ }
+ re := regexp.MustCompile(`(?m-s)` + tc.pattern)
+ for _, m := range re.FindAllStringSubmatch(string(out), -1) {
+ tags = append(tags, m[1])
+ }
+ }
+ return tags, nil
+}
+
+// TagSync syncs the repo in dir to the named tag,
+// which is either a tag returned by Tags or the empty string,
+// in which case the repo is synced to the default (TagSyncDefault).
+func (v *Cmd) TagSync(dir, tag string) error {
+ if v.TagSyncCmd == nil {
+ return nil
+ }
+ if tag != "" {
+ for _, tc := range v.TagLookupCmd {
+ out, err := v.runOutput(dir, tc.cmd, "tag", tag)
+ if err != nil {
+ return err
+ }
+ re := regexp.MustCompile(`(?m-s)` + tc.pattern)
+ m := re.FindStringSubmatch(string(out))
+ if len(m) > 1 {
+ tag = m[1]
+ break
+ }
+ }
+ }
+
+ release, err := base.AcquireNet()
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ if tag == "" && v.TagSyncDefault != nil {
+ for _, cmd := range v.TagSyncDefault {
+ if err := v.run(dir, cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ for _, cmd := range v.TagSyncCmd {
+ if err := v.run(dir, cmd, "tag", tag); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A vcsPath describes how to convert an import path into a
+// version control system and repository name.
+type vcsPath struct {
+ pathPrefix string // prefix this description applies to
+ regexp *lazyregexp.Regexp // compiled pattern for import path
+ repo string // repository to use (expand with match of re)
+ vcs string // version control system to use (expand with match of re)
+ check func(match map[string]string) error // additional checks
+ schemelessRepo bool // if true, the repo pattern lacks a scheme
+}
+
+// FromDir inspects dir and its parents to determine the
+// version control system and code repository to use.
+// If no repository is found, FromDir returns an error
+// equivalent to os.ErrNotExist.
+func FromDir(dir, srcRoot string, allowNesting bool) (repoDir string, vcsCmd *Cmd, err error) {
+ // Clean and double-check that dir is in (a subdirectory of) srcRoot.
+ dir = filepath.Clean(dir)
+ if srcRoot != "" {
+ srcRoot = filepath.Clean(srcRoot)
+ if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator {
+ return "", nil, fmt.Errorf("directory %q is outside source root %q", dir, srcRoot)
+ }
+ }
+
+ origDir := dir
+ for len(dir) > len(srcRoot) {
+ for _, vcs := range vcsList {
+ if isVCSRoot(dir, vcs.RootNames) {
+ // Record first VCS we find.
+ // If allowNesting is false (as it is in GOPATH), keep looking for
+ // repositories in parent directories and report an error if one is
+ // found, to mitigate VCS injection attacks.
+ if vcsCmd == nil {
+ vcsCmd = vcs
+ repoDir = dir
+ if allowNesting {
+ return repoDir, vcsCmd, nil
+ }
+ continue
+ }
+ // Otherwise, we have one VCS inside a different VCS.
+ return "", nil, fmt.Errorf("directory %q uses %s, but parent %q uses %s",
+ repoDir, vcsCmd.Cmd, dir, vcs.Cmd)
+ }
+ }
+
+ // Move to parent.
+ ndir := filepath.Dir(dir)
+ if len(ndir) >= len(dir) {
+ break
+ }
+ dir = ndir
+ }
+ if vcsCmd == nil {
+ return "", nil, &vcsNotFoundError{dir: origDir}
+ }
+ return repoDir, vcsCmd, nil
+}
+
+// isVCSRoot identifies a VCS root by checking whether the directory contains
+// any of the listed root names.
+func isVCSRoot(dir string, rootNames []rootName) bool {
+ for _, root := range rootNames {
+ fi, err := os.Stat(filepath.Join(dir, root.filename))
+ if err == nil && fi.IsDir() == root.isDir {
+ return true
+ }
+ }
+
+ return false
+}
+
+type rootName struct {
+ filename string
+ isDir bool
+}
+
+type vcsNotFoundError struct {
+ dir string
+}
+
+func (e *vcsNotFoundError) Error() string {
+ return fmt.Sprintf("directory %q is not using a known version control system", e.dir)
+}
+
+func (e *vcsNotFoundError) Is(err error) bool {
+ return err == os.ErrNotExist
+}
+
+// A govcsRule is a single GOVCS rule like private:hg|svn.
+type govcsRule struct {
+ pattern string
+ allowed []string
+}
+
+// A govcsConfig is a full GOVCS configuration.
+type govcsConfig []govcsRule
+
+func parseGOVCS(s string) (govcsConfig, error) {
+ s = strings.TrimSpace(s)
+ if s == "" {
+ return nil, nil
+ }
+ var cfg govcsConfig
+ have := make(map[string]string)
+ for _, item := range strings.Split(s, ",") {
+ item = strings.TrimSpace(item)
+ if item == "" {
+ return nil, fmt.Errorf("empty entry in GOVCS")
+ }
+ pattern, list, found := strings.Cut(item, ":")
+ if !found {
+ return nil, fmt.Errorf("malformed entry in GOVCS (missing colon): %q", item)
+ }
+ pattern, list = strings.TrimSpace(pattern), strings.TrimSpace(list)
+ if pattern == "" {
+ return nil, fmt.Errorf("empty pattern in GOVCS: %q", item)
+ }
+ if list == "" {
+ return nil, fmt.Errorf("empty VCS list in GOVCS: %q", item)
+ }
+ if search.IsRelativePath(pattern) {
+ return nil, fmt.Errorf("relative pattern not allowed in GOVCS: %q", pattern)
+ }
+ if old := have[pattern]; old != "" {
+ return nil, fmt.Errorf("unreachable pattern in GOVCS: %q after %q", item, old)
+ }
+ have[pattern] = item
+ allowed := strings.Split(list, "|")
+ for i, a := range allowed {
+ a = strings.TrimSpace(a)
+ if a == "" {
+ return nil, fmt.Errorf("empty VCS name in GOVCS: %q", item)
+ }
+ allowed[i] = a
+ }
+ cfg = append(cfg, govcsRule{pattern, allowed})
+ }
+ return cfg, nil
+}
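+
+// For example,
+//
+//	parseGOVCS("private:hg|svn, public:git")
+//
+// yields the rules {"private", ["hg", "svn"]} and {"public", ["git"]}, while
+// parseGOVCS("") yields a nil config. A nil or empty config allows nothing on
+// its own (see allow below); the defaults in defaultGOVCS are appended by
+// CheckGOVCS.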
+
+func (c *govcsConfig) allow(path string, private bool, vcs string) bool {
+ for _, rule := range *c {
+ match := false
+ switch rule.pattern {
+ case "private":
+ match = private
+ case "public":
+ match = !private
+ default:
+ // Note: rule.pattern is known to be comma-free,
+ // so MatchPrefixPatterns is only matching a single pattern for us.
+ match = module.MatchPrefixPatterns(rule.pattern, path)
+ }
+ if !match {
+ continue
+ }
+ for _, allow := range rule.allowed {
+ if allow == vcs || allow == "all" {
+ return true
+ }
+ }
+ return false
+ }
+
+ // By default, nothing is allowed.
+ return false
+}
+
+var (
+ govcs govcsConfig
+ govcsErr error
+ govcsOnce sync.Once
+)
+
+// defaultGOVCS is the default setting for GOVCS.
+// Setting GOVCS adds entries ahead of these but does not remove them.
+// (They are appended to the parsed GOVCS setting.)
+//
+// The rationale behind allowing only Git and Mercurial is that
+// these two systems have had the most attention to issues
+// of being run as clients of untrusted servers. In contrast,
+// Bazaar, Fossil, and Subversion have primarily been used
+// in trusted, authenticated environments and are not as well
+// scrutinized as attack surfaces.
+//
+// See golang.org/issue/41730 for details.
+var defaultGOVCS = govcsConfig{
+ {"private", []string{"all"}},
+ {"public", []string{"git", "hg"}},
+}
+
+// CheckGOVCS checks whether the policy defined by the environment variable
+// GOVCS allows the given vcs command to be used with the given repository
+// root path. Note that root may not be a real package or module path; it's
+// the same as the root path in the go-import meta tag.
+func CheckGOVCS(vcs *Cmd, root string) error {
+ if vcs == vcsMod {
+ // Direct module (proxy protocol) fetches don't
+ // involve an external version control system
+ // and are always allowed.
+ return nil
+ }
+
+ govcsOnce.Do(func() {
+ govcs, govcsErr = parseGOVCS(os.Getenv("GOVCS"))
+ govcs = append(govcs, defaultGOVCS...)
+ })
+ if govcsErr != nil {
+ return govcsErr
+ }
+
+ private := module.MatchPrefixPatterns(cfg.GOPRIVATE, root)
+ if !govcs.allow(root, private, vcs.Cmd) {
+ what := "public"
+ if private {
+ what = "private"
+ }
+ return fmt.Errorf("GOVCS disallows using %s for %s %s; see 'go help vcs'", vcs.Cmd, what, root)
+ }
+
+ return nil
+}
+
+// CheckNested checks for an incorrectly-nested VCS-inside-VCS
+// situation for dir, checking parents up until srcRoot.
+func CheckNested(vcs *Cmd, dir, srcRoot string) error {
+ if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator {
+ return fmt.Errorf("directory %q is outside source root %q", dir, srcRoot)
+ }
+
+ otherDir := dir
+ for len(otherDir) > len(srcRoot) {
+ for _, otherVCS := range vcsList {
+ if isVCSRoot(otherDir, otherVCS.RootNames) {
+ // Allow expected vcs in original dir.
+ if otherDir == dir && otherVCS == vcs {
+ continue
+ }
+ // Otherwise, we have one VCS inside a different VCS.
+ return fmt.Errorf("directory %q uses %s, but parent %q uses %s", dir, vcs.Cmd, otherDir, otherVCS.Cmd)
+ }
+ }
+ // Move to parent.
+ newDir := filepath.Dir(otherDir)
+ if len(newDir) >= len(otherDir) {
+ // Shouldn't happen, but just in case, stop.
+ break
+ }
+ otherDir = newDir
+ }
+
+ return nil
+}
+
+// RepoRoot describes the repository root for a tree of source code.
+type RepoRoot struct {
+ Repo string // repository URL, including scheme
+ Root string // import path corresponding to root of repo
+ IsCustom bool // defined by served <meta> tags (as opposed to hard-coded pattern)
+ VCS *Cmd
+}
+
+func httpPrefix(s string) string {
+ for _, prefix := range [...]string{"http:", "https:"} {
+ if strings.HasPrefix(s, prefix) {
+ return prefix
+ }
+ }
+ return ""
+}
+
+// ModuleMode specifies whether to prefer modules when looking up code sources.
+type ModuleMode int
+
+const (
+ IgnoreMod ModuleMode = iota
+ PreferMod
+)
+
+// RepoRootForImportPath analyzes importPath to determine the
+// version control system and code repository to use.
+func RepoRootForImportPath(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) {
+ rr, err := repoRootFromVCSPaths(importPath, security, vcsPaths)
+ if err == errUnknownSite {
+ rr, err = repoRootForImportDynamic(importPath, mod, security)
+ if err != nil {
+ err = importErrorf(importPath, "unrecognized import path %q: %v", importPath, err)
+ }
+ }
+ if err != nil {
+ rr1, err1 := repoRootFromVCSPaths(importPath, security, vcsPathsAfterDynamic)
+ if err1 == nil {
+ rr = rr1
+ err = nil
+ }
+ }
+
+ // Should have been taken care of above, but make sure.
+ if err == nil && strings.Contains(importPath, "...") && strings.Contains(rr.Root, "...") {
+ // Do not allow wildcards in the repo root.
+ rr = nil
+ err = importErrorf(importPath, "cannot expand ... in %q", importPath)
+ }
+ return rr, err
+}
+
+var errUnknownSite = errors.New("dynamic lookup required to find mapping")
+
+// repoRootFromVCSPaths attempts to map importPath to a repoRoot
+// using the mappings defined in vcsPaths.
+func repoRootFromVCSPaths(importPath string, security web.SecurityMode, vcsPaths []*vcsPath) (*RepoRoot, error) {
+ if str.HasPathPrefix(importPath, "example.net") {
+ // TODO(rsc): This should not be necessary, but it's required to keep
+ // tests like ../../testdata/script/mod_get_extra.txt from using the network.
+ // That script has everything it needs in the replacement set, but it is still
+ // doing network calls.
+ return nil, fmt.Errorf("no modules on example.net")
+ }
+ if importPath == "rsc.io" {
+ // This special case allows tests like ../../testdata/script/govcs.txt
+ // to avoid making any network calls. The module lookup for a path
+ // like rsc.io/nonexist.svn/foo needs to not make a network call for
+ // a lookup on rsc.io.
+ return nil, fmt.Errorf("rsc.io is not a module")
+ }
+ // A common error is to use https://packagepath because that's what
+ // hg and git require. Diagnose this helpfully.
+ if prefix := httpPrefix(importPath); prefix != "" {
+ // The importPath has been cleaned, so has only one slash. The pattern
+ // ignores the slashes; the error message puts them back on the RHS at least.
+ return nil, fmt.Errorf("%q not allowed in import path", prefix+"//")
+ }
+ for _, srv := range vcsPaths {
+ if !str.HasPathPrefix(importPath, srv.pathPrefix) {
+ continue
+ }
+ m := srv.regexp.FindStringSubmatch(importPath)
+ if m == nil {
+ if srv.pathPrefix != "" {
+ return nil, importErrorf(importPath, "invalid %s import path %q", srv.pathPrefix, importPath)
+ }
+ continue
+ }
+
+ // Build map of named subexpression matches for expand.
+ match := map[string]string{
+ "prefix": srv.pathPrefix + "/",
+ "import": importPath,
+ }
+ for i, name := range srv.regexp.SubexpNames() {
+ if name != "" && match[name] == "" {
+ match[name] = m[i]
+ }
+ }
+ if srv.vcs != "" {
+ match["vcs"] = expand(match, srv.vcs)
+ }
+ if srv.repo != "" {
+ match["repo"] = expand(match, srv.repo)
+ }
+ if srv.check != nil {
+ if err := srv.check(match); err != nil {
+ return nil, err
+ }
+ }
+ vcs := vcsByCmd(match["vcs"])
+ if vcs == nil {
+ return nil, fmt.Errorf("unknown version control system %q", match["vcs"])
+ }
+ if err := CheckGOVCS(vcs, match["root"]); err != nil {
+ return nil, err
+ }
+ var repoURL string
+ if !srv.schemelessRepo {
+ repoURL = match["repo"]
+ } else {
+ repo := match["repo"]
+ var ok bool
+ repoURL, ok = interceptVCSTest(repo, vcs, security)
+ if !ok {
+ scheme, err := func() (string, error) {
+ for _, s := range vcs.Scheme {
+ if security == web.SecureOnly && !vcs.isSecureScheme(s) {
+ continue
+ }
+
+ // If we know how to ping URL schemes for this VCS,
+ // check that this repo works.
+ // Otherwise, default to the first scheme
+ // that meets the requested security level.
+ if vcs.PingCmd == "" {
+ return s, nil
+ }
+ if err := vcs.Ping(s, repo); err == nil {
+ return s, nil
+ }
+ }
+ securityFrag := ""
+ if security == web.SecureOnly {
+ securityFrag = "secure "
+ }
+ return "", fmt.Errorf("no %sprotocol found for repository", securityFrag)
+ }()
+ if err != nil {
+ return nil, err
+ }
+ repoURL = scheme + "://" + repo
+ }
+ }
+ rr := &RepoRoot{
+ Repo: repoURL,
+ Root: match["root"],
+ VCS: vcs,
+ }
+ return rr, nil
+ }
+ return nil, errUnknownSite
+}
+
+func interceptVCSTest(repo string, vcs *Cmd, security web.SecurityMode) (repoURL string, ok bool) {
+ if VCSTestRepoURL == "" {
+ return "", false
+ }
+ if vcs == vcsMod {
+ // Since the "mod" protocol is implemented internally,
+ // requests will be intercepted at a lower level (in cmd/go/internal/web).
+ return "", false
+ }
+
+ if scheme, path, ok := strings.Cut(repo, "://"); ok {
+ if security == web.SecureOnly && !vcs.isSecureScheme(scheme) {
+ return "", false // Let the caller reject the original URL.
+ }
+ repo = path // Remove leading URL scheme if present.
+ }
+ for _, host := range VCSTestHosts {
+ if !str.HasPathPrefix(repo, host) {
+ continue
+ }
+
+ httpURL := VCSTestRepoURL + strings.TrimPrefix(repo, host)
+
+ if vcs == vcsSvn {
+ // Ping the vcweb HTTP server to tell it to initialize the SVN repository
+ // and get the SVN server URL.
+ u, err := urlpkg.Parse(httpURL + "?vcwebsvn=1")
+ if err != nil {
+ panic(fmt.Sprintf("invalid vcs-test repo URL: %v", err))
+ }
+ svnURL, err := web.GetBytes(u)
+ svnURL = bytes.TrimSpace(svnURL)
+ if err == nil && len(svnURL) > 0 {
+ return string(svnURL) + strings.TrimPrefix(repo, host), true
+ }
+
+ // vcs-test doesn't have a svn handler for the given path,
+ // so resolve the repo to HTTPS instead.
+ }
+
+ return httpURL, true
+ }
+ return "", false
+}
+
+// urlForImportPath returns a partially-populated URL for the given Go import path.
+//
+// The URL leaves the Scheme field blank so that web.Get will try any scheme
+// allowed by the selected security mode.
+func urlForImportPath(importPath string) (*urlpkg.URL, error) {
+ slash := strings.Index(importPath, "/")
+ if slash < 0 {
+ slash = len(importPath)
+ }
+ host, path := importPath[:slash], importPath[slash:]
+ if !strings.Contains(host, ".") {
+ return nil, errors.New("import path does not begin with hostname")
+ }
+ if len(path) == 0 {
+ path = "/"
+ }
+ return &urlpkg.URL{Host: host, Path: path, RawQuery: "go-get=1"}, nil
+}
+
+// repoRootForImportDynamic finds a *RepoRoot for a custom domain that's not
+// statically known by repoRootFromVCSPaths.
+//
+// This handles custom import paths like "name.tld/pkg/foo" or just "name.tld".
+func repoRootForImportDynamic(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) {
+ url, err := urlForImportPath(importPath)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := web.Get(security, url)
+ if err != nil {
+ msg := "https fetch: %v"
+ if security == web.Insecure {
+ msg = "http/" + msg
+ }
+ return nil, fmt.Errorf(msg, err)
+ }
+ body := resp.Body
+ defer body.Close()
+ imports, err := parseMetaGoImports(body, mod)
+ if len(imports) == 0 {
+ if respErr := resp.Err(); respErr != nil {
+ // If the server's status was not OK, prefer to report that instead of
+ // an XML parse error.
+ return nil, respErr
+ }
+ }
+ if err != nil {
+ return nil, fmt.Errorf("parsing %s: %v", importPath, err)
+ }
+ // Find the matched meta import.
+ mmi, err := matchGoImport(imports, importPath)
+ if err != nil {
+ if _, ok := err.(ImportMismatchError); !ok {
+ return nil, fmt.Errorf("parse %s: %v", url, err)
+ }
+ return nil, fmt.Errorf("parse %s: no go-import meta tags (%s)", resp.URL, err)
+ }
+ if cfg.BuildV {
+ log.Printf("get %q: found meta tag %#v at %s", importPath, mmi, url)
+ }
+ // If the import was "uni.edu/bob/project", which said the
+ // prefix was "uni.edu" and the RepoRoot was "evilroot.com",
+ // make sure we don't trust Bob and check out evilroot.com to
+ // "uni.edu" yet (possibly overwriting/preempting another
+ // non-evil student). Instead, first verify the root and see
+ // if it matches Bob's claim.
+ if mmi.Prefix != importPath {
+ if cfg.BuildV {
+ log.Printf("get %q: verifying non-authoritative meta tag", importPath)
+ }
+ var imports []metaImport
+ url, imports, err = metaImportsForPrefix(mmi.Prefix, mod, security)
+ if err != nil {
+ return nil, err
+ }
+ metaImport2, err := matchGoImport(imports, importPath)
+ if err != nil || mmi != metaImport2 {
+ return nil, fmt.Errorf("%s and %s disagree about go-import for %s", resp.URL, url, mmi.Prefix)
+ }
+ }
+
+ if err := validateRepoRoot(mmi.RepoRoot); err != nil {
+ return nil, fmt.Errorf("%s: invalid repo root %q: %v", resp.URL, mmi.RepoRoot, err)
+ }
+ var vcs *Cmd
+ if mmi.VCS == "mod" {
+ vcs = vcsMod
+ } else {
+ vcs = vcsByCmd(mmi.VCS)
+ if vcs == nil {
+ return nil, fmt.Errorf("%s: unknown vcs %q", resp.URL, mmi.VCS)
+ }
+ }
+
+ if err := CheckGOVCS(vcs, mmi.Prefix); err != nil {
+ return nil, err
+ }
+
+ repoURL, ok := interceptVCSTest(mmi.RepoRoot, vcs, security)
+ if !ok {
+ repoURL = mmi.RepoRoot
+ }
+ rr := &RepoRoot{
+ Repo: repoURL,
+ Root: mmi.Prefix,
+ IsCustom: true,
+ VCS: vcs,
+ }
+ return rr, nil
+}
+
+// validateRepoRoot returns an error if repoRoot does not seem to be
+// a valid URL with scheme.
+func validateRepoRoot(repoRoot string) error {
+ url, err := urlpkg.Parse(repoRoot)
+ if err != nil {
+ return err
+ }
+ if url.Scheme == "" {
+ return errors.New("no scheme")
+ }
+ if url.Scheme == "file" {
+ return errors.New("file scheme disallowed")
+ }
+ return nil
+}
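+
+// For example, validateRepoRoot accepts "https://example.com/repo" but rejects
+// both "example.com/repo" (no scheme) and "file:///tmp/repo" (file scheme
+// disallowed).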
+
+var fetchGroup singleflight.Group
+var (
+ fetchCacheMu sync.Mutex
+ fetchCache = map[string]fetchResult{} // key is metaImportsForPrefix's importPrefix
+)
+
+// metaImportsForPrefix takes a package's root import path as declared in a <meta> tag
+// and returns its HTML discovery URL and the parsed metaImport lines
+// found on the page.
+//
+// The importPrefix is of the form "golang.org/x/tools".
+// It is an error if no imports are found.
+// The returned url is still valid even if err != nil.
+// It will be of the form "https://golang.org/x/tools?go-get=1".
+func metaImportsForPrefix(importPrefix string, mod ModuleMode, security web.SecurityMode) (*urlpkg.URL, []metaImport, error) {
+ setCache := func(res fetchResult) (fetchResult, error) {
+ fetchCacheMu.Lock()
+ defer fetchCacheMu.Unlock()
+ fetchCache[importPrefix] = res
+ return res, nil
+ }
+
+ resi, _, _ := fetchGroup.Do(importPrefix, func() (resi any, err error) {
+ fetchCacheMu.Lock()
+ if res, ok := fetchCache[importPrefix]; ok {
+ fetchCacheMu.Unlock()
+ return res, nil
+ }
+ fetchCacheMu.Unlock()
+
+ url, err := urlForImportPath(importPrefix)
+ if err != nil {
+ return setCache(fetchResult{err: err})
+ }
+ resp, err := web.Get(security, url)
+ if err != nil {
+ return setCache(fetchResult{url: url, err: fmt.Errorf("fetching %s: %v", importPrefix, err)})
+ }
+ body := resp.Body
+ defer body.Close()
+ imports, err := parseMetaGoImports(body, mod)
+ if len(imports) == 0 {
+ if respErr := resp.Err(); respErr != nil {
+ // If the server's status was not OK, prefer to report that instead of
+ // an XML parse error.
+ return setCache(fetchResult{url: url, err: respErr})
+ }
+ }
+ if err != nil {
+ return setCache(fetchResult{url: url, err: fmt.Errorf("parsing %s: %v", resp.URL, err)})
+ }
+ if len(imports) == 0 {
+ err = fmt.Errorf("fetching %s: no go-import meta tag found in %s", importPrefix, resp.URL)
+ }
+ return setCache(fetchResult{url: url, imports: imports, err: err})
+ })
+ res := resi.(fetchResult)
+ return res.url, res.imports, res.err
+}
+
+type fetchResult struct {
+ url *urlpkg.URL
+ imports []metaImport
+ err error
+}
+
+// metaImport represents the parsed <meta name="go-import"
+// content="prefix vcs reporoot" /> tags from HTML files.
+type metaImport struct {
+ Prefix, VCS, RepoRoot string
+}
+
+// An ImportMismatchError is returned when meta imports are present
+// but none match our import path.
+type ImportMismatchError struct {
+ importPath string
+ mismatches []string // the meta imports that were discarded for not matching our importPath
+}
+
+func (m ImportMismatchError) Error() string {
+ formattedStrings := make([]string, len(m.mismatches))
+ for i, pre := range m.mismatches {
+ formattedStrings[i] = fmt.Sprintf("meta tag %s did not match import path %s", pre, m.importPath)
+ }
+ return strings.Join(formattedStrings, ", ")
+}
+
+// matchGoImport returns the metaImport from imports matching importPath.
+// An error is returned if there are multiple matches.
+// An ImportMismatchError is returned if none match.
+func matchGoImport(imports []metaImport, importPath string) (metaImport, error) {
+ match := -1
+
+ errImportMismatch := ImportMismatchError{importPath: importPath}
+ for i, im := range imports {
+ if !str.HasPathPrefix(importPath, im.Prefix) {
+ errImportMismatch.mismatches = append(errImportMismatch.mismatches, im.Prefix)
+ continue
+ }
+
+ if match >= 0 {
+ if imports[match].VCS == "mod" && im.VCS != "mod" {
+ // All the mod entries precede all the non-mod entries.
+ // We have a mod entry and don't care about the rest,
+ // matching or not.
+ break
+ }
+ return metaImport{}, fmt.Errorf("multiple meta tags match import path %q", importPath)
+ }
+ match = i
+ }
+
+ if match == -1 {
+ return metaImport{}, errImportMismatch
+ }
+ return imports[match], nil
+}
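+
+// For example (mirroring TestMatchGoImport below), with meta imports for the
+// prefixes "example.com/user/foo" and "example.com/user/foo/bar", matching the
+// path "example.com/user/foo/bar" fails because both prefixes match it, while a
+// path matching neither prefix yields an ImportMismatchError listing the
+// discarded prefixes.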
+
+// expand rewrites s to replace {k} with match[k] for each key k in match.
+func expand(match map[string]string, s string) string {
+ // We want to replace each match exactly once, and the result of expansion
+ // must not depend on the iteration order through the map.
+ // A strings.Replacer has exactly the properties we're looking for.
+ oldNew := make([]string, 0, 2*len(match))
+ for k, v := range match {
+ oldNew = append(oldNew, "{"+k+"}", v)
+ }
+ return strings.NewReplacer(oldNew...).Replace(s)
+}
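+
+// For example,
+//
+//	expand(map[string]string{"repo": "example.com/r", "dir": "/tmp/work"}, "clone -- {repo} {dir}")
+//
+// returns "clone -- example.com/r /tmp/work". Placeholders whose key is not
+// present in match are left in s unchanged.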
+
+// vcsPaths defines the meaning of import paths referring to
+// commonly-used VCS hosting sites (github.com/user/dir)
+// and of import paths referring to a fully-qualified import path
+// containing a VCS type (foo.com/repo.git/dir).
+var vcsPaths = []*vcsPath{
+ // GitHub
+ {
+ pathPrefix: "github.com",
+ regexp: lazyregexp.New(`^(?P<root>github\.com/[\w.\-]+/[\w.\-]+)(/[\w.\-]+)*$`),
+ vcs: "git",
+ repo: "https://{root}",
+ check: noVCSSuffix,
+ },
+
+ // Bitbucket
+ {
+ pathPrefix: "bitbucket.org",
+ regexp: lazyregexp.New(`^(?P<root>bitbucket\.org/(?P<bitname>[\w.\-]+/[\w.\-]+))(/[\w.\-]+)*$`),
+ vcs: "git",
+ repo: "https://{root}",
+ check: noVCSSuffix,
+ },
+
+ // IBM DevOps Services (JazzHub)
+ {
+ pathPrefix: "hub.jazz.net/git",
+ regexp: lazyregexp.New(`^(?P<root>hub\.jazz\.net/git/[a-z0-9]+/[\w.\-]+)(/[\w.\-]+)*$`),
+ vcs: "git",
+ repo: "https://{root}",
+ check: noVCSSuffix,
+ },
+
+ // Git at Apache
+ {
+ pathPrefix: "git.apache.org",
+ regexp: lazyregexp.New(`^(?P<root>git\.apache\.org/[a-z0-9_.\-]+\.git)(/[\w.\-]+)*$`),
+ vcs: "git",
+ repo: "https://{root}",
+ },
+
+ // Git at OpenStack
+ {
+ pathPrefix: "git.openstack.org",
+ regexp: lazyregexp.New(`^(?P<root>git\.openstack\.org/[\w.\-]+/[\w.\-]+)(\.git)?(/[\w.\-]+)*$`),
+ vcs: "git",
+ repo: "https://{root}",
+ },
+
+ // chiselapp.com for fossil
+ {
+ pathPrefix: "chiselapp.com",
+ regexp: lazyregexp.New(`^(?P<root>chiselapp\.com/user/[A-Za-z0-9]+/repository/[\w.\-]+)$`),
+ vcs: "fossil",
+ repo: "https://{root}",
+ },
+
+ // General syntax for any server.
+ // Must be last.
+ {
+ regexp: lazyregexp.New(`(?P<root>(?P<repo>([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?(/~?[\w.\-]+)+?)\.(?P<vcs>bzr|fossil|git|hg|svn))(/~?[\w.\-]+)*$`),
+ schemelessRepo: true,
+ },
+}
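+
+// As an illustration of the general (last) entry above: an import path such as
+// "example.com/repo.git/sub/pkg" matches with vcs "git" and root
+// "example.com/repo.git". Because the entry is schemelessRepo, the repo has no
+// scheme yet; repoRootFromVCSPaths later picks one by pinging the server (or
+// via interceptVCSTest under vcs-test).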
+
+// vcsPathsAfterDynamic gives additional vcsPaths entries
+// to try after the dynamic HTML check.
+// This gives those sites a chance to introduce <meta> tags
+// as part of a graceful transition away from the hard-coded logic.
+var vcsPathsAfterDynamic = []*vcsPath{
+ // Launchpad. See golang.org/issue/11436.
+ {
+ pathPrefix: "launchpad.net",
+ regexp: lazyregexp.New(`^(?P<root>launchpad\.net/((?P<project>[\w.\-]+)(?P<series>/[\w.\-]+)?|~[\w.\-]+/(\+junk|[\w.\-]+)/[\w.\-]+))(/[\w.\-]+)*$`),
+ vcs: "bzr",
+ repo: "https://{root}",
+ check: launchpadVCS,
+ },
+}
+
+// noVCSSuffix checks that the repository name does not
+// end in .foo for any version control system foo.
+// The usual culprit is ".git".
+func noVCSSuffix(match map[string]string) error {
+ repo := match["repo"]
+ for _, vcs := range vcsList {
+ if strings.HasSuffix(repo, "."+vcs.Cmd) {
+ return fmt.Errorf("invalid version control suffix in %s path", match["prefix"])
+ }
+ }
+ return nil
+}
+
+// launchpadVCS solves the ambiguity for "lp.net/project/foo". In this case,
+// "foo" could be a series name registered in Launchpad with its own branch,
+// and it could also be the name of a directory within the main project
+// branch one level up.
+func launchpadVCS(match map[string]string) error {
+ if match["project"] == "" || match["series"] == "" {
+ return nil
+ }
+ url := &urlpkg.URL{
+ Scheme: "https",
+ Host: "code.launchpad.net",
+ Path: expand(match, "/{project}{series}/.bzr/branch-format"),
+ }
+ _, err := web.GetBytes(url)
+ if err != nil {
+ match["root"] = expand(match, "launchpad.net/{project}")
+ match["repo"] = expand(match, "https://{root}")
+ }
+ return nil
+}
+
+// importError is a copy of load.importError, made to avoid a dependency cycle
+// on cmd/go/internal/load. It just needs to satisfy load.ImportPathError.
+type importError struct {
+ importPath string
+ err error
+}
+
+func importErrorf(path, format string, args ...any) error {
+ err := &importError{importPath: path, err: fmt.Errorf(format, args...)}
+ if errStr := err.Error(); !strings.Contains(errStr, path) {
+ panic(fmt.Sprintf("path %q not in error %q", path, errStr))
+ }
+ return err
+}
+
+func (e *importError) Error() string {
+ return e.err.Error()
+}
+
+func (e *importError) Unwrap() error {
+ // Don't return e.err directly, since we're only wrapping an error if %w
+ // was passed to importErrorf.
+ return errors.Unwrap(e.err)
+}
+
+func (e *importError) ImportPath() string {
+ return e.importPath
+}
diff --git a/src/cmd/go/internal/vcs/vcs_test.go b/src/cmd/go/internal/vcs/vcs_test.go
new file mode 100644
index 0000000..2ce85ea
--- /dev/null
+++ b/src/cmd/go/internal/vcs/vcs_test.go
@@ -0,0 +1,581 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "errors"
+ "fmt"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "cmd/go/internal/web"
+)
+
+func init() {
+ // GOVCS defaults to public:git|hg,private:all,
+ // which breaks many tests here: they can't use a non-git, non-hg VCS at all!
+ // Change to fully permissive.
+ // The tests of the GOVCS setting itself are in ../../testdata/script/govcs.txt.
+ os.Setenv("GOVCS", "*:all")
+}
+
+// Test that RepoRootForImportPath determines the correct RepoRoot for a given importPath.
+// TODO(cmang): Add tests for SVN and BZR.
+func TestRepoRootForImportPath(t *testing.T) {
+ testenv.MustHaveExternalNetwork(t)
+
+ tests := []struct {
+ path string
+ want *RepoRoot
+ }{
+ {
+ "github.com/golang/groupcache",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://github.com/golang/groupcache",
+ },
+ },
+ // Unicode letters in directories are not valid.
+ {
+ "github.com/user/unicode/испытание",
+ nil,
+ },
+ // IBM DevOps Services tests
+ {
+ "hub.jazz.net/git/user1/pkgname",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://hub.jazz.net/git/user1/pkgname",
+ },
+ },
+ {
+ "hub.jazz.net/git/user1/pkgname/submodule/submodule/submodule",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://hub.jazz.net/git/user1/pkgname",
+ },
+ },
+ {
+ "hub.jazz.net",
+ nil,
+ },
+ {
+ "hubajazz.net",
+ nil,
+ },
+ {
+ "hub2.jazz.net",
+ nil,
+ },
+ {
+ "hub.jazz.net/someotherprefix",
+ nil,
+ },
+ {
+ "hub.jazz.net/someotherprefix/user1/pkgname",
+ nil,
+ },
+ // Spaces are not valid in user names or package names
+ {
+ "hub.jazz.net/git/User 1/pkgname",
+ nil,
+ },
+ {
+ "hub.jazz.net/git/user1/pkg name",
+ nil,
+ },
+ // Dots are not valid in user names
+ {
+ "hub.jazz.net/git/user.1/pkgname",
+ nil,
+ },
+ {
+ "hub.jazz.net/git/user/pkg.name",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://hub.jazz.net/git/user/pkg.name",
+ },
+ },
+ // User names cannot have uppercase letters
+ {
+ "hub.jazz.net/git/USER/pkgname",
+ nil,
+ },
+ // OpenStack tests
+ {
+ "git.openstack.org/openstack/swift",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://git.openstack.org/openstack/swift",
+ },
+ },
+ // A trailing .git is less preferred but accepted for compatibility
+ // while the same source needs to build with both old and new
+ // versions of the go command.
+ {
+ "git.openstack.org/openstack/swift.git",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://git.openstack.org/openstack/swift.git",
+ },
+ },
+ {
+ "git.openstack.org/openstack/swift/go/hummingbird",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://git.openstack.org/openstack/swift",
+ },
+ },
+ {
+ "git.openstack.org",
+ nil,
+ },
+ {
+ "git.openstack.org/openstack",
+ nil,
+ },
+ // Spaces are not valid in package name
+ {
+ "git.apache.org/package name/path/to/lib",
+ nil,
+ },
+ // Should have ".git" suffix
+ {
+ "git.apache.org/package-name/path/to/lib",
+ nil,
+ },
+ {
+ "gitbapache.org",
+ nil,
+ },
+ {
+ "git.apache.org/package-name.git",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://git.apache.org/package-name.git",
+ },
+ },
+ {
+ "git.apache.org/package-name_2.x.git/path/to/lib",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://git.apache.org/package-name_2.x.git",
+ },
+ },
+ {
+ "chiselapp.com/user/kyle/repository/fossilgg",
+ &RepoRoot{
+ VCS: vcsFossil,
+ Repo: "https://chiselapp.com/user/kyle/repository/fossilgg",
+ },
+ },
+ {
+ // must have a user/$name/repository/$repo path
+ "chiselapp.com/kyle/repository/fossilgg",
+ nil,
+ },
+ {
+ "chiselapp.com/user/kyle/fossilgg",
+ nil,
+ },
+ {
+ "bitbucket.org/workspace/pkgname",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://bitbucket.org/workspace/pkgname",
+ },
+ },
+ }
+
+ for _, test := range tests {
+ got, err := RepoRootForImportPath(test.path, IgnoreMod, web.SecureOnly)
+ want := test.want
+
+ if want == nil {
+ if err == nil {
+ t.Errorf("RepoRootForImportPath(%q): Error expected but not received", test.path)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("RepoRootForImportPath(%q): %v", test.path, err)
+ continue
+ }
+ if got.VCS.Name != want.VCS.Name || got.Repo != want.Repo {
+ t.Errorf("RepoRootForImportPath(%q) = VCS(%s) Repo(%s), want VCS(%s) Repo(%s)", test.path, got.VCS, got.Repo, want.VCS, want.Repo)
+ }
+ }
+}
+
+// Test that vcs.FromDir correctly inspects a given directory and returns the
+// right VCS and repo directory.
+func TestFromDir(t *testing.T) {
+ tempDir := t.TempDir()
+
+ for _, vcs := range vcsList {
+ for r, root := range vcs.RootNames {
+ vcsName := fmt.Sprint(vcs.Name, r)
+ dir := filepath.Join(tempDir, "example.com", vcsName, root.filename)
+ if root.isDir {
+ err := os.MkdirAll(dir, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ } else {
+ err := os.MkdirAll(filepath.Dir(dir), 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f, err := os.Create(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+ }
+
+ wantRepoDir := filepath.Dir(dir)
+ gotRepoDir, gotVCS, err := FromDir(dir, tempDir, false)
+ if err != nil {
+ t.Errorf("FromDir(%q, %q): %v", dir, tempDir, err)
+ continue
+ }
+ if gotRepoDir != wantRepoDir || gotVCS.Name != vcs.Name {
+ t.Errorf("FromDir(%q, %q) = RepoDir(%s), VCS(%s); want RepoDir(%s), VCS(%s)", dir, tempDir, gotRepoDir, gotVCS.Name, wantRepoDir, vcs.Name)
+ }
+ }
+ }
+}
+
+func TestIsSecure(t *testing.T) {
+ tests := []struct {
+ vcs *Cmd
+ url string
+ secure bool
+ }{
+ {vcsGit, "http://example.com/foo.git", false},
+ {vcsGit, "https://example.com/foo.git", true},
+ {vcsBzr, "http://example.com/foo.bzr", false},
+ {vcsBzr, "https://example.com/foo.bzr", true},
+ {vcsSvn, "http://example.com/svn", false},
+ {vcsSvn, "https://example.com/svn", true},
+ {vcsHg, "http://example.com/foo.hg", false},
+ {vcsHg, "https://example.com/foo.hg", true},
+ {vcsGit, "ssh://user@example.com/foo.git", true},
+ {vcsGit, "user@server:path/to/repo.git", false},
+ {vcsGit, "user@server:", false},
+ {vcsGit, "server:repo.git", false},
+ {vcsGit, "server:path/to/repo.git", false},
+ {vcsGit, "example.com:path/to/repo.git", false},
+ {vcsGit, "path/that/contains/a:colon/repo.git", false},
+ {vcsHg, "ssh://user@example.com/path/to/repo.hg", true},
+ {vcsFossil, "http://example.com/foo", false},
+ {vcsFossil, "https://example.com/foo", true},
+ }
+
+ for _, test := range tests {
+ secure := test.vcs.IsSecure(test.url)
+ if secure != test.secure {
+ t.Errorf("%s isSecure(%q) = %t; want %t", test.vcs, test.url, secure, test.secure)
+ }
+ }
+}
+
+func TestIsSecureGitAllowProtocol(t *testing.T) {
+ tests := []struct {
+ vcs *Cmd
+ url string
+ secure bool
+ }{
+ // Same as TestIsSecure to verify same behavior.
+ {vcsGit, "http://example.com/foo.git", false},
+ {vcsGit, "https://example.com/foo.git", true},
+ {vcsBzr, "http://example.com/foo.bzr", false},
+ {vcsBzr, "https://example.com/foo.bzr", true},
+ {vcsSvn, "http://example.com/svn", false},
+ {vcsSvn, "https://example.com/svn", true},
+ {vcsHg, "http://example.com/foo.hg", false},
+ {vcsHg, "https://example.com/foo.hg", true},
+ {vcsGit, "user@server:path/to/repo.git", false},
+ {vcsGit, "user@server:", false},
+ {vcsGit, "server:repo.git", false},
+ {vcsGit, "server:path/to/repo.git", false},
+ {vcsGit, "example.com:path/to/repo.git", false},
+ {vcsGit, "path/that/contains/a:colon/repo.git", false},
+ {vcsHg, "ssh://user@example.com/path/to/repo.hg", true},
+ // New behavior.
+ {vcsGit, "ssh://user@example.com/foo.git", false},
+ {vcsGit, "foo://example.com/bar.git", true},
+ {vcsHg, "foo://example.com/bar.hg", false},
+ {vcsSvn, "foo://example.com/svn", false},
+ {vcsBzr, "foo://example.com/bar.bzr", false},
+ }
+
+ defer os.Unsetenv("GIT_ALLOW_PROTOCOL")
+ os.Setenv("GIT_ALLOW_PROTOCOL", "https:foo")
+ for _, test := range tests {
+ secure := test.vcs.IsSecure(test.url)
+ if secure != test.secure {
+ t.Errorf("%s isSecure(%q) = %t; want %t", test.vcs, test.url, secure, test.secure)
+ }
+ }
+}
+
+func TestMatchGoImport(t *testing.T) {
+ tests := []struct {
+ imports []metaImport
+ path string
+ mi metaImport
+ err error
+ }{
+ {
+ imports: []metaImport{
+ {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ path: "example.com/user/foo",
+ mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ {
+ imports: []metaImport{
+ {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ path: "example.com/user/foo/",
+ mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ {
+ imports: []metaImport{
+ {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ {Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ path: "example.com/user/foo",
+ mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ {
+ imports: []metaImport{
+ {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ {Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ path: "example.com/user/fooa",
+ mi: metaImport{Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ {
+ imports: []metaImport{
+ {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ path: "example.com/user/foo/bar",
+ err: errors.New("should not be allowed to create nested repo"),
+ },
+ {
+ imports: []metaImport{
+ {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ path: "example.com/user/foo/bar/baz",
+ err: errors.New("should not be allowed to create nested repo"),
+ },
+ {
+ imports: []metaImport{
+ {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ path: "example.com/user/foo/bar/baz/qux",
+ err: errors.New("should not be allowed to create nested repo"),
+ },
+ {
+ imports: []metaImport{
+ {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ path: "example.com/user/foo/bar/baz/",
+ err: errors.New("should not be allowed to create nested repo"),
+ },
+ {
+ imports: []metaImport{
+ {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ path: "example.com",
+ err: errors.New("pathologically short path"),
+ },
+ {
+ imports: []metaImport{
+ {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
+ },
+ path: "different.example.com/user/foo",
+ err: errors.New("meta tags do not match import path"),
+ },
+ {
+ imports: []metaImport{
+ {Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"},
+ {Prefix: "myitcv.io", VCS: "git", RepoRoot: "https://github.com/myitcv/x"},
+ },
+ path: "myitcv.io/blah2/foo",
+ mi: metaImport{Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"},
+ },
+ {
+ imports: []metaImport{
+ {Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"},
+ {Prefix: "myitcv.io", VCS: "git", RepoRoot: "https://github.com/myitcv/x"},
+ },
+ path: "myitcv.io/other",
+ mi: metaImport{Prefix: "myitcv.io", VCS: "git", RepoRoot: "https://github.com/myitcv/x"},
+ },
+ }
+
+ for _, test := range tests {
+ mi, err := matchGoImport(test.imports, test.path)
+ if mi != test.mi {
+ t.Errorf("unexpected metaImport; got %v, want %v", mi, test.mi)
+ }
+
+ got := err
+ want := test.err
+ if (got == nil) != (want == nil) {
+ t.Errorf("unexpected error; got %v, want %v", got, want)
+ }
+ }
+}
+
+func TestValidateRepoRoot(t *testing.T) {
+ tests := []struct {
+ root string
+ ok bool
+ }{
+ {
+ root: "",
+ ok: false,
+ },
+ {
+ root: "http://",
+ ok: true,
+ },
+ {
+ root: "git+ssh://",
+ ok: true,
+ },
+ {
+ root: "http#://",
+ ok: false,
+ },
+ {
+ root: "-config",
+ ok: false,
+ },
+ {
+ root: "-config://",
+ ok: false,
+ },
+ }
+
+ for _, test := range tests {
+ err := validateRepoRoot(test.root)
+ ok := err == nil
+ if ok != test.ok {
+ want := "error"
+ if test.ok {
+ want = "nil"
+ }
+ t.Errorf("validateRepoRoot(%q) = %q, want %s", test.root, err, want)
+ }
+ }
+}
+
+var govcsTests = []struct {
+ govcs string
+ path string
+ vcs string
+ ok bool
+}{
+ {"private:all", "is-public.com/foo", "zzz", false},
+ {"private:all", "is-private.com/foo", "zzz", true},
+ {"public:all", "is-public.com/foo", "zzz", true},
+ {"public:all", "is-private.com/foo", "zzz", false},
+ {"public:all,private:none", "is-public.com/foo", "zzz", true},
+ {"public:all,private:none", "is-private.com/foo", "zzz", false},
+ {"*:all", "is-public.com/foo", "zzz", true},
+ {"golang.org:git", "golang.org/x/text", "zzz", false},
+ {"golang.org:git", "golang.org/x/text", "git", true},
+ {"golang.org:zzz", "golang.org/x/text", "zzz", true},
+ {"golang.org:zzz", "golang.org/x/text", "git", false},
+ {"golang.org:zzz", "golang.org/x/text", "zzz", true},
+ {"golang.org:zzz", "golang.org/x/text", "git", false},
+ {"golang.org:git|hg", "golang.org/x/text", "hg", true},
+ {"golang.org:git|hg", "golang.org/x/text", "git", true},
+ {"golang.org:git|hg", "golang.org/x/text", "zzz", false},
+ {"golang.org:all", "golang.org/x/text", "hg", true},
+ {"golang.org:all", "golang.org/x/text", "git", true},
+ {"golang.org:all", "golang.org/x/text", "zzz", true},
+ {"other.xyz/p:none,golang.org/x:git", "other.xyz/p/x", "git", false},
+ {"other.xyz/p:none,golang.org/x:git", "unexpected.com", "git", false},
+ {"other.xyz/p:none,golang.org/x:git", "golang.org/x/text", "zzz", false},
+ {"other.xyz/p:none,golang.org/x:git", "golang.org/x/text", "git", true},
+ {"other.xyz/p:none,golang.org/x:zzz", "golang.org/x/text", "zzz", true},
+ {"other.xyz/p:none,golang.org/x:zzz", "golang.org/x/text", "git", false},
+ {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/x/text", "hg", true},
+ {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/x/text", "git", true},
+ {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/x/text", "zzz", false},
+ {"other.xyz/p:none,golang.org/x:all", "golang.org/x/text", "hg", true},
+ {"other.xyz/p:none,golang.org/x:all", "golang.org/x/text", "git", true},
+ {"other.xyz/p:none,golang.org/x:all", "golang.org/x/text", "zzz", true},
+ {"other.xyz/p:none,golang.org/x:git", "golang.org/y/text", "zzz", false},
+ {"other.xyz/p:none,golang.org/x:git", "golang.org/y/text", "git", false},
+ {"other.xyz/p:none,golang.org/x:zzz", "golang.org/y/text", "zzz", false},
+ {"other.xyz/p:none,golang.org/x:zzz", "golang.org/y/text", "git", false},
+ {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/y/text", "hg", false},
+ {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/y/text", "git", false},
+ {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/y/text", "zzz", false},
+ {"other.xyz/p:none,golang.org/x:all", "golang.org/y/text", "hg", false},
+ {"other.xyz/p:none,golang.org/x:all", "golang.org/y/text", "git", false},
+ {"other.xyz/p:none,golang.org/x:all", "golang.org/y/text", "zzz", false},
+}
+
+func TestGOVCS(t *testing.T) {
+ for _, tt := range govcsTests {
+ cfg, err := parseGOVCS(tt.govcs)
+ if err != nil {
+ t.Errorf("parseGOVCS(%q): %v", tt.govcs, err)
+ continue
+ }
+ private := strings.HasPrefix(tt.path, "is-private")
+ ok := cfg.allow(tt.path, private, tt.vcs)
+ if ok != tt.ok {
+ t.Errorf("parseGOVCS(%q).allow(%q, %v, %q) = %v, want %v",
+ tt.govcs, tt.path, private, tt.vcs, ok, tt.ok)
+ }
+ }
+}
+
+var govcsErrors = []struct {
+ s string
+ err string
+}{
+ {`,`, `empty entry in GOVCS`},
+ {`,x`, `empty entry in GOVCS`},
+ {`x,`, `malformed entry in GOVCS (missing colon): "x"`},
+ {`x:y,`, `empty entry in GOVCS`},
+ {`x`, `malformed entry in GOVCS (missing colon): "x"`},
+ {`x:`, `empty VCS list in GOVCS: "x:"`},
+ {`x:|`, `empty VCS name in GOVCS: "x:|"`},
+ {`x:y|`, `empty VCS name in GOVCS: "x:y|"`},
+ {`x:|y`, `empty VCS name in GOVCS: "x:|y"`},
+ {`x:y,z:`, `empty VCS list in GOVCS: "z:"`},
+ {`x:y,z:|`, `empty VCS name in GOVCS: "z:|"`},
+ {`x:y,z:|w`, `empty VCS name in GOVCS: "z:|w"`},
+ {`x:y,z:w|`, `empty VCS name in GOVCS: "z:w|"`},
+ {`x:y,z:w||v`, `empty VCS name in GOVCS: "z:w||v"`},
+ {`x:y,x:z`, `unreachable pattern in GOVCS: "x:z" after "x:y"`},
+}
+
+func TestGOVCSErrors(t *testing.T) {
+ for _, tt := range govcsErrors {
+ _, err := parseGOVCS(tt.s)
+ if err == nil || !strings.Contains(err.Error(), tt.err) {
+ t.Errorf("parseGOVCS(%s): err=%v, want %v", tt.s, err, tt.err)
+ }
+ }
+}
diff --git a/src/cmd/go/internal/vcweb/auth.go b/src/cmd/go/internal/vcweb/auth.go
new file mode 100644
index 0000000..383bf75
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/auth.go
@@ -0,0 +1,108 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcweb
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "path"
+ "strings"
+)
+
+// authHandler serves requests only if the Basic Auth data sent with the request
+// matches the contents of a ".access" file in the requested directory (or, if
+// none exists there, in the nearest parent directory that contains one).
+//
+// For each request, the handler looks for a file named ".access" and parses it
+// as a JSON-serialized accessToken. If the credentials from the request match
+// the accessToken, the file is served normally; otherwise, it is rejected with
+// the StatusCode and Message provided by the token.
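+//
+// For example, a ".access" file with the following (illustrative) contents
+// requires the credentials aladdin/opensesame and rejects other requests with
+// status 401 and the message "unauthorized":
+//
+// {"Username": "aladdin", "Password": "opensesame", "StatusCode": 401, "Message": "unauthorized"}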
+type authHandler struct{}
+
+type accessToken struct {
+ Username, Password string
+ StatusCode int // defaults to 401.
+ Message string
+}
+
+func (h *authHandler) Available() bool { return true }
+
+func (h *authHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) {
+ fs := http.Dir(dir)
+
+ handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ urlPath := req.URL.Path
+ if urlPath != "" && strings.HasPrefix(path.Base(urlPath), ".") {
+ http.Error(w, "filename contains leading dot", http.StatusBadRequest)
+ return
+ }
+
+ f, err := fs.Open(urlPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ http.NotFound(w, req)
+ } else {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+ return
+ }
+
+ accessDir := urlPath
+ if fi, err := f.Stat(); err == nil && !fi.IsDir() {
+ accessDir = path.Dir(urlPath)
+ }
+ f.Close()
+
+ var accessFile http.File
+ for {
+ var err error
+ accessFile, err = fs.Open(path.Join(accessDir, ".access"))
+ if err == nil {
+ break
+ }
+
+ if !os.IsNotExist(err) {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if accessDir == "." {
+ http.Error(w, "failed to locate access file", http.StatusInternalServerError)
+ return
+ }
+ accessDir = path.Dir(accessDir)
+ }
+
+ data, err := io.ReadAll(accessFile)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var token accessToken
+ if err := json.Unmarshal(data, &token); err != nil {
+ logger.Print(err)
+ http.Error(w, "malformed access file", http.StatusInternalServerError)
+ return
+ }
+ if username, password, ok := req.BasicAuth(); !ok || username != token.Username || password != token.Password {
+ code := token.StatusCode
+ if code == 0 {
+ code = http.StatusUnauthorized
+ }
+ if code == http.StatusUnauthorized {
+ w.Header().Add("WWW-Authenticate", fmt.Sprintf("basic realm=%s", accessDir))
+ }
+ http.Error(w, token.Message, code)
+ return
+ }
+
+ http.FileServer(fs).ServeHTTP(w, req)
+ })
+
+ return handler, nil
+}
diff --git a/src/cmd/go/internal/vcweb/bzr.go b/src/cmd/go/internal/vcweb/bzr.go
new file mode 100644
index 0000000..a915fb2
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/bzr.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcweb
+
+import (
+ "log"
+ "net/http"
+)
+
+type bzrHandler struct{}
+
+func (*bzrHandler) Available() bool { return true }
+
+func (*bzrHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) {
+ return http.FileServer(http.Dir(dir)), nil
+}
diff --git a/src/cmd/go/internal/vcweb/dir.go b/src/cmd/go/internal/vcweb/dir.go
new file mode 100644
index 0000000..2f122f4
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/dir.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcweb
+
+import (
+ "log"
+ "net/http"
+)
+
+// dirHandler is a vcsHandler that serves the raw contents of a directory.
+type dirHandler struct{}
+
+func (*dirHandler) Available() bool { return true }
+
+func (*dirHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) {
+ return http.FileServer(http.Dir(dir)), nil
+}
diff --git a/src/cmd/go/internal/vcweb/fossil.go b/src/cmd/go/internal/vcweb/fossil.go
new file mode 100644
index 0000000..cc24f2f
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/fossil.go
@@ -0,0 +1,61 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcweb
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "net/http/cgi"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sync"
+)
+
+type fossilHandler struct {
+ once sync.Once
+ fossilPath string
+ fossilPathErr error
+}
+
+func (h *fossilHandler) Available() bool {
+ h.once.Do(func() {
+ h.fossilPath, h.fossilPathErr = exec.LookPath("fossil")
+ })
+ return h.fossilPathErr == nil
+}
+
+func (h *fossilHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) {
+ if !h.Available() {
+ return nil, ServerNotInstalledError{name: "fossil"}
+ }
+
+ name := filepath.Base(dir)
+ db := filepath.Join(dir, name+".fossil")
+
+ cgiPath := db + ".cgi"
+ cgiScript := fmt.Sprintf("#!%s\nrepository: %s\n", h.fossilPath, db)
+ if err := os.WriteFile(cgiPath, []byte(cgiScript), 0755); err != nil {
+ return nil, err
+ }
+
+ handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if _, err := os.Stat(db); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ ch := &cgi.Handler{
+ Env: env,
+ Logger: logger,
+ Path: h.fossilPath,
+ Args: []string{cgiPath},
+ Dir: dir,
+ }
+ ch.ServeHTTP(w, req)
+ })
+
+ return handler, nil
+}
diff --git a/src/cmd/go/internal/vcweb/git.go b/src/cmd/go/internal/vcweb/git.go
new file mode 100644
index 0000000..316c238
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/git.go
@@ -0,0 +1,52 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcweb
+
+import (
+ "log"
+ "net/http"
+ "net/http/cgi"
+ "os/exec"
+ "runtime"
+ "slices"
+ "sync"
+)
+
+type gitHandler struct {
+ once sync.Once
+ gitPath string
+ gitPathErr error
+}
+
+func (h *gitHandler) Available() bool {
+ if runtime.GOOS == "plan9" {
+ // The Git command is usually not the real Git on Plan 9.
+ // See https://golang.org/issues/29640.
+ return false
+ }
+ h.once.Do(func() {
+ h.gitPath, h.gitPathErr = exec.LookPath("git")
+ })
+ return h.gitPathErr == nil
+}
+
+func (h *gitHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) {
+ if !h.Available() {
+ return nil, ServerNotInstalledError{name: "git"}
+ }
+
+ handler := &cgi.Handler{
+ Path: h.gitPath,
+ Logger: logger,
+ Args: []string{"http-backend"},
+ Dir: dir,
+ Env: append(slices.Clip(env),
+ "GIT_PROJECT_ROOT="+dir,
+ "GIT_HTTP_EXPORT_ALL=1",
+ ),
+ }
+
+ return handler, nil
+}
diff --git a/src/cmd/go/internal/vcweb/hg.go b/src/cmd/go/internal/vcweb/hg.go
new file mode 100644
index 0000000..4571277
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/hg.go
@@ -0,0 +1,123 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcweb
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "io"
+ "log"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "os/exec"
+ "slices"
+ "strings"
+ "sync"
+ "time"
+)
+
+type hgHandler struct {
+ once sync.Once
+ hgPath string
+ hgPathErr error
+}
+
+func (h *hgHandler) Available() bool {
+ h.once.Do(func() {
+ h.hgPath, h.hgPathErr = exec.LookPath("hg")
+ })
+ return h.hgPathErr == nil
+}
+
+func (h *hgHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) {
+ if !h.Available() {
+ return nil, ServerNotInstalledError{name: "hg"}
+ }
+
+ handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ // Mercurial has a CGI server implementation (called hgweb). In theory we
+ // could use that — however, assuming that hgweb is even installed, the
+ // configuration for hgweb varies by Python version (2 vs 3), and we would
+ // rather not go rooting around trying to find the right Python version to
+ // run.
+ //
+ // Instead, we'll take a somewhat more roundabout approach: we assume that
+ // if "hg" works at all then "hg serve" works too, and we'll execute that as
+ // a subprocess, using a reverse proxy to forward the request and response.
+
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, h.hgPath, "serve", "--port", "0", "--address", "localhost", "--accesslog", os.DevNull, "--name", "vcweb", "--print-url")
+ cmd.Dir = dir
+ cmd.Env = append(slices.Clip(env), "PWD="+dir)
+
+ cmd.Cancel = func() error {
+ err := cmd.Process.Signal(os.Interrupt)
+ if err != nil && !errors.Is(err, os.ErrProcessDone) {
+ err = cmd.Process.Kill()
+ }
+ return err
+ }
+ // This WaitDelay is arbitrary. After 'hg serve' prints its URL, any further
+ // I/O is only for debugging. (The actual output goes through the HTTP URL,
+ // not the standard I/O streams.)
+ cmd.WaitDelay = 10 * time.Second
+
+ stderr := new(strings.Builder)
+ cmd.Stderr = stderr
+
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if err := cmd.Start(); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ var wg sync.WaitGroup
+ defer func() {
+ cancel()
+ err := cmd.Wait()
+ if out := strings.TrimSuffix(stderr.String(), "interrupted!\n"); out != "" {
+ logger.Printf("%v: %v\n%s", cmd, err, out)
+ } else {
+ logger.Printf("%v", cmd)
+ }
+ wg.Wait()
+ }()
+
+ r := bufio.NewReader(stdout)
+ line, err := r.ReadString('\n')
+ if err != nil {
+ return
+ }
+ // We have read what should be the server URL. 'hg serve' shouldn't need to
+ // write anything else to stdout, but it's not a big deal if it does anyway.
+ // Keep the stdout pipe open so that 'hg serve' won't get a SIGPIPE, but
+ // actively discard its output so that it won't hang on a blocking write.
+ wg.Add(1)
+ go func() {
+ io.Copy(io.Discard, r)
+ wg.Done()
+ }()
+
+ u, err := url.Parse(strings.TrimSpace(line))
+ if err != nil {
+ logger.Printf("%v: %v", cmd, err)
+ http.Error(w, err.Error(), http.StatusBadGateway)
+ return
+ }
+ logger.Printf("proxying hg request to %s", u)
+ httputil.NewSingleHostReverseProxy(u).ServeHTTP(w, req)
+ })
+
+ return handler, nil
+}
diff --git a/src/cmd/go/internal/vcweb/insecure.go b/src/cmd/go/internal/vcweb/insecure.go
new file mode 100644
index 0000000..1d6af25
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/insecure.go
@@ -0,0 +1,42 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcweb
+
+import (
+ "log"
+ "net/http"
+)
+
+// insecureHandler redirects requests to the same host and path but using the
+// "http" scheme instead of "https".
+type insecureHandler struct{}
+
+func (h *insecureHandler) Available() bool { return true }
+
+func (h *insecureHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) {
+ // The insecure-redirect handler implementation doesn't depend on dir or env.
+ //
+ // The only effect of the directory is to determine which prefix the caller
+ // will strip from the request before passing it on to this handler.
+ return h, nil
+}
+
+func (h *insecureHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if req.Host == "" && req.URL.Host == "" {
+ http.Error(w, "no Host provided in request", http.StatusBadRequest)
+ return
+ }
+
+ // Note that if the handler is wrapped with http.StripPrefix, the prefix
+ // will remain stripped in the redirected URL, preventing redirect loops
+ // if the scheme is already "http".
+
+ u := *req.URL
+ u.Scheme = "http"
+ u.User = nil
+ u.Host = req.Host
+
+ http.Redirect(w, req, u.String(), http.StatusFound)
+}
diff --git a/src/cmd/go/internal/vcweb/script.go b/src/cmd/go/internal/vcweb/script.go
new file mode 100644
index 0000000..c35b46f
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/script.go
@@ -0,0 +1,345 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcweb
+
+import (
+ "bufio"
+ "bytes"
+ "cmd/go/internal/script"
+ "context"
+ "errors"
+ "fmt"
+ "internal/txtar"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/zip"
+)
+
+// newScriptEngine returns a script engine augmented with commands for
+// reproducing version-control repositories by replaying commits.
+func newScriptEngine() *script.Engine {
+ conds := script.DefaultConds()
+
+ interrupt := func(cmd *exec.Cmd) error { return cmd.Process.Signal(os.Interrupt) }
+ gracePeriod := 30 * time.Second // arbitrary
+
+ cmds := script.DefaultCmds()
+ cmds["at"] = scriptAt()
+ cmds["bzr"] = script.Program("bzr", interrupt, gracePeriod)
+ cmds["fossil"] = script.Program("fossil", interrupt, gracePeriod)
+ cmds["git"] = script.Program("git", interrupt, gracePeriod)
+ cmds["hg"] = script.Program("hg", interrupt, gracePeriod)
+ cmds["handle"] = scriptHandle()
+ cmds["modzip"] = scriptModzip()
+ cmds["svnadmin"] = script.Program("svnadmin", interrupt, gracePeriod)
+ cmds["svn"] = script.Program("svn", interrupt, gracePeriod)
+ cmds["unquote"] = scriptUnquote()
+
+ return &script.Engine{
+ Cmds: cmds,
+ Conds: conds,
+ }
+}
+
+// loadScript interprets the given script content using the vcweb script engine.
+// loadScript always returns either a non-nil handler or a non-nil error.
+//
+// The script content must be a txtar archive with a comment containing a script
+// with exactly one "handle" command and zero or more VCS commands to prepare
+// the repository to be served.
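+//
+// For example (illustrative only), a minimal script that serves a plain
+// directory of files might look like:
+//
+// handle dir
+// -- hello.txt --
+// hello, world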
+func (s *Server) loadScript(ctx context.Context, logger *log.Logger, scriptPath string, scriptContent []byte, workDir string) (http.Handler, error) {
+ ar := txtar.Parse(scriptContent)
+
+ if err := os.MkdirAll(workDir, 0755); err != nil {
+ return nil, err
+ }
+
+ st, err := s.newState(ctx, workDir)
+ if err != nil {
+ return nil, err
+ }
+ if err := st.ExtractFiles(ar); err != nil {
+ return nil, err
+ }
+
+ scriptName := filepath.Base(scriptPath)
+ scriptLog := new(strings.Builder)
+ err = s.engine.Execute(st, scriptName, bufio.NewReader(bytes.NewReader(ar.Comment)), scriptLog)
+ closeErr := st.CloseAndWait(scriptLog)
+ logger.Printf("%s:", scriptName)
+ io.WriteString(logger.Writer(), scriptLog.String())
+ io.WriteString(logger.Writer(), "\n")
+ if err != nil {
+ return nil, err
+ }
+ if closeErr != nil {
+ return nil, closeErr
+ }
+
+ sc, err := getScriptCtx(st)
+ if err != nil {
+ return nil, err
+ }
+ if sc.handler == nil {
+ return nil, errors.New("script completed without setting handler")
+ }
+ return sc.handler, nil
+}
+
+// newState returns a new script.State for executing scripts in workDir.
+func (s *Server) newState(ctx context.Context, workDir string) (*script.State, error) {
+ ctx = &scriptCtx{
+ Context: ctx,
+ server: s,
+ }
+
+ st, err := script.NewState(ctx, workDir, s.env)
+ if err != nil {
+ return nil, err
+ }
+ return st, nil
+}
+
+// scriptEnviron returns a new environment that attempts to provide predictable
+// behavior for the supported version-control tools.
+func scriptEnviron(homeDir string) []string {
+ env := []string{
+ "USER=gopher",
+ homeEnvName() + "=" + homeDir,
+ "GIT_CONFIG_NOSYSTEM=1",
+ "HGRCPATH=" + filepath.Join(homeDir, ".hgrc"),
+ "HGENCODING=utf-8",
+ }
+ // Preserve additional environment variables that may be needed by VCS tools.
+ for _, k := range []string{
+ pathEnvName(),
+ tempEnvName(),
+ "SYSTEMROOT", // must be preserved on Windows to find DLLs; golang.org/issue/25210
+ "WINDIR", // must be preserved on Windows to be able to run PowerShell command; golang.org/issue/30711
+ "ComSpec", // must be preserved on Windows to be able to run Batch files; golang.org/issue/56555
+ "DYLD_LIBRARY_PATH", // must be preserved on macOS systems to find shared libraries
+ "LD_LIBRARY_PATH", // must be preserved on Unix systems to find shared libraries
+ "LIBRARY_PATH", // allow override of non-standard static library paths
+ "PYTHONPATH", // may be needed by hg to find imported modules
+ } {
+ if v, ok := os.LookupEnv(k); ok {
+ env = append(env, k+"="+v)
+ }
+ }
+
+ if os.Getenv("GO_BUILDER_NAME") != "" || os.Getenv("GIT_TRACE_CURL") == "1" {
+ // To help diagnose https://go.dev/issue/52545,
+ // enable tracing for Git HTTPS requests.
+ env = append(env,
+ "GIT_TRACE_CURL=1",
+ "GIT_TRACE_CURL_NO_DATA=1",
+ "GIT_REDACT_COOKIES=o,SSO,GSSO_Uberproxy")
+ }
+
+ return env
+}
+
+// homeEnvName returns the environment variable used by os.UserHomeDir
+// to locate the user's home directory.
+func homeEnvName() string {
+ switch runtime.GOOS {
+ case "windows":
+ return "USERPROFILE"
+ case "plan9":
+ return "home"
+ default:
+ return "HOME"
+ }
+}
+
+// tempEnvName returns the environment variable used by os.TempDir
+// to locate the default directory for temporary files.
+func tempEnvName() string {
+ switch runtime.GOOS {
+ case "windows":
+ return "TMP"
+ case "plan9":
+ return "TMPDIR" // actually plan 9 doesn't have one at all but this is fine
+ default:
+ return "TMPDIR"
+ }
+}
+
+// pathEnvName returns the environment variable used by exec.LookPath to
+// identify directories to search for executables.
+func pathEnvName() string {
+ switch runtime.GOOS {
+ case "plan9":
+ return "path"
+ default:
+ return "PATH"
+ }
+}
+
+// A scriptCtx is a context.Context that stores additional state for script
+// commands.
+type scriptCtx struct {
+ context.Context
+ server *Server
+ commitTime time.Time
+ handlerName string
+ handler http.Handler
+}
+
+// scriptCtxKey is the context key under which the *scriptCtx is stored in a script's Context.
+type scriptCtxKey struct{}
+
+func (sc *scriptCtx) Value(key any) any {
+ if key == (scriptCtxKey{}) {
+ return sc
+ }
+ return sc.Context.Value(key)
+}
+
+func getScriptCtx(st *script.State) (*scriptCtx, error) {
+ sc, ok := st.Context().Value(scriptCtxKey{}).(*scriptCtx)
+ if !ok {
+ return nil, errors.New("scriptCtx not found in State.Context")
+ }
+ return sc, nil
+}
+
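+// scriptAt returns the "at" command, which sets the commit time used by
+// subsequent version-control commands, e.g. (illustrative timestamp):
+//
+// at 2022-02-22T15:04:05Z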
+func scriptAt() script.Cmd {
+ return script.Command(
+ script.CmdUsage{
+ Summary: "set the current commit time for all version control systems",
+ Args: "time",
+ Detail: []string{
+ "The argument must be an absolute timestamp in RFC3339 format.",
+ },
+ },
+ func(st *script.State, args ...string) (script.WaitFunc, error) {
+ if len(args) != 1 {
+ return nil, script.ErrUsage
+ }
+
+ sc, err := getScriptCtx(st)
+ if err != nil {
+ return nil, err
+ }
+
+ sc.commitTime, err = time.ParseInLocation(time.RFC3339, args[0], time.UTC)
+ if err == nil {
+ st.Setenv("GIT_COMMITTER_DATE", args[0])
+ st.Setenv("GIT_AUTHOR_DATE", args[0])
+ }
+ return nil, err
+ })
+}
+
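+// scriptHandle returns the "handle" command, which selects the vcsHandler that
+// will serve the script's output, e.g. "handle git", or "handle dir public" to
+// serve a (hypothetical) "public" subdirectory.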
+func scriptHandle() script.Cmd {
+ return script.Command(
+ script.CmdUsage{
+ Summary: "set the HTTP handler that will serve the script's output",
+ Args: "handler [dir]",
+ Detail: []string{
+ "The handler will be passed the script's current working directory and environment as arguments.",
+ "Valid handlers include 'dir' (for general http.Dir serving), 'bzr', 'fossil', 'git', and 'hg'",
+ },
+ },
+ func(st *script.State, args ...string) (script.WaitFunc, error) {
+ if len(args) == 0 || len(args) > 2 {
+ return nil, script.ErrUsage
+ }
+
+ sc, err := getScriptCtx(st)
+ if err != nil {
+ return nil, err
+ }
+
+ if sc.handler != nil {
+ return nil, fmt.Errorf("server handler already set to %s", sc.handlerName)
+ }
+
+ name := args[0]
+ h, ok := sc.server.vcsHandlers[name]
+ if !ok {
+ return nil, fmt.Errorf("unrecognized VCS %q", name)
+ }
+ sc.handlerName = name
+ if !h.Available() {
+ return nil, ServerNotInstalledError{name}
+ }
+
+ dir := st.Getwd()
+ if len(args) >= 2 {
+ dir = st.Path(args[1])
+ }
+ sc.handler, err = h.Handler(dir, st.Environ(), sc.server.logger)
+ return nil, err
+ })
+}
+
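+// scriptModzip returns the "modzip" command, which packages a directory as a
+// Go module zip file, e.g. (illustrative arguments):
+//
+// modzip m.zip example.com/m@v1.0.0 .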
+func scriptModzip() script.Cmd {
+ return script.Command(
+ script.CmdUsage{
+ Summary: "create a Go module zip file from a directory",
+ Args: "zipfile path@version dir",
+ },
+ func(st *script.State, args ...string) (wait script.WaitFunc, err error) {
+ if len(args) != 3 {
+ return nil, script.ErrUsage
+ }
+ zipPath := st.Path(args[0])
+ mPath, version, ok := strings.Cut(args[1], "@")
+ if !ok {
+ return nil, script.ErrUsage
+ }
+ dir := st.Path(args[2])
+
+ if err := os.MkdirAll(filepath.Dir(zipPath), 0755); err != nil {
+ return nil, err
+ }
+ f, err := os.Create(zipPath)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if closeErr := f.Close(); err == nil {
+ err = closeErr
+ }
+ }()
+
+ return nil, zip.CreateFromDir(f, module.Version{Path: mPath, Version: version}, dir)
+ })
+}
+
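+// scriptUnquote returns the "unquote" command, which interprets its argument
+// as a quoted Go string and writes the result to the script's stdout.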
+func scriptUnquote() script.Cmd {
+ return script.Command(
+ script.CmdUsage{
+ Summary: "unquote the argument as a Go string",
+ Args: "string",
+ },
+ func(st *script.State, args ...string) (script.WaitFunc, error) {
+ if len(args) != 1 {
+ return nil, script.ErrUsage
+ }
+
+ s, err := strconv.Unquote(`"` + args[0] + `"`)
+ if err != nil {
+ return nil, err
+ }
+
+ wait := func(*script.State) (stdout, stderr string, err error) {
+ return s, "", nil
+ }
+ return wait, nil
+ })
+}
diff --git a/src/cmd/go/internal/vcweb/svn.go b/src/cmd/go/internal/vcweb/svn.go
new file mode 100644
index 0000000..60222f1
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/svn.go
@@ -0,0 +1,199 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcweb
+
+import (
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "os/exec"
+ "strings"
+ "sync"
+)
+
+// An svnHandler serves requests for Subversion repos.
+//
+// Unlike the other vcweb handlers, svnHandler does not serve the Subversion
+// protocol directly over the HTTP connection. Instead, it opens a separate port
+// that serves the (non-HTTP) 'svn' protocol. The test binary can retrieve the
+// URL for that port by sending an HTTP request with the query parameter
+// "vcwebsvn=1".
+//
+// We take this approach because the 'svn' protocol is implemented by a
+// lightweight 'svnserve' binary that is usually packaged along with the 'svn'
+// client binary, whereas the only known implementation of the Subversion HTTP
+// protocol is the mod_dav_svn apache2 module. Apache2 has a lot of dependencies
+// and also seems to rely on global configuration via well-known file paths, so
+// implementing a hermetic test using apache2 would require the test to run in a
+// complicated container environment, which wouldn't be nearly as
+// straightforward for Go contributors to set up and test against on their local
+// machine.
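+//
+// For example, a test client could request "<server>/repo?vcwebsvn=1"
+// (illustrative URL) and would receive a single line such as
+// "svn://127.0.0.1:<port>" to pass on to the 'svn' client.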
+type svnHandler struct {
+ svnRoot string // a directory containing all svn repos to be served
+ logger *log.Logger
+
+ pathOnce sync.Once
+ svnservePath string // the path to the 'svnserve' executable
+ svnserveErr error
+
+ listenOnce sync.Once
+ s chan *svnState // 1-buffered
+}
+
+// An svnState describes the state of a port serving the 'svn://' protocol.
+type svnState struct {
+ listener net.Listener
+ listenErr error
+ conns map[net.Conn]struct{}
+ closing bool
+ done chan struct{}
+}
+
+func (h *svnHandler) Available() bool {
+ h.pathOnce.Do(func() {
+ h.svnservePath, h.svnserveErr = exec.LookPath("svnserve")
+ })
+ return h.svnserveErr == nil
+}
+
+// Handler returns an http.Handler that checks for the "vcwebsvn" query
+// parameter and then serves the 'svn://' URL for the repository at the
+// requested path.
+// The HTTP client is expected to read that URL and pass it to the 'svn' client.
+func (h *svnHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) {
+ if !h.Available() {
+ return nil, ServerNotInstalledError{name: "svn"}
+ }
+
+ // Go ahead and start the listener now, so that if it fails (for example, due
+ // to port exhaustion) we can return an error from the Handler method instead
+ // of serving an error for each individual HTTP request.
+ h.listenOnce.Do(func() {
+ h.s = make(chan *svnState, 1)
+ l, err := net.Listen("tcp", "localhost:0")
+ done := make(chan struct{})
+
+ h.s <- &svnState{
+ listener: l,
+ listenErr: err,
+ conns: map[net.Conn]struct{}{},
+ done: done,
+ }
+ if err != nil {
+ close(done)
+ return
+ }
+
+ h.logger.Printf("serving svn on svn://%v", l.Addr())
+
+ go func() {
+ for {
+ c, err := l.Accept()
+
+ s := <-h.s
+ if err != nil {
+ s.listenErr = err
+ if len(s.conns) == 0 {
+ close(s.done)
+ }
+ h.s <- s
+ return
+ }
+ if s.closing {
+ c.Close()
+ } else {
+ s.conns[c] = struct{}{}
+ go h.serve(c)
+ }
+ h.s <- s
+ }
+ }()
+ })
+
+ s := <-h.s
+ addr := ""
+ if s.listener != nil {
+ addr = s.listener.Addr().String()
+ }
+ err := s.listenErr
+ h.s <- s
+ if err != nil {
+ return nil, err
+ }
+
+ handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if req.FormValue("vcwebsvn") != "" {
+ w.Header().Add("Content-Type", "text/plain; charset=UTF-8")
+ io.WriteString(w, "svn://"+addr+"\n")
+ return
+ }
+ http.NotFound(w, req)
+ })
+
+ return handler, nil
+}
+
+// serve serves a single 'svn://' connection on c.
+func (h *svnHandler) serve(c net.Conn) {
+ defer func() {
+ c.Close()
+
+ s := <-h.s
+ delete(s.conns, c)
+ if len(s.conns) == 0 && s.listenErr != nil {
+ close(s.done)
+ }
+ h.s <- s
+ }()
+
+ // The "--inetd" flag causes svnserve to speak the 'svn' protocol over its
+ // stdin and stdout streams as if invoked by the Unix "inetd" service.
+ // We aren't using inetd, but we are implementing essentially the same
+ // approach: using a host process to listen for connections and spawn
+ // subprocesses to serve them.
+ cmd := exec.Command(h.svnservePath, "--read-only", "--root="+h.svnRoot, "--inetd")
+ cmd.Stdin = c
+ cmd.Stdout = c
+ stderr := new(strings.Builder)
+ cmd.Stderr = stderr
+ err := cmd.Run()
+
+ var errFrag any = "ok"
+ if err != nil {
+ errFrag = err
+ }
+ stderrFrag := ""
+ if stderr.Len() > 0 {
+ stderrFrag = "\n" + stderr.String()
+ }
+ h.logger.Printf("%v: %s%s", cmd, errFrag, stderrFrag)
+}
+
+// Close stops accepting new svn:// connections and terminates the existing
+// ones, then waits for the 'svnserve' subprocesses to complete.
+func (h *svnHandler) Close() error {
+ h.listenOnce.Do(func() {})
+ if h.s == nil {
+ return nil
+ }
+
+ var err error
+ s := <-h.s
+ s.closing = true
+ if s.listener == nil {
+ err = s.listenErr
+ } else {
+ err = s.listener.Close()
+ }
+ for c := range s.conns {
+ c.Close()
+ }
+ done := s.done
+ h.s <- s
+
+ <-done
+ return err
+}
diff --git a/src/cmd/go/internal/vcweb/vcstest/vcstest.go b/src/cmd/go/internal/vcweb/vcstest/vcstest.go
new file mode 100644
index 0000000..d460259
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/vcstest/vcstest.go
@@ -0,0 +1,169 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vcstest serves the repository scripts in cmd/go/testdata/vcstest
+// using the [vcweb] script engine.
+package vcstest
+
+import (
+ "cmd/go/internal/vcs"
+ "cmd/go/internal/vcweb"
+ "cmd/go/internal/web"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "internal/testenv"
+ "io"
+ "log"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
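+// Hosts lists the hostnames that NewServer reroutes to the test-local vcweb
+// server via the web package's test interceptors.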
+var Hosts = []string{
+ "vcs-test.golang.org",
+}
+
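+// A Server is a vcweb.Server wrapped in test-local HTTP and HTTPS servers,
+// with the vcs and web test hooks pointed at it for the Hosts listed above.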
+type Server struct {
+ vcweb *vcweb.Server
+ workDir string
+ HTTP *httptest.Server
+ HTTPS *httptest.Server
+}
+
+// NewServer returns a new test-local vcweb server that serves VCS requests
+// for modules with paths that begin with "vcs-test.golang.org" using the
+// scripts in cmd/go/testdata/vcstest.
+func NewServer() (srv *Server, err error) {
+ if vcs.VCSTestRepoURL != "" {
+ panic("vcs URL hooks already set")
+ }
+
+ scriptDir := filepath.Join(testenv.GOROOT(nil), "src/cmd/go/testdata/vcstest")
+
+ workDir, err := os.MkdirTemp("", "vcstest")
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ os.RemoveAll(workDir)
+ }
+ }()
+
+ logger := log.Default()
+ if !testing.Verbose() {
+ logger = log.New(io.Discard, "", log.LstdFlags)
+ }
+ handler, err := vcweb.NewServer(scriptDir, workDir, logger)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ handler.Close()
+ }
+ }()
+
+ srvHTTP := httptest.NewServer(handler)
+ httpURL, err := url.Parse(srvHTTP.URL)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ srvHTTP.Close()
+ }
+ }()
+
+ srvHTTPS := httptest.NewTLSServer(handler)
+ httpsURL, err := url.Parse(srvHTTPS.URL)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ srvHTTPS.Close()
+ }
+ }()
+
+ srv = &Server{
+ vcweb: handler,
+ workDir: workDir,
+ HTTP: srvHTTP,
+ HTTPS: srvHTTPS,
+ }
+ vcs.VCSTestRepoURL = srv.HTTP.URL
+ vcs.VCSTestHosts = Hosts
+
+ var interceptors []web.Interceptor
+ for _, host := range Hosts {
+ interceptors = append(interceptors,
+ web.Interceptor{Scheme: "http", FromHost: host, ToHost: httpURL.Host, Client: srv.HTTP.Client()},
+ web.Interceptor{Scheme: "https", FromHost: host, ToHost: httpsURL.Host, Client: srv.HTTPS.Client()})
+ }
+ web.EnableTestHooks(interceptors)
+
+ fmt.Fprintln(os.Stderr, "vcs-test.golang.org rerouted to "+srv.HTTP.URL)
+ fmt.Fprintln(os.Stderr, "https://vcs-test.golang.org rerouted to "+srv.HTTPS.URL)
+
+ return srv, nil
+}
+
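+// Close shuts down the HTTP and HTTPS servers and the underlying vcweb server,
+// clears the vcs and web test hooks, and removes the server's work directory.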
+func (srv *Server) Close() error {
+ if vcs.VCSTestRepoURL != srv.HTTP.URL {
+ panic("vcs URL hooks modified before Close")
+ }
+ vcs.VCSTestRepoURL = ""
+ vcs.VCSTestHosts = nil
+ web.DisableTestHooks()
+
+ srv.HTTP.Close()
+ srv.HTTPS.Close()
+ err := srv.vcweb.Close()
+ if rmErr := os.RemoveAll(srv.workDir); err == nil {
+ err = rmErr
+ }
+ return err
+}
+
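+// WriteCertificateFile writes the HTTPS server's certificate to a PEM file in
+// the server's work directory and returns the path to that file.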
+func (srv *Server) WriteCertificateFile() (string, error) {
+ b := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: srv.HTTPS.Certificate().Raw,
+ })
+
+ filename := filepath.Join(srv.workDir, "cert.pem")
+ if err := os.WriteFile(filename, b, 0644); err != nil {
+ return "", err
+ }
+ return filename, nil
+}
+
+// TLSClient returns an http.Client that can talk to the httptest.Server
+// whose certificate is written to the given file path.
+func TLSClient(certFile string) (*http.Client, error) {
+ client := &http.Client{
+ Transport: http.DefaultTransport.(*http.Transport).Clone(),
+ }
+
+ pemBytes, err := os.ReadFile(certFile)
+ if err != nil {
+ return nil, err
+ }
+
+ certpool := x509.NewCertPool()
+ if !certpool.AppendCertsFromPEM(pemBytes) {
+ return nil, fmt.Errorf("no certificates found in %s", certFile)
+ }
+ client.Transport.(*http.Transport).TLSClientConfig = &tls.Config{
+ RootCAs: certpool,
+ }
+
+ return client, nil
+}
diff --git a/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go b/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go
new file mode 100644
index 0000000..4a6d600
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go
@@ -0,0 +1,170 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcstest_test
+
+import (
+ "cmd/go/internal/vcweb"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/fs"
+ "log"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+)
+
+var (
+ dir = flag.String("dir", "../../../testdata/vcstest", "directory containing scripts to serve")
+ host = flag.String("host", "localhost", "hostname on which to serve HTTP")
+ port = flag.Int("port", -1, "port on which to serve HTTP; if nonnegative, skips running tests")
+)
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+
+ if *port >= 0 {
+ err := serveStandalone(*host, *port)
+ if err != nil {
+ log.Fatal(err)
+ }
+ os.Exit(0)
+ }
+
+ m.Run()
+}
+
+// serveStandalone serves the vcweb testdata in a standalone HTTP server.
+func serveStandalone(host string, port int) (err error) {
+ scriptDir, err := filepath.Abs(*dir)
+ if err != nil {
+ return err
+ }
+ work, err := os.MkdirTemp("", "vcweb")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if rmErr := os.RemoveAll(work); err == nil {
+ err = rmErr
+ }
+ }()
+
+ log.Printf("running scripts in %s", work)
+
+ v, err := vcweb.NewServer(scriptDir, work, log.Default())
+ if err != nil {
+ return err
+ }
+
+ l, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port))
+ if err != nil {
+ return err
+ }
+ log.Printf("serving on http://%s:%d/", host, l.Addr().(*net.TCPAddr).Port)
+
+ return http.Serve(l, v)
+}
+
+// TestScripts verifies that the VCS setup scripts in cmd/go/testdata/vcstest
+// run successfully.
+func TestScripts(t *testing.T) {
+ scriptDir, err := filepath.Abs(*dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ s, err := vcweb.NewServer(scriptDir, t.TempDir(), log.Default())
+ if err != nil {
+ t.Fatal(err)
+ }
+ srv := httptest.NewServer(s)
+
+ // To check for data races in the handler, run the root handler to produce an
+ // overview of the script status at an arbitrary point during the test.
+ // (We ignore the output because the expected failure mode is a friendly stack
+ // dump from the race detector.)
+ t.Run("overview", func(t *testing.T) {
+ t.Parallel()
+
+ time.Sleep(1 * time.Millisecond) // Give the other handlers time to race.
+
+ resp, err := http.Get(srv.URL)
+ if err == nil {
+ io.Copy(io.Discard, resp.Body)
+ resp.Body.Close()
+ } else {
+ t.Error(err)
+ }
+ })
+
+ t.Cleanup(func() {
+ // The subtests spawned by WalkDir run in parallel. When they complete, this
+ // Cleanup callback will run. At that point we fetch the root URL (which
+ // contains a status page), both to test that the root handler runs without
+ // crashing and to display a nice summary of the server's view of the test
+ // coverage.
+ resp, err := http.Get(srv.URL)
+ if err == nil {
+ var body []byte
+ body, err = io.ReadAll(resp.Body)
+ if err == nil && testing.Verbose() {
+ t.Logf("GET %s:\n%s", srv.URL, body)
+ }
+ resp.Body.Close()
+ }
+ if err != nil {
+ t.Error(err)
+ }
+
+ srv.Close()
+ })
+
+ err = filepath.WalkDir(scriptDir, func(path string, d fs.DirEntry, err error) error {
+ if err != nil || d.IsDir() {
+ return err
+ }
+
+ rel, err := filepath.Rel(scriptDir, path)
+ if err != nil {
+ return err
+ }
+ if rel == "README" {
+ return nil
+ }
+
+ t.Run(filepath.ToSlash(rel), func(t *testing.T) {
+ t.Parallel()
+
+ buf := new(strings.Builder)
+ logger := log.New(buf, "", log.LstdFlags)
+ // Load the script but don't try to serve the results:
+ // different VCS tools have different handler protocols,
+ // and the tests that actually use these repos will ensure
+ // that they are served correctly as a side effect anyway.
+ err := s.HandleScript(rel, logger, func(http.Handler) {})
+ if buf.Len() > 0 {
+ t.Log(buf)
+ }
+ if err != nil {
+ if notInstalled := (vcweb.ServerNotInstalledError{}); errors.As(err, &notInstalled) || errors.Is(err, exec.ErrNotFound) {
+ t.Skip(err)
+ }
+ t.Error(err)
+ }
+ })
+ return nil
+ })
+
+ if err != nil {
+ t.Error(err)
+ }
+}
diff --git a/src/cmd/go/internal/vcweb/vcweb.go b/src/cmd/go/internal/vcweb/vcweb.go
new file mode 100644
index 0000000..f748b34
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/vcweb.go
@@ -0,0 +1,425 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vcweb serves version control repos for testing the go command.
+//
+// It is loosely derived from golang.org/x/build/vcs-test/vcweb,
+// which ran as a service hosted at vcs-test.golang.org.
+//
+// When a repository URL is first requested, the vcweb [Server] dynamically
+// regenerates the repository using a script interpreted by a [script.Engine].
+// The script produces the server's contents for a corresponding root URL and
+// all subdirectories of that URL, which are then cached: subsequent requests
+// for any URL generated by the script will serve the script's previous output
+// until the script is modified.
+//
+// The script engine includes the script package's default commands and
+// conditions, as well as commands for each supported VCS binary (bzr, fossil,
+// git, hg, and svn), a "handle" command that informs the script which protocol
+// or handler to use to serve the request, and utilities "at" (which sets
+// environment variables for Git timestamps) and "unquote" (which unquotes its
+// argument as if it were a Go string literal).
+//
+// The server's "/" endpoint provides a summary of the available scripts,
+// and "/help" provides documentation for the script environment.
+//
+// To run a standalone server based on the vcweb engine, use:
+//
+// go test cmd/go/internal/vcweb/vcstest -v --port=0
+package vcweb
+
+import (
+ "bufio"
+ "cmd/go/internal/script"
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "log"
+ "net/http"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "runtime/debug"
+ "strings"
+ "sync"
+ "text/tabwriter"
+ "time"
+)
+
+// A Server serves cached, dynamically-generated version control repositories.
+type Server struct {
+ env []string
+ logger *log.Logger
+
+ scriptDir string
+ workDir string
+ homeDir string // $workdir/home
+ engine *script.Engine
+
+ scriptCache sync.Map // script path → *scriptResult
+
+ vcsHandlers map[string]vcsHandler
+}
+
+// A vcsHandler serves repositories over HTTP for a known version-control tool.
+type vcsHandler interface {
+ Available() bool
+ Handler(dir string, env []string, logger *log.Logger) (http.Handler, error)
+}
+
+// A scriptResult describes the cached result of executing a vcweb script.
+type scriptResult struct {
+ mu sync.RWMutex
+
+ hash [sha256.Size]byte // hash of the script file, for cache invalidation
+ hashTime time.Time // timestamp at which the script was run, for diagnostics
+
+ handler http.Handler // HTTP handler configured by the script
+ err error // error from executing the script, if any
+}
+
+// NewServer returns a Server that generates and serves repositories in workDir
+// using the scripts found in scriptDir and its subdirectories.
+//
+// A request for the path /foo/bar/baz will be handled by the first script along
+// that path that exists: $scriptDir/foo.txt, $scriptDir/foo/bar.txt, or
+// $scriptDir/foo/bar/baz.txt.
+func NewServer(scriptDir, workDir string, logger *log.Logger) (*Server, error) {
+ if scriptDir == "" {
+ panic("vcweb.NewServer: scriptDir is required")
+ }
+ var err error
+ scriptDir, err = filepath.Abs(scriptDir)
+ if err != nil {
+ return nil, err
+ }
+
+ if workDir == "" {
+ workDir, err = os.MkdirTemp("", "vcweb-*")
+ if err != nil {
+ return nil, err
+ }
+ logger.Printf("vcweb work directory: %s", workDir)
+ } else {
+ workDir, err = filepath.Abs(workDir)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ homeDir := filepath.Join(workDir, "home")
+ if err := os.MkdirAll(homeDir, 0755); err != nil {
+ return nil, err
+ }
+
+ env := scriptEnviron(homeDir)
+
+ s := &Server{
+ env: env,
+ logger: logger,
+ scriptDir: scriptDir,
+ workDir: workDir,
+ homeDir: homeDir,
+ engine: newScriptEngine(),
+ vcsHandlers: map[string]vcsHandler{
+ "auth": new(authHandler),
+ "dir": new(dirHandler),
+ "bzr": new(bzrHandler),
+ "fossil": new(fossilHandler),
+ "git": new(gitHandler),
+ "hg": new(hgHandler),
+ "insecure": new(insecureHandler),
+ "svn": &svnHandler{svnRoot: workDir, logger: logger},
+ },
+ }
+
+ if err := os.WriteFile(filepath.Join(s.homeDir, ".gitconfig"), []byte(gitConfig), 0644); err != nil {
+ return nil, err
+ }
+ gitConfigDir := filepath.Join(s.homeDir, ".config", "git")
+ if err := os.MkdirAll(gitConfigDir, 0755); err != nil {
+ return nil, err
+ }
+ if err := os.WriteFile(filepath.Join(gitConfigDir, "ignore"), []byte(""), 0644); err != nil {
+ return nil, err
+ }
+
+ if err := os.WriteFile(filepath.Join(s.homeDir, ".hgrc"), []byte(hgrc), 0644); err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
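+// Close closes every registered vcsHandler that implements io.Closer (such as
+// the svn handler, which owns a listener and 'svnserve' subprocesses) and
+// returns the first error encountered.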
+func (s *Server) Close() error {
+ var firstErr error
+ for _, h := range s.vcsHandlers {
+ if c, ok := h.(io.Closer); ok {
+ if closeErr := c.Close(); firstErr == nil {
+ firstErr = closeErr
+ }
+ }
+ }
+ return firstErr
+}
+
+// gitConfig contains a ~/.gitconfig file that attempts to provide
+// deterministic, platform-agnostic behavior for the 'git' command.
+var gitConfig = `
+[user]
+ name = Go Gopher
+ email = gopher@golang.org
+[init]
+ defaultBranch = main
+[core]
+ eol = lf
+[gui]
+ encoding = utf-8
+`[1:]
+
+// hgrc contains a ~/.hgrc file that attempts to provide
+// deterministic, platform-agnostic behavior for the 'hg' command.
+var hgrc = `
+[ui]
+username=Go Gopher <gopher@golang.org>
+[phases]
+new-commit=public
+[extensions]
+convert=
+`[1:]
+
+// ServeHTTP implements [http.Handler] for version-control repositories.
+func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s.logger.Printf("serving %s", req.URL)
+
+ defer func() {
+ if v := recover(); v != nil {
+ debug.PrintStack()
+ s.logger.Fatal(v)
+ }
+ }()
+
+ urlPath := req.URL.Path
+ if !strings.HasPrefix(urlPath, "/") {
+ urlPath = "/" + urlPath
+ }
+ clean := path.Clean(urlPath)[1:]
+ if clean == "" {
+ s.overview(w, req)
+ return
+ }
+ if clean == "help" {
+ s.help(w, req)
+ return
+ }
+
+ // Locate the script that generates the requested path.
+ // We follow directories all the way to the end, then look for a ".txt" file
+ // matching the first component that doesn't exist. That guarantees
+ // uniqueness: if a path exists as a directory, then it cannot exist as a
+ // ".txt" script (because the search would ignore that file).
+ scriptPath := "."
+ for _, part := range strings.Split(clean, "/") {
+ scriptPath = filepath.Join(scriptPath, part)
+ dir := filepath.Join(s.scriptDir, scriptPath)
+ if _, err := os.Stat(dir); err != nil {
+ if !os.IsNotExist(err) {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ // scriptPath does not exist as a directory, so it either is the script
+ // location or the script doesn't exist.
+ break
+ }
+ }
+ scriptPath += ".txt"
+
+ err := s.HandleScript(scriptPath, s.logger, func(handler http.Handler) {
+ handler.ServeHTTP(w, req)
+ })
+ if err != nil {
+ s.logger.Print(err)
+ if notFound := (ScriptNotFoundError{}); errors.As(err, &notFound) {
+ http.NotFound(w, req)
+ } else if notInstalled := (ServerNotInstalledError{}); errors.As(err, &notInstalled) || errors.Is(err, exec.ErrNotFound) {
+ http.Error(w, err.Error(), http.StatusNotImplemented)
+ } else {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+ }
+}
+
+// A ScriptNotFoundError indicates that the requested script file does not exist.
+// (It typically wraps a "stat" error for the script file.)
+type ScriptNotFoundError struct{ err error }
+
+func (e ScriptNotFoundError) Error() string { return e.err.Error() }
+func (e ScriptNotFoundError) Unwrap() error { return e.err }
+
+// A ServerNotInstalledError indicates that the server binary required for the
+// indicated VCS does not exist.
+type ServerNotInstalledError struct{ name string }
+
+func (v ServerNotInstalledError) Error() string {
+ return fmt.Sprintf("server for %#q VCS is not installed", v.name)
+}
+
+// HandleScript ensures that the script at scriptRelPath has been evaluated
+// with its current contents.
+//
+// If the script completed successfully, HandleScript invokes f on the handler
+// with the script's result still read-locked, and waits for it to return. (That
+// ensures that cache invalidation does not race with an in-flight handler.)
+//
+// Otherwise, HandleScript returns the (cached) error from executing the script.
+func (s *Server) HandleScript(scriptRelPath string, logger *log.Logger, f func(http.Handler)) error {
+ ri, ok := s.scriptCache.Load(scriptRelPath)
+ if !ok {
+ ri, _ = s.scriptCache.LoadOrStore(scriptRelPath, new(scriptResult))
+ }
+ r := ri.(*scriptResult)
+
+ relDir := strings.TrimSuffix(scriptRelPath, filepath.Ext(scriptRelPath))
+ workDir := filepath.Join(s.workDir, relDir)
+ prefix := path.Join("/", filepath.ToSlash(relDir))
+
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ for {
+ // For efficiency, we cache the script's output (in the work directory)
+ // across invocations. However, to allow for rapid iteration, we hash the
+ // script's contents and regenerate its output if the contents change.
+ //
+ // That way, one can use 'go run main.go' in this directory to stand up a
+ // server and see the output of the test script in order to fine-tune it.
+ content, err := os.ReadFile(filepath.Join(s.scriptDir, scriptRelPath))
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ return ScriptNotFoundError{err}
+ }
+
+ hash := sha256.Sum256(content)
+ if prevHash := r.hash; prevHash != hash {
+ // The script's hash has changed, so regenerate its output.
+ func() {
+ r.mu.RUnlock()
+ r.mu.Lock()
+ defer func() {
+ r.mu.Unlock()
+ r.mu.RLock()
+ }()
+ if r.hash != prevHash {
+ // The cached result changed while we were waiting on the lock.
+ // It may have been updated to our hash or something even newer,
+ // so don't overwrite it.
+ return
+ }
+
+ r.hash = hash
+ r.hashTime = time.Now()
+ r.handler, r.err = nil, nil
+
+ if err := os.RemoveAll(workDir); err != nil {
+ r.err = err
+ return
+ }
+
+ // Note: we use context.Background here instead of req.Context() so that we
+ // don't cache a spurious error (and lose work) if the request is canceled
+ // while the script is still running.
+ scriptHandler, err := s.loadScript(context.Background(), logger, scriptRelPath, content, workDir)
+ if err != nil {
+ r.err = err
+ return
+ }
+ r.handler = http.StripPrefix(prefix, scriptHandler)
+ }()
+ }
+
+ if r.hash != hash {
+ continue // Raced with an update from another handler; try again.
+ }
+
+ if r.err != nil {
+ return r.err
+ }
+ f(r.handler)
+ return nil
+ }
+}
+
+// overview serves an HTML summary of the status of the scripts in the server's
+// script directory.
+func (s *Server) overview(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, "<html>\n")
+ fmt.Fprintf(w, "<title>vcweb</title>\n<pre>\n")
+ fmt.Fprintf(w, "<b>vcweb</b>\n\n")
+ fmt.Fprintf(w, "This server serves various version control repos for testing the go command.\n\n")
+ fmt.Fprintf(w, "For an overview of the script language, see <a href=\"/help\">/help</a>.\n\n")
+
+ fmt.Fprintf(w, "<b>cache</b>\n")
+
+ tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
+ err := filepath.WalkDir(s.scriptDir, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if filepath.Ext(path) != ".txt" {
+ return nil
+ }
+
+ rel, err := filepath.Rel(s.scriptDir, path)
+ if err != nil {
+ return err
+ }
+ hashTime := "(not loaded)"
+ status := ""
+ if ri, ok := s.scriptCache.Load(rel); ok {
+ r := ri.(*scriptResult)
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if !r.hashTime.IsZero() {
+ hashTime = r.hashTime.Format(time.RFC3339)
+ }
+ if r.err == nil {
+ status = "ok"
+ } else {
+ status = r.err.Error()
+ }
+ }
+ fmt.Fprintf(tw, "%s\t%s\t%s\n", rel, hashTime, status)
+ return nil
+ })
+ tw.Flush()
+
+ if err != nil {
+ fmt.Fprintln(w, err)
+ }
+}
+
+// help serves a plain-text summary of the server's supported script language.
+func (s *Server) help(w http.ResponseWriter, req *http.Request) {
+ st, err := s.newState(req.Context(), s.workDir)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ scriptLog := new(strings.Builder)
+ err = s.engine.Execute(st, "help", bufio.NewReader(strings.NewReader("help")), scriptLog)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain; charset=UTF-8")
+ io.WriteString(w, scriptLog.String())
+}
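
Editor's note: the HandleScript logic above regenerates a cached result only when the script's SHA-256 hash changes, temporarily upgrading from a read lock to a write lock and re-checking the hash after reacquiring the read lock. The standalone sketch below illustrates just that locking pattern; the cached type and names are hypothetical and not part of the patch.

package main

import (
	"crypto/sha256"
	"fmt"
	"sync"
)

// cached regenerates its value whenever the input content changes,
// while allowing many concurrent readers of an unchanged value.
type cached struct {
	mu    sync.RWMutex
	hash  [sha256.Size]byte
	value string
}

func (c *cached) get(content []byte, build func([]byte) string) string {
	c.mu.RLock()
	defer c.mu.RUnlock()
	for {
		h := sha256.Sum256(content)
		if prev := c.hash; prev != h {
			// Upgrade to a write lock to regenerate, then drop back to a read lock.
			c.mu.RUnlock()
			c.mu.Lock()
			if c.hash == prev {
				// Still stale and no other goroutine beat us to the update.
				c.hash, c.value = h, build(content)
			}
			c.mu.Unlock()
			c.mu.RLock()
		}
		if c.hash == h {
			return c.value // the deferred RUnlock releases the read lock
		}
		// Raced with a newer update from another goroutine; try again.
	}
}

func main() {
	var c cached
	fmt.Println(c.get([]byte("v1"), func(b []byte) string { return "built:" + string(b) }))
	fmt.Println(c.get([]byte("v1"), func(b []byte) string { return "built:" + string(b) }))
}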
diff --git a/src/cmd/go/internal/vcweb/vcweb_test.go b/src/cmd/go/internal/vcweb/vcweb_test.go
new file mode 100644
index 0000000..20b2137
--- /dev/null
+++ b/src/cmd/go/internal/vcweb/vcweb_test.go
@@ -0,0 +1,63 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcweb_test
+
+import (
+ "cmd/go/internal/vcweb"
+ "io"
+ "log"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+)
+
+func TestHelp(t *testing.T) {
+ s, err := vcweb.NewServer(os.DevNull, t.TempDir(), log.Default())
+ if err != nil {
+ t.Fatal(err)
+ }
+ srv := httptest.NewServer(s)
+ defer srv.Close()
+
+ resp, err := http.Get(srv.URL + "/help")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ t.Fatal(resp.Status)
+ }
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("%s", body)
+}
+
+func TestOverview(t *testing.T) {
+ s, err := vcweb.NewServer(os.DevNull, t.TempDir(), log.Default())
+ if err != nil {
+ t.Fatal(err)
+ }
+ srv := httptest.NewServer(s)
+ defer srv.Close()
+
+ resp, err := http.Get(srv.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ t.Fatal(resp.Status)
+ }
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("%s", body)
+}
diff --git a/src/cmd/go/internal/version/version.go b/src/cmd/go/internal/version/version.go
new file mode 100644
index 0000000..4a0132a
--- /dev/null
+++ b/src/cmd/go/internal/version/version.go
@@ -0,0 +1,173 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package version implements the “go version” command.
+package version
+
+import (
+ "context"
+ "debug/buildinfo"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/gover"
+)
+
+var CmdVersion = &base.Command{
+ UsageLine: "go version [-m] [-v] [file ...]",
+ Short: "print Go version",
+ Long: `Version prints the build information for Go binary files.
+
+Go version reports the Go version used to build each of the named files.
+
+If no files are named on the command line, go version prints its own
+version information.
+
+If a directory is named, go version walks that directory, recursively,
+looking for recognized Go binaries and reporting their versions.
+By default, go version does not report unrecognized files found
+during a directory scan. The -v flag causes it to report unrecognized files.
+
+The -m flag causes go version to print each file's embedded
+module version information, when available. In the output, the module
+information consists of multiple lines following the version line, each
+indented by a leading tab character.
+
+See also: go doc runtime/debug.BuildInfo.
+`,
+}
+
+func init() {
+ base.AddChdirFlag(&CmdVersion.Flag)
+ CmdVersion.Run = runVersion // break init cycle
+}
+
+var (
+ versionM = CmdVersion.Flag.Bool("m", false, "")
+ versionV = CmdVersion.Flag.Bool("v", false, "")
+)
+
+func runVersion(ctx context.Context, cmd *base.Command, args []string) {
+ if len(args) == 0 {
+ // If any of this command's flags were passed explicitly, error
+ // out, because they only make sense with arguments.
+ //
+ // Don't error if the flags came from GOFLAGS, since that can be
+ // a reasonable use case. For example, imagine GOFLAGS=-v to
+ // turn "verbose mode" on for all Go commands, which should not
+ // break "go version".
+ var argOnlyFlag string
+ if !base.InGOFLAGS("-m") && *versionM {
+ argOnlyFlag = "-m"
+ } else if !base.InGOFLAGS("-v") && *versionV {
+ argOnlyFlag = "-v"
+ }
+ if argOnlyFlag != "" {
+ fmt.Fprintf(os.Stderr, "go: 'go version' only accepts %s flag with arguments\n", argOnlyFlag)
+ base.SetExitStatus(2)
+ return
+ }
+ v := runtime.Version()
+ if gover.TestVersion != "" {
+ v = gover.TestVersion + " (TESTGO_VERSION)"
+ }
+ fmt.Printf("go version %s %s/%s\n", v, runtime.GOOS, runtime.GOARCH)
+ return
+ }
+
+ for _, arg := range args {
+ info, err := os.Stat(arg)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ base.SetExitStatus(1)
+ continue
+ }
+ if info.IsDir() {
+ scanDir(arg)
+ } else {
+ scanFile(arg, info, true)
+ }
+ }
+}
+
+// scanDir scans a directory for binaries to run scanFile on.
+func scanDir(dir string) {
+ filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+ if d.Type().IsRegular() || d.Type()&fs.ModeSymlink != 0 {
+ info, err := d.Info()
+ if err != nil {
+ if *versionV {
+ fmt.Fprintf(os.Stderr, "%s: %v\n", path, err)
+ }
+ return nil
+ }
+ scanFile(path, info, *versionV)
+ }
+ return nil
+ })
+}
+
+// isGoBinaryCandidate reports whether the file is a candidate to be a Go binary.
+func isGoBinaryCandidate(file string, info fs.FileInfo) bool {
+ if info.Mode().IsRegular() && info.Mode()&0111 != 0 {
+ return true
+ }
+ name := strings.ToLower(file)
+ switch filepath.Ext(name) {
+ case ".so", ".exe", ".dll":
+ return true
+ default:
+ return strings.Contains(name, ".so.")
+ }
+}
+
+// scanFile scans file to try to report the Go and module versions.
+// If mustPrint is true, scanFile will report any error reading file.
+// Otherwise (mustPrint is false, because scanFile is being called
+// by scanDir) scanFile prints nothing for non-Go binaries.
+func scanFile(file string, info fs.FileInfo, mustPrint bool) {
+ if info.Mode()&fs.ModeSymlink != 0 {
+ // Accept file symlinks only.
+ i, err := os.Stat(file)
+ if err != nil || !i.Mode().IsRegular() {
+ if mustPrint {
+ fmt.Fprintf(os.Stderr, "%s: symlink\n", file)
+ }
+ return
+ }
+ info = i
+ }
+
+ bi, err := buildinfo.ReadFile(file)
+ if err != nil {
+ if mustPrint {
+ if pathErr := (*os.PathError)(nil); errors.As(err, &pathErr) && filepath.Clean(pathErr.Path) == filepath.Clean(file) {
+ fmt.Fprintf(os.Stderr, "%v\n", file)
+ } else {
+
+ // Skip errors for non-Go binaries.
+ // buildinfo.ReadFile errors are not fine-grained enough
+ // to know if the file is a Go binary or not,
+ // so try to infer it from the file mode and extension.
+ if isGoBinaryCandidate(file, info) {
+ fmt.Fprintf(os.Stderr, "%s: %v\n", file, err)
+ }
+ }
+ }
+ return
+ }
+
+ fmt.Printf("%s: %s\n", file, bi.GoVersion)
+ bi.GoVersion = "" // suppress printing go version again
+ mod := bi.String()
+ if *versionM && len(mod) > 0 {
+ fmt.Printf("\t%s\n", strings.ReplaceAll(mod[:len(mod)-1], "\n", "\n\t"))
+ }
+}
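
Editor's note: the core of scanFile above is debug/buildinfo.ReadFile followed by reformatting of the embedded module information. A minimal standalone sketch of that read, assuming a hypothetical path to a Go-built executable:

package main

import (
	"debug/buildinfo"
	"fmt"
	"log"
	"strings"
)

func main() {
	const file = "./some-go-binary" // hypothetical path to a Go-built executable
	bi, err := buildinfo.ReadFile(file)
	if err != nil {
		log.Fatalf("%s: %v", file, err) // non-Go binaries and unreadable files land here
	}
	fmt.Printf("%s: %s\n", file, bi.GoVersion)

	// Rough equivalent of 'go version -m': print the module section indented by a tab.
	bi.GoVersion = ""
	if mod := bi.String(); len(mod) > 0 {
		fmt.Printf("\t%s\n", strings.ReplaceAll(strings.TrimSuffix(mod, "\n"), "\n", "\n\t"))
	}
}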
diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go
new file mode 100644
index 0000000..2d42097
--- /dev/null
+++ b/src/cmd/go/internal/vet/vet.go
@@ -0,0 +1,120 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vet implements the “go vet” command.
+package vet
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/trace"
+ "cmd/go/internal/work"
+)
+
+// Break init loop.
+func init() {
+ CmdVet.Run = runVet
+}
+
+var CmdVet = &base.Command{
+ CustomFlags: true,
+ UsageLine: "go vet [build flags] [-vettool prog] [vet flags] [packages]",
+ Short: "report likely mistakes in packages",
+ Long: `
+Vet runs the Go vet command on the packages named by the import paths.
+
+For more about vet and its flags, see 'go doc cmd/vet'.
+For more about specifying packages, see 'go help packages'.
+For a list of checkers and their flags, see 'go tool vet help'.
+For details of a specific checker such as 'printf', see 'go tool vet help printf'.
+
+The -vettool=prog flag selects a different analysis tool with alternative
+or additional checks.
+For example, the 'shadow' analyzer can be built and run using these commands:
+
+ go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest
+ go vet -vettool=$(which shadow)
+
+The build flags supported by go vet are those that control package resolution
+and execution, such as -C, -n, -x, -v, -tags, and -toolexec.
+For more about these flags, see 'go help build'.
+
+See also: go fmt, go fix.
+ `,
+}
+
+func runVet(ctx context.Context, cmd *base.Command, args []string) {
+ vetFlags, pkgArgs := vetFlags(args)
+ modload.InitWorkfile() // The vet command does custom flag processing; initialize workspaces after that.
+
+ if cfg.DebugTrace != "" {
+ var close func() error
+ var err error
+ ctx, close, err = trace.Start(ctx, cfg.DebugTrace)
+ if err != nil {
+ base.Fatalf("failed to start trace: %v", err)
+ }
+ defer func() {
+ if err := close(); err != nil {
+ base.Fatalf("failed to stop trace: %v", err)
+ }
+ }()
+ }
+
+ ctx, span := trace.StartSpan(ctx, fmt.Sprint("Running ", cmd.Name(), " command"))
+ defer span.Done()
+
+ work.BuildInit()
+ work.VetFlags = vetFlags
+ if len(vetFlags) > 0 {
+ work.VetExplicit = true
+ }
+ if vetTool != "" {
+ var err error
+ work.VetTool, err = filepath.Abs(vetTool)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ }
+
+ pkgOpts := load.PackageOpts{ModResolveTests: true}
+ pkgs := load.PackagesAndErrors(ctx, pkgOpts, pkgArgs)
+ load.CheckPackageErrors(pkgs)
+ if len(pkgs) == 0 {
+ base.Fatalf("no packages to vet")
+ }
+
+ b := work.NewBuilder("")
+ defer func() {
+ if err := b.Close(); err != nil {
+ base.Fatal(err)
+ }
+ }()
+
+ root := &work.Action{Mode: "go vet"}
+ for _, p := range pkgs {
+ _, ptest, pxtest, err := load.TestPackagesFor(ctx, pkgOpts, p, nil)
+ if err != nil {
+ base.Errorf("%v", err)
+ continue
+ }
+ if len(ptest.GoFiles) == 0 && len(ptest.CgoFiles) == 0 && pxtest == nil {
+ base.Errorf("go: can't vet %s: no Go files in %s", p.ImportPath, p.Dir)
+ continue
+ }
+ if len(ptest.GoFiles) > 0 || len(ptest.CgoFiles) > 0 {
+ root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, ptest))
+ }
+ if pxtest != nil {
+ root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, pxtest))
+ }
+ }
+ b.Do(ctx, root)
+}
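
Editor's note: as the command documentation above says, any tool speaking the vet command-line protocol can be supplied via -vettool; golang.org/x/tools/go/analysis/unitchecker (referenced in vetflag.go below) implements that protocol. A minimal sketch of such a tool, assuming the x/tools module is available in the build:

// A minimal custom vet tool suitable for use with 'go vet -vettool'.
package main

import (
	"golang.org/x/tools/go/analysis/passes/printf"
	"golang.org/x/tools/go/analysis/passes/shadow"
	"golang.org/x/tools/go/analysis/unitchecker"
)

func main() {
	// unitchecker.Main implements the vet command-line protocol,
	// including the -flags JSON description that 'go vet' queries.
	unitchecker.Main(printf.Analyzer, shadow.Analyzer)
}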
diff --git a/src/cmd/go/internal/vet/vetflag.go b/src/cmd/go/internal/vet/vetflag.go
new file mode 100644
index 0000000..eb7af65
--- /dev/null
+++ b/src/cmd/go/internal/vet/vetflag.go
@@ -0,0 +1,191 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vet
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cmdflag"
+ "cmd/go/internal/work"
+)
+
+// go vet flag processing
+//
+// We query the flags of the tool specified by -vettool and accept any
+// of those flags plus any flag valid for 'go build'. The tool must
+// support -flags, which prints a description of its flags in JSON to
+// stdout.
+
+// vetTool specifies the vet command to run.
+// Any tool that supports the (still unpublished) vet
+// command-line protocol may be supplied; see
+// golang.org/x/tools/go/analysis/unitchecker for one
+// implementation. It is also used by tests.
+//
+// The default behavior (vetTool=="") runs 'go tool vet'.
+var vetTool string // -vettool
+
+func init() {
+ work.AddBuildFlags(CmdVet, work.DefaultBuildFlags)
+ CmdVet.Flag.StringVar(&vetTool, "vettool", "", "")
+}
+
+func parseVettoolFlag(args []string) {
+ // Extract -vettool by ad hoc flag processing:
+ // its value is needed even before we can declare
+ // the flags available during main flag processing.
+ for i, arg := range args {
+ if arg == "-vettool" || arg == "--vettool" {
+ if i+1 >= len(args) {
+ log.Fatalf("%s requires a filename", arg)
+ }
+ vetTool = args[i+1]
+ return
+ } else if strings.HasPrefix(arg, "-vettool=") ||
+ strings.HasPrefix(arg, "--vettool=") {
+ vetTool = arg[strings.IndexByte(arg, '=')+1:]
+ return
+ }
+ }
+}
+
+// vetFlags processes the command line, splitting it at the first non-flag
+// into the list of flags and list of packages.
+func vetFlags(args []string) (passToVet, packageNames []string) {
+ parseVettoolFlag(args)
+
+ // Query the vet command for its flags.
+ var tool string
+ if vetTool == "" {
+ tool = base.Tool("vet")
+ } else {
+ var err error
+ tool, err = filepath.Abs(vetTool)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+ out := new(bytes.Buffer)
+ vetcmd := exec.Command(tool, "-flags")
+ vetcmd.Stdout = out
+ if err := vetcmd.Run(); err != nil {
+ fmt.Fprintf(os.Stderr, "go: can't execute %s -flags: %v\n", tool, err)
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ var analysisFlags []struct {
+ Name string
+ Bool bool
+ Usage string
+ }
+ if err := json.Unmarshal(out.Bytes(), &analysisFlags); err != nil {
+ fmt.Fprintf(os.Stderr, "go: can't unmarshal JSON from %s -flags: %v", tool, err)
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+
+ // Add vet's flags to CmdVet.Flag.
+ //
+ // Some flags, in particular -tags and -v, are known to vet but
+ // also defined as build flags. This works fine, so we omit duplicates here.
+ // However some, like -x, are known to the build but not to vet.
+ isVetFlag := make(map[string]bool, len(analysisFlags))
+ cf := CmdVet.Flag
+ for _, f := range analysisFlags {
+ isVetFlag[f.Name] = true
+ if cf.Lookup(f.Name) == nil {
+ if f.Bool {
+ cf.Bool(f.Name, false, "")
+ } else {
+ cf.String(f.Name, "", "")
+ }
+ }
+ }
+
+ // Record the set of vet tool flags set by GOFLAGS. We want to pass them to
+ // the vet tool, but only if they aren't overridden by an explicit argument.
+ base.SetFromGOFLAGS(&CmdVet.Flag)
+ addFromGOFLAGS := map[string]bool{}
+ CmdVet.Flag.Visit(func(f *flag.Flag) {
+ if isVetFlag[f.Name] {
+ addFromGOFLAGS[f.Name] = true
+ }
+ })
+
+ explicitFlags := make([]string, 0, len(args))
+ for len(args) > 0 {
+ f, remainingArgs, err := cmdflag.ParseOne(&CmdVet.Flag, args)
+
+ if errors.Is(err, flag.ErrHelp) {
+ exitWithUsage()
+ }
+
+ if errors.Is(err, cmdflag.ErrFlagTerminator) {
+ // All remaining args must be package names, but the flag terminator is
+ // not included.
+ packageNames = remainingArgs
+ break
+ }
+
+ if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) {
+ // Everything from here on out — including the argument we just consumed —
+ // must be a package name.
+ packageNames = args
+ break
+ }
+
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ exitWithUsage()
+ }
+
+ if isVetFlag[f.Name] {
+ // Forward the raw arguments rather than cleaned equivalents, just in
+ // case the vet tool parses them idiosyncratically.
+ explicitFlags = append(explicitFlags, args[:len(args)-len(remainingArgs)]...)
+
+ // This flag has been overridden explicitly, so don't forward its implicit
+ // value from GOFLAGS.
+ delete(addFromGOFLAGS, f.Name)
+ }
+
+ args = remainingArgs
+ }
+
+ // Prepend arguments from GOFLAGS before other arguments.
+ CmdVet.Flag.Visit(func(f *flag.Flag) {
+ if addFromGOFLAGS[f.Name] {
+ passToVet = append(passToVet, fmt.Sprintf("-%s=%s", f.Name, f.Value))
+ }
+ })
+ passToVet = append(passToVet, explicitFlags...)
+ return passToVet, packageNames
+}
+
+func exitWithUsage() {
+ fmt.Fprintf(os.Stderr, "usage: %s\n", CmdVet.UsageLine)
+ fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", CmdVet.LongName())
+
+ // This part is additional to what (*Command).Usage does:
+ cmd := "go tool vet"
+ if vetTool != "" {
+ cmd = vetTool
+ }
+ fmt.Fprintf(os.Stderr, "Run '%s help' for a full list of flags and analyzers.\n", cmd)
+ fmt.Fprintf(os.Stderr, "Run '%s -help' for an overview.\n", cmd)
+
+ base.SetExitStatus(2)
+ base.Exit()
+}
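
Editor's note: the handshake above relies on the vet tool printing a JSON array of flag descriptions on stdout when invoked with -flags. A hedged sketch of performing that query outside the go command; the tool path is hypothetical:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

// flagDesc mirrors the subset of fields that vetflag.go decodes.
type flagDesc struct {
	Name  string
	Bool  bool
	Usage string
}

func main() {
	tool := "/path/to/vettool" // hypothetical; any unitchecker-based binary works
	out := new(bytes.Buffer)
	cmd := exec.Command(tool, "-flags")
	cmd.Stdout = out
	if err := cmd.Run(); err != nil {
		log.Fatalf("%s -flags: %v", tool, err)
	}
	var flags []flagDesc
	if err := json.Unmarshal(out.Bytes(), &flags); err != nil {
		log.Fatalf("decoding %s -flags: %v", tool, err)
	}
	for _, f := range flags {
		fmt.Printf("-%s (bool=%v): %s\n", f.Name, f.Bool, f.Usage)
	}
}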
diff --git a/src/cmd/go/internal/web/api.go b/src/cmd/go/internal/web/api.go
new file mode 100644
index 0000000..7a6e0c3
--- /dev/null
+++ b/src/cmd/go/internal/web/api.go
@@ -0,0 +1,246 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package web defines minimal helper routines for accessing HTTP/HTTPS
+// resources without requiring external dependencies on the net package.
+//
+// If the cmd_go_bootstrap build tag is present, web avoids the use of the net
+// package and returns errors for all network operations.
+package web
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/fs"
+ "net/url"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// SecurityMode specifies whether a function should make network
+// calls using insecure transports (e.g., plain-text HTTP).
+// The zero value is "secure".
+type SecurityMode int
+
+const (
+ SecureOnly SecurityMode = iota // Reject plain HTTP; validate HTTPS.
+ DefaultSecurity // Allow plain HTTP if explicit; validate HTTPS.
+ Insecure // Allow plain HTTP if not explicitly HTTPS; skip HTTPS validation.
+)
+
+// An HTTPError describes an HTTP error response (non-200 result).
+type HTTPError struct {
+ URL string // redacted
+ Status string
+ StatusCode int
+ Err error // underlying error, if known
+ Detail string // limited to maxErrorDetailLines and maxErrorDetailBytes
+}
+
+const (
+ maxErrorDetailLines = 8
+ maxErrorDetailBytes = maxErrorDetailLines * 81
+)
+
+func (e *HTTPError) Error() string {
+ if e.Detail != "" {
+ detailSep := " "
+ if strings.ContainsRune(e.Detail, '\n') {
+ detailSep = "\n\t"
+ }
+ return fmt.Sprintf("reading %s: %v\n\tserver response:%s%s", e.URL, e.Status, detailSep, e.Detail)
+ }
+
+ if eErr := e.Err; eErr != nil {
+ if pErr, ok := e.Err.(*fs.PathError); ok {
+ if u, err := url.Parse(e.URL); err == nil {
+ if fp, err := urlToFilePath(u); err == nil && pErr.Path == fp {
+ // Remove the redundant copy of the path.
+ eErr = pErr.Err
+ }
+ }
+ }
+ return fmt.Sprintf("reading %s: %v", e.URL, eErr)
+ }
+
+ return fmt.Sprintf("reading %s: %v", e.URL, e.Status)
+}
+
+func (e *HTTPError) Is(target error) bool {
+ return target == fs.ErrNotExist && (e.StatusCode == 404 || e.StatusCode == 410)
+}
+
+func (e *HTTPError) Unwrap() error {
+ return e.Err
+}
+
+// GetBytes returns the body of the requested resource, or an error if the
+// response status was not http.StatusOK.
+//
+// GetBytes is a convenience wrapper around Get and Response.Err.
+func GetBytes(u *url.URL) ([]byte, error) {
+ resp, err := Get(DefaultSecurity, u)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if err := resp.Err(); err != nil {
+ return nil, err
+ }
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("reading %s: %v", u.Redacted(), err)
+ }
+ return b, nil
+}
+
+type Response struct {
+ URL string // redacted
+ Status string
+ StatusCode int
+ Header map[string][]string
+ Body io.ReadCloser // Either the original body or &errorDetail.
+
+ fileErr error
+ errorDetail errorDetailBuffer
+}
+
+// Err returns an *HTTPError corresponding to the response r.
+// If the response r has StatusCode 200 or 0 (unset), Err returns nil.
+// Otherwise, Err may read from r.Body in order to extract relevant error detail.
+func (r *Response) Err() error {
+ if r.StatusCode == 200 || r.StatusCode == 0 {
+ return nil
+ }
+
+ return &HTTPError{
+ URL: r.URL,
+ Status: r.Status,
+ StatusCode: r.StatusCode,
+ Err: r.fileErr,
+ Detail: r.formatErrorDetail(),
+ }
+}
+
+// formatErrorDetail converts r.errorDetail (a prefix of the output of r.Body)
+// into a short, tab-indented summary.
+func (r *Response) formatErrorDetail() string {
+ if r.Body != &r.errorDetail {
+ return "" // Error detail collection not enabled.
+ }
+
+ // Ensure that r.errorDetail has been populated.
+ _, _ = io.Copy(io.Discard, r.Body)
+
+ s := r.errorDetail.buf.String()
+ if !utf8.ValidString(s) {
+ return "" // Don't try to recover non-UTF-8 error messages.
+ }
+ for _, r := range s {
+ if !unicode.IsGraphic(r) && !unicode.IsSpace(r) {
+ return "" // Don't let the server do any funny business with the user's terminal.
+ }
+ }
+
+ var detail strings.Builder
+ for i, line := range strings.Split(s, "\n") {
+ if strings.TrimSpace(line) == "" {
+ break // Stop at the first blank line.
+ }
+ if i > 0 {
+ detail.WriteString("\n\t")
+ }
+ if i >= maxErrorDetailLines {
+ detail.WriteString("[Truncated: too many lines.]")
+ break
+ }
+ if detail.Len()+len(line) > maxErrorDetailBytes {
+ detail.WriteString("[Truncated: too long.]")
+ break
+ }
+ detail.WriteString(line)
+ }
+
+ return detail.String()
+}
+
+// Get returns the body of the HTTP or HTTPS resource specified at the given URL.
+//
+// If the URL does not include an explicit scheme, Get first tries "https".
+// If the server does not respond under that scheme and the security mode is
+// Insecure, Get then tries "http".
+// The URL included in the response indicates which scheme was actually used,
+// and it is a redacted URL suitable for use in error messages.
+//
+// For the "https" scheme only, credentials are attached using the
+// cmd/go/internal/auth package. If the URL itself includes a username and
+// password, it will not be attempted under the "http" scheme unless the
+// security mode is Insecure.
+//
+// Get returns a non-nil error only if the request did not receive a response
+// under any applicable scheme. (A non-2xx response does not cause an error.)
+func Get(security SecurityMode, u *url.URL) (*Response, error) {
+ return get(security, u)
+}
+
+// OpenBrowser attempts to open the requested URL in a web browser.
+func OpenBrowser(url string) (opened bool) {
+ return openBrowser(url)
+}
+
+// Join returns the result of adding the slash-separated
+// path elements to the end of u's path.
+func Join(u *url.URL, path string) *url.URL {
+ j := *u
+ if path == "" {
+ return &j
+ }
+ j.Path = strings.TrimSuffix(u.Path, "/") + "/" + strings.TrimPrefix(path, "/")
+ j.RawPath = strings.TrimSuffix(u.RawPath, "/") + "/" + strings.TrimPrefix(path, "/")
+ return &j
+}
+
+// An errorDetailBuffer is an io.ReadCloser that copies up to
+// maxErrorDetailLines lines into a buffer for later inspection.
+type errorDetailBuffer struct {
+ r io.ReadCloser
+ buf strings.Builder
+ bufLines int
+}
+
+func (b *errorDetailBuffer) Close() error {
+ return b.r.Close()
+}
+
+func (b *errorDetailBuffer) Read(p []byte) (n int, err error) {
+ n, err = b.r.Read(p)
+
+ // Copy the first maxErrorDetailLines+1 lines into b.buf,
+ // discarding any further lines.
+ //
+ // Note that the read may begin or end in the middle of a UTF-8 character,
+ // so don't try to do anything fancy with characters that encode to larger
+ // than one byte.
+ if b.bufLines <= maxErrorDetailLines {
+ for _, line := range bytes.SplitAfterN(p[:n], []byte("\n"), maxErrorDetailLines-b.bufLines) {
+ b.buf.Write(line)
+ if len(line) > 0 && line[len(line)-1] == '\n' {
+ b.bufLines++
+ if b.bufLines > maxErrorDetailLines {
+ break
+ }
+ }
+ }
+ }
+
+ return n, err
+}
+
+// IsLocalHost reports whether the given URL refers to a local
+// (loopback) host, such as "localhost" or "127.0.0.1:8080".
+func IsLocalHost(u *url.URL) bool {
+ return isLocalHost(u)
+}
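
Editor's note: the Err and formatErrorDetail logic above folds a short excerpt of a plain-text error body into the error message. The standalone sketch below captures the same idea using only the standard library; the helper name and URL are illustrative and not part of this package:

package main

import (
	"fmt"
	"io"
	"mime"
	"net/http"
	"strings"
)

const (
	maxDetailLines = 8
	maxDetailBytes = maxDetailLines * 81
)

// responseError mirrors the idea behind Response.Err: for a non-200 response
// whose body claims to be plain text, fold a short, tab-indented excerpt of
// the body into the returned error.
func responseError(url string, res *http.Response) error {
	if res.StatusCode == http.StatusOK {
		return nil
	}
	detail := ""
	if mt, _, _ := mime.ParseMediaType(res.Header.Get("Content-Type")); mt == "text/plain" {
		b, _ := io.ReadAll(io.LimitReader(res.Body, maxDetailBytes))
		lines := strings.Split(string(b), "\n")
		if len(lines) > maxDetailLines {
			lines = lines[:maxDetailLines]
		}
		detail = strings.Join(lines, "\n\t")
	}
	if detail != "" {
		return fmt.Errorf("reading %s: %v\n\tserver response: %s", url, res.Status, detail)
	}
	return fmt.Errorf("reading %s: %v", url, res.Status)
}

func main() {
	res, err := http.Get("https://proxy.golang.org/no/such/module/@v/list") // illustrative URL
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()
	if err := responseError(res.Request.URL.String(), res); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("OK")
}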
diff --git a/src/cmd/go/internal/web/bootstrap.go b/src/cmd/go/internal/web/bootstrap.go
new file mode 100644
index 0000000..6312169
--- /dev/null
+++ b/src/cmd/go/internal/web/bootstrap.go
@@ -0,0 +1,25 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cmd_go_bootstrap
+
+// This code is compiled only into the bootstrap 'go' binary.
+// These stubs avoid importing packages with large dependency
+// trees that potentially require C linking,
+// like the use of "net/http" in vcs.go.
+
+package web
+
+import (
+ "errors"
+ urlpkg "net/url"
+)
+
+func get(security SecurityMode, url *urlpkg.URL) (*Response, error) {
+ return nil, errors.New("no http in bootstrap go command")
+}
+
+func openBrowser(url string) bool { return false }
+
+func isLocalHost(u *urlpkg.URL) bool { return false }
diff --git a/src/cmd/go/internal/web/file_test.go b/src/cmd/go/internal/web/file_test.go
new file mode 100644
index 0000000..3734df5
--- /dev/null
+++ b/src/cmd/go/internal/web/file_test.go
@@ -0,0 +1,60 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package web
+
+import (
+ "errors"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestGetFileURL(t *testing.T) {
+ const content = "Hello, file!\n"
+
+ f, err := os.CreateTemp("", "web-TestGetFileURL")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(f.Name())
+
+ if _, err := f.WriteString(content); err != nil {
+ t.Error(err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ u, err := urlFromFilePath(f.Name())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b, err := GetBytes(u)
+ if err != nil {
+ t.Fatalf("GetBytes(%v) = _, %v", u, err)
+ }
+ if string(b) != content {
+ t.Fatalf("after writing %q to %s, GetBytes(%v) read %q", content, f.Name(), u, b)
+ }
+}
+
+func TestGetNonexistentFile(t *testing.T) {
+ path, err := filepath.Abs("nonexistent")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ u, err := urlFromFilePath(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b, err := GetBytes(u)
+ if !errors.Is(err, fs.ErrNotExist) {
+ t.Fatalf("GetBytes(%v) = %q, %v; want _, fs.ErrNotExist", u, b, err)
+ }
+}
diff --git a/src/cmd/go/internal/web/http.go b/src/cmd/go/internal/web/http.go
new file mode 100644
index 0000000..4fc939a
--- /dev/null
+++ b/src/cmd/go/internal/web/http.go
@@ -0,0 +1,395 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cmd_go_bootstrap
+
+// This code is compiled into the real 'go' binary, but it is not
+// compiled into the binary that is built during all.bash, so as
+// to avoid needing to build net (and thus use cgo) during the
+// bootstrap process.
+
+package web
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "mime"
+ "net"
+ "net/http"
+ urlpkg "net/url"
+ "os"
+ "strings"
+ "time"
+
+ "cmd/go/internal/auth"
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/internal/browser"
+)
+
+// impatientInsecureHTTPClient is used with GOINSECURE,
+// when we're connecting to https servers that might not be there
+// or might be using self-signed certificates.
+var impatientInsecureHTTPClient = &http.Client{
+ CheckRedirect: checkRedirect,
+ Timeout: 5 * time.Second,
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: true,
+ },
+ },
+}
+
+var securityPreservingDefaultClient = securityPreservingHTTPClient(http.DefaultClient)
+
+// securityPreservingHTTPClient returns a client that is like the original
+// but rejects redirects to plain-HTTP URLs if the original URL was secure.
+func securityPreservingHTTPClient(original *http.Client) *http.Client {
+ c := new(http.Client)
+ *c = *original
+ c.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ if len(via) > 0 && via[0].URL.Scheme == "https" && req.URL.Scheme != "https" {
+ lastHop := via[len(via)-1].URL
+ return fmt.Errorf("redirected from secure URL %s to insecure URL %s", lastHop, req.URL)
+ }
+ return checkRedirect(req, via)
+ }
+ return c
+}
+
+func checkRedirect(req *http.Request, via []*http.Request) error {
+ // Go's http.DefaultClient allows 10 redirects before returning an error.
+ // Mimic that behavior here.
+ if len(via) >= 10 {
+ return errors.New("stopped after 10 redirects")
+ }
+
+ interceptRequest(req)
+ return nil
+}
+
+type Interceptor struct {
+ Scheme string
+ FromHost string
+ ToHost string
+ Client *http.Client
+}
+
+func EnableTestHooks(interceptors []Interceptor) error {
+ if enableTestHooks {
+ return errors.New("web: test hooks already enabled")
+ }
+
+ for _, t := range interceptors {
+ if t.FromHost == "" {
+ panic("EnableTestHooks: missing FromHost")
+ }
+ if t.ToHost == "" {
+ panic("EnableTestHooks: missing ToHost")
+ }
+ }
+
+ testInterceptors = interceptors
+ enableTestHooks = true
+ return nil
+}
+
+func DisableTestHooks() {
+ if !enableTestHooks {
+ panic("web: test hooks not enabled")
+ }
+ enableTestHooks = false
+ testInterceptors = nil
+}
+
+var (
+ enableTestHooks = false
+ testInterceptors []Interceptor
+)
+
+func interceptURL(u *urlpkg.URL) (*Interceptor, bool) {
+ if !enableTestHooks {
+ return nil, false
+ }
+ for i, t := range testInterceptors {
+ if u.Host == t.FromHost && (u.Scheme == "" || u.Scheme == t.Scheme) {
+ return &testInterceptors[i], true
+ }
+ }
+ return nil, false
+}
+
+func interceptRequest(req *http.Request) {
+ if t, ok := interceptURL(req.URL); ok {
+ req.Host = req.URL.Host
+ req.URL.Host = t.ToHost
+ }
+}
+
+func get(security SecurityMode, url *urlpkg.URL) (*Response, error) {
+ start := time.Now()
+
+ if url.Scheme == "file" {
+ return getFile(url)
+ }
+
+ if enableTestHooks {
+ switch url.Host {
+ case "proxy.golang.org":
+ if os.Getenv("TESTGOPROXY404") == "1" {
+ res := &Response{
+ URL: url.Redacted(),
+ Status: "404 testing",
+ StatusCode: 404,
+ Header: make(map[string][]string),
+ Body: http.NoBody,
+ }
+ if cfg.BuildX {
+ fmt.Fprintf(os.Stderr, "# get %s: %v (%.3fs)\n", url.Redacted(), res.Status, time.Since(start).Seconds())
+ }
+ return res, nil
+ }
+
+ case "localhost.localdev":
+ return nil, fmt.Errorf("no such host localhost.localdev")
+
+ default:
+ if os.Getenv("TESTGONETWORK") == "panic" {
+ if _, ok := interceptURL(url); !ok {
+ host := url.Host
+ if h, _, err := net.SplitHostPort(url.Host); err == nil && h != "" {
+ host = h
+ }
+ addr := net.ParseIP(host)
+ if addr == nil || (!addr.IsLoopback() && !addr.IsUnspecified()) {
+ panic("use of network: " + url.String())
+ }
+ }
+ }
+ }
+ }
+
+ fetch := func(url *urlpkg.URL) (*urlpkg.URL, *http.Response, error) {
+ // Note: The -v build flag does not mean "print logging information",
+ // despite its historical misuse for this in GOPATH-based go get.
+ // We print extra logging in -x mode instead, which traces what
+ // commands are executed.
+ if cfg.BuildX {
+ fmt.Fprintf(os.Stderr, "# get %s\n", url.Redacted())
+ }
+
+ req, err := http.NewRequest("GET", url.String(), nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ if url.Scheme == "https" {
+ auth.AddCredentials(req)
+ }
+ t, intercepted := interceptURL(req.URL)
+ if intercepted {
+ req.Host = req.URL.Host
+ req.URL.Host = t.ToHost
+ }
+
+ release, err := base.AcquireNet()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var res *http.Response
+ if security == Insecure && url.Scheme == "https" { // fail earlier
+ res, err = impatientInsecureHTTPClient.Do(req)
+ } else {
+ if intercepted && t.Client != nil {
+ client := securityPreservingHTTPClient(t.Client)
+ res, err = client.Do(req)
+ } else {
+ res, err = securityPreservingDefaultClient.Do(req)
+ }
+ }
+
+ if err != nil {
+ // Per the docs for [net/http.Client.Do], “On error, any Response can be
+ // ignored. A non-nil Response with a non-nil error only occurs when
+ // CheckRedirect fails, and even then the returned Response.Body is
+ // already closed.”
+ release()
+ return nil, nil, err
+ }
+
+ // “If the returned error is nil, the Response will contain a non-nil Body
+ // which the user is expected to close.”
+ body := res.Body
+ res.Body = hookCloser{
+ ReadCloser: body,
+ afterClose: release,
+ }
+ return url, res, err
+ }
+
+ var (
+ fetched *urlpkg.URL
+ res *http.Response
+ err error
+ )
+ if url.Scheme == "" || url.Scheme == "https" {
+ secure := new(urlpkg.URL)
+ *secure = *url
+ secure.Scheme = "https"
+
+ fetched, res, err = fetch(secure)
+ if err != nil {
+ if cfg.BuildX {
+ fmt.Fprintf(os.Stderr, "# get %s: %v\n", secure.Redacted(), err)
+ }
+ if security != Insecure || url.Scheme == "https" {
+ // HTTPS failed, and we can't fall back to plain HTTP.
+ // Report the error from the HTTPS attempt.
+ return nil, err
+ }
+ }
+ }
+
+ if res == nil {
+ switch url.Scheme {
+ case "http":
+ if security == SecureOnly {
+ if cfg.BuildX {
+ fmt.Fprintf(os.Stderr, "# get %s: insecure\n", url.Redacted())
+ }
+ return nil, fmt.Errorf("insecure URL: %s", url.Redacted())
+ }
+ case "":
+ if security != Insecure {
+ panic("should have returned after HTTPS failure")
+ }
+ default:
+ if cfg.BuildX {
+ fmt.Fprintf(os.Stderr, "# get %s: unsupported\n", url.Redacted())
+ }
+ return nil, fmt.Errorf("unsupported scheme: %s", url.Redacted())
+ }
+
+ insecure := new(urlpkg.URL)
+ *insecure = *url
+ insecure.Scheme = "http"
+ if insecure.User != nil && security != Insecure {
+ if cfg.BuildX {
+ fmt.Fprintf(os.Stderr, "# get %s: insecure credentials\n", insecure.Redacted())
+ }
+ return nil, fmt.Errorf("refusing to pass credentials to insecure URL: %s", insecure.Redacted())
+ }
+
+ fetched, res, err = fetch(insecure)
+ if err != nil {
+ if cfg.BuildX {
+ fmt.Fprintf(os.Stderr, "# get %s: %v\n", insecure.Redacted(), err)
+ }
+ // HTTP failed, and we already tried HTTPS if applicable.
+ // Report the error from the HTTP attempt.
+ return nil, err
+ }
+ }
+
+ // Note: accepting a non-200 OK here, so people can serve a
+ // meta import in their http 404 page.
+ if cfg.BuildX {
+ fmt.Fprintf(os.Stderr, "# get %s: %v (%.3fs)\n", fetched.Redacted(), res.Status, time.Since(start).Seconds())
+ }
+
+ r := &Response{
+ URL: fetched.Redacted(),
+ Status: res.Status,
+ StatusCode: res.StatusCode,
+ Header: map[string][]string(res.Header),
+ Body: res.Body,
+ }
+
+ if res.StatusCode != http.StatusOK {
+ contentType := res.Header.Get("Content-Type")
+ if mediaType, params, _ := mime.ParseMediaType(contentType); mediaType == "text/plain" {
+ switch charset := strings.ToLower(params["charset"]); charset {
+ case "us-ascii", "utf-8", "":
+ // Body claims to be plain text in UTF-8 or a subset thereof.
+ // Try to extract a useful error message from it.
+ r.errorDetail.r = res.Body
+ r.Body = &r.errorDetail
+ }
+ }
+ }
+
+ return r, nil
+}
+
+func getFile(u *urlpkg.URL) (*Response, error) {
+ path, err := urlToFilePath(u)
+ if err != nil {
+ return nil, err
+ }
+ f, err := os.Open(path)
+
+ if os.IsNotExist(err) {
+ return &Response{
+ URL: u.Redacted(),
+ Status: http.StatusText(http.StatusNotFound),
+ StatusCode: http.StatusNotFound,
+ Body: http.NoBody,
+ fileErr: err,
+ }, nil
+ }
+
+ if os.IsPermission(err) {
+ return &Response{
+ URL: u.Redacted(),
+ Status: http.StatusText(http.StatusForbidden),
+ StatusCode: http.StatusForbidden,
+ Body: http.NoBody,
+ fileErr: err,
+ }, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &Response{
+ URL: u.Redacted(),
+ Status: http.StatusText(http.StatusOK),
+ StatusCode: http.StatusOK,
+ Body: f,
+ }, nil
+}
+
+func openBrowser(url string) bool { return browser.Open(url) }
+
+func isLocalHost(u *urlpkg.URL) bool {
+ // VCSTestRepoURL itself is secure, and it may redirect requests to other
+ // ports (such as a port serving the "svn" protocol) which should also be
+ // considered secure.
+ host, _, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ host = u.Host
+ }
+ if host == "localhost" {
+ return true
+ }
+ if ip := net.ParseIP(host); ip != nil && ip.IsLoopback() {
+ return true
+ }
+ return false
+}
+
+type hookCloser struct {
+ io.ReadCloser
+ afterClose func()
+}
+
+func (c hookCloser) Close() error {
+ err := c.ReadCloser.Close()
+ c.afterClose()
+ return err
+}
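
Editor's note: the redirect policy in securityPreservingHTTPClient can be reproduced with the standard library alone. A minimal sketch, with an illustrative URL:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// newSecurityPreservingClient returns a client that follows redirects like
// http.DefaultClient but refuses any hop from an https URL to a plain-http one.
func newSecurityPreservingClient() *http.Client {
	return &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if len(via) >= 10 {
				return errors.New("stopped after 10 redirects")
			}
			if len(via) > 0 && via[0].URL.Scheme == "https" && req.URL.Scheme != "https" {
				lastHop := via[len(via)-1].URL
				return fmt.Errorf("redirected from secure URL %s to insecure URL %s", lastHop, req.URL)
			}
			return nil
		},
	}
}

func main() {
	c := newSecurityPreservingClient()
	// Illustrative: any https URL that redirects to plain http would fail here.
	resp, err := c.Get("https://golang.org/")
	if err != nil {
		fmt.Println(err)
		return
	}
	resp.Body.Close()
	fmt.Println(resp.Status)
}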
diff --git a/src/cmd/go/internal/web/url.go b/src/cmd/go/internal/web/url.go
new file mode 100644
index 0000000..146c51f
--- /dev/null
+++ b/src/cmd/go/internal/web/url.go
@@ -0,0 +1,95 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package web
+
+import (
+ "errors"
+ "net/url"
+ "path/filepath"
+ "strings"
+)
+
+// TODO(golang.org/issue/32456): If accepted, move these functions into the
+// net/url package.
+
+var errNotAbsolute = errors.New("path is not absolute")
+
+func urlToFilePath(u *url.URL) (string, error) {
+ if u.Scheme != "file" {
+ return "", errors.New("non-file URL")
+ }
+
+ checkAbs := func(path string) (string, error) {
+ if !filepath.IsAbs(path) {
+ return "", errNotAbsolute
+ }
+ return path, nil
+ }
+
+ if u.Path == "" {
+ if u.Host != "" || u.Opaque == "" {
+ return "", errors.New("file URL missing path")
+ }
+ return checkAbs(filepath.FromSlash(u.Opaque))
+ }
+
+ path, err := convertFileURLPath(u.Host, u.Path)
+ if err != nil {
+ return path, err
+ }
+ return checkAbs(path)
+}
+
+func urlFromFilePath(path string) (*url.URL, error) {
+ if !filepath.IsAbs(path) {
+ return nil, errNotAbsolute
+ }
+
+ // If path has a Windows volume name, convert the volume to a host and prefix
+ // per https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/.
+ if vol := filepath.VolumeName(path); vol != "" {
+ if strings.HasPrefix(vol, `\\`) {
+ path = filepath.ToSlash(path[2:])
+ i := strings.IndexByte(path, '/')
+
+ if i < 0 {
+ // A degenerate case.
+ // \\host.example.com (without a share name)
+ // becomes
+ // file://host.example.com/
+ return &url.URL{
+ Scheme: "file",
+ Host: path,
+ Path: "/",
+ }, nil
+ }
+
+ // \\host.example.com\Share\path\to\file
+ // becomes
+ // file://host.example.com/Share/path/to/file
+ return &url.URL{
+ Scheme: "file",
+ Host: path[:i],
+ Path: filepath.ToSlash(path[i:]),
+ }, nil
+ }
+
+ // C:\path\to\file
+ // becomes
+ // file:///C:/path/to/file
+ return &url.URL{
+ Scheme: "file",
+ Path: "/" + filepath.ToSlash(path),
+ }, nil
+ }
+
+ // /path/to/file
+ // becomes
+ // file:///path/to/file
+ return &url.URL{
+ Scheme: "file",
+ Path: filepath.ToSlash(path),
+ }, nil
+}
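
Editor's note: urlFromFilePath above handles Windows volumes and UNC shares in addition to plain absolute paths. The sketch below covers only the simple absolute-path case, with a hypothetical helper name and path; the volume and UNC branches shown above (and convertFileURLPath in url_windows.go below) are intentionally omitted.

package main

import (
	"fmt"
	"net/url"
	"path/filepath"
)

// fileURL sketches the non-Windows branch of urlFromFilePath:
// an absolute path /path/to/file becomes file:///path/to/file.
func fileURL(path string) (*url.URL, error) {
	if !filepath.IsAbs(path) {
		return nil, fmt.Errorf("path is not absolute: %q", path)
	}
	return &url.URL{Scheme: "file", Path: filepath.ToSlash(path)}, nil
}

func main() {
	u, err := fileURL("/tmp/example.txt") // illustrative path
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // file:///tmp/example.txt
}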
diff --git a/src/cmd/go/internal/web/url_other.go b/src/cmd/go/internal/web/url_other.go
new file mode 100644
index 0000000..84bbd72
--- /dev/null
+++ b/src/cmd/go/internal/web/url_other.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package web
+
+import (
+ "errors"
+ "path/filepath"
+)
+
+func convertFileURLPath(host, path string) (string, error) {
+ switch host {
+ case "", "localhost":
+ default:
+ return "", errors.New("file URL specifies non-local host")
+ }
+ return filepath.FromSlash(path), nil
+}
diff --git a/src/cmd/go/internal/web/url_other_test.go b/src/cmd/go/internal/web/url_other_test.go
new file mode 100644
index 0000000..5c197de
--- /dev/null
+++ b/src/cmd/go/internal/web/url_other_test.go
@@ -0,0 +1,36 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package web
+
+var urlTests = []struct {
+ url string
+ filePath string
+ canonicalURL string // If empty, assume equal to url.
+ wantErr string
+}{
+ // Examples from RFC 8089:
+ {
+ url: `file:///path/to/file`,
+ filePath: `/path/to/file`,
+ },
+ {
+ url: `file:/path/to/file`,
+ filePath: `/path/to/file`,
+ canonicalURL: `file:///path/to/file`,
+ },
+ {
+ url: `file://localhost/path/to/file`,
+ filePath: `/path/to/file`,
+ canonicalURL: `file:///path/to/file`,
+ },
+
+ // We reject non-local files.
+ {
+ url: `file://host.example.com/path/to/file`,
+ wantErr: "file URL specifies non-local host",
+ },
+}
diff --git a/src/cmd/go/internal/web/url_test.go b/src/cmd/go/internal/web/url_test.go
new file mode 100644
index 0000000..8f462f5
--- /dev/null
+++ b/src/cmd/go/internal/web/url_test.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package web
+
+import (
+ "net/url"
+ "testing"
+)
+
+func TestURLToFilePath(t *testing.T) {
+ for _, tc := range urlTests {
+ if tc.url == "" {
+ continue
+ }
+ tc := tc
+
+ t.Run(tc.url, func(t *testing.T) {
+ u, err := url.Parse(tc.url)
+ if err != nil {
+ t.Fatalf("url.Parse(%q): %v", tc.url, err)
+ }
+
+ path, err := urlToFilePath(u)
+ if err != nil {
+ if err.Error() == tc.wantErr {
+ return
+ }
+ if tc.wantErr == "" {
+ t.Fatalf("urlToFilePath(%v): %v; want <nil>", u, err)
+ } else {
+ t.Fatalf("urlToFilePath(%v): %v; want %s", u, err, tc.wantErr)
+ }
+ }
+
+ if path != tc.filePath || tc.wantErr != "" {
+ t.Fatalf("urlToFilePath(%v) = %q, <nil>; want %q, %s", u, path, tc.filePath, tc.wantErr)
+ }
+ })
+ }
+}
+
+func TestURLFromFilePath(t *testing.T) {
+ for _, tc := range urlTests {
+ if tc.filePath == "" {
+ continue
+ }
+ tc := tc
+
+ t.Run(tc.filePath, func(t *testing.T) {
+ u, err := urlFromFilePath(tc.filePath)
+ if err != nil {
+ if err.Error() == tc.wantErr {
+ return
+ }
+ if tc.wantErr == "" {
+ t.Fatalf("urlFromFilePath(%v): %v; want <nil>", tc.filePath, err)
+ } else {
+ t.Fatalf("urlFromFilePath(%v): %v; want %s", tc.filePath, err, tc.wantErr)
+ }
+ }
+
+ if tc.wantErr != "" {
+ t.Fatalf("urlFromFilePath(%v) = <nil>; want error: %s", tc.filePath, tc.wantErr)
+ }
+
+ wantURL := tc.url
+ if tc.canonicalURL != "" {
+ wantURL = tc.canonicalURL
+ }
+ if u.String() != wantURL {
+ t.Errorf("urlFromFilePath(%v) = %v; want %s", tc.filePath, u, wantURL)
+ }
+ })
+ }
+}
diff --git a/src/cmd/go/internal/web/url_windows.go b/src/cmd/go/internal/web/url_windows.go
new file mode 100644
index 0000000..2a65ec8
--- /dev/null
+++ b/src/cmd/go/internal/web/url_windows.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package web
+
+import (
+ "errors"
+ "path/filepath"
+ "strings"
+)
+
+func convertFileURLPath(host, path string) (string, error) {
+ if len(path) == 0 || path[0] != '/' {
+ return "", errNotAbsolute
+ }
+
+ path = filepath.FromSlash(path)
+
+ // We interpret Windows file URLs per the description in
+ // https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/.
+
+ // The host part of a file URL (if any) is the UNC volume name,
+ // but RFC 8089 reserves the authority "localhost" for the local machine.
+ if host != "" && host != "localhost" {
+ // A common "legacy" format omits the leading slash before a drive letter,
+ // encoding the drive letter as the host instead of part of the path.
+ // (See https://blogs.msdn.microsoft.com/freeassociations/2005/05/19/the-bizarre-and-unhappy-story-of-file-urls/.)
+ // We do not support that format, but we should at least emit a more
+ // helpful error message for it.
+ if filepath.VolumeName(host) != "" {
+ return "", errors.New("file URL encodes volume in host field: too few slashes?")
+ }
+ return `\\` + host + path, nil
+ }
+
+ // If host is empty, path must contain an initial slash followed by a
+ // drive letter and path. Remove the slash and verify that the path is valid.
+ if vol := filepath.VolumeName(path[1:]); vol == "" || strings.HasPrefix(vol, `\\`) {
+ return "", errors.New("file URL missing drive letter")
+ }
+ return path[1:], nil
+}
diff --git a/src/cmd/go/internal/web/url_windows_test.go b/src/cmd/go/internal/web/url_windows_test.go
new file mode 100644
index 0000000..06386a0
--- /dev/null
+++ b/src/cmd/go/internal/web/url_windows_test.go
@@ -0,0 +1,94 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package web
+
+var urlTests = []struct {
+ url string
+ filePath string
+ canonicalURL string // If empty, assume equal to url.
+ wantErr string
+}{
+ // Examples from https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/:
+
+ {
+ url: `file://laptop/My%20Documents/FileSchemeURIs.doc`,
+ filePath: `\\laptop\My Documents\FileSchemeURIs.doc`,
+ },
+ {
+ url: `file:///C:/Documents%20and%20Settings/davris/FileSchemeURIs.doc`,
+ filePath: `C:\Documents and Settings\davris\FileSchemeURIs.doc`,
+ },
+ {
+ url: `file:///D:/Program%20Files/Viewer/startup.htm`,
+ filePath: `D:\Program Files\Viewer\startup.htm`,
+ },
+ {
+ url: `file:///C:/Program%20Files/Music/Web%20Sys/main.html?REQUEST=RADIO`,
+ filePath: `C:\Program Files\Music\Web Sys\main.html`,
+ canonicalURL: `file:///C:/Program%20Files/Music/Web%20Sys/main.html`,
+ },
+ {
+ url: `file://applib/products/a-b/abc_9/4148.920a/media/start.swf`,
+ filePath: `\\applib\products\a-b\abc_9\4148.920a\media\start.swf`,
+ },
+ {
+ url: `file:////applib/products/a%2Db/abc%5F9/4148.920a/media/start.swf`,
+ wantErr: "file URL missing drive letter",
+ },
+ {
+ url: `C:\Program Files\Music\Web Sys\main.html?REQUEST=RADIO`,
+ wantErr: "non-file URL",
+ },
+
+ // The example "file://D:\Program Files\Viewer\startup.htm" errors out in
+ // url.Parse, so we substitute a slash-based path for testing instead.
+ {
+ url: `file://D:/Program Files/Viewer/startup.htm`,
+ wantErr: "file URL encodes volume in host field: too few slashes?",
+ },
+
+ // The blog post discourages the use of non-ASCII characters because they
+ // depend on the user's current codepage. However, when we are working with Go
+ // strings we assume UTF-8 encoding, and our url package refuses to encode
+ // URLs to non-ASCII strings.
+ {
+ url: `file:///C:/exampleㄓ.txt`,
+ filePath: `C:\exampleㄓ.txt`,
+ canonicalURL: `file:///C:/example%E3%84%93.txt`,
+ },
+ {
+ url: `file:///C:/example%E3%84%93.txt`,
+ filePath: `C:\exampleㄓ.txt`,
+ },
+
+ // Examples from RFC 8089:
+
+ // We allow the drive-letter variation from section E.2, because it is
+ // simpler to support than not to. However, we do not generate the shorter
+ // form in the reverse direction.
+ {
+ url: `file:c:/path/to/file`,
+ filePath: `c:\path\to\file`,
+ canonicalURL: `file:///c:/path/to/file`,
+ },
+
+ // We encode the UNC share name as the authority following section E.3.1,
+ // because that is what the Microsoft blog post explicitly recommends.
+ {
+ url: `file://host.example.com/Share/path/to/file.txt`,
+ filePath: `\\host.example.com\Share\path\to\file.txt`,
+ },
+
+ // We decline the four- and five-slash variations from section E.3.2.
+ // The paths in these URLs would change meaning under path.Clean.
+ {
+ url: `file:////host.example.com/path/to/file`,
+ wantErr: "file URL missing drive letter",
+ },
+ {
+ url: `file://///host.example.com/path/to/file`,
+ wantErr: "file URL missing drive letter",
+ },
+}
diff --git a/src/cmd/go/internal/work/action.go b/src/cmd/go/internal/work/action.go
new file mode 100644
index 0000000..d4d0a71
--- /dev/null
+++ b/src/cmd/go/internal/work/action.go
@@ -0,0 +1,917 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Action graph creation (planning).
+
+package work
+
+import (
+ "bufio"
+ "bytes"
+ "container/heap"
+ "context"
+ "debug/elf"
+ "encoding/json"
+ "fmt"
+ "internal/platform"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cache"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+ "cmd/go/internal/robustio"
+ "cmd/go/internal/str"
+ "cmd/go/internal/trace"
+ "cmd/internal/buildid"
+)
+
+// A Builder holds global state about a build.
+// It does not hold per-package state, because we
+// build packages in parallel, and the builder is shared.
+type Builder struct {
+ WorkDir string // the temporary work directory (ends in filepath.Separator)
+ actionCache map[cacheKey]*Action // a cache of already-constructed actions
+ mkdirCache map[string]bool // a cache of created directories
+ flagCache map[[2]string]bool // a cache of supported compiler flags
+ gccCompilerIDCache map[string]cache.ActionID // cache for gccCompilerID
+ Print func(args ...any) (int, error)
+
+ IsCmdList bool // running as part of go list; set p.Stale and additional fields below
+ NeedError bool // list needs p.Error
+ NeedExport bool // list needs p.Export
+ NeedCompiledGoFiles bool // list needs p.CompiledGoFiles
+ AllowErrors bool // errors don't immediately exit the program
+
+ objdirSeq int // counter for NewObjdir
+ pkgSeq int
+
+ output sync.Mutex
+ scriptDir string // current directory in printed script
+
+ exec sync.Mutex
+ readySema chan bool
+ ready actionQueue
+
+ id sync.Mutex
+ toolIDCache map[string]string // tool name -> tool ID
+ buildIDCache map[string]string // file name -> build ID
+}
+
+// NOTE: Much of Action would not need to be exported if not for test.
+// Maybe test functionality should move into this package too?
+
+// An Actor runs an action.
+type Actor interface {
+ Act(*Builder, context.Context, *Action) error
+}
+
+// An ActorFunc is an Actor that calls the function.
+type ActorFunc func(*Builder, context.Context, *Action) error
+
+func (f ActorFunc) Act(b *Builder, ctx context.Context, a *Action) error {
+ return f(b, ctx, a)
+}
+
+// An Action represents a single action in the action graph.
+type Action struct {
+ Mode string // description of action operation
+ Package *load.Package // the package this action works on
+ Deps []*Action // actions that must happen before this one
+ Actor Actor // the action itself (nil = no-op)
+ IgnoreFail bool // whether to run f even if dependencies fail
+ TestOutput *bytes.Buffer // test output buffer
+ Args []string // additional args for runProgram
+
+ triggers []*Action // inverse of deps
+
+ buggyInstall bool // is this a buggy install (see -linkshared)?
+
+ TryCache func(*Builder, *Action) bool // callback for cache bypass
+
+ // Generated files, directories.
+ Objdir string // directory for intermediate objects
+ Target string // goal of the action: the created package or executable
+ built string // the actual created package or executable
+ actionID cache.ActionID // cache ID of action input
+ buildID string // build ID of action output
+
+ VetxOnly bool // Mode=="vet": only being called to supply info about dependencies
+ needVet bool // Mode=="build": need to fill in vet config
+ needBuild bool // Mode=="build": need to do actual build (can be false if needVet is true)
+ vetCfg *vetConfig // vet config
+ output []byte // output redirect buffer (nil means use b.Print)
+
+ // Execution state.
+ pending int // number of deps yet to complete
+ priority int // relative execution priority
+ Failed bool // whether the action failed
+ json *actionJSON // action graph information
+ nonGoOverlay map[string]string // map from non-.go source files to copied files in objdir. Nil if no overlay is used.
+ traceSpan *trace.Span
+}
+
+// BuildActionID returns the action ID section of a's build ID.
+func (a *Action) BuildActionID() string { return actionID(a.buildID) }
+
+// BuildContentID returns the content ID section of a's build ID.
+func (a *Action) BuildContentID() string { return contentID(a.buildID) }
+
+// BuildID returns a's build ID.
+func (a *Action) BuildID() string { return a.buildID }
+
+// BuiltTarget returns the actual file that was built. This differs
+// from Target when the result was cached.
+func (a *Action) BuiltTarget() string { return a.built }
+
+// An actionQueue is a priority queue of actions.
+type actionQueue []*Action
+
+// Implement heap.Interface
+func (q *actionQueue) Len() int { return len(*q) }
+func (q *actionQueue) Swap(i, j int) { (*q)[i], (*q)[j] = (*q)[j], (*q)[i] }
+func (q *actionQueue) Less(i, j int) bool { return (*q)[i].priority < (*q)[j].priority }
+func (q *actionQueue) Push(x any) { *q = append(*q, x.(*Action)) }
+func (q *actionQueue) Pop() any {
+ n := len(*q) - 1
+ x := (*q)[n]
+ *q = (*q)[:n]
+ return x
+}
+
+func (q *actionQueue) push(a *Action) {
+ if a.json != nil {
+ a.json.TimeReady = time.Now()
+ }
+ heap.Push(q, a)
+}
+
+func (q *actionQueue) pop() *Action {
+ return heap.Pop(q).(*Action)
+}
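
Editor's note: actionQueue above is a minimal container/heap priority queue keyed on priority. A standalone sketch of the same pattern with hypothetical task names, not part of the original file:

package main

import (
	"container/heap"
	"fmt"
)

type task struct {
	name     string
	priority int
}

// taskQueue mirrors actionQueue: a min-heap ordered by priority,
// so the task with the lowest priority number is popped first.
type taskQueue []*task

func (q taskQueue) Len() int           { return len(q) }
func (q taskQueue) Less(i, j int) bool { return q[i].priority < q[j].priority }
func (q taskQueue) Swap(i, j int)      { q[i], q[j] = q[j], q[i] }
func (q *taskQueue) Push(x any)        { *q = append(*q, x.(*task)) }
func (q *taskQueue) Pop() any {
	old := *q
	n := len(old) - 1
	x := old[n]
	*q = old[:n]
	return x
}

func main() {
	q := &taskQueue{}
	heap.Init(q)
	heap.Push(q, &task{"link", 2})
	heap.Push(q, &task{"compile", 1})
	heap.Push(q, &task{"vet", 3})
	for q.Len() > 0 {
		fmt.Println(heap.Pop(q).(*task).name) // compile, link, vet
	}
}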
+
+type actionJSON struct {
+ ID int
+ Mode string
+ Package string
+ Deps []int `json:",omitempty"`
+ IgnoreFail bool `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Link bool `json:",omitempty"`
+ Objdir string `json:",omitempty"`
+ Target string `json:",omitempty"`
+ Priority int `json:",omitempty"`
+ Failed bool `json:",omitempty"`
+ Built string `json:",omitempty"`
+ VetxOnly bool `json:",omitempty"`
+ NeedVet bool `json:",omitempty"`
+ NeedBuild bool `json:",omitempty"`
+ ActionID string `json:",omitempty"`
+ BuildID string `json:",omitempty"`
+ TimeReady time.Time `json:",omitempty"`
+ TimeStart time.Time `json:",omitempty"`
+ TimeDone time.Time `json:",omitempty"`
+
+ Cmd []string // `json:",omitempty"`
+ CmdReal time.Duration `json:",omitempty"`
+ CmdUser time.Duration `json:",omitempty"`
+ CmdSys time.Duration `json:",omitempty"`
+}
+
+// cacheKey is the key for the action cache.
+type cacheKey struct {
+ mode string
+ p *load.Package
+}
+
+func actionGraphJSON(a *Action) string {
+ var workq []*Action
+ var inWorkq = make(map[*Action]int)
+
+ add := func(a *Action) {
+ if _, ok := inWorkq[a]; ok {
+ return
+ }
+ inWorkq[a] = len(workq)
+ workq = append(workq, a)
+ }
+ add(a)
+
+ for i := 0; i < len(workq); i++ {
+ for _, dep := range workq[i].Deps {
+ add(dep)
+ }
+ }
+
+ var list []*actionJSON
+ for id, a := range workq {
+ if a.json == nil {
+ a.json = &actionJSON{
+ Mode: a.Mode,
+ ID: id,
+ IgnoreFail: a.IgnoreFail,
+ Args: a.Args,
+ Objdir: a.Objdir,
+ Target: a.Target,
+ Failed: a.Failed,
+ Priority: a.priority,
+ Built: a.built,
+ VetxOnly: a.VetxOnly,
+ NeedBuild: a.needBuild,
+ NeedVet: a.needVet,
+ }
+ if a.Package != nil {
+ // TODO(rsc): Make this a unique key for a.Package somehow.
+ a.json.Package = a.Package.ImportPath
+ }
+ for _, a1 := range a.Deps {
+ a.json.Deps = append(a.json.Deps, inWorkq[a1])
+ }
+ }
+ list = append(list, a.json)
+ }
+
+ js, err := json.MarshalIndent(list, "", "\t")
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "go: writing debug action graph: %v\n", err)
+ return ""
+ }
+ return string(js)
+}
+
+// BuildMode specifies the build mode:
+// are we just building things or also installing the results?
+type BuildMode int
+
+const (
+ ModeBuild BuildMode = iota
+ ModeInstall
+ ModeBuggyInstall
+
+ ModeVetOnly = 1 << 8
+)
+
+// NewBuilder returns a new Builder ready for use.
+//
+// If workDir is the empty string, NewBuilder creates a WorkDir if needed
+// and arranges for it to be removed in case of an unclean exit.
+// The caller must Close the builder explicitly to clean up the WorkDir
+// before a clean exit.
+func NewBuilder(workDir string) *Builder {
+ b := new(Builder)
+
+ b.Print = func(a ...any) (int, error) {
+ return fmt.Fprint(os.Stderr, a...)
+ }
+ b.actionCache = make(map[cacheKey]*Action)
+ b.mkdirCache = make(map[string]bool)
+ b.toolIDCache = make(map[string]string)
+ b.buildIDCache = make(map[string]string)
+
+ if workDir != "" {
+ b.WorkDir = workDir
+ } else if cfg.BuildN {
+ b.WorkDir = "$WORK"
+ } else {
+ if !buildInitStarted {
+ panic("internal error: NewBuilder called before BuildInit")
+ }
+ tmp, err := os.MkdirTemp(cfg.Getenv("GOTMPDIR"), "go-build")
+ if err != nil {
+ base.Fatalf("go: creating work dir: %v", err)
+ }
+ if !filepath.IsAbs(tmp) {
+ abs, err := filepath.Abs(tmp)
+ if err != nil {
+ os.RemoveAll(tmp)
+ base.Fatalf("go: creating work dir: %v", err)
+ }
+ tmp = abs
+ }
+ b.WorkDir = tmp
+ builderWorkDirs.Store(b, b.WorkDir)
+ if cfg.BuildX || cfg.BuildWork {
+ fmt.Fprintf(os.Stderr, "WORK=%s\n", b.WorkDir)
+ }
+ }
+
+ if err := CheckGOOSARCHPair(cfg.Goos, cfg.Goarch); err != nil {
+ fmt.Fprintf(os.Stderr, "go: %v\n", err)
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+
+ for _, tag := range cfg.BuildContext.BuildTags {
+ if strings.Contains(tag, ",") {
+ fmt.Fprintf(os.Stderr, "go: -tags space-separated list contains comma\n")
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ }
+
+ return b
+}
+
+var builderWorkDirs sync.Map // *Builder → WorkDir
+
+func (b *Builder) Close() error {
+ wd, ok := builderWorkDirs.Load(b)
+ if !ok {
+ return nil
+ }
+ defer builderWorkDirs.Delete(b)
+
+ if b.WorkDir != wd.(string) {
+ base.Errorf("go: internal error: Builder WorkDir unexpectedly changed from %s to %s", wd, b.WorkDir)
+ }
+
+ if !cfg.BuildWork {
+ if err := robustio.RemoveAll(b.WorkDir); err != nil {
+ return err
+ }
+ }
+ b.WorkDir = ""
+ return nil
+}
+
+func closeBuilders() {
+ leakedBuilders := 0
+ builderWorkDirs.Range(func(bi, _ any) bool {
+ leakedBuilders++
+ if err := bi.(*Builder).Close(); err != nil {
+ base.Error(err)
+ }
+ return true
+ })
+
+ if leakedBuilders > 0 && base.GetExitStatus() == 0 {
+ fmt.Fprintf(os.Stderr, "go: internal error: Builder leaked on successful exit\n")
+ base.SetExitStatus(1)
+ }
+}
+
+func CheckGOOSARCHPair(goos, goarch string) error {
+ if !platform.BuildModeSupported(cfg.BuildContext.Compiler, "default", goos, goarch) {
+ return fmt.Errorf("unsupported GOOS/GOARCH pair %s/%s", goos, goarch)
+ }
+ return nil
+}
+
+// NewObjdir returns the name of a fresh object directory under b.WorkDir.
+// It is up to the caller to call b.Mkdir on the result at an appropriate time.
+// The result ends in a slash, so that file names in that directory
+// can be constructed with direct string addition.
+//
+// NewObjdir must be called only from a single goroutine at a time,
+// so it is safe to call during action graph construction, but it must not
+// be called during action graph execution.
+func (b *Builder) NewObjdir() string {
+ b.objdirSeq++
+ return str.WithFilePathSeparator(filepath.Join(b.WorkDir, fmt.Sprintf("b%03d", b.objdirSeq)))
+}
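+
+// Illustrative use (the file name below is a hypothetical example): because the
+// result ends in a path separator, object file names can be built by plain
+// string concatenation.
+//
+//	objdir := b.NewObjdir()       // e.g. $WORK/b001/
+//	archive := objdir + "_pkg_.a" // hypothetical name; no filepath.Join needed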
+
+// readpkglist returns the list of packages that were built into the shared library
+// at shlibpath. For the native toolchain this list is stored, newline separated, in
+// an ELF note with name "Go\x00\x00" and type 1. For GCCGO it is extracted from the
+// .go_export section.
+func readpkglist(shlibpath string) (pkgs []*load.Package) {
+ var stk load.ImportStack
+ if cfg.BuildToolchainName == "gccgo" {
+ f, err := elf.Open(shlibpath)
+ if err != nil {
+ base.Fatal(fmt.Errorf("failed to open shared library: %v", err))
+ }
+ sect := f.Section(".go_export")
+ if sect == nil {
+ base.Fatal(fmt.Errorf("%s: missing .go_export section", shlibpath))
+ }
+ data, err := sect.Data()
+ if err != nil {
+ base.Fatal(fmt.Errorf("%s: failed to read .go_export section: %v", shlibpath, err))
+ }
+ pkgpath := []byte("pkgpath ")
+ for _, line := range bytes.Split(data, []byte{'\n'}) {
+ if path, found := bytes.CutPrefix(line, pkgpath); found {
+ path = bytes.TrimSuffix(path, []byte{';'})
+ pkgs = append(pkgs, load.LoadPackageWithFlags(string(path), base.Cwd(), &stk, nil, 0))
+ }
+ }
+ } else {
+ pkglistbytes, err := buildid.ReadELFNote(shlibpath, "Go\x00\x00", 1)
+ if err != nil {
+ base.Fatalf("readELFNote failed: %v", err)
+ }
+ scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes))
+ for scanner.Scan() {
+ t := scanner.Text()
+ pkgs = append(pkgs, load.LoadPackageWithFlags(t, base.Cwd(), &stk, nil, 0))
+ }
+ }
+ return
+}
+
+// cacheAction looks up {mode, p} in the cache and returns the resulting action.
+// If the cache has no such action, f() is recorded and returned.
+// TODO(rsc): Change the second key from *load.Package to interface{},
+// to make the caching in linkShared less awkward?
+func (b *Builder) cacheAction(mode string, p *load.Package, f func() *Action) *Action {
+ a := b.actionCache[cacheKey{mode, p}]
+ if a == nil {
+ a = f()
+ b.actionCache[cacheKey{mode, p}] = a
+ }
+ return a
+}
+
+// AutoAction returns the "right" action for go build or go install of p.
+func (b *Builder) AutoAction(mode, depMode BuildMode, p *load.Package) *Action {
+ if p.Name == "main" {
+ return b.LinkAction(mode, depMode, p)
+ }
+ return b.CompileAction(mode, depMode, p)
+}
+
+// CompileAction returns the action for compiling and possibly installing
+// (according to mode) the given package. The resulting action is only
+// for building packages (archives), never for linking executables.
+// depMode is the action (build or install) to use when building dependencies.
+// To turn package main into an executable, call b.Link instead.
+func (b *Builder) CompileAction(mode, depMode BuildMode, p *load.Package) *Action {
+ vetOnly := mode&ModeVetOnly != 0
+ mode &^= ModeVetOnly
+
+ if mode != ModeBuild && p.Target == "" {
+ // No permanent target.
+ mode = ModeBuild
+ }
+ if mode != ModeBuild && p.Name == "main" {
+ // We never install the .a file for a main package.
+ mode = ModeBuild
+ }
+
+ // Construct package build action.
+ a := b.cacheAction("build", p, func() *Action {
+ a := &Action{
+ Mode: "build",
+ Package: p,
+ Actor: ActorFunc((*Builder).build),
+ Objdir: b.NewObjdir(),
+ }
+
+ if p.Error == nil || !p.Error.IsImportCycle {
+ for _, p1 := range p.Internal.Imports {
+ a.Deps = append(a.Deps, b.CompileAction(depMode, depMode, p1))
+ }
+ }
+
+ if p.Standard {
+ switch p.ImportPath {
+ case "builtin", "unsafe":
+ // Fake packages - nothing to build.
+ a.Mode = "built-in package"
+ a.Actor = nil
+ return a
+ }
+
+ // gccgo standard library is "fake" too.
+ if cfg.BuildToolchainName == "gccgo" {
+ // the target name is needed for cgo.
+ a.Mode = "gccgo stdlib"
+ a.Target = p.Target
+ a.Actor = nil
+ return a
+ }
+ }
+
+ return a
+ })
+
+ // Find the build action; the cache entry may have been replaced
+ // by the install action during (*Builder).installAction.
+ buildAction := a
+ switch buildAction.Mode {
+ case "build", "built-in package", "gccgo stdlib":
+ // ok
+ case "build-install":
+ buildAction = a.Deps[0]
+ default:
+ panic("lost build action: " + buildAction.Mode)
+ }
+ buildAction.needBuild = buildAction.needBuild || !vetOnly
+
+ // Construct install action.
+ if mode == ModeInstall || mode == ModeBuggyInstall {
+ a = b.installAction(a, mode)
+ }
+
+ return a
+}
+
+// VetAction returns the action for running go vet on package p.
+// It depends on the action for compiling p.
+// If the caller may be causing p to be installed, it is up to the caller
+// to make sure that the install depends on (runs after) vet.
+func (b *Builder) VetAction(mode, depMode BuildMode, p *load.Package) *Action {
+ a := b.vetAction(mode, depMode, p)
+ a.VetxOnly = false
+ return a
+}
+
+func (b *Builder) vetAction(mode, depMode BuildMode, p *load.Package) *Action {
+ // Construct vet action.
+ a := b.cacheAction("vet", p, func() *Action {
+ a1 := b.CompileAction(mode|ModeVetOnly, depMode, p)
+
+ // vet expects to be able to import "fmt".
+ var stk load.ImportStack
+ stk.Push("vet")
+ p1, err := load.LoadImportWithFlags("fmt", p.Dir, p, &stk, nil, 0)
+ if err != nil {
+ base.Fatalf("unexpected error loading fmt package from package %s: %v", p.ImportPath, err)
+ }
+ stk.Pop()
+ aFmt := b.CompileAction(ModeBuild, depMode, p1)
+
+ var deps []*Action
+ if a1.buggyInstall {
+ // (*Builder).vet expects deps[0] to be the package
+ // and deps[1] to be "fmt". If we see buggyInstall
+ // here then a1 is an install of a shared library,
+ // and the real package is a1.Deps[0].
+ deps = []*Action{a1.Deps[0], aFmt, a1}
+ } else {
+ deps = []*Action{a1, aFmt}
+ }
+ for _, p1 := range p.Internal.Imports {
+ deps = append(deps, b.vetAction(mode, depMode, p1))
+ }
+
+ a := &Action{
+ Mode: "vet",
+ Package: p,
+ Deps: deps,
+ Objdir: a1.Objdir,
+ VetxOnly: true,
+ IgnoreFail: true, // it's OK if vet of dependencies "fails" (reports problems)
+ }
+ if a1.Actor == nil {
+ // Built-in packages like unsafe.
+ return a
+ }
+ deps[0].needVet = true
+ a.Actor = ActorFunc((*Builder).vet)
+ return a
+ })
+ return a
+}
+
+// LinkAction returns the action for linking p into an executable
+// and possibly installing the result (according to mode).
+// depMode is the action (build or install) to use when compiling dependencies.
+func (b *Builder) LinkAction(mode, depMode BuildMode, p *load.Package) *Action {
+ // Construct link action.
+ a := b.cacheAction("link", p, func() *Action {
+ a := &Action{
+ Mode: "link",
+ Package: p,
+ }
+
+ a1 := b.CompileAction(ModeBuild, depMode, p)
+ a.Actor = ActorFunc((*Builder).link)
+ a.Deps = []*Action{a1}
+ a.Objdir = a1.Objdir
+
+ // An executable file. (This is the name of a temporary file.)
+ // Because we run the temporary file in 'go run' and 'go test',
+ // the name will show up in ps listings. If the caller has specified
+ // a name, use that instead of a.out. The binary is generated
+ // in an otherwise empty subdirectory named exe to avoid
+ // naming conflicts. The only possible conflict is if we were
+ // to create a top-level package named exe.
+ name := "a.out"
+ if p.Internal.ExeName != "" {
+ name = p.Internal.ExeName
+ } else if (cfg.Goos == "darwin" || cfg.Goos == "windows") && cfg.BuildBuildmode == "c-shared" && p.Target != "" {
+ // On OS X, the linker output name gets recorded in the
+ // shared library's LC_ID_DYLIB load command.
+ // The code invoking the linker knows to pass only the final
+ // path element. Arrange that the path element matches what
+ // we'll install it as; otherwise the library is only loadable as "a.out".
+ // On Windows, DLL file name is recorded in PE file
+ // export section, so do the same as on OS X.
+ _, name = filepath.Split(p.Target)
+ }
+ a.Target = a.Objdir + filepath.Join("exe", name) + cfg.ExeSuffix
+ a.built = a.Target
+ b.addTransitiveLinkDeps(a, a1, "")
+
+ // Sequence the build of the main package (a1) strictly after the build
+ // of all other dependencies that go into the link. It is likely to be after
+ // them anyway, but just make sure. This is required by the build ID-based
+ // shortcut in (*Builder).useCache(a1), which will call b.linkActionID(a).
+ // In order for that linkActionID call to compute the right action ID, all the
+ // dependencies of a (except a1) must have completed building and have
+ // recorded their build IDs.
+ a1.Deps = append(a1.Deps, &Action{Mode: "nop", Deps: a.Deps[1:]})
+ return a
+ })
+
+ if mode == ModeInstall || mode == ModeBuggyInstall {
+ a = b.installAction(a, mode)
+ }
+
+ return a
+}
+
+// installAction returns the action for installing the result of a1.
+func (b *Builder) installAction(a1 *Action, mode BuildMode) *Action {
+ // Because we overwrite the build action with the install action below,
+ // a1 may already be an install action fetched from the "build" cache key,
+ // and the caller just doesn't realize.
+ if strings.HasSuffix(a1.Mode, "-install") {
+ if a1.buggyInstall && mode == ModeInstall {
+ // Congratulations! The buggy install is now a proper install.
+ a1.buggyInstall = false
+ }
+ return a1
+ }
+
+ // If there's no actual action to build a1,
+ // there's nothing to install either.
+ // This happens if a1 corresponds to reusing an already-built object.
+ if a1.Actor == nil {
+ return a1
+ }
+
+ p := a1.Package
+ return b.cacheAction(a1.Mode+"-install", p, func() *Action {
+ // The install deletes the temporary build result,
+ // so we need all other actions, both past and future,
+ // that attempt to depend on the build to depend instead
+ // on the install.
+
+ // Make a private copy of a1 (the build action),
+ // no longer accessible to any other rules.
+ buildAction := new(Action)
+ *buildAction = *a1
+
+ // Overwrite a1 with the install action.
+ // This takes care of updating past actions that
+ // point at a1 for the build action; now they will
+ // point at a1 and get the install action.
+ // We also leave a1 in the action cache as the result
+ // for "build", so that actions not yet created that
+ // try to depend on the build will instead depend
+ // on the install.
+ *a1 = Action{
+ Mode: buildAction.Mode + "-install",
+ Actor: ActorFunc(BuildInstallFunc),
+ Package: p,
+ Objdir: buildAction.Objdir,
+ Deps: []*Action{buildAction},
+ Target: p.Target,
+ built: p.Target,
+
+ buggyInstall: mode == ModeBuggyInstall,
+ }
+
+ b.addInstallHeaderAction(a1)
+ return a1
+ })
+}
+
+// addTransitiveLinkDeps adds to the link action a all packages
+// that are transitive dependencies of a1.Deps.
+// That is, if a is a link of package main, a1 is the compile of package main
+// and a1.Deps is the actions for building packages directly imported by
+// package main (what the compiler needs). The linker needs all packages
+// transitively imported by the whole program; addTransitiveLinkDeps
+// makes sure those are present in a.Deps.
+// If shlib is non-empty, then a corresponds to the build and installation of shlib,
+// so any rebuild of shlib should not be added as a dependency.
+func (b *Builder) addTransitiveLinkDeps(a, a1 *Action, shlib string) {
+ // Expand Deps to include all built packages, for the linker.
+ // Use breadth-first search to find rebuilt-for-test packages
+ // before the standard ones.
+ // TODO(rsc): Eliminate the standard ones from the action graph,
+ // which will require doing a little bit more rebuilding.
+ workq := []*Action{a1}
+ haveDep := map[string]bool{}
+ if a1.Package != nil {
+ haveDep[a1.Package.ImportPath] = true
+ }
+ for i := 0; i < len(workq); i++ {
+ a1 := workq[i]
+ for _, a2 := range a1.Deps {
+ // TODO(rsc): Find a better discriminator than the Mode strings, once the dust settles.
+ if a2.Package == nil || (a2.Mode != "build-install" && a2.Mode != "build") || haveDep[a2.Package.ImportPath] {
+ continue
+ }
+ haveDep[a2.Package.ImportPath] = true
+ a.Deps = append(a.Deps, a2)
+ if a2.Mode == "build-install" {
+ a2 = a2.Deps[0] // walk children of "build" action
+ }
+ workq = append(workq, a2)
+ }
+ }
+
+ // If this is go build -linkshared, then the link depends on the shared libraries
+ // in addition to the packages themselves. (The compile steps do not.)
+ if cfg.BuildLinkshared {
+ haveShlib := map[string]bool{shlib: true}
+ for _, a1 := range a.Deps {
+ p1 := a1.Package
+ if p1 == nil || p1.Shlib == "" || haveShlib[filepath.Base(p1.Shlib)] {
+ continue
+ }
+ haveShlib[filepath.Base(p1.Shlib)] = true
+ // TODO(rsc): The use of ModeInstall here is suspect, but if we only do ModeBuild,
+ // we'll end up building an overall library or executable that depends at runtime
+ // on other libraries that are out-of-date, which is clearly not good either.
+ // We call it ModeBuggyInstall to make clear that this is not right.
+ a.Deps = append(a.Deps, b.linkSharedAction(ModeBuggyInstall, ModeBuggyInstall, p1.Shlib, nil))
+ }
+ }
+}
+
+// addInstallHeaderAction adds an install header action to a, if needed.
+// The action a should be an install action as generated by either
+// b.CompileAction or b.LinkAction with mode=ModeInstall,
+// and so a.Deps[0] is the corresponding build action.
+func (b *Builder) addInstallHeaderAction(a *Action) {
+ // Install header for cgo in c-archive and c-shared modes.
+ p := a.Package
+ if p.UsesCgo() && (cfg.BuildBuildmode == "c-archive" || cfg.BuildBuildmode == "c-shared") {
+ hdrTarget := a.Target[:len(a.Target)-len(filepath.Ext(a.Target))] + ".h"
+ if cfg.BuildContext.Compiler == "gccgo" && cfg.BuildO == "" {
+ // For the header file, remove the "lib"
+ // added by go/build, so we generate pkg.h
+ // rather than libpkg.h.
+ dir, file := filepath.Split(hdrTarget)
+ file = strings.TrimPrefix(file, "lib")
+ hdrTarget = filepath.Join(dir, file)
+ }
+ ah := &Action{
+ Mode: "install header",
+ Package: a.Package,
+ Deps: []*Action{a.Deps[0]},
+ Actor: ActorFunc((*Builder).installHeader),
+ Objdir: a.Deps[0].Objdir,
+ Target: hdrTarget,
+ }
+ a.Deps = append(a.Deps, ah)
+ }
+}
+
+// buildmodeShared turns the "go build" action a1 into the building of a shared library of a1.Deps.
+// That is, the input a1 represents "go build pkgs" and the result represents "go build -buildmode=shared pkgs".
+func (b *Builder) buildmodeShared(mode, depMode BuildMode, args []string, pkgs []*load.Package, a1 *Action) *Action {
+ name, err := libname(args, pkgs)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ return b.linkSharedAction(mode, depMode, name, a1)
+}
+
+// linkSharedAction takes a grouping action a1 corresponding to a list of built packages
+// and returns an action that links them together into a shared library with the name shlib.
+// If a1 is nil, shlib should be an absolute path to an existing shared library,
+// and then linkSharedAction reads that library to find out the package list.
+func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Action) *Action {
+ fullShlib := shlib
+ shlib = filepath.Base(shlib)
+ a := b.cacheAction("build-shlib "+shlib, nil, func() *Action {
+ if a1 == nil {
+ // TODO(rsc): Need to find some other place to store config,
+ // not in pkg directory. See golang.org/issue/22196.
+ pkgs := readpkglist(fullShlib)
+ a1 = &Action{
+ Mode: "shlib packages",
+ }
+ for _, p := range pkgs {
+ a1.Deps = append(a1.Deps, b.CompileAction(mode, depMode, p))
+ }
+ }
+
+ // Fake package to hold ldflags.
+ // As usual shared libraries are a kludgy, abstraction-violating special case:
+ // we let them use the flags specified for the command-line arguments.
+ p := &load.Package{}
+ p.Internal.CmdlinePkg = true
+ p.Internal.Ldflags = load.BuildLdflags.For(p)
+ p.Internal.Gccgoflags = load.BuildGccgoflags.For(p)
+
+ // Add implicit dependencies to pkgs list.
+ // Currently buildmode=shared forces external linking mode, and
+ // external linking mode forces an import of runtime/cgo (and
+ // math on arm). So if it was not passed on the command line and
+ // it is not present in another shared library, add it here.
+ // TODO(rsc): Maybe this should only happen if "runtime" is in the original package set.
+ // TODO(rsc): This should probably be changed to use load.LinkerDeps(p).
+ // TODO(rsc): We don't add standard library imports for gccgo
+ // because they are all always linked in anyhow.
+ // Maybe load.LinkerDeps should be used and updated.
+ a := &Action{
+ Mode: "go build -buildmode=shared",
+ Package: p,
+ Objdir: b.NewObjdir(),
+ Actor: ActorFunc((*Builder).linkShared),
+ Deps: []*Action{a1},
+ }
+ a.Target = filepath.Join(a.Objdir, shlib)
+ if cfg.BuildToolchainName != "gccgo" {
+ add := func(a1 *Action, pkg string, force bool) {
+ for _, a2 := range a1.Deps {
+ if a2.Package != nil && a2.Package.ImportPath == pkg {
+ return
+ }
+ }
+ var stk load.ImportStack
+ p := load.LoadPackageWithFlags(pkg, base.Cwd(), &stk, nil, 0)
+ if p.Error != nil {
+ base.Fatalf("load %s: %v", pkg, p.Error)
+ }
+ // Assume that if pkg (runtime/cgo or math)
+ // is already accounted for in a different shared library,
+ // then that shared library also contains runtime,
+ // so that anything we do will depend on that library,
+ // so we don't need to include pkg in our shared library.
+ if force || p.Shlib == "" || filepath.Base(p.Shlib) == pkg {
+ a1.Deps = append(a1.Deps, b.CompileAction(depMode, depMode, p))
+ }
+ }
+ add(a1, "runtime/cgo", false)
+ if cfg.Goarch == "arm" {
+ add(a1, "math", false)
+ }
+
+ // The linker step still needs all the usual linker deps.
+ // (For example, the linker always opens runtime.a.)
+ for _, dep := range load.LinkerDeps(nil) {
+ add(a, dep, true)
+ }
+ }
+ b.addTransitiveLinkDeps(a, a1, shlib)
+ return a
+ })
+
+ // Install result.
+ if (mode == ModeInstall || mode == ModeBuggyInstall) && a.Actor != nil {
+ buildAction := a
+
+ a = b.cacheAction("install-shlib "+shlib, nil, func() *Action {
+ // Determine the eventual install target.
+ // The install target is root/pkg/shlib, where root is the source root
+ // in which all the packages lie.
+ // TODO(rsc): Perhaps this cross-root check should apply to the full
+ // transitive package dependency list, not just the ones named
+ // on the command line?
+ pkgDir := a1.Deps[0].Package.Internal.Build.PkgTargetRoot
+ for _, a2 := range a1.Deps {
+ if dir := a2.Package.Internal.Build.PkgTargetRoot; dir != pkgDir {
+ base.Fatalf("installing shared library: cannot use packages %s and %s from different roots %s and %s",
+ a1.Deps[0].Package.ImportPath,
+ a2.Package.ImportPath,
+ pkgDir,
+ dir)
+ }
+ }
+ // TODO(rsc): Find out and explain here why gccgo is different.
+ if cfg.BuildToolchainName == "gccgo" {
+ pkgDir = filepath.Join(pkgDir, "shlibs")
+ }
+ target := filepath.Join(pkgDir, shlib)
+
+ a := &Action{
+ Mode: "go install -buildmode=shared",
+ Objdir: buildAction.Objdir,
+ Actor: ActorFunc(BuildInstallFunc),
+ Deps: []*Action{buildAction},
+ Target: target,
+ }
+ for _, a2 := range buildAction.Deps[0].Deps {
+ p := a2.Package
+ pkgTargetRoot := p.Internal.Build.PkgTargetRoot
+ if pkgTargetRoot == "" {
+ continue
+ }
+ a.Deps = append(a.Deps, &Action{
+ Mode: "shlibname",
+ Package: p,
+ Actor: ActorFunc((*Builder).installShlibname),
+ Target: filepath.Join(pkgTargetRoot, p.ImportPath+".shlibname"),
+ Deps: []*Action{a.Deps[0]},
+ })
+ }
+ return a
+ })
+ }
+
+ return a
+}
diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go
new file mode 100644
index 0000000..e2e0e07
--- /dev/null
+++ b/src/cmd/go/internal/work/build.go
@@ -0,0 +1,956 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "go/build"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/load"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/search"
+ "cmd/go/internal/trace"
+)
+
+var CmdBuild = &base.Command{
+ UsageLine: "go build [-o output] [build flags] [packages]",
+ Short: "compile packages and dependencies",
+ Long: `
+Build compiles the packages named by the import paths,
+along with their dependencies, but it does not install the results.
+
+If the arguments to build are a list of .go files from a single directory,
+build treats them as a list of source files specifying a single package.
+
+When compiling packages, build ignores files that end in '_test.go'.
+
+When compiling a single main package, build writes
+the resulting executable to an output file named after
+the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe')
+or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe').
+The '.exe' suffix is added when writing a Windows executable.
+
+When compiling multiple packages or a single non-main package,
+build compiles the packages but discards the resulting object,
+serving only as a check that the packages can be built.
+
+The -o flag forces build to write the resulting executable or object
+to the named output file or directory, instead of the default behavior described
+in the last two paragraphs. If the named output is an existing directory or
+ends with a slash or backslash, then any resulting executables
+will be written to that directory.
+
+The build flags are shared by the build, clean, get, install, list, run,
+and test commands:
+
+ -C dir
+ Change to dir before running the command.
+ Any files named on the command line are interpreted after
+ changing directories.
+ If used, this flag must be the first one in the command line.
+ -a
+ force rebuilding of packages that are already up-to-date.
+ -n
+ print the commands but do not run them.
+ -p n
+ the number of programs, such as build commands or
+ test binaries, that can be run in parallel.
+ The default is GOMAXPROCS, normally the number of CPUs available.
+ -race
+ enable data race detection.
+ Supported only on linux/amd64, freebsd/amd64, darwin/amd64, darwin/arm64, windows/amd64,
+ linux/ppc64le and linux/arm64 (only for 48-bit VMA).
+ -msan
+ enable interoperation with memory sanitizer.
+ Supported only on linux/amd64, linux/arm64, freebsd/amd64
+ and only with Clang/LLVM as the host C compiler.
+ PIE build mode will be used on all platforms except linux/amd64.
+ -asan
+ enable interoperation with address sanitizer.
+ Supported only on linux/amd64 and linux/arm64,
+ and only with GCC 7 and higher or Clang/LLVM 9 and higher.
+ -cover
+ enable code coverage instrumentation.
+ -covermode set,count,atomic
+ set the mode for coverage analysis.
+ The default is "set" unless -race is enabled,
+ in which case it is "atomic".
+ The values:
+ set: bool: does this statement run?
+ count: int: how many times does this statement run?
+ atomic: int: count, but correct in multithreaded tests;
+ significantly more expensive.
+ Sets -cover.
+ -coverpkg pattern1,pattern2,pattern3
+ For a build that targets package 'main' (e.g. building a Go
+ executable), apply coverage analysis to each package matching
+ the patterns. The default is to apply coverage analysis to
+ packages in the main Go module. See 'go help packages' for a
+ description of package patterns. Sets -cover.
+ -v
+ print the names of packages as they are compiled.
+ -work
+ print the name of the temporary work directory and
+ do not delete it when exiting.
+ -x
+ print the commands.
+ -asmflags '[pattern=]arg list'
+ arguments to pass on each go tool asm invocation.
+ -buildmode mode
+ build mode to use. See 'go help buildmode' for more.
+ -buildvcs
+ Whether to stamp binaries with version control information
+ ("true", "false", or "auto"). By default ("auto"), version control
+ information is stamped into a binary if the main package, the main module
+ containing it, and the current directory are all in the same repository.
+ Use -buildvcs=false to always omit version control information, or
+ -buildvcs=true to error out if version control information is available but
+ cannot be included due to a missing tool or ambiguous directory structure.
+ -compiler name
+ name of compiler to use, as in runtime.Compiler (gccgo or gc).
+ -gccgoflags '[pattern=]arg list'
+ arguments to pass on each gccgo compiler/linker invocation.
+ -gcflags '[pattern=]arg list'
+ arguments to pass on each go tool compile invocation.
+ -installsuffix suffix
+ a suffix to use in the name of the package installation directory,
+ in order to keep output separate from default builds.
+ If using the -race flag, the install suffix is automatically set to race
+ or, if set explicitly, has _race appended to it. Likewise for the -msan
+ and -asan flags. Using a -buildmode option that requires non-default compile
+ flags has a similar effect.
+ -ldflags '[pattern=]arg list'
+ arguments to pass on each go tool link invocation.
+ -linkshared
+ build code that will be linked against shared libraries previously
+ created with -buildmode=shared.
+ -mod mode
+ module download mode to use: readonly, vendor, or mod.
+ By default, if a vendor directory is present and the go version in go.mod
+ is 1.14 or higher, the go command acts as if -mod=vendor were set.
+ Otherwise, the go command acts as if -mod=readonly were set.
+ See https://golang.org/ref/mod#build-commands for details.
+ -modcacherw
+ leave newly-created directories in the module cache read-write
+ instead of making them read-only.
+ -modfile file
+ in module aware mode, read (and possibly write) an alternate go.mod
+ file instead of the one in the module root directory. A file named
+ "go.mod" must still be present in order to determine the module root
+ directory, but it is not accessed. When -modfile is specified, an
+ alternate go.sum file is also used: its path is derived from the
+ -modfile flag by trimming the ".mod" extension and appending ".sum".
+ -overlay file
+ read a JSON config file that provides an overlay for build operations.
+ The file is a JSON struct with a single field, named 'Replace', that
+ maps each disk file path (a string) to its backing file path, so that
+ a build will run as if the disk file path exists with the contents
+ given by the backing file paths, or as if the disk file path does not
+ exist if its backing file path is empty. Support for the -overlay flag
+ has some limitations: importantly, cgo files included from outside the
+ include path must be in the same directory as the Go package they are
+ included from, and overlays will not appear when binaries and tests are
+ run through go run and go test respectively.
+ -pgo file
+ specify the file path of a profile for profile-guided optimization (PGO).
+ When the special name "auto" is specified, for each main package in the
+ build, the go command selects a file named "default.pgo" in the package's
+ directory if that file exists, and applies it to the (transitive)
+ dependencies of the main package (other packages are not affected).
+ Special name "off" turns off PGO. The default is "auto".
+ -pkgdir dir
+ install and load all packages from dir instead of the usual locations.
+ For example, when building with a non-standard configuration,
+ use -pkgdir to keep generated packages in a separate location.
+ -tags tag,list
+ a comma-separated list of additional build tags to consider satisfied
+ during the build. For more information about build tags, see
+ 'go help buildconstraint'. (Earlier versions of Go used a
+ space-separated list, and that form is deprecated but still recognized.)
+ -trimpath
+ remove all file system paths from the resulting executable.
+ Instead of absolute file system paths, the recorded file names
+ will begin with either a module path@version (when using modules),
+ or a plain import path (when using the standard library, or GOPATH).
+ -toolexec 'cmd args'
+ a program to use to invoke toolchain programs like vet and asm.
+ For example, instead of running asm, the go command will run
+ 'cmd args /path/to/asm <arguments for asm>'.
+ The TOOLEXEC_IMPORTPATH environment variable will be set,
+ matching 'go list -f {{.ImportPath}}' for the package being built.
+
+The -asmflags, -gccgoflags, -gcflags, and -ldflags flags accept a
+space-separated list of arguments to pass to an underlying tool
+during the build. To embed spaces in an element in the list, surround
+it with either single or double quotes. The argument list may be
+preceded by a package pattern and an equal sign, which restricts
+the use of that argument list to the building of packages matching
+that pattern (see 'go help packages' for a description of package
+patterns). Without a pattern, the argument list applies only to the
+packages named on the command line. The flags may be repeated
+with different patterns in order to specify different arguments for
+different sets of packages. If a package matches patterns given in
+multiple flags, the latest match on the command line wins.
+For example, 'go build -gcflags=-S fmt' prints the disassembly
+only for package fmt, while 'go build -gcflags=all=-S fmt'
+prints the disassembly for fmt and all its dependencies.
+
+For more about specifying packages, see 'go help packages'.
+For more about where packages and binaries are installed,
+run 'go help gopath'.
+For more about calling between Go and C/C++, run 'go help c'.
+
+Note: Build adheres to certain conventions such as those described
+by 'go help gopath'. Not all projects can follow these conventions,
+however. Installations that have their own conventions or that use
+a separate software build system may choose to use lower-level
+invocations such as 'go tool compile' and 'go tool link' to avoid
+some of the overheads and design decisions of the build tool.
+
+See also: go install, go get, go clean.
+ `,
+}
+
+const concurrentGCBackendCompilationEnabledByDefault = true
+
+func init() {
+ // break init cycle
+ CmdBuild.Run = runBuild
+ CmdInstall.Run = runInstall
+
+ CmdBuild.Flag.StringVar(&cfg.BuildO, "o", "", "output file or directory")
+
+ AddBuildFlags(CmdBuild, DefaultBuildFlags)
+ AddBuildFlags(CmdInstall, DefaultBuildFlags)
+ if cfg.Experiment != nil && cfg.Experiment.CoverageRedesign {
+ AddCoverFlags(CmdBuild, nil)
+ AddCoverFlags(CmdInstall, nil)
+ }
+}
+
+// Note that flags consulted by other parts of the code
+// (for example, buildV) are in cmd/go/internal/cfg.
+
+var (
+ forcedAsmflags []string // internally-forced flags for cmd/asm
+ forcedGcflags []string // internally-forced flags for cmd/compile
+ forcedLdflags []string // internally-forced flags for cmd/link
+ forcedGccgoflags []string // internally-forced flags for gccgo
+)
+
+var BuildToolchain toolchain = noToolchain{}
+var ldBuildmode string
+
+// buildCompiler implements flag.Var.
+// It implements Set by updating both
+// BuildToolchain and cfg.BuildContext.Compiler.
+type buildCompiler struct{}
+
+func (c buildCompiler) Set(value string) error {
+ switch value {
+ case "gc":
+ BuildToolchain = gcToolchain{}
+ case "gccgo":
+ BuildToolchain = gccgoToolchain{}
+ default:
+ return fmt.Errorf("unknown compiler %q", value)
+ }
+ cfg.BuildToolchainName = value
+ cfg.BuildToolchainCompiler = BuildToolchain.compiler
+ cfg.BuildToolchainLinker = BuildToolchain.linker
+ cfg.BuildContext.Compiler = value
+ return nil
+}
+
+func (c buildCompiler) String() string {
+ return cfg.BuildContext.Compiler
+}
+
+func init() {
+ switch build.Default.Compiler {
+ case "gc", "gccgo":
+ buildCompiler{}.Set(build.Default.Compiler)
+ }
+}
+
+type BuildFlagMask int
+
+const (
+ DefaultBuildFlags BuildFlagMask = 0
+ OmitModFlag BuildFlagMask = 1 << iota
+ OmitModCommonFlags
+ OmitVFlag
+)
+
+// AddBuildFlags adds the flags common to the build, clean, get,
+// install, list, run, and test commands.
+func AddBuildFlags(cmd *base.Command, mask BuildFlagMask) {
+ base.AddBuildFlagsNX(&cmd.Flag)
+ base.AddChdirFlag(&cmd.Flag)
+ cmd.Flag.BoolVar(&cfg.BuildA, "a", false, "")
+ cmd.Flag.IntVar(&cfg.BuildP, "p", cfg.BuildP, "")
+ if mask&OmitVFlag == 0 {
+ cmd.Flag.BoolVar(&cfg.BuildV, "v", false, "")
+ }
+
+ cmd.Flag.Var(&load.BuildAsmflags, "asmflags", "")
+ cmd.Flag.Var(buildCompiler{}, "compiler", "")
+ cmd.Flag.StringVar(&cfg.BuildBuildmode, "buildmode", "default", "")
+ cmd.Flag.Var(&load.BuildGcflags, "gcflags", "")
+ cmd.Flag.Var(&load.BuildGccgoflags, "gccgoflags", "")
+ if mask&OmitModFlag == 0 {
+ base.AddModFlag(&cmd.Flag)
+ }
+ if mask&OmitModCommonFlags == 0 {
+ base.AddModCommonFlags(&cmd.Flag)
+ } else {
+ // Add the overlay flag even when we don't add the rest of the mod common flags.
+ // This only affects 'go get' in GOPATH mode, but add the flag anyway for
+ // consistency.
+ cmd.Flag.StringVar(&fsys.OverlayFile, "overlay", "", "")
+ }
+ cmd.Flag.StringVar(&cfg.BuildContext.InstallSuffix, "installsuffix", "", "")
+ cmd.Flag.Var(&load.BuildLdflags, "ldflags", "")
+ cmd.Flag.BoolVar(&cfg.BuildLinkshared, "linkshared", false, "")
+ cmd.Flag.StringVar(&cfg.BuildPGO, "pgo", "auto", "")
+ cmd.Flag.StringVar(&cfg.BuildPkgdir, "pkgdir", "", "")
+ cmd.Flag.BoolVar(&cfg.BuildRace, "race", false, "")
+ cmd.Flag.BoolVar(&cfg.BuildMSan, "msan", false, "")
+ cmd.Flag.BoolVar(&cfg.BuildASan, "asan", false, "")
+ cmd.Flag.Var((*tagsFlag)(&cfg.BuildContext.BuildTags), "tags", "")
+ cmd.Flag.Var((*base.StringsFlag)(&cfg.BuildToolexec), "toolexec", "")
+ cmd.Flag.BoolVar(&cfg.BuildTrimpath, "trimpath", false, "")
+ cmd.Flag.BoolVar(&cfg.BuildWork, "work", false, "")
+ cmd.Flag.Var((*buildvcsFlag)(&cfg.BuildBuildvcs), "buildvcs", "")
+
+ // Undocumented, unstable debugging flags.
+ cmd.Flag.StringVar(&cfg.DebugActiongraph, "debug-actiongraph", "", "")
+ cmd.Flag.StringVar(&cfg.DebugTrace, "debug-trace", "", "")
+ cmd.Flag.StringVar(&cfg.DebugRuntimeTrace, "debug-runtime-trace", "", "")
+}
+
+// AddCoverFlags adds coverage-related flags to "cmd". If the
+// CoverageRedesign experiment is enabled, we add -cover{mode,pkg} to
+// the build command and only -coverprofile to the test command. If
+// the CoverageRedesign experiment is disabled, -cover* flags are
+// added only to the test command.
+func AddCoverFlags(cmd *base.Command, coverProfileFlag *string) {
+ addCover := false
+ if cfg.Experiment != nil && cfg.Experiment.CoverageRedesign {
+ // New coverage enabled: both build and test commands get
+ // coverage flags.
+ addCover = true
+ } else {
+ // New coverage disabled: only test command gets cover flags.
+ addCover = coverProfileFlag != nil
+ }
+ if addCover {
+ cmd.Flag.BoolVar(&cfg.BuildCover, "cover", false, "")
+ cmd.Flag.Var(coverFlag{(*coverModeFlag)(&cfg.BuildCoverMode)}, "covermode", "")
+ cmd.Flag.Var(coverFlag{commaListFlag{&cfg.BuildCoverPkg}}, "coverpkg", "")
+ }
+ if coverProfileFlag != nil {
+ cmd.Flag.Var(coverFlag{V: stringFlag{coverProfileFlag}}, "coverprofile", "")
+ }
+}
+
+// tagsFlag is the implementation of the -tags flag.
+type tagsFlag []string
+
+func (v *tagsFlag) Set(s string) error {
+ // For compatibility with Go 1.12 and earlier, allow "-tags='a b c'" or even just "-tags='a'".
+ if strings.Contains(s, " ") || strings.Contains(s, "'") {
+ return (*base.StringsFlag)(v).Set(s)
+ }
+
+ // Split on commas, ignore empty strings.
+ *v = []string{}
+ for _, s := range strings.Split(s, ",") {
+ if s != "" {
+ *v = append(*v, s)
+ }
+ }
+ return nil
+}
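+
+// Illustrative behavior (example values, not taken from tests):
+// Set("linux,cgo,") stores []string{"linux", "cgo"}, while the legacy
+// space-separated form Set("linux cgo") is routed through base.StringsFlag
+// and yields the same two tags.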
+
+func (v *tagsFlag) String() string {
+ return "<TagsFlag>"
+}
+
+// buildvcsFlag is the implementation of the -buildvcs flag.
+type buildvcsFlag string
+
+func (f *buildvcsFlag) IsBoolFlag() bool { return true } // allow -buildvcs (without arguments)
+
+func (f *buildvcsFlag) Set(s string) error {
+ // https://go.dev/issue/51748: allow "-buildvcs=auto",
+ // in addition to the usual "true" and "false".
+ if s == "" || s == "auto" {
+ *f = "auto"
+ return nil
+ }
+
+ b, err := strconv.ParseBool(s)
+ if err != nil {
+ return errors.New("value is neither 'auto' nor a valid bool")
+ }
+ *f = (buildvcsFlag)(strconv.FormatBool(b)) // convert to canonical "true" or "false"
+ return nil
+}
+
+func (f *buildvcsFlag) String() string { return string(*f) }
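+
+// Illustrative invocations (hypothetical): a bare -buildvcs sets the flag to
+// "true", -buildvcs=auto keeps "auto", and -buildvcs=0 is accepted by
+// strconv.ParseBool and canonicalized to "false".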
+
+// fileExtSplit expects a filename and returns the name
+// and ext (without the dot). If the file has no
+// extension, ext will be empty.
+func fileExtSplit(file string) (name, ext string) {
+ dotExt := filepath.Ext(file)
+ name = file[:len(file)-len(dotExt)]
+ if dotExt != "" {
+ ext = dotExt[1:]
+ }
+ return
+}
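+
+// For example, fileExtSplit("ed.go") returns ("ed", "go") and
+// fileExtSplit("README") returns ("README", "").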
+
+func pkgsMain(pkgs []*load.Package) (res []*load.Package) {
+ for _, p := range pkgs {
+ if p.Name == "main" {
+ res = append(res, p)
+ }
+ }
+ return res
+}
+
+func pkgsNotMain(pkgs []*load.Package) (res []*load.Package) {
+ for _, p := range pkgs {
+ if p.Name != "main" {
+ res = append(res, p)
+ }
+ }
+ return res
+}
+
+func oneMainPkg(pkgs []*load.Package) []*load.Package {
+ if len(pkgs) != 1 || pkgs[0].Name != "main" {
+ base.Fatalf("-buildmode=%s requires exactly one main package", cfg.BuildBuildmode)
+ }
+ return pkgs
+}
+
+var pkgsFilter = func(pkgs []*load.Package) []*load.Package { return pkgs }
+
+func runBuild(ctx context.Context, cmd *base.Command, args []string) {
+ modload.InitWorkfile()
+ BuildInit()
+ b := NewBuilder("")
+ defer func() {
+ if err := b.Close(); err != nil {
+ base.Fatal(err)
+ }
+ }()
+
+ pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args)
+ load.CheckPackageErrors(pkgs)
+
+ explicitO := len(cfg.BuildO) > 0
+
+ if len(pkgs) == 1 && pkgs[0].Name == "main" && cfg.BuildO == "" {
+ cfg.BuildO = pkgs[0].DefaultExecName()
+ cfg.BuildO += cfg.ExeSuffix
+ }
+
+ // sanity check some often mis-used options
+ switch cfg.BuildContext.Compiler {
+ case "gccgo":
+ if load.BuildGcflags.Present() {
+ fmt.Println("go build: when using gccgo toolchain, please pass compiler flags using -gccgoflags, not -gcflags")
+ }
+ if load.BuildLdflags.Present() {
+ fmt.Println("go build: when using gccgo toolchain, please pass linker flags using -gccgoflags, not -ldflags")
+ }
+ case "gc":
+ if load.BuildGccgoflags.Present() {
+ fmt.Println("go build: when using gc toolchain, please pass compile flags using -gcflags, and linker flags using -ldflags")
+ }
+ }
+
+ depMode := ModeBuild
+
+ pkgs = omitTestOnly(pkgsFilter(pkgs))
+
+ // Special case -o /dev/null by not writing at all.
+ if base.IsNull(cfg.BuildO) {
+ cfg.BuildO = ""
+ }
+
+ if cfg.Experiment.CoverageRedesign && cfg.BuildCover {
+ load.PrepareForCoverageBuild(pkgs)
+ }
+
+ if cfg.BuildO != "" {
+ // If the -o name exists and is a directory or
+ // ends with a slash or backslash, then
+ // write all main packages to that directory.
+ // Otherwise require only a single package be built.
+ if fi, err := os.Stat(cfg.BuildO); (err == nil && fi.IsDir()) ||
+ strings.HasSuffix(cfg.BuildO, "/") ||
+ strings.HasSuffix(cfg.BuildO, string(os.PathSeparator)) {
+ if !explicitO {
+ base.Fatalf("go: build output %q already exists and is a directory", cfg.BuildO)
+ }
+ a := &Action{Mode: "go build"}
+ for _, p := range pkgs {
+ if p.Name != "main" {
+ continue
+ }
+
+ p.Target = filepath.Join(cfg.BuildO, p.DefaultExecName())
+ p.Target += cfg.ExeSuffix
+ p.Stale = true
+ p.StaleReason = "build -o flag in use"
+ a.Deps = append(a.Deps, b.AutoAction(ModeInstall, depMode, p))
+ }
+ if len(a.Deps) == 0 {
+ base.Fatalf("go: no main packages to build")
+ }
+ b.Do(ctx, a)
+ return
+ }
+ if len(pkgs) > 1 {
+ base.Fatalf("go: cannot write multiple packages to non-directory %s", cfg.BuildO)
+ } else if len(pkgs) == 0 {
+ base.Fatalf("no packages to build")
+ }
+ p := pkgs[0]
+ p.Target = cfg.BuildO
+ p.Stale = true // must build - not up to date
+ p.StaleReason = "build -o flag in use"
+ a := b.AutoAction(ModeInstall, depMode, p)
+ b.Do(ctx, a)
+ return
+ }
+
+ a := &Action{Mode: "go build"}
+ for _, p := range pkgs {
+ a.Deps = append(a.Deps, b.AutoAction(ModeBuild, depMode, p))
+ }
+ if cfg.BuildBuildmode == "shared" {
+ a = b.buildmodeShared(ModeBuild, depMode, args, pkgs, a)
+ }
+ b.Do(ctx, a)
+}
+
+var CmdInstall = &base.Command{
+ UsageLine: "go install [build flags] [packages]",
+ Short: "compile and install packages and dependencies",
+ Long: `
+Install compiles and installs the packages named by the import paths.
+
+Executables are installed in the directory named by the GOBIN environment
+variable, which defaults to $GOPATH/bin or $HOME/go/bin if the GOPATH
+environment variable is not set. Executables in $GOROOT
+are installed in $GOROOT/bin or $GOTOOLDIR instead of $GOBIN.
+
+If the arguments have version suffixes (like @latest or @v1.0.0), "go install"
+builds packages in module-aware mode, ignoring the go.mod file in the current
+directory or any parent directory, if there is one. This is useful for
+installing executables without affecting the dependencies of the main module.
+To eliminate ambiguity about which module versions are used in the build, the
+arguments must satisfy the following constraints:
+
+- Arguments must be package paths or package patterns (with "..." wildcards).
+They must not be standard packages (like fmt), meta-patterns (std, cmd,
+all), or relative or absolute file paths.
+
+- All arguments must have the same version suffix. Different queries are not
+allowed, even if they refer to the same version.
+
+- All arguments must refer to packages in the same module at the same version.
+
+- Package path arguments must refer to main packages. Pattern arguments
+will only match main packages.
+
+- No module is considered the "main" module. If the module containing
+packages named on the command line has a go.mod file, it must not contain
+directives (replace and exclude) that would cause it to be interpreted
+differently than if it were the main module. The module must not require
+a higher version of itself.
+
+- Vendor directories are not used in any module. (Vendor directories are not
+included in the module zip files downloaded by 'go install'.)
+
+If the arguments don't have version suffixes, "go install" may run in
+module-aware mode or GOPATH mode, depending on the GO111MODULE environment
+variable and the presence of a go.mod file. See 'go help modules' for details.
+If module-aware mode is enabled, "go install" runs in the context of the main
+module.
+
+When module-aware mode is disabled, non-main packages are installed in the
+directory $GOPATH/pkg/$GOOS_$GOARCH. When module-aware mode is enabled,
+non-main packages are built and cached but not installed.
+
+Before Go 1.20, the standard library was installed to
+$GOROOT/pkg/$GOOS_$GOARCH.
+Starting in Go 1.20, the standard library is built and cached but not installed.
+Setting GODEBUG=installgoroot=all restores the use of
+$GOROOT/pkg/$GOOS_$GOARCH.
+
+For more about build flags, see 'go help build'.
+
+For more about specifying packages, see 'go help packages'.
+
+See also: go build, go get, go clean.
+ `,
+}
+
+// libname returns the filename to use for the shared library when using
+// -buildmode=shared. The rules we use are:
+// Use arguments for special 'meta' packages:
+//
+// std --> libstd.so
+// std cmd --> libstd,cmd.so
+//
+// A single non-meta argument with trailing "/..." is special cased:
+//
+// foo/... --> libfoo.so
+// (A relative path like "./..." expands the "." first)
+//
+// Use import paths for other cases, changing '/' to '-':
+//
+// somelib --> libsubdir-somelib.so
+// ./ or ../ --> libsubdir-somelib.so
+// gopkg.in/tomb.v2 --> libgopkg.in-tomb.v2.so
+// a/... b/... --> liba/c,b/d.so - all matching import paths
+//
+// Name parts are joined with ','.
+func libname(args []string, pkgs []*load.Package) (string, error) {
+ var libname string
+ appendName := func(arg string) {
+ if libname == "" {
+ libname = arg
+ } else {
+ libname += "," + arg
+ }
+ }
+ var haveNonMeta bool
+ for _, arg := range args {
+ if search.IsMetaPackage(arg) {
+ appendName(arg)
+ } else {
+ haveNonMeta = true
+ }
+ }
+ if len(libname) == 0 { // non-meta packages only. use import paths
+ if len(args) == 1 && strings.HasSuffix(args[0], "/...") {
+ // Special case of "foo/..." as mentioned above.
+ arg := strings.TrimSuffix(args[0], "/...")
+ if build.IsLocalImport(arg) {
+ cwd, _ := os.Getwd()
+ bp, _ := cfg.BuildContext.ImportDir(filepath.Join(cwd, arg), build.FindOnly)
+ if bp.ImportPath != "" && bp.ImportPath != "." {
+ arg = bp.ImportPath
+ }
+ }
+ appendName(strings.ReplaceAll(arg, "/", "-"))
+ } else {
+ for _, pkg := range pkgs {
+ appendName(strings.ReplaceAll(pkg.ImportPath, "/", "-"))
+ }
+ }
+ } else if haveNonMeta { // have both meta package and a non-meta one
+ return "", errors.New("mixing of meta and non-meta packages is not allowed")
+ }
+ // TODO(mwhudson): Needs to change for platforms that use different naming
+ // conventions...
+ return "lib" + libname + ".so", nil
+}
+
+func runInstall(ctx context.Context, cmd *base.Command, args []string) {
+ for _, arg := range args {
+ if strings.Contains(arg, "@") && !build.IsLocalImport(arg) && !filepath.IsAbs(arg) {
+ installOutsideModule(ctx, args)
+ return
+ }
+ }
+
+ modload.InitWorkfile()
+ BuildInit()
+ pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args)
+ if cfg.ModulesEnabled && !modload.HasModRoot() {
+ haveErrors := false
+ allMissingErrors := true
+ for _, pkg := range pkgs {
+ if pkg.Error == nil {
+ continue
+ }
+ haveErrors = true
+ if missingErr := (*modload.ImportMissingError)(nil); !errors.As(pkg.Error, &missingErr) {
+ allMissingErrors = false
+ break
+ }
+ }
+ if haveErrors && allMissingErrors {
+ latestArgs := make([]string, len(args))
+ for i := range args {
+ latestArgs[i] = args[i] + "@latest"
+ }
+ hint := strings.Join(latestArgs, " ")
+ base.Fatalf("go: 'go install' requires a version when current directory is not in a module\n\tTry 'go install %s' to install the latest version", hint)
+ }
+ }
+ load.CheckPackageErrors(pkgs)
+
+ if cfg.Experiment.CoverageRedesign && cfg.BuildCover {
+ load.PrepareForCoverageBuild(pkgs)
+ }
+
+ InstallPackages(ctx, args, pkgs)
+}
+
+// omitTestOnly returns pkgs with test-only packages removed.
+func omitTestOnly(pkgs []*load.Package) []*load.Package {
+ var list []*load.Package
+ for _, p := range pkgs {
+ if len(p.GoFiles)+len(p.CgoFiles) == 0 && !p.Internal.CmdlinePkgLiteral {
+ // Package has no source files,
+ // perhaps due to build tags or perhaps due to only having *_test.go files.
+ // Also, it is only being processed as the result of a wildcard match
+ // like ./..., not because it was listed as a literal path on the command line.
+ // Ignore it.
+ continue
+ }
+ list = append(list, p)
+ }
+ return list
+}
+
+func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Package) {
+ ctx, span := trace.StartSpan(ctx, "InstallPackages "+strings.Join(patterns, " "))
+ defer span.Done()
+
+ if cfg.GOBIN != "" && !filepath.IsAbs(cfg.GOBIN) {
+ base.Fatalf("cannot install, GOBIN must be an absolute path")
+ }
+
+ pkgs = omitTestOnly(pkgsFilter(pkgs))
+ for _, p := range pkgs {
+ if p.Target == "" {
+ switch {
+ case p.Name != "main" && p.Internal.Local && p.ConflictDir == "":
+ // Non-executables outside GOPATH need not have a target:
+ // we can use the cache to hold the built package archive for use in future builds.
+ // The ones inside GOPATH should have a target (in GOPATH/pkg)
+ // or else something is wrong and worth reporting (like a ConflictDir).
+ case p.Name != "main" && p.Module != nil:
+ // Non-executables have no target (except the cache) when building with modules.
+ case p.Name != "main" && p.Standard && p.Internal.Build.PkgObj == "":
+ // Most packages in std do not need an installed .a, because they can be
+ // rebuilt and used directly from the build cache.
+ // A few targets (notably those using cgo) still do need to be installed
+ // in case the user's environment lacks a C compiler.
+ case p.Internal.GobinSubdir:
+ base.Errorf("go: cannot install cross-compiled binaries when GOBIN is set")
+ case p.Internal.CmdlineFiles:
+ base.Errorf("go: no install location for .go files listed on command line (GOBIN not set)")
+ case p.ConflictDir != "":
+ base.Errorf("go: no install location for %s: hidden by %s", p.Dir, p.ConflictDir)
+ default:
+ base.Errorf("go: no install location for directory %s outside GOPATH\n"+
+ "\tFor more details see: 'go help gopath'", p.Dir)
+ }
+ }
+ }
+ base.ExitIfErrors()
+
+ b := NewBuilder("")
+ defer func() {
+ if err := b.Close(); err != nil {
+ base.Fatal(err)
+ }
+ }()
+
+ depMode := ModeBuild
+ a := &Action{Mode: "go install"}
+ var tools []*Action
+ for _, p := range pkgs {
+ // If p is a tool, delay the installation until the end of the build.
+ // This avoids installing assemblers/compilers that are being executed
+ // by other steps in the build.
+ a1 := b.AutoAction(ModeInstall, depMode, p)
+ if load.InstallTargetDir(p) == load.ToTool {
+ a.Deps = append(a.Deps, a1.Deps...)
+ a1.Deps = append(a1.Deps, a)
+ tools = append(tools, a1)
+ continue
+ }
+ a.Deps = append(a.Deps, a1)
+ }
+ if len(tools) > 0 {
+ a = &Action{
+ Mode: "go install (tools)",
+ Deps: tools,
+ }
+ }
+
+ if cfg.BuildBuildmode == "shared" {
+ // Note: If buildmode=shared then only non-main packages
+ // are present in the pkgs list, so all the special case code about
+ // tools above did not apply, and a is just a simple Action
+ // with a list of Deps, one per package named in pkgs,
+ // the same as in runBuild.
+ a = b.buildmodeShared(ModeInstall, ModeInstall, patterns, pkgs, a)
+ }
+
+ b.Do(ctx, a)
+ base.ExitIfErrors()
+
+ // Success. If this command is 'go install' with no arguments
+ // and the current directory (the implicit argument) is a command,
+ // remove any leftover command binary from a previous 'go build'.
+ // The binary is installed; it's not needed here anymore.
+ // And worse it might be a stale copy, which you don't want to find
+ // instead of the installed one if $PATH contains dot.
+ // One way to view this behavior is that it is as if 'go install' first
+ // runs 'go build' and then moves the generated file to the install dir.
+ // See issue 9645.
+ if len(patterns) == 0 && len(pkgs) == 1 && pkgs[0].Name == "main" {
+ // Compute file 'go build' would have created.
+ // If it exists and is an executable file, remove it.
+ targ := pkgs[0].DefaultExecName()
+ targ += cfg.ExeSuffix
+ if filepath.Join(pkgs[0].Dir, targ) != pkgs[0].Target { // maybe $GOBIN is the current directory
+ fi, err := os.Stat(targ)
+ if err == nil {
+ m := fi.Mode()
+ if m.IsRegular() {
+ if m&0111 != 0 || cfg.Goos == "windows" { // windows never sets executable bit
+ os.Remove(targ)
+ }
+ }
+ }
+ }
+ }
+}
+
+// installOutsideModule implements 'go install pkg@version'. It builds and
+// installs one or more main packages in module mode while ignoring any go.mod
+// in the current directory or parent directories.
+//
+// See golang.org/issue/40276 for details and rationale.
+func installOutsideModule(ctx context.Context, args []string) {
+ modload.ForceUseModules = true
+ modload.RootMode = modload.NoRoot
+ modload.AllowMissingModuleImports()
+ modload.Init()
+ BuildInit()
+
+ // Load packages. Ignore non-main packages.
+ // Print a warning if an argument contains "..." and matches no main packages.
+ // PackagesAndErrors already prints warnings for patterns that don't match any
+ // packages, so be careful not to double print.
+ // TODO(golang.org/issue/40276): don't report errors loading non-main packages
+ // matched by a pattern.
+ pkgOpts := load.PackageOpts{MainOnly: true}
+ pkgs, err := load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args)
+ if err != nil {
+ base.Fatal(err)
+ }
+ load.CheckPackageErrors(pkgs)
+ patterns := make([]string, len(args))
+ for i, arg := range args {
+ patterns[i] = arg[:strings.Index(arg, "@")]
+ }
+
+ // Build and install the packages.
+ InstallPackages(ctx, patterns, pkgs)
+}
+
+// ExecCmd is the command to use to run user binaries.
+// Normally it is empty, meaning run the binaries directly.
+// If cross-compiling and running on a remote system or
+// simulator, it is typically go_GOOS_GOARCH_exec, with
+// the target GOOS and GOARCH substituted.
+// The -exec flag overrides these defaults.
+var ExecCmd []string
+
+// FindExecCmd derives the value of ExecCmd to use.
+// It returns that value and leaves ExecCmd set for direct use.
+func FindExecCmd() []string {
+ if ExecCmd != nil {
+ return ExecCmd
+ }
+ ExecCmd = []string{} // avoid work the second time
+ if cfg.Goos == runtime.GOOS && cfg.Goarch == runtime.GOARCH {
+ return ExecCmd
+ }
+ path, err := exec.LookPath(fmt.Sprintf("go_%s_%s_exec", cfg.Goos, cfg.Goarch))
+ if err == nil {
+ ExecCmd = []string{path}
+ }
+ return ExecCmd
+}
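+
+// Illustrative example (hypothetical setup): when cross-compiling for linux/arm
+// on an amd64 host, FindExecCmd looks for a "go_linux_arm_exec" wrapper in PATH
+// and, if found, user binaries are run through that wrapper rather than directly.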
+
+// A coverFlag is a flag.Value that also implies -cover.
+type coverFlag struct{ V flag.Value }
+
+func (f coverFlag) String() string { return f.V.String() }
+
+func (f coverFlag) Set(value string) error {
+ if err := f.V.Set(value); err != nil {
+ return err
+ }
+ cfg.BuildCover = true
+ return nil
+}
+
+type coverModeFlag string
+
+func (f *coverModeFlag) String() string { return string(*f) }
+func (f *coverModeFlag) Set(value string) error {
+ switch value {
+ case "", "set", "count", "atomic":
+ *f = coverModeFlag(value)
+ cfg.BuildCoverMode = value
+ return nil
+ default:
+ return errors.New(`valid modes are "set", "count", or "atomic"`)
+ }
+}
+
+// A commaListFlag is a flag.Value representing a comma-separated list.
+type commaListFlag struct{ Vals *[]string }
+
+func (f commaListFlag) String() string { return strings.Join(*f.Vals, ",") }
+
+func (f commaListFlag) Set(value string) error {
+ if value == "" {
+ *f.Vals = nil
+ } else {
+ *f.Vals = strings.Split(value, ",")
+ }
+ return nil
+}
+
+// A stringFlag is a flag.Value representing a single string.
+type stringFlag struct{ val *string }
+
+func (f stringFlag) String() string { return *f.val }
+func (f stringFlag) Set(value string) error {
+ *f.val = value
+ return nil
+}
diff --git a/src/cmd/go/internal/work/build_test.go b/src/cmd/go/internal/work/build_test.go
new file mode 100644
index 0000000..91648a3
--- /dev/null
+++ b/src/cmd/go/internal/work/build_test.go
@@ -0,0 +1,283 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "fmt"
+ "internal/testenv"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+)
+
+func TestRemoveDevNull(t *testing.T) {
+ fi, err := os.Lstat(os.DevNull)
+ if err != nil {
+ t.Skip(err)
+ }
+ if fi.Mode().IsRegular() {
+ t.Errorf("Lstat(%s).Mode().IsRegular() = true; expected false", os.DevNull)
+ }
+ mayberemovefile(os.DevNull)
+ _, err = os.Lstat(os.DevNull)
+ if err != nil {
+ t.Errorf("mayberemovefile(%s) did remove it; oops", os.DevNull)
+ }
+}
+
+func TestSplitPkgConfigOutput(t *testing.T) {
+ for _, test := range []struct {
+ in []byte
+ want []string
+ }{
+ {[]byte(`-r:foo -L/usr/white\ space/lib -lfoo\ bar -lbar\ baz`), []string{"-r:foo", "-L/usr/white space/lib", "-lfoo bar", "-lbar baz"}},
+ {[]byte(`-lextra\ fun\ arg\\`), []string{`-lextra fun arg\`}},
+ {[]byte("\textra whitespace\r\n"), []string{"extra", "whitespace\r"}},
+ {[]byte(" \r\n "), []string{"\r"}},
+ {[]byte(`"-r:foo" "-L/usr/white space/lib" "-lfoo bar" "-lbar baz"`), []string{"-r:foo", "-L/usr/white space/lib", "-lfoo bar", "-lbar baz"}},
+ {[]byte(`"-lextra fun arg\\"`), []string{`-lextra fun arg\`}},
+ {[]byte(`" \r\n\ "`), []string{` \r\n\ `}},
+ {[]byte(`""`), []string{""}},
+ {[]byte(``), nil},
+ {[]byte(`"\\"`), []string{`\`}},
+ {[]byte(`"\x"`), []string{`\x`}},
+ {[]byte(`"\\x"`), []string{`\x`}},
+ {[]byte(`'\\'`), []string{`\\`}},
+ {[]byte(`'\x'`), []string{`\x`}},
+ {[]byte(`"\\x"`), []string{`\x`}},
+ {[]byte("\\\n"), nil},
+ {[]byte(`-fPIC -I/test/include/foo -DQUOTED='"/test/share/doc"'`), []string{"-fPIC", "-I/test/include/foo", `-DQUOTED="/test/share/doc"`}},
+ {[]byte(`-fPIC -I/test/include/foo -DQUOTED="/test/share/doc"`), []string{"-fPIC", "-I/test/include/foo", "-DQUOTED=/test/share/doc"}},
+ {[]byte(`-fPIC -I/test/include/foo -DQUOTED=\"/test/share/doc\"`), []string{"-fPIC", "-I/test/include/foo", `-DQUOTED="/test/share/doc"`}},
+ {[]byte(`-fPIC -I/test/include/foo -DQUOTED='/test/share/doc'`), []string{"-fPIC", "-I/test/include/foo", "-DQUOTED=/test/share/doc"}},
+ {[]byte(`-DQUOTED='/te\st/share/d\oc'`), []string{`-DQUOTED=/te\st/share/d\oc`}},
+ {[]byte(`-Dhello=10 -Dworld=+32 -DDEFINED_FROM_PKG_CONFIG=hello\ world`), []string{"-Dhello=10", "-Dworld=+32", "-DDEFINED_FROM_PKG_CONFIG=hello world"}},
+ {[]byte(`"broken\"" \\\a "a"`), []string{"broken\"", "\\a", "a"}},
+ } {
+ got, err := splitPkgConfigOutput(test.in)
+ if err != nil {
+ t.Errorf("splitPkgConfigOutput on %#q failed with error %v", test.in, err)
+ continue
+ }
+ if !reflect.DeepEqual(got, test.want) {
+ t.Errorf("splitPkgConfigOutput(%#q) = %#q; want %#q", test.in, got, test.want)
+ }
+ }
+
+ for _, test := range []struct {
+ in []byte
+ want []string
+ }{
+ // broken quotation
+ {[]byte(`" \r\n `), nil},
+ {[]byte(`"-r:foo" "-L/usr/white space/lib "-lfoo bar" "-lbar baz"`), nil},
+ {[]byte(`"-lextra fun arg\\`), nil},
+ // broken char escaping
+ {[]byte(`broken flag\`), nil},
+ {[]byte(`extra broken flag \`), nil},
+ {[]byte(`\`), nil},
+ {[]byte(`"broken\"" "extra" \`), nil},
+ } {
+ got, err := splitPkgConfigOutput(test.in)
+ if err == nil {
+ t.Errorf("splitPkgConfigOutput(%v) = %v; haven't failed with error as expected.", test.in, got)
+ }
+ if !reflect.DeepEqual(got, test.want) {
+ t.Errorf("splitPkgConfigOutput(%v) = %v; want %v", test.in, got, test.want)
+ }
+ }
+
+}
+
+func TestSharedLibName(t *testing.T) {
+ // TODO(avdva) - make these values platform-specific
+ prefix := "lib"
+ suffix := ".so"
+ testData := []struct {
+ args []string
+ pkgs []*load.Package
+ expected string
+ expectErr bool
+ rootedAt string
+ }{
+ {
+ args: []string{"std"},
+ pkgs: []*load.Package{},
+ expected: "std",
+ },
+ {
+ args: []string{"std", "cmd"},
+ pkgs: []*load.Package{},
+ expected: "std,cmd",
+ },
+ {
+ args: []string{},
+ pkgs: []*load.Package{pkgImportPath("gopkg.in/somelib")},
+ expected: "gopkg.in-somelib",
+ },
+ {
+ args: []string{"./..."},
+ pkgs: []*load.Package{pkgImportPath("somelib")},
+ expected: "somelib",
+ rootedAt: "somelib",
+ },
+ {
+ args: []string{"../somelib", "../somelib"},
+ pkgs: []*load.Package{pkgImportPath("somelib")},
+ expected: "somelib",
+ },
+ {
+ args: []string{"../lib1", "../lib2"},
+ pkgs: []*load.Package{pkgImportPath("gopkg.in/lib1"), pkgImportPath("gopkg.in/lib2")},
+ expected: "gopkg.in-lib1,gopkg.in-lib2",
+ },
+ {
+ args: []string{"./..."},
+ pkgs: []*load.Package{
+ pkgImportPath("gopkg.in/dir/lib1"),
+ pkgImportPath("gopkg.in/lib2"),
+ pkgImportPath("gopkg.in/lib3"),
+ },
+ expected: "gopkg.in",
+ rootedAt: "gopkg.in",
+ },
+ {
+ args: []string{"std", "../lib2"},
+ pkgs: []*load.Package{},
+ expectErr: true,
+ },
+ {
+ args: []string{"all", "./"},
+ pkgs: []*load.Package{},
+ expectErr: true,
+ },
+ {
+ args: []string{"cmd", "fmt"},
+ pkgs: []*load.Package{},
+ expectErr: true,
+ },
+ }
+ for _, data := range testData {
+ func() {
+ if data.rootedAt != "" {
+ tmpGopath, err := os.MkdirTemp("", "gopath")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cwd := base.Cwd()
+ oldGopath := cfg.BuildContext.GOPATH
+ defer func() {
+ cfg.BuildContext.GOPATH = oldGopath
+ os.Chdir(cwd)
+ err := os.RemoveAll(tmpGopath)
+ if err != nil {
+ t.Error(err)
+ }
+ }()
+ root := filepath.Join(tmpGopath, "src", data.rootedAt)
+ err = os.MkdirAll(root, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cfg.BuildContext.GOPATH = tmpGopath
+ os.Chdir(root)
+ }
+ computed, err := libname(data.args, data.pkgs)
+ if err != nil {
+ if !data.expectErr {
+ t.Errorf("libname returned an error %q, expected a name", err.Error())
+ }
+ } else if data.expectErr {
+ t.Errorf("libname returned %q, expected an error", computed)
+ } else {
+ expected := prefix + data.expected + suffix
+ if expected != computed {
+ t.Errorf("libname returned %q, expected %q", computed, expected)
+ }
+ }
+ }()
+ }
+}
+
+func pkgImportPath(pkgpath string) *load.Package {
+ return &load.Package{
+ PackagePublic: load.PackagePublic{
+ ImportPath: pkgpath,
+ },
+ }
+}
+
+// When installing packages, the installed package directory should
+// respect the SetGID bit and group name of the destination
+// directory.
+// See https://golang.org/issue/18878.
+func TestRespectSetgidDir(t *testing.T) {
+ var b Builder
+
+ // Check that `cp` is called instead of `mv` by looking at the output
+ // of `(*Builder).ShowCmd` afterwards as a sanity check.
+ cfg.BuildX = true
+ var cmdBuf strings.Builder
+ b.Print = func(a ...any) (int, error) {
+ return cmdBuf.WriteString(fmt.Sprint(a...))
+ }
+
+ setgiddir, err := os.MkdirTemp("", "SetGroupID")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(setgiddir)
+
+ // BSD mkdir(2) inherits the parent directory group, and other platforms
+ // can inherit the parent directory group via setgid. The test setup (chmod
+ // setgid) will fail if the process does not have group permission on
+ // the new temporary directory.
+ err = os.Chown(setgiddir, os.Getuid(), os.Getgid())
+ if err != nil {
+ if testenv.SyscallIsNotSupported(err) {
+ t.Skip("skipping: chown is not supported on " + runtime.GOOS)
+ }
+ t.Fatal(err)
+ }
+
+ // Change setgiddir's permissions to include the SetGID bit.
+ if err := os.Chmod(setgiddir, 0755|fs.ModeSetgid); err != nil {
+ if testenv.SyscallIsNotSupported(err) {
+ t.Skip("skipping: chmod is not supported on " + runtime.GOOS)
+ }
+ t.Fatal(err)
+ }
+ if fi, err := os.Stat(setgiddir); err != nil {
+ t.Fatal(err)
+ } else if fi.Mode()&fs.ModeSetgid == 0 {
+ t.Skip("skipping: Chmod ignored ModeSetgid on " + runtime.GOOS)
+ }
+
+ pkgfile, err := os.CreateTemp("", "pkgfile")
+ if err != nil {
+ t.Fatalf("os.CreateTemp(\"\", \"pkgfile\"): %v", err)
+ }
+ defer os.Remove(pkgfile.Name())
+ defer pkgfile.Close()
+
+ dirGIDFile := filepath.Join(setgiddir, "setgid")
+ if err := b.moveOrCopyFile(dirGIDFile, pkgfile.Name(), 0666, true); err != nil {
+ t.Fatalf("moveOrCopyFile: %v", err)
+ }
+
+ got := strings.TrimSpace(cmdBuf.String())
+ want := b.fmtcmd("", "cp %s %s", pkgfile.Name(), dirGIDFile)
+ if got != want {
+ t.Fatalf("moveOrCopyFile(%q, %q): want %q, got %q", dirGIDFile, pkgfile.Name(), want, got)
+ }
+}
diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go
new file mode 100644
index 0000000..a1d7599
--- /dev/null
+++ b/src/cmd/go/internal/work/buildid.go
@@ -0,0 +1,703 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cache"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/str"
+ "cmd/internal/buildid"
+ "cmd/internal/quoted"
+)
+
+// Build IDs
+//
+// Go packages and binaries are stamped with build IDs that record both
+// the action ID, which is a hash of the inputs to the action that produced
+// the packages or binary, and the content ID, which is a hash of the action
+// output, namely the archive or binary itself. The hash is the same one
+// used by the build artifact cache (see cmd/go/internal/cache), but
+// truncated when stored in packages and binaries, as the full length is not
+// needed and is a bit unwieldy. The precise form is
+//
+// actionID/[.../]contentID
+//
+// where the actionID and contentID are prepared by buildid.HashToString below
+// and are found by looking for the first or last slash.
+// Usually the buildID is simply actionID/contentID, but see below for an
+// exception.
+//
+// The build ID serves two primary purposes.
+//
+// 1. The action ID half allows installed packages and binaries to serve as
+// one-element cache entries. If we intend to build math.a with a given
+// set of inputs summarized in the action ID, and the installed math.a already
+// has that action ID, we can reuse the installed math.a instead of rebuilding it.
+//
+// 2. The content ID half allows the easy preparation of action IDs for steps
+// that consume a particular package or binary. The content hash of every
+// input file for a given action must be included in the action ID hash.
+// Storing the content ID in the build ID lets us read it from the file with
+// minimal I/O, instead of reading and hashing the entire file.
+// This is especially effective since packages and binaries are typically
+// the largest inputs to an action.
+//
+// Separating action ID from content ID is important for reproducible builds.
+// The compiler is compiled with itself. If an output were represented by its
+// own action ID (instead of content ID) when computing the action ID of
+// the next step in the build process, then the compiler could never have its
+// own input action ID as its output action ID (short of a miraculous hash collision).
+// Instead we use the content IDs to compute the next action ID, and because
+// the content IDs converge, so too do the action IDs and therefore the
+// build IDs and the overall compiler binary. See cmd/dist's cmdbootstrap
+// for the actual convergence sequence.
+//
+// The “one-element cache” purpose is a bit more complex for installed
+// binaries. For a binary, like cmd/gofmt, there are two steps: compile
+// cmd/gofmt/*.go into main.a, and then link main.a into the gofmt binary.
+// We do not install gofmt's main.a, only the gofmt binary. Being able to
+// decide that the gofmt binary is up-to-date means computing the action ID
+// for the final link of the gofmt binary and comparing it against the
+// already-installed gofmt binary. But computing the action ID for the link
+// means knowing the content ID of main.a, which we did not keep.
+// To sidestep this problem, each binary actually stores an expanded build ID:
+//
+// actionID(binary)/actionID(main.a)/contentID(main.a)/contentID(binary)
+//
+// (Note that this can be viewed equivalently as:
+//
+// actionID(binary)/buildID(main.a)/contentID(binary)
+//
+// Storing the buildID(main.a) in the middle lets the computations that care
+// about the prefix or suffix halves ignore the middle and preserves the
+// original build ID as a contiguous string.)
+//
+// During the build, when it's time to build main.a, the gofmt binary has the
+// information needed to decide whether the eventual link would produce
+// the same binary: if the action ID for main.a's inputs matches and then
+// the action ID for the link step matches when assuming the given main.a
+// content ID, then the binary as a whole is up-to-date and need not be rebuilt.
+//
+// This is all a bit complex and may be simplified once we can rely on the
+// main cache, but at least at the start we will be using the content-based
+// staleness determination without a cache beyond the usual installed
+// package and binary locations.
+
+const buildIDSeparator = "/"
+
+// actionID returns the action ID half of a build ID.
+func actionID(buildID string) string {
+ i := strings.Index(buildID, buildIDSeparator)
+ if i < 0 {
+ return buildID
+ }
+ return buildID[:i]
+}
+
+// contentID returns the content ID half of a build ID.
+func contentID(buildID string) string {
+ return buildID[strings.LastIndex(buildID, buildIDSeparator)+1:]
+}
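+
+// For illustration, given a hypothetical expanded binary build ID
+//
+//	aaaa/bbbb/cccc/dddd
+//
+// actionID returns "aaaa" (everything before the first slash) and contentID
+// returns "dddd" (everything after the last slash); the middle "bbbb/cccc"
+// is the embedded build ID of main.a described above.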
+
+// toolID returns the unique ID to use for the current copy of the
+// named tool (asm, compile, cover, link).
+//
+// It is important that if the tool changes (for example a compiler bug is fixed
+// and the compiler reinstalled), toolID returns a different string, so that old
+// package archives look stale and are rebuilt (with the fixed compiler).
+// This suggests using a content hash of the tool binary, as stored in the build ID.
+//
+// Unfortunately, we can't just open the tool binary, because the tool might be
+// invoked via a wrapper program specified by -toolexec and we don't know
+// what the wrapper program does. In particular, we want "-toolexec toolstash"
+// to continue working: it does no good if "-toolexec toolstash" is executing a
+// stashed copy of the compiler but the go command is acting as if it will run
+// the standard copy of the compiler. The solution is to ask the tool binary to tell
+// us its own build ID using the "-V=full" flag now supported by all tools.
+// Then we know we're getting the build ID of the compiler that will actually run
+// during the build. (How does the compiler binary know its own content hash?
+// We store it there using updateBuildID after the standard link step.)
+//
+// A final twist is that we'd prefer to have reproducible builds for release toolchains.
+// It should be possible to cross-compile for Windows from either Linux or Mac
+// or Windows itself and produce the same binaries, bit for bit. If the tool ID,
+// which influences the action ID half of the build ID, is based on the content ID,
+// then the Linux compiler binary and Mac compiler binary will have different tool IDs
+// and therefore produce executables with different action IDs.
+// To avoid this problem, for releases we use the release version string instead
+// of the compiler binary's content hash. This assumes that all compilers built
+// on all different systems are semantically equivalent, which is of course only true
+// modulo bugs. (Producing the exact same executables also requires that the different
+// build setups agree on details like $GOROOT and file name paths, but at least the
+// tool IDs do not make it impossible.)
+func (b *Builder) toolID(name string) string {
+ b.id.Lock()
+ id := b.toolIDCache[name]
+ b.id.Unlock()
+
+ if id != "" {
+ return id
+ }
+
+ path := base.Tool(name)
+ desc := "go tool " + name
+
+ // Special case: undocumented -vettool overrides usual vet,
+ // for testing vet or supplying an alternative analysis tool.
+ if name == "vet" && VetTool != "" {
+ path = VetTool
+ desc = VetTool
+ }
+
+ cmdline := str.StringList(cfg.BuildToolexec, path, "-V=full")
+ cmd := exec.Command(cmdline[0], cmdline[1:]...)
+ var stdout, stderr strings.Builder
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ if err := cmd.Run(); err != nil {
+ if stderr.Len() > 0 {
+ os.Stderr.WriteString(stderr.String())
+ }
+ base.Fatalf("go: error obtaining buildID for %s: %v", desc, err)
+ }
+
+ line := stdout.String()
+ f := strings.Fields(line)
+ if len(f) < 3 || f[0] != name && path != VetTool || f[1] != "version" || f[2] == "devel" && !strings.HasPrefix(f[len(f)-1], "buildID=") {
+ base.Fatalf("go: parsing buildID from %s -V=full: unexpected output:\n\t%s", desc, line)
+ }
+ if f[2] == "devel" {
+ // On the development branch, use the content ID part of the build ID.
+ id = contentID(f[len(f)-1])
+ } else {
+ // For a release, the output is like: "compile version go1.9.1 X:framepointer".
+ // Use the whole line.
+ id = strings.TrimSpace(line)
+ }
+
+ b.id.Lock()
+ b.toolIDCache[name] = id
+ b.id.Unlock()
+
+ return id
+}
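+
+// For illustration, two hypothetical "-V=full" outputs and the tool IDs
+// derived from them:
+//
+//	compile version go1.21.0
+//
+// is a release, so the whole line becomes the tool ID, while
+//
+//	compile version devel go1.22-1234abcd buildID=aaaa/bbbb
+//
+// is a development build, so the tool ID is the content ID half of the
+// reported build ID, "bbbb".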
+
+// gccToolID returns the unique ID to use for a tool that is invoked
+// by the GCC driver. This is used particularly for gccgo, but this can also
+// be used for gcc, g++, gfortran, etc.; those tools all use the GCC
+// driver under different names. The approach used here should also
+// work for sufficiently new versions of clang. Unlike toolID, the
+// name argument is the program to run. The language argument is the
+// type of input file as passed to the GCC driver's -x option.
+//
+// For these tools we have no -V=full option to dump the build ID,
+// but we can run the tool with -v -### to reliably get the compiler proper
+// and hash that. That will work in the presence of -toolexec.
+//
+// In order to get reproducible builds for released compilers, we
+// detect a released compiler by the absence of "experimental" in the
+// --version output, and in that case we just use the version string.
+//
+// gccToolID also returns the underlying executable for the compiler.
+// The caller assumes that stat of the exe can be used, combined with the id,
+// to detect changes in the underlying compiler. The returned exe can be empty,
+// which means to rely only on the id.
+func (b *Builder) gccToolID(name, language string) (id, exe string, err error) {
+ key := name + "." + language
+ b.id.Lock()
+ id = b.toolIDCache[key]
+ exe = b.toolIDCache[key+".exe"]
+ b.id.Unlock()
+
+ if id != "" {
+ return id, exe, nil
+ }
+
+ // Invoke the driver with -### to see the subcommands and the
+ // version strings. Use -x to set the language. Pretend to
+ // compile an empty file on standard input.
+ cmdline := str.StringList(cfg.BuildToolexec, name, "-###", "-x", language, "-c", "-")
+ cmd := exec.Command(cmdline[0], cmdline[1:]...)
+ // Force untranslated output so that we see the string "version".
+ cmd.Env = append(os.Environ(), "LC_ALL=C")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return "", "", fmt.Errorf("%s: %v; output: %q", name, err, out)
+ }
+
+ version := ""
+ lines := strings.Split(string(out), "\n")
+ for _, line := range lines {
+ if fields := strings.Fields(line); len(fields) > 1 && fields[1] == "version" || len(fields) > 2 && fields[2] == "version" {
+ version = line
+ break
+ }
+ }
+ if version == "" {
+ return "", "", fmt.Errorf("%s: can not find version number in %q", name, out)
+ }
+
+ if !strings.Contains(version, "experimental") {
+ // This is a release. Use this line as the tool ID.
+ id = version
+ } else {
+ // This is a development version. The first line with
+ // a leading space is the compiler proper.
+ compiler := ""
+ for _, line := range lines {
+ if strings.HasPrefix(line, " ") && !strings.HasPrefix(line, " (in-process)") {
+ compiler = line
+ break
+ }
+ }
+ if compiler == "" {
+ return "", "", fmt.Errorf("%s: can not find compilation command in %q", name, out)
+ }
+
+ fields, _ := quoted.Split(compiler)
+ if len(fields) == 0 {
+ return "", "", fmt.Errorf("%s: compilation command confusion %q", name, out)
+ }
+ exe = fields[0]
+ if !strings.ContainsAny(exe, `/\`) {
+ if lp, err := exec.LookPath(exe); err == nil {
+ exe = lp
+ }
+ }
+ id, err = buildid.ReadFile(exe)
+ if err != nil {
+ return "", "", err
+ }
+
+ // If we can't find a build ID, use a hash.
+ if id == "" {
+ id = b.fileHash(exe)
+ }
+ }
+
+ b.id.Lock()
+ b.toolIDCache[key] = id
+ b.toolIDCache[key+".exe"] = exe
+ b.id.Unlock()
+
+ return id, exe, nil
+}
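+
+// For illustration, a hypothetical invocation for gccgo would look like
+//
+//	gccgo -### -x go -c -
+//
+// and its output would include a version line such as
+//
+//	gcc version 12.2.0 (GCC)
+//
+// Because that line does not contain "experimental", it is treated as a
+// release and used directly as the tool ID; for a development build the
+// compiler proper is located instead and its build ID (or content hash)
+// is used.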
+
+// Check if assembler used by gccgo is GNU as.
+func assemblerIsGas() bool {
+ cmd := exec.Command(BuildToolchain.compiler(), "-print-prog-name=as")
+ assembler, err := cmd.Output()
+ if err == nil {
+ cmd := exec.Command(strings.TrimSpace(string(assembler)), "--version")
+ out, err := cmd.Output()
+ return err == nil && strings.Contains(string(out), "GNU")
+ } else {
+ return false
+ }
+}
+
+// gccgoBuildIDFile creates an assembler file that records the
+// action's build ID in an SHF_EXCLUDE section for ELF files or
+// in a CSECT in XCOFF files.
+func (b *Builder) gccgoBuildIDFile(a *Action) (string, error) {
+ sfile := a.Objdir + "_buildid.s"
+
+ var buf bytes.Buffer
+ if cfg.Goos == "aix" {
+ fmt.Fprintf(&buf, "\t.csect .go.buildid[XO]\n")
+ } else if (cfg.Goos != "solaris" && cfg.Goos != "illumos") || assemblerIsGas() {
+ fmt.Fprintf(&buf, "\t"+`.section .go.buildid,"e"`+"\n")
+ } else if cfg.Goarch == "sparc" || cfg.Goarch == "sparc64" {
+ fmt.Fprintf(&buf, "\t"+`.section ".go.buildid",#exclude`+"\n")
+ } else { // cfg.Goarch == "386" || cfg.Goarch == "amd64"
+ fmt.Fprintf(&buf, "\t"+`.section .go.buildid,#exclude`+"\n")
+ }
+ fmt.Fprintf(&buf, "\t.byte ")
+ for i := 0; i < len(a.buildID); i++ {
+ if i > 0 {
+ if i%8 == 0 {
+ fmt.Fprintf(&buf, "\n\t.byte ")
+ } else {
+ fmt.Fprintf(&buf, ",")
+ }
+ }
+ fmt.Fprintf(&buf, "%#02x", a.buildID[i])
+ }
+ fmt.Fprintf(&buf, "\n")
+ if cfg.Goos != "solaris" && cfg.Goos != "illumos" && cfg.Goos != "aix" {
+ secType := "@progbits"
+ if cfg.Goarch == "arm" {
+ secType = "%progbits"
+ }
+ fmt.Fprintf(&buf, "\t"+`.section .note.GNU-stack,"",%s`+"\n", secType)
+ fmt.Fprintf(&buf, "\t"+`.section .note.GNU-split-stack,"",%s`+"\n", secType)
+ }
+
+ if cfg.BuildN || cfg.BuildX {
+ for _, line := range bytes.Split(buf.Bytes(), []byte("\n")) {
+ b.Showcmd("", "echo '%s' >> %s", line, sfile)
+ }
+ if cfg.BuildN {
+ return sfile, nil
+ }
+ }
+
+ if err := os.WriteFile(sfile, buf.Bytes(), 0666); err != nil {
+ return "", err
+ }
+
+ return sfile, nil
+}
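+
+// For illustration, on linux/amd64 with GNU as, a hypothetical build ID
+// "ab/cd" would produce an assembler file along the lines of
+//
+//	.section .go.buildid,"e"
+//	.byte 0x61,0x62,0x2f,0x63,0x64
+//	.section .note.GNU-stack,"",@progbits
+//	.section .note.GNU-split-stack,"",@progbits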
+
+// buildID returns the build ID found in the given file.
+// If no build ID is found, buildID returns the content hash of the file.
+func (b *Builder) buildID(file string) string {
+ b.id.Lock()
+ id := b.buildIDCache[file]
+ b.id.Unlock()
+
+ if id != "" {
+ return id
+ }
+
+ id, err := buildid.ReadFile(file)
+ if err != nil {
+ id = b.fileHash(file)
+ }
+
+ b.id.Lock()
+ b.buildIDCache[file] = id
+ b.id.Unlock()
+
+ return id
+}
+
+// fileHash returns the content hash of the named file.
+func (b *Builder) fileHash(file string) string {
+ file, _ = fsys.OverlayPath(file)
+ sum, err := cache.FileHash(file)
+ if err != nil {
+ return ""
+ }
+ return buildid.HashToString(sum)
+}
+
+// useCache tries to satisfy the action a, which has action ID actionHash,
+// by using a cached result from an earlier build. At the moment, the only
+// cached result is the installed package or binary at target.
+// If useCache decides that the cache can be used, it sets a.buildID
+// and a.built for use by parent actions and then returns true.
+// Otherwise it sets a.buildID to a temporary build ID for use in the build
+// and returns false. When useCache returns false the expectation is that
+// the caller will build the target and then call updateBuildID to finish the
+// build ID computation.
+// When useCache returns false, it may have initiated buffering of output
+// during a's work. The caller should defer b.flushOutput(a), to make sure
+// that flushOutput is eventually called regardless of whether the action
+// succeeds. The flushOutput call must happen after updateBuildID.
+func (b *Builder) useCache(a *Action, actionHash cache.ActionID, target string, printOutput bool) bool {
+ // The second half of the build ID here is a placeholder for the content hash.
+ // It's important that the overall buildID be unlikely verging on impossible
+ // to appear in the output by chance, but that should be taken care of by
+ // the actionID half; if it also appeared in the input that would be like an
+ // engineered 120-bit partial SHA256 collision.
+ a.actionID = actionHash
+ actionID := buildid.HashToString(actionHash)
+ if a.json != nil {
+ a.json.ActionID = actionID
+ }
+ contentID := actionID // temporary placeholder, likely unique
+ a.buildID = actionID + buildIDSeparator + contentID
+
+ // Executable binaries also record the main build ID in the middle.
+ // See "Build IDs" comment above.
+ if a.Mode == "link" {
+ mainpkg := a.Deps[0]
+ a.buildID = actionID + buildIDSeparator + mainpkg.buildID + buildIDSeparator + contentID
+ }
+
+ // If user requested -a, we force a rebuild, so don't use the cache.
+ if cfg.BuildA {
+ if p := a.Package; p != nil && !p.Stale {
+ p.Stale = true
+ p.StaleReason = "build -a flag in use"
+ }
+ // Begin saving output for later writing to cache.
+ a.output = []byte{}
+ return false
+ }
+
+ c := cache.Default()
+
+ if target != "" {
+ buildID, _ := buildid.ReadFile(target)
+ if strings.HasPrefix(buildID, actionID+buildIDSeparator) {
+ a.buildID = buildID
+ if a.json != nil {
+ a.json.BuildID = a.buildID
+ }
+ a.built = target
+ // Poison a.Target to catch uses later in the build.
+ a.Target = "DO NOT USE - " + a.Mode
+ return true
+ }
+ // Special case for building a main package: if the only thing we
+ // want the package for is to link a binary, and the binary is
+ // already up-to-date, then to avoid a rebuild, report the package
+ // as up-to-date as well. See "Build IDs" comment above.
+ // TODO(rsc): Rewrite this code to use a TryCache func on the link action.
+ if !b.NeedExport && a.Mode == "build" && len(a.triggers) == 1 && a.triggers[0].Mode == "link" {
+ if id := strings.Split(buildID, buildIDSeparator); len(id) == 4 && id[1] == actionID {
+ // Temporarily assume a.buildID is the package build ID
+ // stored in the installed binary, and see if that makes
+ // the upcoming link action ID a match. If so, report that
+ // we built the package, safe in the knowledge that the
+ // link step will not ask us for the actual package file.
+ // Note that (*Builder).LinkAction arranged that all of
+ // a.triggers[0]'s dependencies other than a are also
+ // dependencies of a, so that we can be sure that,
+ // other than a.buildID, b.linkActionID is only accessing
+ // build IDs of completed actions.
+ oldBuildID := a.buildID
+ a.buildID = id[1] + buildIDSeparator + id[2]
+ linkID := buildid.HashToString(b.linkActionID(a.triggers[0]))
+ if id[0] == linkID {
+ // Best effort attempt to display output from the compile and link steps.
+ // If it doesn't work, it doesn't work: reusing the cached binary is more
+ // important than reprinting diagnostic information.
+ if printOutput {
+ showStdout(b, c, a.actionID, "stdout") // compile output
+ showStdout(b, c, a.actionID, "link-stdout") // link output
+ }
+
+ // Poison a.Target to catch uses later in the build.
+ a.Target = "DO NOT USE - main build pseudo-cache Target"
+ a.built = "DO NOT USE - main build pseudo-cache built"
+ if a.json != nil {
+ a.json.BuildID = a.buildID
+ }
+ return true
+ }
+ // Otherwise restore old build ID for main build.
+ a.buildID = oldBuildID
+ }
+ }
+ }
+
+ // Special case for linking a test binary: if the only thing we
+ // want the binary for is to run the test, and the test result is cached,
+ // then to avoid the link step, report the link as up-to-date.
+ // We avoid the nested build ID problem in the previous special case
+ // by recording the test results in the cache under the action ID half.
+ if len(a.triggers) == 1 && a.triggers[0].TryCache != nil && a.triggers[0].TryCache(b, a.triggers[0]) {
+ // Best effort attempt to display output from the compile and link steps.
+ // If it doesn't work, it doesn't work: reusing the test result is more
+ // important than reprinting diagnostic information.
+ if printOutput {
+ showStdout(b, c, a.Deps[0].actionID, "stdout") // compile output
+ showStdout(b, c, a.Deps[0].actionID, "link-stdout") // link output
+ }
+
+ // Poison a.Target to catch uses later in the build.
+ a.Target = "DO NOT USE - pseudo-cache Target"
+ a.built = "DO NOT USE - pseudo-cache built"
+ return true
+ }
+
+ // Check to see if the action output is cached.
+ if file, _, err := cache.GetFile(c, actionHash); err == nil {
+ if buildID, err := buildid.ReadFile(file); err == nil {
+ if printOutput {
+ showStdout(b, c, a.actionID, "stdout")
+ }
+ a.built = file
+ a.Target = "DO NOT USE - using cache"
+ a.buildID = buildID
+ if a.json != nil {
+ a.json.BuildID = a.buildID
+ }
+ if p := a.Package; p != nil && target != "" {
+ p.Stale = true
+ // Clearer than explaining that something else is stale.
+ p.StaleReason = "not installed but available in build cache"
+ }
+ return true
+ }
+ }
+
+ // If we've reached this point, we can't use the cache for the action.
+ if p := a.Package; p != nil && !p.Stale {
+ p.Stale = true
+ p.StaleReason = "build ID mismatch"
+ if b.IsCmdList {
+ // Since we may end up printing StaleReason, include more detail.
+ for _, p1 := range p.Internal.Imports {
+ if p1.Stale && p1.StaleReason != "" {
+ if strings.HasPrefix(p1.StaleReason, "stale dependency: ") {
+ p.StaleReason = p1.StaleReason
+ break
+ }
+ if strings.HasPrefix(p.StaleReason, "build ID mismatch") {
+ p.StaleReason = "stale dependency: " + p1.ImportPath
+ }
+ }
+ }
+ }
+ }
+
+ // Begin saving output for later writing to cache.
+ a.output = []byte{}
+ return false
+}
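+
+// A minimal sketch of the calling pattern described above (plumbing
+// simplified; see b.build for a real caller):
+//
+//	if b.useCache(a, actionHash, target, printOutput) {
+//		return nil // cached or installed result reused
+//	}
+//	defer b.flushOutput(a)
+//	// ... run the compile or link step ...
+//	if err := b.updateBuildID(a, target, true); err != nil {
+//		return err
+//	}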
+
+func showStdout(b *Builder, c cache.Cache, actionID cache.ActionID, key string) error {
+ stdout, stdoutEntry, err := cache.GetBytes(c, cache.Subkey(actionID, key))
+ if err != nil {
+ return err
+ }
+
+ if len(stdout) > 0 {
+ if cfg.BuildX || cfg.BuildN {
+ b.Showcmd("", "%s # internal", joinUnambiguously(str.StringList("cat", c.OutputFile(stdoutEntry.OutputID))))
+ }
+ if !cfg.BuildN {
+ b.output.Lock()
+ defer b.output.Unlock()
+ b.Print(string(stdout))
+ }
+ }
+ return nil
+}
+
+// flushOutput flushes the output being queued in a.
+func (b *Builder) flushOutput(a *Action) {
+ b.output.Lock()
+ defer b.output.Unlock()
+ b.Print(string(a.output))
+ a.output = nil
+}
+
+// updateBuildID updates the build ID in the target written by action a.
+// It requires that useCache was called for action a and returned false,
+// and that the build was then carried out and given the temporary
+// a.buildID to record as the build ID in the resulting package or binary.
+// updateBuildID computes the final content ID and updates the build IDs
+// in the binary.
+//
+// Keep in sync with src/cmd/buildid/buildid.go
+func (b *Builder) updateBuildID(a *Action, target string, rewrite bool) error {
+ if cfg.BuildX || cfg.BuildN {
+ if rewrite {
+ b.Showcmd("", "%s # internal", joinUnambiguously(str.StringList(base.Tool("buildid"), "-w", target)))
+ }
+ if cfg.BuildN {
+ return nil
+ }
+ }
+
+ c := cache.Default()
+
+ // Cache output from compile/link, even if we don't do the rest.
+ switch a.Mode {
+ case "build":
+ cache.PutBytes(c, cache.Subkey(a.actionID, "stdout"), a.output)
+ case "link":
+ // Even though we don't cache the binary, cache the linker text output.
+ // We might notice that an installed binary is up-to-date but still
+ // want to pretend to have run the linker.
+ // Store it under the main package's action ID
+ // to make it easier to find when that's all we have.
+ for _, a1 := range a.Deps {
+ if p1 := a1.Package; p1 != nil && p1.Name == "main" {
+ cache.PutBytes(c, cache.Subkey(a1.actionID, "link-stdout"), a.output)
+ break
+ }
+ }
+ }
+
+ // Find occurrences of old ID and compute new content-based ID.
+ r, err := os.Open(target)
+ if err != nil {
+ return err
+ }
+ matches, hash, err := buildid.FindAndHash(r, a.buildID, 0)
+ r.Close()
+ if err != nil {
+ return err
+ }
+ newID := a.buildID[:strings.LastIndex(a.buildID, buildIDSeparator)] + buildIDSeparator + buildid.HashToString(hash)
+ if len(newID) != len(a.buildID) {
+ return fmt.Errorf("internal error: build ID length mismatch %q vs %q", a.buildID, newID)
+ }
+
+ // Replace with new content-based ID.
+ a.buildID = newID
+ if a.json != nil {
+ a.json.BuildID = a.buildID
+ }
+ if len(matches) == 0 {
+ // Assume the user specified -buildid= to override what we were going to choose.
+ return nil
+ }
+
+ if rewrite {
+ w, err := os.OpenFile(target, os.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+ err = buildid.Rewrite(w, matches, newID)
+ if err != nil {
+ w.Close()
+ return err
+ }
+ if err := w.Close(); err != nil {
+ return err
+ }
+ }
+
+ // Cache package builds, but not binaries (link steps).
+ // The expectation is that binaries are not reused
+ // nearly as often as individual packages, and they're
+ // much larger, so the cache-footprint-to-utility ratio
+ // is much lower for binaries.
+ // Not caching the link step also makes sure that repeated "go run" invocations
+ // always rerun the linker, so that they don't get too fast.
+ // (We don't want people thinking go is a scripting language.)
+ // Note also that if we start caching binaries, then we will
+ // copy the binaries out of the cache to run them, and then
+ // that will mean the go process is itself writing a binary
+ // and then executing it, so we will need to defend against
+ // ETXTBSY problems as discussed in exec.go and golang.org/issue/22220.
+ if a.Mode == "build" {
+ r, err := os.Open(target)
+ if err == nil {
+ if a.output == nil {
+ panic("internal error: a.output not set")
+ }
+ outputID, _, err := c.Put(a.actionID, r)
+ r.Close()
+ if err == nil && cfg.BuildX {
+ b.Showcmd("", "%s # internal", joinUnambiguously(str.StringList("cp", target, c.OutputFile(outputID))))
+ }
+ if b.NeedExport {
+ if err != nil {
+ return err
+ }
+ a.Package.Export = c.OutputFile(outputID)
+ a.Package.BuildID = a.buildID
+ }
+ }
+ }
+
+ return nil
+}
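+
+// For illustration, a package build starts out with the placeholder ID
+//
+//	actionID/actionID
+//
+// assigned by useCache. After the compile step, updateBuildID hashes the
+// produced archive, treating occurrences of the placeholder ID specially so
+// the hash is independent of it (see buildid.FindAndHash), and rewrites the
+// ID in the file to
+//
+//	actionID/contentID
+//
+// The two halves have the same length, so the rewrite never changes the
+// size of the file.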
diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
new file mode 100644
index 0000000..13d2a78
--- /dev/null
+++ b/src/cmd/go/internal/work/exec.go
@@ -0,0 +1,3936 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Action graph execution.
+
+package work
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "go/token"
+ "internal/coverage"
+ "internal/lazyregexp"
+ "io"
+ "io/fs"
+ "log"
+ "math/rand"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cache"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/load"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/str"
+ "cmd/go/internal/trace"
+ "cmd/internal/buildid"
+ "cmd/internal/quoted"
+ "cmd/internal/sys"
+)
+
+const defaultCFlags = "-O2 -g"
+
+// actionList returns the list of actions in the dag rooted at root
+// as visited in a depth-first post-order traversal.
+func actionList(root *Action) []*Action {
+ seen := map[*Action]bool{}
+ all := []*Action{}
+ var walk func(*Action)
+ walk = func(a *Action) {
+ if seen[a] {
+ return
+ }
+ seen[a] = true
+ for _, a1 := range a.Deps {
+ walk(a1)
+ }
+ all = append(all, a)
+ }
+ walk(root)
+ return all
+}
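+
+// For illustration, for a hypothetical graph in which root depends on A and B
+// and A depends on C, actionList(root) returns
+//
+//	[C, A, B, root]
+//
+// so every action appears after all of its dependencies, which is what the
+// priority assignment in Do below relies on.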
+
+// Do runs the action graph rooted at root.
+func (b *Builder) Do(ctx context.Context, root *Action) {
+ ctx, span := trace.StartSpan(ctx, "exec.Builder.Do ("+root.Mode+" "+root.Target+")")
+ defer span.Done()
+
+ if !b.IsCmdList {
+ // If we're doing real work, take time at the end to trim the cache.
+ c := cache.Default()
+ defer func() {
+ if err := c.Close(); err != nil {
+ base.Fatalf("go: failed to trim cache: %v", err)
+ }
+ }()
+ }
+
+ // Build list of all actions, assigning depth-first post-order priority.
+ // The original implementation here was a true queue
+ // (using a channel) but it had the effect of getting
+ // distracted by low-level leaf actions to the detriment
+ // of completing higher-level actions. The order of
+ // work does not matter much to overall execution time,
+ // but when running "go test std" it is nice to see each test's
+ // results as soon as possible. The priorities assigned
+ // ensure that, all else being equal, the execution prefers
+ // to do what it would have done first in a simple depth-first
+ // dependency order traversal.
+ all := actionList(root)
+ for i, a := range all {
+ a.priority = i
+ }
+
+ // Write action graph, without timing information, in case we fail and exit early.
+ writeActionGraph := func() {
+ if file := cfg.DebugActiongraph; file != "" {
+ if strings.HasSuffix(file, ".go") {
+ // Do not overwrite Go source code in:
+ // go build -debug-actiongraph x.go
+ base.Fatalf("go: refusing to write action graph to %v\n", file)
+ }
+ js := actionGraphJSON(root)
+ if err := os.WriteFile(file, []byte(js), 0666); err != nil {
+ fmt.Fprintf(os.Stderr, "go: writing action graph: %v\n", err)
+ base.SetExitStatus(1)
+ }
+ }
+ }
+ writeActionGraph()
+
+ b.readySema = make(chan bool, len(all))
+
+ // Initialize per-action execution state.
+ for _, a := range all {
+ for _, a1 := range a.Deps {
+ a1.triggers = append(a1.triggers, a)
+ }
+ a.pending = len(a.Deps)
+ if a.pending == 0 {
+ b.ready.push(a)
+ b.readySema <- true
+ }
+ }
+
+ // Handle runs a single action and takes care of triggering
+ // any actions that are runnable as a result.
+ handle := func(ctx context.Context, a *Action) {
+ if a.json != nil {
+ a.json.TimeStart = time.Now()
+ }
+ var err error
+ if a.Actor != nil && (!a.Failed || a.IgnoreFail) {
+ // TODO(matloob): Better action descriptions
+ desc := "Executing action "
+ if a.Package != nil {
+ desc += "(" + a.Mode + " " + a.Package.Desc() + ")"
+ }
+ ctx, span := trace.StartSpan(ctx, desc)
+ a.traceSpan = span
+ for _, d := range a.Deps {
+ trace.Flow(ctx, d.traceSpan, a.traceSpan)
+ }
+ err = a.Actor.Act(b, ctx, a)
+ span.Done()
+ }
+ if a.json != nil {
+ a.json.TimeDone = time.Now()
+ }
+
+ // The actions run in parallel but all the updates to the
+ // shared work state are serialized through b.exec.
+ b.exec.Lock()
+ defer b.exec.Unlock()
+
+ if err != nil {
+ if b.AllowErrors && a.Package != nil {
+ if a.Package.Error == nil {
+ a.Package.Error = &load.PackageError{Err: err}
+ }
+ } else {
+ var ipe load.ImportPathError
+ if a.Package != nil && (!errors.As(err, &ipe) || ipe.ImportPath() != a.Package.ImportPath) {
+ err = fmt.Errorf("%s: %v", a.Package.ImportPath, err)
+ }
+ base.Errorf("%s", err)
+ }
+ a.Failed = true
+ }
+
+ for _, a0 := range a.triggers {
+ if a.Failed {
+ a0.Failed = true
+ }
+ if a0.pending--; a0.pending == 0 {
+ b.ready.push(a0)
+ b.readySema <- true
+ }
+ }
+
+ if a == root {
+ close(b.readySema)
+ }
+ }
+
+ var wg sync.WaitGroup
+
+ // Kick off goroutines according to parallelism.
+ // If we are using the -n flag (just printing commands)
+ // drop the parallelism to 1, both to make the output
+ // deterministic and because there is no real work anyway.
+ par := cfg.BuildP
+ if cfg.BuildN {
+ par = 1
+ }
+ for i := 0; i < par; i++ {
+ wg.Add(1)
+ go func() {
+ ctx := trace.StartGoroutine(ctx)
+ defer wg.Done()
+ for {
+ select {
+ case _, ok := <-b.readySema:
+ if !ok {
+ return
+ }
+ // Receiving a value from b.readySema entitles
+ // us to take from the ready queue.
+ b.exec.Lock()
+ a := b.ready.pop()
+ b.exec.Unlock()
+ handle(ctx, a)
+ case <-base.Interrupted:
+ base.SetExitStatus(1)
+ return
+ }
+ }
+ }()
+ }
+
+ wg.Wait()
+
+ // Write action graph again, this time with timing information.
+ writeActionGraph()
+}
+
+// buildActionID computes the action ID for a build action.
+func (b *Builder) buildActionID(a *Action) cache.ActionID {
+ p := a.Package
+ h := cache.NewHash("build " + p.ImportPath)
+
+ // Configuration independent of compiler toolchain.
+ // Note: buildmode has already been accounted for in buildGcflags
+ // and should not be inserted explicitly. Most buildmodes use the
+ // same compiler settings and can reuse each other's results.
+ // If not, the reason is already recorded in buildGcflags.
+ fmt.Fprintf(h, "compile\n")
+
+ // Include information about the origin of the package that
+ // may be embedded in the debug info for the object file.
+ if cfg.BuildTrimpath {
+ // When -trimpath is used with a package built from the module cache,
+ // its debug information refers to the module path and version
+ // instead of the directory.
+ if p.Module != nil {
+ fmt.Fprintf(h, "module %s@%s\n", p.Module.Path, p.Module.Version)
+ }
+ } else if p.Goroot {
+ // The Go compiler always hides the exact value of $GOROOT
+ // when building things in GOROOT.
+ //
+ // The C compiler does not, but for packages in GOROOT we rewrite the path
+ // as though -trimpath were set, so that we don't invalidate the build cache
+ // (and especially any precompiled C archive files) when changing
+ // GOROOT_FINAL. (See https://go.dev/issue/50183.)
+ //
+ // b.WorkDir is always either trimmed or rewritten to
+ // the literal string "/tmp/go-build".
+ } else if !strings.HasPrefix(p.Dir, b.WorkDir) {
+ // -trimpath is not set and no other rewrite rules apply,
+ // so the object file may refer to the absolute directory
+ // containing the package.
+ fmt.Fprintf(h, "dir %s\n", p.Dir)
+ }
+
+ if p.Module != nil {
+ fmt.Fprintf(h, "go %s\n", p.Module.GoVersion)
+ }
+ fmt.Fprintf(h, "goos %s goarch %s\n", cfg.Goos, cfg.Goarch)
+ fmt.Fprintf(h, "import %q\n", p.ImportPath)
+ fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix)
+ if cfg.BuildTrimpath {
+ fmt.Fprintln(h, "trimpath")
+ }
+ if p.Internal.ForceLibrary {
+ fmt.Fprintf(h, "forcelibrary\n")
+ }
+ if len(p.CgoFiles)+len(p.SwigFiles)+len(p.SwigCXXFiles) > 0 {
+ fmt.Fprintf(h, "cgo %q\n", b.toolID("cgo"))
+ cppflags, cflags, cxxflags, fflags, ldflags, _ := b.CFlags(p)
+
+ ccExe := b.ccExe()
+ fmt.Fprintf(h, "CC=%q %q %q %q\n", ccExe, cppflags, cflags, ldflags)
+ // Include the C compiler tool ID so that if the C
+ // compiler changes we rebuild the package.
+ if ccID, _, err := b.gccToolID(ccExe[0], "c"); err == nil {
+ fmt.Fprintf(h, "CC ID=%q\n", ccID)
+ }
+ if len(p.CXXFiles)+len(p.SwigCXXFiles) > 0 {
+ cxxExe := b.cxxExe()
+ fmt.Fprintf(h, "CXX=%q %q\n", cxxExe, cxxflags)
+ if cxxID, _, err := b.gccToolID(cxxExe[0], "c++"); err == nil {
+ fmt.Fprintf(h, "CXX ID=%q\n", cxxID)
+ }
+ }
+ if len(p.FFiles) > 0 {
+ fcExe := b.fcExe()
+ fmt.Fprintf(h, "FC=%q %q\n", fcExe, fflags)
+ if fcID, _, err := b.gccToolID(fcExe[0], "f95"); err == nil {
+ fmt.Fprintf(h, "FC ID=%q\n", fcID)
+ }
+ }
+ // TODO(rsc): Should we include the SWIG version?
+ }
+ if p.Internal.CoverMode != "" {
+ fmt.Fprintf(h, "cover %q %q\n", p.Internal.CoverMode, b.toolID("cover"))
+ }
+ if p.Internal.FuzzInstrument {
+ if fuzzFlags := fuzzInstrumentFlags(); fuzzFlags != nil {
+ fmt.Fprintf(h, "fuzz %q\n", fuzzFlags)
+ }
+ }
+ if p.Internal.BuildInfo != nil {
+ fmt.Fprintf(h, "modinfo %q\n", p.Internal.BuildInfo.String())
+ }
+
+ // Configuration specific to compiler toolchain.
+ switch cfg.BuildToolchainName {
+ default:
+ base.Fatalf("buildActionID: unknown build toolchain %q", cfg.BuildToolchainName)
+ case "gc":
+ fmt.Fprintf(h, "compile %s %q %q\n", b.toolID("compile"), forcedGcflags, p.Internal.Gcflags)
+ if len(p.SFiles) > 0 {
+ fmt.Fprintf(h, "asm %q %q %q\n", b.toolID("asm"), forcedAsmflags, p.Internal.Asmflags)
+ }
+
+ // GOARM, GOMIPS, etc.
+ key, val := cfg.GetArchEnv()
+ fmt.Fprintf(h, "%s=%s\n", key, val)
+
+ if cfg.CleanGOEXPERIMENT != "" {
+ fmt.Fprintf(h, "GOEXPERIMENT=%q\n", cfg.CleanGOEXPERIMENT)
+ }
+
+ // TODO(rsc): Convince compiler team not to add more magic environment variables,
+ // or perhaps restrict the environment variables passed to subprocesses.
+ // Because these are clumsy, undocumented special-case hacks
+ // for debugging the compiler, they are not settable using 'go env -w',
+ // and so here we use os.Getenv, not cfg.Getenv.
+ magic := []string{
+ "GOCLOBBERDEADHASH",
+ "GOSSAFUNC",
+ "GOSSADIR",
+ "GOCOMPILEDEBUG",
+ }
+ for _, env := range magic {
+ if x := os.Getenv(env); x != "" {
+ fmt.Fprintf(h, "magic %s=%s\n", env, x)
+ }
+ }
+
+ case "gccgo":
+ id, _, err := b.gccToolID(BuildToolchain.compiler(), "go")
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ fmt.Fprintf(h, "compile %s %q %q\n", id, forcedGccgoflags, p.Internal.Gccgoflags)
+ fmt.Fprintf(h, "pkgpath %s\n", gccgoPkgpath(p))
+ fmt.Fprintf(h, "ar %q\n", BuildToolchain.(gccgoToolchain).ar())
+ if len(p.SFiles) > 0 {
+ id, _, _ = b.gccToolID(BuildToolchain.compiler(), "assembler-with-cpp")
+ // Ignore error; different assembler versions
+ // are unlikely to make any difference anyhow.
+ fmt.Fprintf(h, "asm %q\n", id)
+ }
+ }
+
+ // Input files.
+ inputFiles := str.StringList(
+ p.GoFiles,
+ p.CgoFiles,
+ p.CFiles,
+ p.CXXFiles,
+ p.FFiles,
+ p.MFiles,
+ p.HFiles,
+ p.SFiles,
+ p.SysoFiles,
+ p.SwigFiles,
+ p.SwigCXXFiles,
+ p.EmbedFiles,
+ )
+ for _, file := range inputFiles {
+ fmt.Fprintf(h, "file %s %s\n", file, b.fileHash(filepath.Join(p.Dir, file)))
+ }
+ if p.Internal.PGOProfile != "" {
+ fmt.Fprintf(h, "pgofile %s\n", b.fileHash(p.Internal.PGOProfile))
+ }
+ for _, a1 := range a.Deps {
+ p1 := a1.Package
+ if p1 != nil {
+ fmt.Fprintf(h, "import %s %s\n", p1.ImportPath, contentID(a1.buildID))
+ }
+ }
+
+ return h.Sum()
+}
+
+// needCgoHdr reports whether the actions triggered by this one
+// expect to be able to access the cgo-generated header file.
+func (b *Builder) needCgoHdr(a *Action) bool {
+ // If this build triggers a header install, run cgo to get the header.
+ if !b.IsCmdList && (a.Package.UsesCgo() || a.Package.UsesSwig()) && (cfg.BuildBuildmode == "c-archive" || cfg.BuildBuildmode == "c-shared") {
+ for _, t1 := range a.triggers {
+ if t1.Mode == "install header" {
+ return true
+ }
+ }
+ for _, t1 := range a.triggers {
+ for _, t2 := range t1.triggers {
+ if t2.Mode == "install header" {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+// allowedVersion reports whether the version v is an allowed version of go
+// (one that we can compile).
+// v is known to be of the form "1.23".
+func allowedVersion(v string) bool {
+ // Special case: no requirement.
+ if v == "" {
+ return true
+ }
+ return gover.Compare(gover.Local(), v) >= 0
+}
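+
+// For example, with a hypothetical go1.22 toolchain, allowedVersion("1.21")
+// and allowedVersion("") report true, while allowedVersion("1.99") reports
+// false.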
+
+const (
+ needBuild uint32 = 1 << iota
+ needCgoHdr
+ needVet
+ needCompiledGoFiles
+ needStale
+)
+
+// build is the action for building a single package.
+// Note that any new influence on this logic must be reported in b.buildActionID above as well.
+func (b *Builder) build(ctx context.Context, a *Action) (err error) {
+ p := a.Package
+
+ bit := func(x uint32, b bool) uint32 {
+ if b {
+ return x
+ }
+ return 0
+ }
+
+ cachedBuild := false
+ need := bit(needBuild, !b.IsCmdList && a.needBuild || b.NeedExport) |
+ bit(needCgoHdr, b.needCgoHdr(a)) |
+ bit(needVet, a.needVet) |
+ bit(needCompiledGoFiles, b.NeedCompiledGoFiles)
+
+ if !p.BinaryOnly {
+ if b.useCache(a, b.buildActionID(a), p.Target, need&needBuild != 0) {
+ // We found the main output in the cache.
+ // If we don't need any other outputs, we can stop.
+ // Otherwise, we need to write files to a.Objdir (needVet, needCgoHdr).
+ // Remember that we might have them in cache
+ // and check again after we create a.Objdir.
+ cachedBuild = true
+ a.output = []byte{} // start saving output in case we miss any cache results
+ need &^= needBuild
+ if b.NeedExport {
+ p.Export = a.built
+ p.BuildID = a.buildID
+ }
+ if need&needCompiledGoFiles != 0 {
+ if err := b.loadCachedCompiledGoFiles(a); err == nil {
+ need &^= needCompiledGoFiles
+ }
+ }
+ }
+
+ // Source files might be cached, even if the full action is not
+ // (e.g., go list -compiled -find).
+ if !cachedBuild && need&needCompiledGoFiles != 0 {
+ if err := b.loadCachedCompiledGoFiles(a); err == nil {
+ need &^= needCompiledGoFiles
+ }
+ }
+
+ if need == 0 {
+ return nil
+ }
+ defer b.flushOutput(a)
+ }
+
+ defer func() {
+ if err != nil && b.IsCmdList && b.NeedError && p.Error == nil {
+ p.Error = &load.PackageError{Err: err}
+ }
+ }()
+ if cfg.BuildN {
+ // In -n mode, print a banner between packages.
+ // The banner is five lines so that when changes to
+ // different sections of the bootstrap script have to
+ // be merged, the banners give patch something
+ // to use to find its context.
+ b.Print("\n#\n# " + p.ImportPath + "\n#\n\n")
+ }
+
+ if cfg.BuildV {
+ b.Print(p.ImportPath + "\n")
+ }
+
+ if p.Error != nil {
+ // Don't try to build anything for packages with errors. There may be a
+ // problem with the inputs that makes the package unsafe to build.
+ return p.Error
+ }
+
+ if p.BinaryOnly {
+ p.Stale = true
+ p.StaleReason = "binary-only packages are no longer supported"
+ if b.IsCmdList {
+ return nil
+ }
+ return errors.New("binary-only packages are no longer supported")
+ }
+
+ if p.Module != nil && !allowedVersion(p.Module.GoVersion) {
+ return errors.New("module requires Go " + p.Module.GoVersion + " or later")
+ }
+
+ if err := b.checkDirectives(a); err != nil {
+ return err
+ }
+
+ if err := b.Mkdir(a.Objdir); err != nil {
+ return err
+ }
+ objdir := a.Objdir
+
+ // Load cached cgo header, but only if we're skipping the main build (cachedBuild==true).
+ if cachedBuild && need&needCgoHdr != 0 {
+ if err := b.loadCachedCgoHdr(a); err == nil {
+ need &^= needCgoHdr
+ }
+ }
+
+ // Load cached vet config, but only if that's all we have left
+ // (need == needVet, not testing just the one bit).
+ // If we are going to do a full build anyway,
+ // we're going to regenerate the files below anyway.
+ if need == needVet {
+ if err := b.loadCachedVet(a); err == nil {
+ need &^= needVet
+ }
+ }
+ if need == 0 {
+ return nil
+ }
+
+ if err := AllowInstall(a); err != nil {
+ return err
+ }
+
+ // make target directory
+ dir, _ := filepath.Split(a.Target)
+ if dir != "" {
+ if err := b.Mkdir(dir); err != nil {
+ return err
+ }
+ }
+
+ gofiles := str.StringList(p.GoFiles)
+ cgofiles := str.StringList(p.CgoFiles)
+ cfiles := str.StringList(p.CFiles)
+ sfiles := str.StringList(p.SFiles)
+ cxxfiles := str.StringList(p.CXXFiles)
+ var objects, cgoObjects, pcCFLAGS, pcLDFLAGS []string
+
+ if p.UsesCgo() || p.UsesSwig() {
+ if pcCFLAGS, pcLDFLAGS, err = b.getPkgConfigFlags(p); err != nil {
+ return
+ }
+ }
+
+ // Compute overlays for .c/.cc/.h/etc. and if there are any overlays
+ // put correct contents of all those files in the objdir, to ensure
+ // the correct headers are included. nonGoOverlay is the overlay that
+ // points from nongo files to the copied files in objdir.
+ nonGoFileLists := [][]string{p.CFiles, p.SFiles, p.CXXFiles, p.HFiles, p.FFiles}
+OverlayLoop:
+ for _, fs := range nonGoFileLists {
+ for _, f := range fs {
+ if _, ok := fsys.OverlayPath(mkAbs(p.Dir, f)); ok {
+ a.nonGoOverlay = make(map[string]string)
+ break OverlayLoop
+ }
+ }
+ }
+ if a.nonGoOverlay != nil {
+ for _, fs := range nonGoFileLists {
+ for i := range fs {
+ from := mkAbs(p.Dir, fs[i])
+ opath, _ := fsys.OverlayPath(from)
+ dst := objdir + filepath.Base(fs[i])
+ if err := b.copyFile(dst, opath, 0666, false); err != nil {
+ return err
+ }
+ a.nonGoOverlay[from] = dst
+ }
+ }
+ }
+
+ // Run SWIG on each .swig and .swigcxx file.
+ // Each run will generate two files, a .go file and a .c or .cxx file.
+ // The .go file will use import "C" and is to be processed by cgo.
+ if p.UsesSwig() {
+ outGo, outC, outCXX, err := b.swig(a, p, objdir, pcCFLAGS)
+ if err != nil {
+ return err
+ }
+ cgofiles = append(cgofiles, outGo...)
+ cfiles = append(cfiles, outC...)
+ cxxfiles = append(cxxfiles, outCXX...)
+ }
+
+ // If we're doing coverage, preprocess the .go files and put them in the work directory
+ if p.Internal.CoverMode != "" {
+ outfiles := []string{}
+ infiles := []string{}
+ for i, file := range str.StringList(gofiles, cgofiles) {
+ if base.IsTestFile(file) {
+ continue // Not covering this file.
+ }
+
+ var sourceFile string
+ var coverFile string
+ var key string
+ if base, found := strings.CutSuffix(file, ".cgo1.go"); found {
+ // cgo files have absolute paths
+ base = filepath.Base(base)
+ sourceFile = file
+ coverFile = objdir + base + ".cgo1.go"
+ key = base + ".go"
+ } else {
+ sourceFile = filepath.Join(p.Dir, file)
+ coverFile = objdir + file
+ key = file
+ }
+ coverFile = strings.TrimSuffix(coverFile, ".go") + ".cover.go"
+ if cfg.Experiment.CoverageRedesign {
+ infiles = append(infiles, sourceFile)
+ outfiles = append(outfiles, coverFile)
+ } else {
+ cover := p.Internal.CoverVars[key]
+ if cover == nil {
+ continue // Not covering this file.
+ }
+ if err := b.cover(a, coverFile, sourceFile, cover.Var); err != nil {
+ return err
+ }
+ }
+ if i < len(gofiles) {
+ gofiles[i] = coverFile
+ } else {
+ cgofiles[i-len(gofiles)] = coverFile
+ }
+ }
+
+ if cfg.Experiment.CoverageRedesign {
+ if len(infiles) != 0 {
+ // Coverage instrumentation creates new top level
+ // variables in the target package for things like
+ // meta-data containers, counter vars, etc. To avoid
+ // collisions with user variables, suffix the var name
+ // with 12 hex digits from the SHA-256 hash of the
+ // import path. Choice of 12 digits is historical/arbitrary,
+ // we just need enough of the hash to avoid accidents,
+ // as opposed to precluding determined attempts by
+ // users to break things.
+ sum := sha256.Sum256([]byte(a.Package.ImportPath))
+ coverVar := fmt.Sprintf("goCover_%x_", sum[:6])
+ mode := a.Package.Internal.CoverMode
+ if mode == "" {
+ panic("covermode should be set at this point")
+ }
+ if newoutfiles, err := b.cover2(a, infiles, outfiles, coverVar, mode); err != nil {
+ return err
+ } else {
+ outfiles = newoutfiles
+ gofiles = append([]string{newoutfiles[0]}, gofiles...)
+ }
+ } else {
+ // If there are no input files passed to cmd/cover,
+ // then we don't want to pass -covercfg when building
+ // the package with the compiler, so set covermode to
+ // the empty string so as to signal that we need to do
+ // that.
+ p.Internal.CoverMode = ""
+ }
+ }
+ }
+
+ // Run cgo.
+ if p.UsesCgo() || p.UsesSwig() {
+ // In a package using cgo, cgo compiles the C, C++ and assembly files with gcc.
+ // There is one exception: runtime/cgo's job is to bridge the
+ // cgo and non-cgo worlds, so it necessarily has files in both.
+ // In that case gcc only gets the gcc_* files.
+ var gccfiles []string
+ gccfiles = append(gccfiles, cfiles...)
+ cfiles = nil
+ if p.Standard && p.ImportPath == "runtime/cgo" {
+ filter := func(files, nongcc, gcc []string) ([]string, []string) {
+ for _, f := range files {
+ if strings.HasPrefix(f, "gcc_") {
+ gcc = append(gcc, f)
+ } else {
+ nongcc = append(nongcc, f)
+ }
+ }
+ return nongcc, gcc
+ }
+ sfiles, gccfiles = filter(sfiles, sfiles[:0], gccfiles)
+ } else {
+ for _, sfile := range sfiles {
+ data, err := os.ReadFile(filepath.Join(p.Dir, sfile))
+ if err == nil {
+ if bytes.HasPrefix(data, []byte("TEXT")) || bytes.Contains(data, []byte("\nTEXT")) ||
+ bytes.HasPrefix(data, []byte("DATA")) || bytes.Contains(data, []byte("\nDATA")) ||
+ bytes.HasPrefix(data, []byte("GLOBL")) || bytes.Contains(data, []byte("\nGLOBL")) {
+ return fmt.Errorf("package using cgo has Go assembly file %s", sfile)
+ }
+ }
+ }
+ gccfiles = append(gccfiles, sfiles...)
+ sfiles = nil
+ }
+
+ outGo, outObj, err := b.cgo(a, base.Tool("cgo"), objdir, pcCFLAGS, pcLDFLAGS, mkAbsFiles(p.Dir, cgofiles), gccfiles, cxxfiles, p.MFiles, p.FFiles)
+
+ // The files in cxxfiles have now been handled by b.cgo.
+ cxxfiles = nil
+
+ if err != nil {
+ return err
+ }
+ if cfg.BuildToolchainName == "gccgo" {
+ cgoObjects = append(cgoObjects, a.Objdir+"_cgo_flags")
+ }
+ cgoObjects = append(cgoObjects, outObj...)
+ gofiles = append(gofiles, outGo...)
+
+ switch cfg.BuildBuildmode {
+ case "c-archive", "c-shared":
+ b.cacheCgoHdr(a)
+ }
+ }
+
+ var srcfiles []string // .go and non-.go
+ srcfiles = append(srcfiles, gofiles...)
+ srcfiles = append(srcfiles, sfiles...)
+ srcfiles = append(srcfiles, cfiles...)
+ srcfiles = append(srcfiles, cxxfiles...)
+ b.cacheSrcFiles(a, srcfiles)
+
+ // Running cgo generated the cgo header.
+ need &^= needCgoHdr
+
+ // Sanity check only, since Package.load already checked as well.
+ if len(gofiles) == 0 {
+ return &load.NoGoError{Package: p}
+ }
+
+ // Prepare Go vet config if needed.
+ if need&needVet != 0 {
+ buildVetConfig(a, srcfiles)
+ need &^= needVet
+ }
+ if need&needCompiledGoFiles != 0 {
+ if err := b.loadCachedCompiledGoFiles(a); err != nil {
+ return fmt.Errorf("loading compiled Go files from cache: %w", err)
+ }
+ need &^= needCompiledGoFiles
+ }
+ if need == 0 {
+ // Nothing left to do.
+ return nil
+ }
+
+ // Collect symbol ABI requirements from assembly.
+ symabis, err := BuildToolchain.symabis(b, a, sfiles)
+ if err != nil {
+ return err
+ }
+
+ // Prepare Go import config.
+ // We start it off with a comment so it can't be empty, so icfg.Bytes() below is never nil.
+ // It should never be empty anyway, but there have been bugs in the past that resulted
+ // in empty configs, which then unfortunately turn into "no config passed to compiler",
+ // and the compiler falls back to looking in pkg itself, which mostly works,
+ // except when it doesn't.
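+	// The generated config consists of lines like the following
+	// (hypothetical values): "importmap" lines map source-level import
+	// paths to resolved package paths, and "packagefile" lines map
+	// package paths to their compiled archives.
+	//
+	//	# import config
+	//	importmap golang.org/x/net/http2/hpack=vendor/golang.org/x/net/http2/hpack
+	//	packagefile fmt=$WORK/b011/_pkg_.a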
+ var icfg bytes.Buffer
+ fmt.Fprintf(&icfg, "# import config\n")
+ for i, raw := range p.Internal.RawImports {
+ final := p.Imports[i]
+ if final != raw {
+ fmt.Fprintf(&icfg, "importmap %s=%s\n", raw, final)
+ }
+ }
+ for _, a1 := range a.Deps {
+ p1 := a1.Package
+ if p1 == nil || p1.ImportPath == "" || a1.built == "" {
+ continue
+ }
+ fmt.Fprintf(&icfg, "packagefile %s=%s\n", p1.ImportPath, a1.built)
+ }
+
+ // Prepare Go embed config if needed.
+ // Unlike the import config, it's okay for the embed config to be empty.
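+	// When present, the embed config is JSON roughly of the form
+	// (hypothetical patterns and paths):
+	//
+	//	{
+	//		"Patterns": {"static/*": ["static/index.html"]},
+	//		"Files": {"static/index.html": "/home/user/proj/static/index.html"}
+	//	}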
+ var embedcfg []byte
+ if len(p.Internal.Embed) > 0 {
+ var embed struct {
+ Patterns map[string][]string
+ Files map[string]string
+ }
+ embed.Patterns = p.Internal.Embed
+ embed.Files = make(map[string]string)
+ for _, file := range p.EmbedFiles {
+ embed.Files[file] = filepath.Join(p.Dir, file)
+ }
+ js, err := json.MarshalIndent(&embed, "", "\t")
+ if err != nil {
+ return fmt.Errorf("marshal embedcfg: %v", err)
+ }
+ embedcfg = js
+ }
+
+ if p.Internal.BuildInfo != nil && cfg.ModulesEnabled {
+ prog := modload.ModInfoProg(p.Internal.BuildInfo.String(), cfg.BuildToolchainName == "gccgo")
+ if len(prog) > 0 {
+ if err := b.writeFile(objdir+"_gomod_.go", prog); err != nil {
+ return err
+ }
+ gofiles = append(gofiles, objdir+"_gomod_.go")
+ }
+ }
+
+ // Compile Go.
+ objpkg := objdir + "_pkg_.a"
+ ofile, out, err := BuildToolchain.gc(b, a, objpkg, icfg.Bytes(), embedcfg, symabis, len(sfiles) > 0, gofiles)
+ if len(out) > 0 {
+ output := b.processOutput(out)
+ if err != nil {
+ return formatOutput(b.WorkDir, p.Dir, p.ImportPath, p.Desc(), output)
+ } else {
+ b.showOutput(a, p.Dir, p.Desc(), output)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ if ofile != objpkg {
+ objects = append(objects, ofile)
+ }
+
+ // Copy .h files named for goos or goarch or goos_goarch
+ // to names using GOOS and GOARCH.
+ // For example, defs_linux_amd64.h becomes defs_GOOS_GOARCH.h.
+ _goos_goarch := "_" + cfg.Goos + "_" + cfg.Goarch
+ _goos := "_" + cfg.Goos
+ _goarch := "_" + cfg.Goarch
+ for _, file := range p.HFiles {
+ name, ext := fileExtSplit(file)
+ switch {
+ case strings.HasSuffix(name, _goos_goarch):
+ targ := file[:len(name)-len(_goos_goarch)] + "_GOOS_GOARCH." + ext
+ if err := b.copyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
+ return err
+ }
+ case strings.HasSuffix(name, _goarch):
+ targ := file[:len(name)-len(_goarch)] + "_GOARCH." + ext
+ if err := b.copyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
+ return err
+ }
+ case strings.HasSuffix(name, _goos):
+ targ := file[:len(name)-len(_goos)] + "_GOOS." + ext
+ if err := b.copyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
+ return err
+ }
+ }
+ }
+
+ for _, file := range cfiles {
+ out := file[:len(file)-len(".c")] + ".o"
+ if err := BuildToolchain.cc(b, a, objdir+out, file); err != nil {
+ return err
+ }
+ objects = append(objects, out)
+ }
+
+ // Assemble .s files.
+ if len(sfiles) > 0 {
+ ofiles, err := BuildToolchain.asm(b, a, sfiles)
+ if err != nil {
+ return err
+ }
+ objects = append(objects, ofiles...)
+ }
+
+ // For gccgo on ELF systems, we write the build ID as an assembler file.
+ // This lets us set the SHF_EXCLUDE flag.
+ // This is read by readGccgoArchive in cmd/internal/buildid/buildid.go.
+ if a.buildID != "" && cfg.BuildToolchainName == "gccgo" {
+ switch cfg.Goos {
+ case "aix", "android", "dragonfly", "freebsd", "illumos", "linux", "netbsd", "openbsd", "solaris":
+ asmfile, err := b.gccgoBuildIDFile(a)
+ if err != nil {
+ return err
+ }
+ ofiles, err := BuildToolchain.asm(b, a, []string{asmfile})
+ if err != nil {
+ return err
+ }
+ objects = append(objects, ofiles...)
+ }
+ }
+
+ // NOTE(rsc): On Windows, it is critically important that the
+ // gcc-compiled objects (cgoObjects) be listed after the ordinary
+ // objects in the archive. I do not know why this is.
+ // https://golang.org/issue/2601
+ objects = append(objects, cgoObjects...)
+
+ // Add system object files.
+ for _, syso := range p.SysoFiles {
+ objects = append(objects, filepath.Join(p.Dir, syso))
+ }
+
+ // Pack into archive in objdir directory.
+ // If the Go compiler wrote an archive, we only need to add the
+ // object files for non-Go sources to the archive.
+ // If the Go compiler wrote an archive and the package is entirely
+ // Go sources, there is no pack to execute at all.
+ if len(objects) > 0 {
+ if err := BuildToolchain.pack(b, a, objpkg, objects); err != nil {
+ return err
+ }
+ }
+
+ if err := b.updateBuildID(a, objpkg, true); err != nil {
+ return err
+ }
+
+ a.built = objpkg
+ return nil
+}
+
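+// checkDirectives reports an error if the package's source files contain
+// invalid or repeated //go:debug directives.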
+func (b *Builder) checkDirectives(a *Action) error {
+ var msg *bytes.Buffer
+ p := a.Package
+ var seen map[string]token.Position
+ for _, d := range p.Internal.Build.Directives {
+ if strings.HasPrefix(d.Text, "//go:debug") {
+ key, _, err := load.ParseGoDebug(d.Text)
+ if err != nil && err != load.ErrNotGoDebug {
+ if msg == nil {
+ msg = new(bytes.Buffer)
+ }
+ fmt.Fprintf(msg, "%s: invalid //go:debug: %v\n", d.Pos, err)
+ continue
+ }
+			if pos, ok := seen[key]; ok {
+				// msg may still be nil here if this is the first problem reported.
+				if msg == nil {
+					msg = new(bytes.Buffer)
+				}
+				fmt.Fprintf(msg, "%s: repeated //go:debug for %v\n\t%s: previous //go:debug\n", d.Pos, key, pos)
+				continue
+			}
+ if seen == nil {
+ seen = make(map[string]token.Position)
+ }
+ seen[key] = d.Pos
+ }
+ }
+ if msg != nil {
+ return formatOutput(b.WorkDir, p.Dir, p.ImportPath, p.Desc(), b.processOutput(msg.Bytes()))
+ }
+ return nil
+}
+
+func (b *Builder) cacheObjdirFile(a *Action, c cache.Cache, name string) error {
+ f, err := os.Open(a.Objdir + name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ _, _, err = c.Put(cache.Subkey(a.actionID, name), f)
+ return err
+}
+
+func (b *Builder) findCachedObjdirFile(a *Action, c cache.Cache, name string) (string, error) {
+ file, _, err := cache.GetFile(c, cache.Subkey(a.actionID, name))
+ if err != nil {
+ return "", fmt.Errorf("loading cached file %s: %w", name, err)
+ }
+ return file, nil
+}
+
+func (b *Builder) loadCachedObjdirFile(a *Action, c cache.Cache, name string) error {
+ cached, err := b.findCachedObjdirFile(a, c, name)
+ if err != nil {
+ return err
+ }
+ return b.copyFile(a.Objdir+name, cached, 0666, true)
+}
+
+func (b *Builder) cacheCgoHdr(a *Action) {
+ c := cache.Default()
+ b.cacheObjdirFile(a, c, "_cgo_install.h")
+}
+
+func (b *Builder) loadCachedCgoHdr(a *Action) error {
+ c := cache.Default()
+ return b.loadCachedObjdirFile(a, c, "_cgo_install.h")
+}
+
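+// cacheSrcFiles records the list of a's source files in the build cache
+// under the "srcfiles" subkey so that loadCachedVet and
+// loadCachedCompiledGoFiles can reconstruct the same file set later.
+// Files outside a.Objdir are listed with a "./" prefix; generated files
+// under a.Objdir are listed by name and also cached individually.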
+func (b *Builder) cacheSrcFiles(a *Action, srcfiles []string) {
+ c := cache.Default()
+ var buf bytes.Buffer
+ for _, file := range srcfiles {
+ if !strings.HasPrefix(file, a.Objdir) {
+ // not generated
+ buf.WriteString("./")
+ buf.WriteString(file)
+ buf.WriteString("\n")
+ continue
+ }
+ name := file[len(a.Objdir):]
+ buf.WriteString(name)
+ buf.WriteString("\n")
+ if err := b.cacheObjdirFile(a, c, name); err != nil {
+ return
+ }
+ }
+ cache.PutBytes(c, cache.Subkey(a.actionID, "srcfiles"), buf.Bytes())
+}
+
+func (b *Builder) loadCachedVet(a *Action) error {
+ c := cache.Default()
+ list, _, err := cache.GetBytes(c, cache.Subkey(a.actionID, "srcfiles"))
+ if err != nil {
+ return fmt.Errorf("reading srcfiles list: %w", err)
+ }
+ var srcfiles []string
+ for _, name := range strings.Split(string(list), "\n") {
+ if name == "" { // end of list
+ continue
+ }
+ if strings.HasPrefix(name, "./") {
+ srcfiles = append(srcfiles, name[2:])
+ continue
+ }
+ if err := b.loadCachedObjdirFile(a, c, name); err != nil {
+ return err
+ }
+ srcfiles = append(srcfiles, a.Objdir+name)
+ }
+ buildVetConfig(a, srcfiles)
+ return nil
+}
+
+func (b *Builder) loadCachedCompiledGoFiles(a *Action) error {
+ c := cache.Default()
+ list, _, err := cache.GetBytes(c, cache.Subkey(a.actionID, "srcfiles"))
+ if err != nil {
+ return fmt.Errorf("reading srcfiles list: %w", err)
+ }
+ var gofiles []string
+ for _, name := range strings.Split(string(list), "\n") {
+ if name == "" { // end of list
+ continue
+ } else if !strings.HasSuffix(name, ".go") {
+ continue
+ }
+ if strings.HasPrefix(name, "./") {
+ gofiles = append(gofiles, name[len("./"):])
+ continue
+ }
+ file, err := b.findCachedObjdirFile(a, c, name)
+ if err != nil {
+ return fmt.Errorf("finding %s: %w", name, err)
+ }
+ gofiles = append(gofiles, file)
+ }
+ a.Package.CompiledGoFiles = gofiles
+ return nil
+}
+
+// vetConfig is the configuration passed to vet describing a single package.
+type vetConfig struct {
+ ID string // package ID (example: "fmt [fmt.test]")
+ Compiler string // compiler name (gc, gccgo)
+ Dir string // directory containing package
+ ImportPath string // canonical import path ("package path")
+ GoFiles []string // absolute paths to package source files
+ NonGoFiles []string // absolute paths to package non-Go files
+ IgnoredFiles []string // absolute paths to ignored source files
+
+ ImportMap map[string]string // map import path in source code to package path
+ PackageFile map[string]string // map package path to .a file with export data
+ Standard map[string]bool // map package path to whether it's in the standard library
+ PackageVetx map[string]string // map package path to vetx data from earlier vet run
+ VetxOnly bool // only compute vetx data; don't report detected problems
+ VetxOutput string // write vetx data to this output file
+ GoVersion string // Go version for package
+
+ SucceedOnTypecheckFailure bool // awful hack; see #18395 and below
+}
+
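+// buildVetConfig constructs the vet configuration for a's package from
+// srcfiles and records it in a.vetCfg for use by the corresponding
+// vet action.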
+func buildVetConfig(a *Action, srcfiles []string) {
+ // Classify files based on .go extension.
+ // srcfiles does not include raw cgo files.
+ var gofiles, nongofiles []string
+ for _, name := range srcfiles {
+ if strings.HasSuffix(name, ".go") {
+ gofiles = append(gofiles, name)
+ } else {
+ nongofiles = append(nongofiles, name)
+ }
+ }
+
+ ignored := str.StringList(a.Package.IgnoredGoFiles, a.Package.IgnoredOtherFiles)
+
+ // Pass list of absolute paths to vet,
+ // so that vet's error messages will use absolute paths,
+ // so that we can reformat them relative to the directory
+ // in which the go command is invoked.
+ vcfg := &vetConfig{
+ ID: a.Package.ImportPath,
+ Compiler: cfg.BuildToolchainName,
+ Dir: a.Package.Dir,
+ GoFiles: mkAbsFiles(a.Package.Dir, gofiles),
+ NonGoFiles: mkAbsFiles(a.Package.Dir, nongofiles),
+ IgnoredFiles: mkAbsFiles(a.Package.Dir, ignored),
+ ImportPath: a.Package.ImportPath,
+ ImportMap: make(map[string]string),
+ PackageFile: make(map[string]string),
+ Standard: make(map[string]bool),
+ }
+ if a.Package.Module != nil {
+ v := a.Package.Module.GoVersion
+ if v == "" {
+ v = gover.DefaultGoModVersion
+ }
+ vcfg.GoVersion = "go" + v
+ }
+ a.vetCfg = vcfg
+ for i, raw := range a.Package.Internal.RawImports {
+ final := a.Package.Imports[i]
+ vcfg.ImportMap[raw] = final
+ }
+
+ // Compute the list of mapped imports in the vet config
+ // so that we can add any missing mappings below.
+ vcfgMapped := make(map[string]bool)
+ for _, p := range vcfg.ImportMap {
+ vcfgMapped[p] = true
+ }
+
+ for _, a1 := range a.Deps {
+ p1 := a1.Package
+ if p1 == nil || p1.ImportPath == "" {
+ continue
+ }
+ // Add import mapping if needed
+ // (for imports like "runtime/cgo" that appear only in generated code).
+ if !vcfgMapped[p1.ImportPath] {
+ vcfg.ImportMap[p1.ImportPath] = p1.ImportPath
+ }
+ if a1.built != "" {
+ vcfg.PackageFile[p1.ImportPath] = a1.built
+ }
+ if p1.Standard {
+ vcfg.Standard[p1.ImportPath] = true
+ }
+ }
+}
+
+// VetTool is the path to an alternate vet tool binary.
+// The caller is expected to set it (if needed) before executing any vet actions.
+var VetTool string
+
+// VetFlags are the default flags to pass to vet.
+// The caller is expected to set them before executing any vet actions.
+var VetFlags []string
+
+// VetExplicit records whether the vet flags were set explicitly on the command line.
+var VetExplicit bool
+
+func (b *Builder) vet(ctx context.Context, a *Action) error {
+ // a.Deps[0] is the build of the package being vetted.
+ // a.Deps[1] is the build of the "fmt" package.
+
+ a.Failed = false // vet of dependency may have failed but we can still succeed
+
+ if a.Deps[0].Failed {
+ // The build of the package has failed. Skip vet check.
+ // Vet could return export data for non-typecheck errors,
+ // but we ignore it because the package cannot be compiled.
+ return nil
+ }
+
+ vcfg := a.Deps[0].vetCfg
+ if vcfg == nil {
+ // Vet config should only be missing if the build failed.
+ return fmt.Errorf("vet config not found")
+ }
+
+ vcfg.VetxOnly = a.VetxOnly
+ vcfg.VetxOutput = a.Objdir + "vet.out"
+ vcfg.PackageVetx = make(map[string]string)
+
+ h := cache.NewHash("vet " + a.Package.ImportPath)
+ fmt.Fprintf(h, "vet %q\n", b.toolID("vet"))
+
+ vetFlags := VetFlags
+
+ // In GOROOT, we enable all the vet tests during 'go test',
+ // not just the high-confidence subset. This gets us extra
+ // checking for the standard library (at some compliance cost)
+ // and helps us gain experience about how well the checks
+ // work, to help decide which should be turned on by default.
+ // The command-line still wins.
+ //
+ // Note that this flag change applies even when running vet as
+ // a dependency of vetting a package outside std.
+ // (Otherwise we'd have to introduce a whole separate
+ // space of "vet fmt as a dependency of a std top-level vet"
+ // versus "vet fmt as a dependency of a non-std top-level vet".)
+ // This is OK as long as the packages that are farther down the
+ // dependency tree turn on *more* analysis, as here.
+ // (The unsafeptr check does not write any facts for use by
+ // later vet runs, nor does unreachable.)
+ if a.Package.Goroot && !VetExplicit && VetTool == "" {
+ // Turn off -unsafeptr checks.
+ // There's too much unsafe.Pointer code
+ // that vet doesn't like in low-level packages
+ // like runtime, sync, and reflect.
+ // Note that $GOROOT/src/buildall.bash
+ // does the same for the misc-compile trybots
+ // and should be updated if these flags are
+ // changed here.
+ vetFlags = []string{"-unsafeptr=false"}
+
+ // Also turn off -unreachable checks during go test.
+ // During testing it is very common to make changes
+ // like hard-coded forced returns or panics that make
+ // code unreachable. It's unreasonable to insist on files
+ // not having any unreachable code during "go test".
+ // (buildall.bash still runs with -unreachable enabled
+ // for the overall whole-tree scan.)
+ if cfg.CmdName == "test" {
+ vetFlags = append(vetFlags, "-unreachable=false")
+ }
+ }
+
+ // Note: We could decide that vet should compute export data for
+ // all analyses, in which case we don't need to include the flags here.
+ // But that would mean that if an analysis causes problems like
+ // unexpected crashes there would be no way to turn it off.
+ // It seems better to let the flags disable export analysis too.
+ fmt.Fprintf(h, "vetflags %q\n", vetFlags)
+
+ fmt.Fprintf(h, "pkg %q\n", a.Deps[0].actionID)
+ for _, a1 := range a.Deps {
+ if a1.Mode == "vet" && a1.built != "" {
+ fmt.Fprintf(h, "vetout %q %s\n", a1.Package.ImportPath, b.fileHash(a1.built))
+ vcfg.PackageVetx[a1.Package.ImportPath] = a1.built
+ }
+ }
+ key := cache.ActionID(h.Sum())
+
+ if vcfg.VetxOnly && !cfg.BuildA {
+ c := cache.Default()
+ if file, _, err := cache.GetFile(c, key); err == nil {
+ a.built = file
+ return nil
+ }
+ }
+
+ js, err := json.MarshalIndent(vcfg, "", "\t")
+ if err != nil {
+ return fmt.Errorf("internal error marshaling vet config: %v", err)
+ }
+ js = append(js, '\n')
+ if err := b.writeFile(a.Objdir+"vet.cfg", js); err != nil {
+ return err
+ }
+
+ // TODO(rsc): Why do we pass $GCCGO to go vet?
+ env := b.cCompilerEnv()
+ if cfg.BuildToolchainName == "gccgo" {
+ env = append(env, "GCCGO="+BuildToolchain.compiler())
+ }
+
+ p := a.Package
+ tool := VetTool
+ if tool == "" {
+ tool = base.Tool("vet")
+ }
+ runErr := b.run(a, p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg")
+
+ // If vet wrote export data, save it for input to future vets.
+ if f, err := os.Open(vcfg.VetxOutput); err == nil {
+ a.built = vcfg.VetxOutput
+ cache.Default().Put(key, f)
+ f.Close()
+ }
+
+ return runErr
+}
+
+// linkActionID computes the action ID for a link action.
+func (b *Builder) linkActionID(a *Action) cache.ActionID {
+ p := a.Package
+ h := cache.NewHash("link " + p.ImportPath)
+
+ // Toolchain-independent configuration.
+ fmt.Fprintf(h, "link\n")
+ fmt.Fprintf(h, "buildmode %s goos %s goarch %s\n", cfg.BuildBuildmode, cfg.Goos, cfg.Goarch)
+ fmt.Fprintf(h, "import %q\n", p.ImportPath)
+ fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix)
+ if cfg.BuildTrimpath {
+ fmt.Fprintln(h, "trimpath")
+ }
+
+ // Toolchain-dependent configuration, shared with b.linkSharedActionID.
+ b.printLinkerConfig(h, p)
+
+ // Input files.
+ for _, a1 := range a.Deps {
+ p1 := a1.Package
+ if p1 != nil {
+ if a1.built != "" || a1.buildID != "" {
+ buildID := a1.buildID
+ if buildID == "" {
+ buildID = b.buildID(a1.built)
+ }
+ fmt.Fprintf(h, "packagefile %s=%s\n", p1.ImportPath, contentID(buildID))
+ }
+ // Because we put package main's full action ID into the binary's build ID,
+ // we must also put the full action ID into the binary's action ID hash.
+ if p1.Name == "main" {
+ fmt.Fprintf(h, "packagemain %s\n", a1.buildID)
+ }
+ if p1.Shlib != "" {
+ fmt.Fprintf(h, "packageshlib %s=%s\n", p1.ImportPath, contentID(b.buildID(p1.Shlib)))
+ }
+ }
+ }
+
+ return h.Sum()
+}
+
+// printLinkerConfig prints the linker config into the hash h,
+// as part of the computation of a linker-related action ID.
+func (b *Builder) printLinkerConfig(h io.Writer, p *load.Package) {
+ switch cfg.BuildToolchainName {
+ default:
+ base.Fatalf("linkActionID: unknown toolchain %q", cfg.BuildToolchainName)
+
+ case "gc":
+ fmt.Fprintf(h, "link %s %q %s\n", b.toolID("link"), forcedLdflags, ldBuildmode)
+ if p != nil {
+ fmt.Fprintf(h, "linkflags %q\n", p.Internal.Ldflags)
+ }
+
+ // GOARM, GOMIPS, etc.
+ key, val := cfg.GetArchEnv()
+ fmt.Fprintf(h, "%s=%s\n", key, val)
+
+ if cfg.CleanGOEXPERIMENT != "" {
+ fmt.Fprintf(h, "GOEXPERIMENT=%q\n", cfg.CleanGOEXPERIMENT)
+ }
+
+ // The linker writes source file paths that say GOROOT_FINAL, but
+ // only if -trimpath is not specified (see ld() in gc.go).
+ gorootFinal := cfg.GOROOT_FINAL
+ if cfg.BuildTrimpath {
+ gorootFinal = trimPathGoRootFinal
+ }
+ fmt.Fprintf(h, "GOROOT=%s\n", gorootFinal)
+
+ // GO_EXTLINK_ENABLED controls whether the external linker is used.
+ fmt.Fprintf(h, "GO_EXTLINK_ENABLED=%s\n", cfg.Getenv("GO_EXTLINK_ENABLED"))
+
+ // TODO(rsc): Do cgo settings and flags need to be included?
+ // Or external linker settings and flags?
+
+ case "gccgo":
+ id, _, err := b.gccToolID(BuildToolchain.linker(), "go")
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ fmt.Fprintf(h, "link %s %s\n", id, ldBuildmode)
+ // TODO(iant): Should probably include cgo flags here.
+ }
+}
+
+// link is the action for linking a single command.
+// Note that any new influence on this logic must be reported in b.linkActionID above as well.
+func (b *Builder) link(ctx context.Context, a *Action) (err error) {
+ if b.useCache(a, b.linkActionID(a), a.Package.Target, !b.IsCmdList) || b.IsCmdList {
+ return nil
+ }
+ defer b.flushOutput(a)
+
+ if err := b.Mkdir(a.Objdir); err != nil {
+ return err
+ }
+
+ importcfg := a.Objdir + "importcfg.link"
+ if err := b.writeLinkImportcfg(a, importcfg); err != nil {
+ return err
+ }
+
+ if err := AllowInstall(a); err != nil {
+ return err
+ }
+
+ // make target directory
+ dir, _ := filepath.Split(a.Target)
+ if dir != "" {
+ if err := b.Mkdir(dir); err != nil {
+ return err
+ }
+ }
+
+ if err := BuildToolchain.ld(b, a, a.Target, importcfg, a.Deps[0].built); err != nil {
+ return err
+ }
+
+ // Update the binary with the final build ID.
+ // But if OmitDebug is set, don't rewrite the binary, because we set OmitDebug
+ // on binaries that we are going to run and then delete.
+ // There's no point in doing work on such a binary.
+ // Worse, opening the binary for write here makes it
+ // essentially impossible to safely fork+exec due to a fundamental
+ // incompatibility between ETXTBSY and threads on modern Unix systems.
+ // See golang.org/issue/22220.
+ // We still call updateBuildID to update a.buildID, which is important
+ // for test result caching, but passing rewrite=false (final arg)
+ // means we don't actually rewrite the binary, nor store the
+ // result into the cache. That's probably a net win:
+ // less cache space wasted on large binaries we are not likely to
+ // need again. (On the other hand it does make repeated go test slower.)
+ // It also makes repeated go run slower, which is a win in itself:
+ // we don't want people to treat go run like a scripting environment.
+ if err := b.updateBuildID(a, a.Target, !a.Package.Internal.OmitDebug); err != nil {
+ return err
+ }
+
+ a.built = a.Target
+ return nil
+}
+
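+// writeLinkImportcfg writes the import configuration consumed by the linker:
+// a "packagefile" line per dependency archive, a "packageshlib" line for
+// packages found in shared libraries, and a trailing "modinfo" line carrying
+// the module build information.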
+func (b *Builder) writeLinkImportcfg(a *Action, file string) error {
+ // Prepare Go import cfg.
+ var icfg bytes.Buffer
+ for _, a1 := range a.Deps {
+ p1 := a1.Package
+ if p1 == nil {
+ continue
+ }
+ fmt.Fprintf(&icfg, "packagefile %s=%s\n", p1.ImportPath, a1.built)
+ if p1.Shlib != "" {
+ fmt.Fprintf(&icfg, "packageshlib %s=%s\n", p1.ImportPath, p1.Shlib)
+ }
+ }
+ info := ""
+ if a.Package.Internal.BuildInfo != nil {
+ info = a.Package.Internal.BuildInfo.String()
+ }
+ fmt.Fprintf(&icfg, "modinfo %q\n", modload.ModInfoData(info))
+ return b.writeFile(file, icfg.Bytes())
+}
+
+// PkgconfigCmd returns the pkg-config binary to invoke, from the PKG_CONFIG
+// environment variable if set, otherwise defaultPkgConfig
+// (defined in zdefaultcc.go, written by cmd/dist).
+func (b *Builder) PkgconfigCmd() string {
+ return envList("PKG_CONFIG", cfg.DefaultPkgConfig)[0]
+}
+
+// splitPkgConfigOutput parses the pkg-config output into a slice of flags.
+// This implements the shell quoting semantics described in
+// https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02,
+// except that it does not support parameter or arithmetic expansion or command
+// substitution and hard-codes the <blank> delimiters instead of reading them
+// from the current locale.
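+//
+// For example, the (hypothetical) pkg-config output
+//
+//	-I"/usr/include/my lib" -DQUOTED=\"yes\"
+//
+// is split into the two flags
+//
+//	-I/usr/include/my lib
+//	-DQUOTED="yes"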
+func splitPkgConfigOutput(out []byte) ([]string, error) {
+ if len(out) == 0 {
+ return nil, nil
+ }
+ var flags []string
+ flag := make([]byte, 0, len(out))
+ didQuote := false // was the current flag parsed from a quoted string?
+ escaped := false // did we just read `\` in a non-single-quoted context?
+ quote := byte(0) // what is the quote character around the current string?
+
+ for _, c := range out {
+ if escaped {
+ if quote == '"' {
+ // “The <backslash> shall retain its special meaning as an escape
+ // character … only when followed by one of the following characters
+ // when considered special:”
+ switch c {
+ case '$', '`', '"', '\\', '\n':
+ // Handle the escaped character normally.
+ default:
+ // Not an escape character after all.
+ flag = append(flag, '\\', c)
+ escaped = false
+ continue
+ }
+ }
+
+ if c == '\n' {
+ // “If a <newline> follows the <backslash>, the shell shall interpret
+ // this as line continuation.”
+ } else {
+ flag = append(flag, c)
+ }
+ escaped = false
+ continue
+ }
+
+ if quote != 0 && c == quote {
+ quote = 0
+ continue
+ }
+ switch quote {
+ case '\'':
+ // “preserve the literal value of each character”
+ flag = append(flag, c)
+ continue
+ case '"':
+ // “preserve the literal value of all characters within the double-quotes,
+ // with the exception of …”
+ switch c {
+ case '`', '$', '\\':
+ default:
+ flag = append(flag, c)
+ continue
+ }
+ }
+
+ // “The application shall quote the following characters if they are to
+ // represent themselves:”
+ switch c {
+ case '|', '&', ';', '<', '>', '(', ')', '$', '`':
+ return nil, fmt.Errorf("unexpected shell character %q in pkgconf output", c)
+
+ case '\\':
+ // “A <backslash> that is not quoted shall preserve the literal value of
+ // the following character, with the exception of a <newline>.”
+ escaped = true
+ continue
+
+ case '"', '\'':
+ quote = c
+ didQuote = true
+ continue
+
+ case ' ', '\t', '\n':
+ if len(flag) > 0 || didQuote {
+ flags = append(flags, string(flag))
+ }
+ flag, didQuote = flag[:0], false
+ continue
+ }
+
+ flag = append(flag, c)
+ }
+
+ // Prefer to report a missing quote instead of a missing escape. If the string
+ // is something like `"foo\`, it's ambiguous as to whether the trailing
+ // backslash is really an escape at all.
+ if quote != 0 {
+ return nil, errors.New("unterminated quoted string in pkgconf output")
+ }
+ if escaped {
+ return nil, errors.New("broken character escaping in pkgconf output")
+ }
+
+ if len(flag) > 0 || didQuote {
+ flags = append(flags, string(flag))
+ }
+ return flags, nil
+}
+
+// Calls pkg-config if needed and returns the cflags/ldflags needed to build the package.
+func (b *Builder) getPkgConfigFlags(p *load.Package) (cflags, ldflags []string, err error) {
+ if pcargs := p.CgoPkgConfig; len(pcargs) > 0 {
+ // pkg-config permits arguments to appear anywhere in
+ // the command line. Move them all to the front, before --.
+ var pcflags []string
+ var pkgs []string
+ for _, pcarg := range pcargs {
+ if pcarg == "--" {
+ // We're going to add our own "--" argument.
+ } else if strings.HasPrefix(pcarg, "--") {
+ pcflags = append(pcflags, pcarg)
+ } else {
+ pkgs = append(pkgs, pcarg)
+ }
+ }
+ for _, pkg := range pkgs {
+ if !load.SafeArg(pkg) {
+ return nil, nil, fmt.Errorf("invalid pkg-config package name: %s", pkg)
+ }
+ }
+ var out []byte
+ out, err = b.runOut(nil, p.Dir, nil, b.PkgconfigCmd(), "--cflags", pcflags, "--", pkgs)
+ if err != nil {
+ err = formatOutput(b.WorkDir, p.Dir, p.ImportPath, b.PkgconfigCmd()+" --cflags "+strings.Join(pcflags, " ")+" -- "+strings.Join(pkgs, " "), string(out)+err.Error())
+ return nil, nil, err
+ }
+ if len(out) > 0 {
+ cflags, err = splitPkgConfigOutput(bytes.TrimSpace(out))
+ if err != nil {
+ return nil, nil, err
+ }
+ if err := checkCompilerFlags("CFLAGS", "pkg-config --cflags", cflags); err != nil {
+ return nil, nil, err
+ }
+ }
+ out, err = b.runOut(nil, p.Dir, nil, b.PkgconfigCmd(), "--libs", pcflags, "--", pkgs)
+ if err != nil {
+ err = formatOutput(b.WorkDir, p.Dir, p.ImportPath, b.PkgconfigCmd()+" --libs "+strings.Join(pcflags, " ")+" -- "+strings.Join(pkgs, " "), string(out)+err.Error())
+ return nil, nil, err
+ }
+ if len(out) > 0 {
+			// We need to handle paths with spaces so that C:/Program\ Files can pass
+			// checkLinkerFlags. Use splitPkgConfigOutput here, just as we do for cflags above.
+ ldflags, err = splitPkgConfigOutput(bytes.TrimSpace(out))
+ if err != nil {
+ return nil, nil, err
+ }
+ if err := checkLinkerFlags("LDFLAGS", "pkg-config --libs", ldflags); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+
+ return
+}
+
+func (b *Builder) installShlibname(ctx context.Context, a *Action) error {
+ if err := AllowInstall(a); err != nil {
+ return err
+ }
+
+ // TODO: BuildN
+ a1 := a.Deps[0]
+ if err := b.Mkdir(filepath.Dir(a.Target)); err != nil {
+ return err
+ }
+ err := os.WriteFile(a.Target, []byte(filepath.Base(a1.Target)+"\n"), 0666)
+ if err != nil {
+ return err
+ }
+ if cfg.BuildX {
+ b.Showcmd("", "echo '%s' > %s # internal", filepath.Base(a1.Target), a.Target)
+ }
+ return nil
+}
+
+func (b *Builder) linkSharedActionID(a *Action) cache.ActionID {
+ h := cache.NewHash("linkShared")
+
+ // Toolchain-independent configuration.
+ fmt.Fprintf(h, "linkShared\n")
+ fmt.Fprintf(h, "goos %s goarch %s\n", cfg.Goos, cfg.Goarch)
+
+ // Toolchain-dependent configuration, shared with b.linkActionID.
+ b.printLinkerConfig(h, nil)
+
+ // Input files.
+ for _, a1 := range a.Deps {
+ p1 := a1.Package
+ if a1.built == "" {
+ continue
+ }
+ if p1 != nil {
+ fmt.Fprintf(h, "packagefile %s=%s\n", p1.ImportPath, contentID(b.buildID(a1.built)))
+ if p1.Shlib != "" {
+ fmt.Fprintf(h, "packageshlib %s=%s\n", p1.ImportPath, contentID(b.buildID(p1.Shlib)))
+ }
+ }
+ }
+ // Files named on command line are special.
+ for _, a1 := range a.Deps[0].Deps {
+ p1 := a1.Package
+ fmt.Fprintf(h, "top %s=%s\n", p1.ImportPath, contentID(b.buildID(a1.built)))
+ }
+
+ return h.Sum()
+}
+
+func (b *Builder) linkShared(ctx context.Context, a *Action) (err error) {
+ if b.useCache(a, b.linkSharedActionID(a), a.Target, !b.IsCmdList) || b.IsCmdList {
+ return nil
+ }
+ defer b.flushOutput(a)
+
+ if err := AllowInstall(a); err != nil {
+ return err
+ }
+
+ if err := b.Mkdir(a.Objdir); err != nil {
+ return err
+ }
+
+ importcfg := a.Objdir + "importcfg.link"
+ if err := b.writeLinkImportcfg(a, importcfg); err != nil {
+ return err
+ }
+
+ // TODO(rsc): There is a missing updateBuildID here,
+ // but we have to decide where to store the build ID in these files.
+ a.built = a.Target
+ return BuildToolchain.ldShared(b, a, a.Deps[0].Deps, a.Target, importcfg, a.Deps)
+}
+
+// BuildInstallFunc is the action for installing a single package or executable.
+func BuildInstallFunc(b *Builder, ctx context.Context, a *Action) (err error) {
+ defer func() {
+ if err != nil {
+ // a.Package == nil is possible for the go install -buildmode=shared
+ // action that installs libmangledname.so, which corresponds to
+ // a list of packages, not just one.
+ sep, path := "", ""
+ if a.Package != nil {
+ sep, path = " ", a.Package.ImportPath
+ }
+ err = fmt.Errorf("go %s%s%s: %v", cfg.CmdName, sep, path, err)
+ }
+ }()
+
+ a1 := a.Deps[0]
+ a.buildID = a1.buildID
+ if a.json != nil {
+ a.json.BuildID = a.buildID
+ }
+
+ // If we are using the eventual install target as an up-to-date
+ // cached copy of the thing we built, then there's no need to
+ // copy it into itself (and that would probably fail anyway).
+ // In this case a1.built == a.Target because a1.built == p.Target,
+ // so the built target is not in the a1.Objdir tree that b.cleanup(a1) removes.
+ if a1.built == a.Target {
+ a.built = a.Target
+ if !a.buggyInstall {
+ b.cleanup(a1)
+ }
+ // Whether we're smart enough to avoid a complete rebuild
+ // depends on exactly what the staleness and rebuild algorithms
+ // are, as well as potentially the state of the Go build cache.
+ // We don't really want users to be able to infer (or worse start depending on)
+ // those details from whether the modification time changes during
+ // "go install", so do a best-effort update of the file times to make it
+ // look like we rewrote a.Target even if we did not. Updating the mtime
+ // may also help other mtime-based systems that depend on our
+ // previous mtime updates that happened more often.
+ // This is still not perfect - we ignore the error result, and if the file was
+ // unwritable for some reason then pretending to have written it is also
+ // confusing - but it's probably better than not doing the mtime update.
+ //
+ // But don't do that for the special case where building an executable
+ // with -linkshared implicitly installs all its dependent libraries.
+ // We want to hide that awful detail as much as possible, so don't
+ // advertise it by touching the mtimes (usually the libraries are up
+ // to date).
+ if !a.buggyInstall && !b.IsCmdList {
+ if cfg.BuildN {
+ b.Showcmd("", "touch %s", a.Target)
+ } else if err := AllowInstall(a); err == nil {
+ now := time.Now()
+ os.Chtimes(a.Target, now, now)
+ }
+ }
+ return nil
+ }
+
+ // If we're building for go list -export,
+ // never install anything; just keep the cache reference.
+ if b.IsCmdList {
+ a.built = a1.built
+ return nil
+ }
+ if err := AllowInstall(a); err != nil {
+ return err
+ }
+
+ if err := b.Mkdir(a.Objdir); err != nil {
+ return err
+ }
+
+ perm := fs.FileMode(0666)
+ if a1.Mode == "link" {
+ switch cfg.BuildBuildmode {
+ case "c-archive", "c-shared", "plugin":
+ default:
+ perm = 0777
+ }
+ }
+
+ // make target directory
+ dir, _ := filepath.Split(a.Target)
+ if dir != "" {
+ if err := b.Mkdir(dir); err != nil {
+ return err
+ }
+ }
+
+ if !a.buggyInstall {
+ defer b.cleanup(a1)
+ }
+
+ return b.moveOrCopyFile(a.Target, a1.built, perm, false)
+}
+
+// AllowInstall returns a non-nil error if this invocation of the go command is
+// not allowed to install a.Target.
+//
+// The build of cmd/go running under its own test is forbidden from installing
+// to its original GOROOT. The var is exported so it can be set by TestMain.
+var AllowInstall = func(*Action) error { return nil }
+
+// cleanup removes a's object dir to keep the amount of
+// on-disk garbage down in a large build. On an operating system
+// with aggressive buffering, cleaning incrementally like
+// this keeps the intermediate objects from hitting the disk.
+func (b *Builder) cleanup(a *Action) {
+ if !cfg.BuildWork {
+ if cfg.BuildX {
+ // Don't say we are removing the directory if
+ // we never created it.
+ if _, err := os.Stat(a.Objdir); err == nil || cfg.BuildN {
+ b.Showcmd("", "rm -r %s", a.Objdir)
+ }
+ }
+ os.RemoveAll(a.Objdir)
+ }
+}
+
+// moveOrCopyFile is like 'mv src dst' or 'cp src dst'.
+func (b *Builder) moveOrCopyFile(dst, src string, perm fs.FileMode, force bool) error {
+ if cfg.BuildN {
+ b.Showcmd("", "mv %s %s", src, dst)
+ return nil
+ }
+
+ // If we can update the mode and rename to the dst, do it.
+ // Otherwise fall back to standard copy.
+
+ // If the source is in the build cache, we need to copy it.
+ if strings.HasPrefix(src, cache.DefaultDir()) {
+ return b.copyFile(dst, src, perm, force)
+ }
+
+ // On Windows, always copy the file, so that we respect the NTFS
+ // permissions of the parent folder. https://golang.org/issue/22343.
+ // What matters here is not cfg.Goos (the system we are building
+ // for) but runtime.GOOS (the system we are building on).
+ if runtime.GOOS == "windows" {
+ return b.copyFile(dst, src, perm, force)
+ }
+
+ // If the destination directory has the group sticky bit set,
+ // we have to copy the file to retain the correct permissions.
+ // https://golang.org/issue/18878
+ if fi, err := os.Stat(filepath.Dir(dst)); err == nil {
+ if fi.IsDir() && (fi.Mode()&fs.ModeSetgid) != 0 {
+ return b.copyFile(dst, src, perm, force)
+ }
+ }
+
+ // The perm argument is meant to be adjusted according to umask,
+ // but we don't know what the umask is.
+ // Create a dummy file to find out.
+ // This avoids build tags and works even on systems like Plan 9
+ // where the file mask computation incorporates other information.
+ mode := perm
+ f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
+ if err == nil {
+ fi, err := f.Stat()
+ if err == nil {
+ mode = fi.Mode() & 0777
+ }
+ name := f.Name()
+ f.Close()
+ os.Remove(name)
+ }
+
+ if err := os.Chmod(src, mode); err == nil {
+ if err := os.Rename(src, dst); err == nil {
+ if cfg.BuildX {
+ b.Showcmd("", "mv %s %s", src, dst)
+ }
+ return nil
+ }
+ }
+
+ return b.copyFile(dst, src, perm, force)
+}
+
+// copyFile is like 'cp src dst'.
+func (b *Builder) copyFile(dst, src string, perm fs.FileMode, force bool) error {
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "cp %s %s", src, dst)
+ if cfg.BuildN {
+ return nil
+ }
+ }
+
+ sf, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer sf.Close()
+
+ // Be careful about removing/overwriting dst.
+ // Do not remove/overwrite if dst exists and is a directory
+ // or a non-empty non-object file.
+ if fi, err := os.Stat(dst); err == nil {
+ if fi.IsDir() {
+ return fmt.Errorf("build output %q already exists and is a directory", dst)
+ }
+ if !force && fi.Mode().IsRegular() && fi.Size() != 0 && !isObject(dst) {
+ return fmt.Errorf("build output %q already exists and is not an object file", dst)
+ }
+ }
+
+ // On Windows, remove lingering ~ file from last attempt.
+ if runtime.GOOS == "windows" {
+ if _, err := os.Stat(dst + "~"); err == nil {
+ os.Remove(dst + "~")
+ }
+ }
+
+ mayberemovefile(dst)
+ df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil && runtime.GOOS == "windows" {
+ // Windows does not allow deletion of a binary file
+ // while it is executing. Try to move it out of the way.
+ // If the move fails, which is likely, we'll try again the
+ // next time we do an install of this binary.
+ if err := os.Rename(dst, dst+"~"); err == nil {
+ os.Remove(dst + "~")
+ }
+ df, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ }
+ if err != nil {
+ return fmt.Errorf("copying %s: %w", src, err) // err should already refer to dst
+ }
+
+ _, err = io.Copy(df, sf)
+ df.Close()
+ if err != nil {
+ mayberemovefile(dst)
+ return fmt.Errorf("copying %s to %s: %v", src, dst, err)
+ }
+ return nil
+}
+
+// writeFile writes the text to file.
+func (b *Builder) writeFile(file string, text []byte) error {
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "cat >%s << 'EOF' # internal\n%sEOF", file, text)
+ }
+ if cfg.BuildN {
+ return nil
+ }
+ return os.WriteFile(file, text, 0666)
+}
+
+// Install the cgo export header file, if there is one.
+func (b *Builder) installHeader(ctx context.Context, a *Action) error {
+ src := a.Objdir + "_cgo_install.h"
+ if _, err := os.Stat(src); os.IsNotExist(err) {
+ // If the file does not exist, there are no exported
+ // functions, and we do not install anything.
+ // TODO(rsc): Once we know that caching is rebuilding
+ // at the right times (not missing rebuilds), here we should
+ // probably delete the installed header, if any.
+ if cfg.BuildX {
+ b.Showcmd("", "# %s not created", src)
+ }
+ return nil
+ }
+
+ if err := AllowInstall(a); err != nil {
+ return err
+ }
+
+ dir, _ := filepath.Split(a.Target)
+ if dir != "" {
+ if err := b.Mkdir(dir); err != nil {
+ return err
+ }
+ }
+
+ return b.moveOrCopyFile(a.Target, src, 0666, true)
+}
+
+// cover runs, in effect,
+//
+// go tool cover -mode=b.coverMode -var="varName" -o dst.go src.go
+func (b *Builder) cover(a *Action, dst, src string, varName string) error {
+ return b.run(a, a.Objdir, "cover "+a.Package.ImportPath, nil,
+ cfg.BuildToolexec,
+ base.Tool("cover"),
+ "-mode", a.Package.Internal.CoverMode,
+ "-var", varName,
+ "-o", dst,
+ src)
+}
+
+// cover2 runs, in effect,
+//
+// go tool cover -pkgcfg=<config file> -mode=b.coverMode -var="varName" -o <outfiles> <infiles>
+//
+// Return value is an updated output files list; in addition to the
+// regular outputs (instrumented source files) the cover tool also
+// writes a separate file (appearing first in the list of outputs)
+// that will contain coverage counters and meta-data.
+func (b *Builder) cover2(a *Action, infiles, outfiles []string, varName string, mode string) ([]string, error) {
+ pkgcfg := a.Objdir + "pkgcfg.txt"
+ covoutputs := a.Objdir + "coveroutfiles.txt"
+ odir := filepath.Dir(outfiles[0])
+ cv := filepath.Join(odir, "covervars.go")
+ outfiles = append([]string{cv}, outfiles...)
+ if err := b.writeCoverPkgInputs(a, pkgcfg, covoutputs, outfiles); err != nil {
+ return nil, err
+ }
+ args := []string{base.Tool("cover"),
+ "-pkgcfg", pkgcfg,
+ "-mode", mode,
+ "-var", varName,
+ "-outfilelist", covoutputs,
+ }
+ args = append(args, infiles...)
+ if err := b.run(a, a.Objdir, "cover "+a.Package.ImportPath, nil,
+ cfg.BuildToolexec, args); err != nil {
+ return nil, err
+ }
+ return outfiles, nil
+}
+
+func (b *Builder) writeCoverPkgInputs(a *Action, pconfigfile string, covoutputsfile string, outfiles []string) error {
+ p := a.Package
+ p.Internal.CoverageCfg = a.Objdir + "coveragecfg"
+ pcfg := coverage.CoverPkgConfig{
+ PkgPath: p.ImportPath,
+ PkgName: p.Name,
+ // Note: coverage granularity is currently hard-wired to
+ // 'perblock'; there isn't a way using "go build -cover" or "go
+ // test -cover" to select it. This may change in the future
+ // depending on user demand.
+ Granularity: "perblock",
+ OutConfig: p.Internal.CoverageCfg,
+ Local: p.Internal.Local,
+ }
+ if a.Package.Module != nil {
+ pcfg.ModulePath = a.Package.Module.Path
+ }
+ data, err := json.Marshal(pcfg)
+ if err != nil {
+ return err
+ }
+ if err := b.writeFile(pconfigfile, data); err != nil {
+ return err
+ }
+ var sb strings.Builder
+ for i := range outfiles {
+ fmt.Fprintf(&sb, "%s\n", outfiles[i])
+ }
+ return b.writeFile(covoutputsfile, []byte(sb.String()))
+}
+
+var objectMagic = [][]byte{
+ {'!', '<', 'a', 'r', 'c', 'h', '>', '\n'}, // Package archive
+ {'<', 'b', 'i', 'g', 'a', 'f', '>', '\n'}, // Package AIX big archive
+ {'\x7F', 'E', 'L', 'F'}, // ELF
+ {0xFE, 0xED, 0xFA, 0xCE}, // Mach-O big-endian 32-bit
+ {0xFE, 0xED, 0xFA, 0xCF}, // Mach-O big-endian 64-bit
+ {0xCE, 0xFA, 0xED, 0xFE}, // Mach-O little-endian 32-bit
+ {0xCF, 0xFA, 0xED, 0xFE}, // Mach-O little-endian 64-bit
+ {0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00}, // PE (Windows) as generated by 6l/8l and gcc
+ {0x4d, 0x5a, 0x78, 0x00, 0x01, 0x00}, // PE (Windows) as generated by llvm for dll
+ {0x00, 0x00, 0x01, 0xEB}, // Plan 9 i386
+ {0x00, 0x00, 0x8a, 0x97}, // Plan 9 amd64
+ {0x00, 0x00, 0x06, 0x47}, // Plan 9 arm
+ {0x00, 0x61, 0x73, 0x6D}, // WASM
+ {0x01, 0xDF}, // XCOFF 32bit
+ {0x01, 0xF7}, // XCOFF 64bit
+}
+
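+// isObject reports whether the named file begins with one of the known
+// object-file or archive magic numbers listed in objectMagic.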
+func isObject(s string) bool {
+ f, err := os.Open(s)
+ if err != nil {
+ return false
+ }
+ defer f.Close()
+ buf := make([]byte, 64)
+ io.ReadFull(f, buf)
+ for _, magic := range objectMagic {
+ if bytes.HasPrefix(buf, magic) {
+ return true
+ }
+ }
+ return false
+}
+
+// mayberemovefile removes a file only if it is a regular file.
+// When running as a user with sufficient privileges, we may delete
+// even device files, for example, which is not intended.
+func mayberemovefile(s string) {
+ if fi, err := os.Lstat(s); err == nil && !fi.Mode().IsRegular() {
+ return
+ }
+ os.Remove(s)
+}
+
+// fmtcmd formats a command in the manner of fmt.Sprintf but also:
+//
+// If dir is non-empty and the script is not in dir right now,
+// fmtcmd inserts "cd dir\n" before the command.
+//
+// fmtcmd replaces the value of b.WorkDir with $WORK.
+// fmtcmd replaces the value of goroot with $GOROOT.
+// fmtcmd replaces the value of b.gobin with $GOBIN.
+//
+// fmtcmd replaces the name of the current directory with dot (.)
+// but only when it is at the beginning of a space-separated token.
+func (b *Builder) fmtcmd(dir string, format string, args ...any) string {
+ cmd := fmt.Sprintf(format, args...)
+ if dir != "" && dir != "/" {
+ dot := " ."
+ if dir[len(dir)-1] == filepath.Separator {
+ dot += string(filepath.Separator)
+ }
+ cmd = strings.ReplaceAll(" "+cmd, " "+dir, dot)[1:]
+ if b.scriptDir != dir {
+ b.scriptDir = dir
+ cmd = "cd " + dir + "\n" + cmd
+ }
+ }
+ if b.WorkDir != "" && !strings.HasPrefix(cmd, "cat ") {
+ cmd = strings.ReplaceAll(cmd, b.WorkDir, "$WORK")
+ escaped := strconv.Quote(b.WorkDir)
+ escaped = escaped[1 : len(escaped)-1] // strip quote characters
+ if escaped != b.WorkDir {
+ cmd = strings.ReplaceAll(cmd, escaped, "$WORK")
+ }
+ }
+ return cmd
+}
+
+// Showcmd prints the given command to standard output
+// for the implementation of -n or -x.
+func (b *Builder) Showcmd(dir string, format string, args ...any) {
+ b.output.Lock()
+ defer b.output.Unlock()
+ b.Print(b.fmtcmd(dir, format, args...) + "\n")
+}
+
+// showOutput prints "# desc" followed by the given output.
+// The output is expected to contain references to 'dir', usually
+// the source directory for the package that has failed to build.
+// showOutput rewrites mentions of dir with a relative path to dir
+// when the relative path is shorter. This is usually more pleasant.
+// For example, if fmt doesn't compile and we are in src/html,
+// the output is
+//
+// $ go build
+// # fmt
+// ../fmt/print.go:1090: undefined: asdf
+// $
+//
+// instead of
+//
+// $ go build
+// # fmt
+// /usr/gopher/go/src/fmt/print.go:1090: undefined: asdf
+// $
+//
+// showOutput also replaces references to the work directory with $WORK.
+//
+// If a is not nil and a.output is not nil, showOutput appends to that slice instead of
+// printing to b.Print.
+func (b *Builder) showOutput(a *Action, dir, desc, out string) {
+ importPath := ""
+ if a != nil && a.Package != nil {
+ importPath = a.Package.ImportPath
+ }
+ psErr := formatOutput(b.WorkDir, dir, importPath, desc, out)
+ if a != nil && a.output != nil {
+ a.output = append(a.output, psErr.prefix...)
+ a.output = append(a.output, psErr.suffix...)
+ return
+ }
+
+ b.output.Lock()
+ defer b.output.Unlock()
+ b.Print(psErr.prefix, psErr.suffix)
+}
+
+// A prefixSuffixError is an error formatted by formatOutput.
+type prefixSuffixError struct {
+ importPath string
+ prefix, suffix string
+}
+
+func (e *prefixSuffixError) Error() string {
+ if e.importPath != "" && !strings.HasPrefix(strings.TrimPrefix(e.prefix, "# "), e.importPath) {
+ return fmt.Sprintf("go build %s:\n%s%s", e.importPath, e.prefix, e.suffix)
+ }
+ return e.prefix + e.suffix
+}
+
+func (e *prefixSuffixError) ImportPath() string {
+ return e.importPath
+}
+
+// formatOutput prints "# desc" followed by the given output.
+// The output is expected to contain references to 'dir', usually
+// the source directory for the package that has failed to build.
+// formatOutput rewrites mentions of dir with a relative path to dir
+// when the relative path is shorter. This is usually more pleasant.
+// For example, if fmt doesn't compile and we are in src/html,
+// the output is
+//
+// $ go build
+// # fmt
+// ../fmt/print.go:1090: undefined: asdf
+// $
+//
+// instead of
+//
+// $ go build
+// # fmt
+// /usr/gopher/go/src/fmt/print.go:1090: undefined: asdf
+// $
+//
+// formatOutput also replaces references to the work directory with $WORK.
+// formatOutput returns the output in a prefix with the description and a
+// suffix with the actual output.
+func formatOutput(workDir, dir, importPath, desc, out string) *prefixSuffixError {
+ prefix := "# " + desc
+ suffix := "\n" + out
+
+ suffix = strings.ReplaceAll(suffix, " "+workDir, " $WORK")
+
+ for {
+ // Note that dir starts out long, something like
+ // /foo/bar/baz/root/a
+ // The target string to be reduced is something like
+ // (blah-blah-blah) /foo/bar/baz/root/sibling/whatever.go:blah:blah
+ // /foo/bar/baz/root/a doesn't match /foo/bar/baz/root/sibling, but the prefix
+ // /foo/bar/baz/root does. And there may be other niblings sharing shorter
+ // prefixes, the only way to find them is to look.
+ // This doesn't always produce a relative path --
+ // /foo is shorter than ../../.., for example.
+ //
+ if reldir := base.ShortPath(dir); reldir != dir {
+ suffix = strings.ReplaceAll(suffix, " "+dir, " "+reldir)
+ suffix = strings.ReplaceAll(suffix, "\n"+dir, "\n"+reldir)
+ suffix = strings.ReplaceAll(suffix, "\n\t"+dir, "\n\t"+reldir)
+ if filepath.Separator == '\\' {
+				// For reasons unknown, these paths sometimes come out
+				// with forward slashes instead of backslashes.
+ wdir := strings.ReplaceAll(dir, "\\", "/")
+ suffix = strings.ReplaceAll(suffix, " "+wdir, " "+reldir)
+ suffix = strings.ReplaceAll(suffix, "\n"+wdir, "\n"+reldir)
+ suffix = strings.ReplaceAll(suffix, "\n\t"+wdir, "\n\t"+reldir)
+ }
+ }
+ dirP := filepath.Dir(dir)
+ if dir == dirP {
+ break
+ }
+ dir = dirP
+ }
+
+ return &prefixSuffixError{importPath: importPath, prefix: prefix, suffix: suffix}
+}
+
+var cgoLine = lazyregexp.New(`\[[^\[\]]+\.(cgo1|cover)\.go:[0-9]+(:[0-9]+)?\]`)
+var cgoTypeSigRe = lazyregexp.New(`\b_C2?(type|func|var|macro)_\B`)
+
+// run runs the command given by cmdline in the directory dir.
+// If the command fails, run prints information about the failure
+// and returns a non-nil error.
+func (b *Builder) run(a *Action, dir string, desc string, env []string, cmdargs ...any) error {
+ out, err := b.runOut(a, dir, env, cmdargs...)
+ if len(out) > 0 {
+ if desc == "" {
+ desc = b.fmtcmd(dir, "%s", strings.Join(str.StringList(cmdargs...), " "))
+ }
+ if err != nil {
+ err = formatOutput(b.WorkDir, dir, a.Package.ImportPath, desc, b.processOutput(out))
+ } else {
+ b.showOutput(a, dir, desc, b.processOutput(out))
+ }
+ }
+ return err
+}
+
+// processOutput prepares the output of runOut for printing to the console.
+func (b *Builder) processOutput(out []byte) string {
+ if out[len(out)-1] != '\n' {
+ out = append(out, '\n')
+ }
+ messages := string(out)
+ // Fix up output referring to cgo-generated code to be more readable.
+ // Replace x.go:19[/tmp/.../x.cgo1.go:18] with x.go:19.
+ // Replace *[100]_Ctype_foo with *[100]C.foo.
+ // If we're using -x, assume we're debugging and want the full dump, so disable the rewrite.
+ if !cfg.BuildX && cgoLine.MatchString(messages) {
+ messages = cgoLine.ReplaceAllString(messages, "")
+ messages = cgoTypeSigRe.ReplaceAllString(messages, "C.")
+ }
+ return messages
+}
+
+// runOut runs the command given by cmdline in the directory dir.
+// It returns the command output and any errors that occurred.
+// It accumulates execution time in a.
+func (b *Builder) runOut(a *Action, dir string, env []string, cmdargs ...any) ([]byte, error) {
+ cmdline := str.StringList(cmdargs...)
+
+ for _, arg := range cmdline {
+ // GNU binutils commands, including gcc and gccgo, interpret an argument
+ // @foo anywhere in the command line (even following --) as meaning
+ // "read and insert arguments from the file named foo."
+ // Don't say anything that might be misinterpreted that way.
+ if strings.HasPrefix(arg, "@") {
+ return nil, fmt.Errorf("invalid command-line argument %s in command: %s", arg, joinUnambiguously(cmdline))
+ }
+ }
+
+ if cfg.BuildN || cfg.BuildX {
+ var envcmdline string
+ for _, e := range env {
+ if j := strings.IndexByte(e, '='); j != -1 {
+ if strings.ContainsRune(e[j+1:], '\'') {
+ envcmdline += fmt.Sprintf("%s=%q", e[:j], e[j+1:])
+ } else {
+ envcmdline += fmt.Sprintf("%s='%s'", e[:j], e[j+1:])
+ }
+ envcmdline += " "
+ }
+ }
+ envcmdline += joinUnambiguously(cmdline)
+ b.Showcmd(dir, "%s", envcmdline)
+ if cfg.BuildN {
+ return nil, nil
+ }
+ }
+
+ var buf bytes.Buffer
+ cmd := exec.Command(cmdline[0], cmdline[1:]...)
+ if cmd.Path != "" {
+ cmd.Args[0] = cmd.Path
+ }
+ cmd.Stdout = &buf
+ cmd.Stderr = &buf
+ cleanup := passLongArgsInResponseFiles(cmd)
+ defer cleanup()
+ if dir != "." {
+ cmd.Dir = dir
+ }
+ cmd.Env = cmd.Environ() // Pre-allocate with correct PWD.
+
+ // Add the TOOLEXEC_IMPORTPATH environment variable for -toolexec tools.
+ // It doesn't really matter if -toolexec isn't being used.
+ // Note that a.Package.Desc is not really an import path,
+ // but this is consistent with 'go list -f {{.ImportPath}}'.
+ // Plus, it is useful to uniquely identify packages in 'go list -json'.
+ if a != nil && a.Package != nil {
+ cmd.Env = append(cmd.Env, "TOOLEXEC_IMPORTPATH="+a.Package.Desc())
+ }
+
+ cmd.Env = append(cmd.Env, env...)
+ start := time.Now()
+ err := cmd.Run()
+ if a != nil && a.json != nil {
+ aj := a.json
+ aj.Cmd = append(aj.Cmd, joinUnambiguously(cmdline))
+ aj.CmdReal += time.Since(start)
+ if ps := cmd.ProcessState; ps != nil {
+ aj.CmdUser += ps.UserTime()
+ aj.CmdSys += ps.SystemTime()
+ }
+ }
+
+ // err can be something like 'exit status 1'.
+ // Add information about what program was running.
+ // Note that if buf.Bytes() is non-empty, the caller usually
+ // shows buf.Bytes() and does not print err at all, so the
+ // prefix here does not make most output any more verbose.
+ if err != nil {
+ err = errors.New(cmdline[0] + ": " + err.Error())
+ }
+ return buf.Bytes(), err
+}
+
+// joinUnambiguously prints the slice, quoting where necessary to make the
+// output unambiguous.
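+// For example, joinUnambiguously([]string{"gcc", "-c", "a b.c"}) returns
+// `gcc -c "a b.c"`, quoting only the argument that contains a space.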
+// TODO: See issue 5279. The printing of commands needs a complete redo.
+func joinUnambiguously(a []string) string {
+ var buf strings.Builder
+ for i, s := range a {
+ if i > 0 {
+ buf.WriteByte(' ')
+ }
+ q := strconv.Quote(s)
+ // A gccgo command line can contain -( and -).
+ // Make sure we quote them since they are special to the shell.
+ // The trimpath argument can also contain > (part of =>) and ;. Quote those too.
+ if s == "" || strings.ContainsAny(s, " ()>;") || len(q) > len(s)+2 {
+ buf.WriteString(q)
+ } else {
+ buf.WriteString(s)
+ }
+ }
+ return buf.String()
+}
+
+// cCompilerEnv returns environment variables to set when running the
+// C compiler. This is needed to disable escape codes in clang error
+// messages that confuse tools like cgo.
+func (b *Builder) cCompilerEnv() []string {
+ return []string{"TERM=dumb"}
+}
+
+// Mkdir makes the named directory.
+func (b *Builder) Mkdir(dir string) error {
+ // Make Mkdir(a.Objdir) a no-op instead of an error when a.Objdir == "".
+ if dir == "" {
+ return nil
+ }
+
+ b.exec.Lock()
+ defer b.exec.Unlock()
+ // We can be a little aggressive about being
+ // sure directories exist. Skip repeated calls.
+ if b.mkdirCache[dir] {
+ return nil
+ }
+ b.mkdirCache[dir] = true
+
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "mkdir -p %s", dir)
+ if cfg.BuildN {
+ return nil
+ }
+ }
+
+ if err := os.MkdirAll(dir, 0777); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Symlink creates a symlink newname -> oldname.
+func (b *Builder) Symlink(oldname, newname string) error {
+ // It's not an error to try to recreate an existing symlink.
+ if link, err := os.Readlink(newname); err == nil && link == oldname {
+ return nil
+ }
+
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "ln -s %s %s", oldname, newname)
+ if cfg.BuildN {
+ return nil
+ }
+ }
+ return os.Symlink(oldname, newname)
+}
+
+// mkAbs returns an absolute path corresponding to
+// evaluating f in the directory dir.
+// We always pass absolute paths of source files so that
+// the error messages will include the full path to a file
+// in need of attention.
+func mkAbs(dir, f string) string {
+ // Leave absolute paths alone.
+ // Also, during -n mode we use the pseudo-directory $WORK
+ // instead of creating an actual work directory that won't be used.
+ // Leave paths beginning with $WORK alone too.
+ if filepath.IsAbs(f) || strings.HasPrefix(f, "$WORK") {
+ return f
+ }
+ return filepath.Join(dir, f)
+}
+
+type toolchain interface {
+ // gc runs the compiler in a specific directory on a set of files
+ // and returns the name of the generated output file.
+ gc(b *Builder, a *Action, archive string, importcfg, embedcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, out []byte, err error)
+ // cc runs the toolchain's C compiler in a directory on a C file
+ // to produce an output file.
+ cc(b *Builder, a *Action, ofile, cfile string) error
+ // asm runs the assembler in a specific directory on specific files
+ // and returns a list of named output files.
+ asm(b *Builder, a *Action, sfiles []string) ([]string, error)
+ // symabis scans the symbol ABIs from sfiles and returns the
+ // path to the output symbol ABIs file, or "" if none.
+ symabis(b *Builder, a *Action, sfiles []string) (string, error)
+ // pack runs the archive packer in a specific directory to create
+ // an archive from a set of object files.
+	// Typically it is run in the object directory.
+ pack(b *Builder, a *Action, afile string, ofiles []string) error
+ // ld runs the linker to create an executable starting at mainpkg.
+ ld(b *Builder, root *Action, out, importcfg, mainpkg string) error
+ // ldShared runs the linker to create a shared library containing the pkgs built by toplevelactions
+ ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error
+
+ compiler() string
+ linker() string
+}
+
+type noToolchain struct{}
+
+func noCompiler() error {
+ log.Fatalf("unknown compiler %q", cfg.BuildContext.Compiler)
+ return nil
+}
+
+func (noToolchain) compiler() string {
+ noCompiler()
+ return ""
+}
+
+func (noToolchain) linker() string {
+ noCompiler()
+ return ""
+}
+
+func (noToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) {
+ return "", nil, noCompiler()
+}
+
+func (noToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) {
+ return nil, noCompiler()
+}
+
+func (noToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) {
+ return "", noCompiler()
+}
+
+func (noToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error {
+ return noCompiler()
+}
+
+func (noToolchain) ld(b *Builder, root *Action, out, importcfg, mainpkg string) error {
+ return noCompiler()
+}
+
+func (noToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error {
+ return noCompiler()
+}
+
+func (noToolchain) cc(b *Builder, a *Action, ofile, cfile string) error {
+ return noCompiler()
+}
+
+// gcc runs the gcc C compiler to create an object from a single C file.
+func (b *Builder) gcc(a *Action, p *load.Package, workdir, out string, flags []string, cfile string) error {
+ return b.ccompile(a, p, out, flags, cfile, b.GccCmd(p.Dir, workdir))
+}
+
+// gxx runs the g++ C++ compiler to create an object from a single C++ file.
+func (b *Builder) gxx(a *Action, p *load.Package, workdir, out string, flags []string, cxxfile string) error {
+ return b.ccompile(a, p, out, flags, cxxfile, b.GxxCmd(p.Dir, workdir))
+}
+
+// gfortran runs the gfortran Fortran compiler to create an object from a single Fortran file.
+func (b *Builder) gfortran(a *Action, p *load.Package, workdir, out string, flags []string, ffile string) error {
+ return b.ccompile(a, p, out, flags, ffile, b.gfortranCmd(p.Dir, workdir))
+}
+
+// ccompile runs the given C or C++ compiler and creates an object from a single source file.
+func (b *Builder) ccompile(a *Action, p *load.Package, outfile string, flags []string, file string, compiler []string) error {
+ file = mkAbs(p.Dir, file)
+ outfile = mkAbs(p.Dir, outfile)
+
+ // Elide source directory paths if -trimpath is set or the package is in GOROOT.
+ // This is needed for source files (e.g., a .c file in a package directory).
+ // TODO(golang.org/issue/36072): cgo also generates files with #line
+ // directives pointing to the source directory. It should not generate those
+ // when -trimpath is enabled.
+ if b.gccSupportsFlag(compiler, "-fdebug-prefix-map=a=b") {
+ if cfg.BuildTrimpath || p.Goroot {
+ prefixMapFlag := "-fdebug-prefix-map"
+ if b.gccSupportsFlag(compiler, "-ffile-prefix-map=a=b") {
+ prefixMapFlag = "-ffile-prefix-map"
+ }
+ // Keep in sync with Action.trimpath.
+ // The trimmed paths are a little different, but we need to trim in mostly the
+ // same situations.
+ var from, toPath string
+ if m := p.Module; m == nil {
+ if p.Root == "" { // command-line-arguments in GOPATH mode, maybe?
+ from = p.Dir
+ toPath = p.ImportPath
+ } else if p.Goroot {
+ from = p.Root
+ toPath = "GOROOT"
+ } else {
+ from = p.Root
+ toPath = "GOPATH"
+ }
+ } else if m.Dir == "" {
+ // The module is in the vendor directory. Replace the entire vendor
+ // directory path, because the module's Dir is not filled in.
+ from = modload.VendorDir()
+ toPath = "vendor"
+ } else {
+ from = m.Dir
+ toPath = m.Path
+ if m.Version != "" {
+ toPath += "@" + m.Version
+ }
+ }
+ // -fdebug-prefix-map (or -ffile-prefix-map) requires an absolute "to"
+ // path (or it joins the path with the working directory). Pick something
+ // that makes sense for the target platform.
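+ // For example, a package in module example.com/m at v1.0.0 is mapped
+ // to /_/example.com/m@v1.0.0 on non-Windows systems.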
+ var to string
+ if cfg.BuildContext.GOOS == "windows" {
+ to = filepath.Join(`\\_\_`, toPath)
+ } else {
+ to = filepath.Join("/_", toPath)
+ }
+ flags = append(slices.Clip(flags), prefixMapFlag+"="+from+"="+to)
+ }
+ }
+
+ // Tell gcc not to insert truly random numbers into the build process;
+ // this ensures LTO won't create random numbers for symbols.
+ if b.gccSupportsFlag(compiler, "-frandom-seed=1") {
+ flags = append(flags, "-frandom-seed="+buildid.HashToString(a.actionID))
+ }
+
+ overlayPath := file
+ if p, ok := a.nonGoOverlay[overlayPath]; ok {
+ overlayPath = p
+ }
+ output, err := b.runOut(a, filepath.Dir(overlayPath), b.cCompilerEnv(), compiler, flags, "-o", outfile, "-c", filepath.Base(overlayPath))
+ if len(output) > 0 {
+ // On FreeBSD 11, when we pass -g to clang 3.8 it
+ // invokes its internal assembler with -dwarf-version=2.
+ // When it sees .section .note.GNU-stack, it warns
+ // "DWARF2 only supports one section per compilation unit".
+ // This warning makes no sense, since the section is empty,
+ // but it confuses people.
+ // We work around the problem by detecting the warning
+ // and dropping -g and trying again.
+ if bytes.Contains(output, []byte("DWARF2 only supports one section per compilation unit")) {
+ newFlags := make([]string, 0, len(flags))
+ for _, f := range flags {
+ if !strings.HasPrefix(f, "-g") {
+ newFlags = append(newFlags, f)
+ }
+ }
+ if len(newFlags) < len(flags) {
+ return b.ccompile(a, p, outfile, newFlags, file, compiler)
+ }
+ }
+
+ if err != nil || os.Getenv("GO_BUILDER_NAME") != "" {
+ err = formatOutput(b.WorkDir, p.Dir, p.ImportPath, p.Desc(), b.processOutput(output))
+ } else {
+ b.showOutput(a, p.Dir, p.Desc(), b.processOutput(output))
+ }
+ }
+ return err
+}
+
+// gccld runs the gcc linker to create an executable from a set of object files.
+// Any error output is only displayed for BuildN or BuildX.
+func (b *Builder) gccld(a *Action, p *load.Package, objdir, outfile string, flags []string, objs []string) error {
+ var cmd []string
+ if len(p.CXXFiles) > 0 || len(p.SwigCXXFiles) > 0 {
+ cmd = b.GxxCmd(p.Dir, objdir)
+ } else {
+ cmd = b.GccCmd(p.Dir, objdir)
+ }
+
+ cmdargs := []any{cmd, "-o", outfile, objs, flags}
+ dir := p.Dir
+ out, err := b.runOut(a, base.Cwd(), b.cCompilerEnv(), cmdargs...)
+
+ if len(out) > 0 {
+ // Filter out useless linker warnings caused by bugs outside Go.
+ // See also cmd/link/internal/ld's hostlink method.
+ var save [][]byte
+ var skipLines int
+ for _, line := range bytes.SplitAfter(out, []byte("\n")) {
+ // golang.org/issue/26073 - Apple Xcode bug
+ if bytes.Contains(line, []byte("ld: warning: text-based stub file")) {
+ continue
+ }
+
+ if skipLines > 0 {
+ skipLines--
+ continue
+ }
+
+ // Remove duplicate main symbol with runtime/cgo on AIX.
+ // With runtime/cgo, two main are available:
+ // One is generated by cgo tool with {return 0;}.
+ // The other one is the main calling runtime.rt0_go
+ // in runtime/cgo.
+ // The second can't be used by cgo programs because
+ // runtime.rt0_go is unknown to them.
+ // Therefore, we let ld remove this main version
+ // and use the cgo-generated one.
+ if p.ImportPath == "runtime/cgo" && bytes.Contains(line, []byte("ld: 0711-224 WARNING: Duplicate symbol: .main")) {
+ skipLines = 1
+ continue
+ }
+
+ save = append(save, line)
+ }
+ out = bytes.Join(save, nil)
+ if len(out) > 0 && (cfg.BuildN || cfg.BuildX) {
+ b.showOutput(nil, dir, p.ImportPath, b.processOutput(out))
+ }
+ }
+ return err
+}
+
+ // GccCmd returns a gcc command line prefix.
+// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
+func (b *Builder) GccCmd(incdir, workdir string) []string {
+ return b.compilerCmd(b.ccExe(), incdir, workdir)
+}
+
+ // GxxCmd returns a g++ command line prefix.
+// defaultCXX is defined in zdefaultcc.go, written by cmd/dist.
+func (b *Builder) GxxCmd(incdir, workdir string) []string {
+ return b.compilerCmd(b.cxxExe(), incdir, workdir)
+}
+
+// gfortranCmd returns a gfortran command line prefix.
+func (b *Builder) gfortranCmd(incdir, workdir string) []string {
+ return b.compilerCmd(b.fcExe(), incdir, workdir)
+}
+
+// ccExe returns the CC compiler setting without all the extra flags we add implicitly.
+func (b *Builder) ccExe() []string {
+ return envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch))
+}
+
+// cxxExe returns the CXX compiler setting without all the extra flags we add implicitly.
+func (b *Builder) cxxExe() []string {
+ return envList("CXX", cfg.DefaultCXX(cfg.Goos, cfg.Goarch))
+}
+
+// fcExe returns the FC compiler setting without all the extra flags we add implicitly.
+func (b *Builder) fcExe() []string {
+ return envList("FC", "gfortran")
+}
+
+ // compilerCmd returns a command line prefix built from the given compiler
+ // command, the include directory, and the flags we always pass implicitly.
+func (b *Builder) compilerCmd(compiler []string, incdir, workdir string) []string {
+ a := append(compiler, "-I", incdir)
+
+ // Definitely want -fPIC but on Windows gcc complains
+ // "-fPIC ignored for target (all code is position independent)"
+ if cfg.Goos != "windows" {
+ a = append(a, "-fPIC")
+ }
+ a = append(a, b.gccArchArgs()...)
+ // gcc-4.5 and beyond require explicit "-pthread" flag
+ // for multithreading with pthread library.
+ if cfg.BuildContext.CgoEnabled {
+ switch cfg.Goos {
+ case "windows":
+ a = append(a, "-mthreads")
+ default:
+ a = append(a, "-pthread")
+ }
+ }
+
+ if cfg.Goos == "aix" {
+ // mcmodel=large must always be enabled to allow large TOC.
+ a = append(a, "-mcmodel=large")
+ }
+
+ // disable ASCII art in clang errors, if possible
+ if b.gccSupportsFlag(compiler, "-fno-caret-diagnostics") {
+ a = append(a, "-fno-caret-diagnostics")
+ }
+ // clang is too smart about command-line arguments
+ if b.gccSupportsFlag(compiler, "-Qunused-arguments") {
+ a = append(a, "-Qunused-arguments")
+ }
+
+ // zig cc passes --gc-sections to the underlying linker, which then causes
+ // undefined symbol errors when compiling with cgo but without C code.
+ // https://github.com/golang/go/issues/52690
+ if b.gccSupportsFlag(compiler, "-Wl,--no-gc-sections") {
+ a = append(a, "-Wl,--no-gc-sections")
+ }
+
+ // disable word wrapping in error messages
+ a = append(a, "-fmessage-length=0")
+
+ // Tell gcc not to include the work directory in object files.
+ if b.gccSupportsFlag(compiler, "-fdebug-prefix-map=a=b") {
+ if workdir == "" {
+ workdir = b.WorkDir
+ }
+ workdir = strings.TrimSuffix(workdir, string(filepath.Separator))
+ if b.gccSupportsFlag(compiler, "-ffile-prefix-map=a=b") {
+ a = append(a, "-ffile-prefix-map="+workdir+"=/tmp/go-build")
+ } else {
+ a = append(a, "-fdebug-prefix-map="+workdir+"=/tmp/go-build")
+ }
+ }
+
+ // Tell gcc not to include flags in object files, which defeats the
+ // point of -fdebug-prefix-map above.
+ if b.gccSupportsFlag(compiler, "-gno-record-gcc-switches") {
+ a = append(a, "-gno-record-gcc-switches")
+ }
+
+ // On OS X, some of the compilers behave as if -fno-common
+ // is always set, and the Mach-O linker in 6l/8l assumes this.
+ // See https://golang.org/issue/3253.
+ if cfg.Goos == "darwin" || cfg.Goos == "ios" {
+ a = append(a, "-fno-common")
+ }
+
+ return a
+}
+
+// gccNoPie returns the flag to use to request non-PIE. On systems
+// with PIE (position independent executables) enabled by default,
+// -no-pie must be passed when doing a partial link with -Wl,-r.
+// But -no-pie is not supported by all compilers, and clang spells it -nopie.
+func (b *Builder) gccNoPie(linker []string) string {
+ if b.gccSupportsFlag(linker, "-no-pie") {
+ return "-no-pie"
+ }
+ if b.gccSupportsFlag(linker, "-nopie") {
+ return "-nopie"
+ }
+ return ""
+}
+
+ // gccSupportsFlag reports whether the compiler supports the given flag.
+func (b *Builder) gccSupportsFlag(compiler []string, flag string) bool {
+ key := [2]string{compiler[0], flag}
+
+ // We used to write an empty C file, but that gets complicated with go
+ // build -n. We tried using a file that does not exist, but that fails on
+ // systems with GCC version 4.2.1; that is the last GPLv2 version of GCC,
+ // so some systems have frozen on it. Now we pass an empty file on stdin,
+ // which should work at least for GCC and clang.
+ //
+ // If the argument is "-Wl,", then it is testing the linker. In that case,
+ // skip "-c". If it's not "-Wl,", then we are testing the compiler and can
+ // omit the linking step with "-c".
+ //
+ // Using the same CFLAGS/LDFLAGS here and for building the program.
+
+ // On the iOS builder the command
+ // $CC -Wl,--no-gc-sections -x c - -o /dev/null < /dev/null
+ // is failing with:
+ // Unable to remove existing file: Invalid argument
+ tmp := os.DevNull
+ if runtime.GOOS == "windows" || runtime.GOOS == "ios" {
+ f, err := os.CreateTemp(b.WorkDir, "")
+ if err != nil {
+ return false
+ }
+ f.Close()
+ tmp = f.Name()
+ defer os.Remove(tmp)
+ }
+
+ cmdArgs := str.StringList(compiler, flag)
+ if strings.HasPrefix(flag, "-Wl,") /* linker flag */ {
+ ldflags, err := buildFlags("LDFLAGS", defaultCFlags, nil, checkLinkerFlags)
+ if err != nil {
+ return false
+ }
+ cmdArgs = append(cmdArgs, ldflags...)
+ } else { /* compiler flag, add "-c" */
+ cflags, err := buildFlags("CFLAGS", defaultCFlags, nil, checkCompilerFlags)
+ if err != nil {
+ return false
+ }
+ cmdArgs = append(cmdArgs, cflags...)
+ cmdArgs = append(cmdArgs, "-c")
+ }
+
+ cmdArgs = append(cmdArgs, "-x", "c", "-", "-o", tmp)
+
+ if cfg.BuildN {
+ b.Showcmd(b.WorkDir, "%s || true", joinUnambiguously(cmdArgs))
+ return false
+ }
+
+ // gccCompilerID acquires b.exec, so do before acquiring lock.
+ compilerID, cacheOK := b.gccCompilerID(compiler[0])
+
+ b.exec.Lock()
+ defer b.exec.Unlock()
+ if b, ok := b.flagCache[key]; ok {
+ return b
+ }
+ if b.flagCache == nil {
+ b.flagCache = make(map[[2]string]bool)
+ }
+
+ // Look in build cache.
+ var flagID cache.ActionID
+ if cacheOK {
+ flagID = cache.Subkey(compilerID, "gccSupportsFlag "+flag)
+ if data, _, err := cache.GetBytes(cache.Default(), flagID); err == nil {
+ supported := string(data) == "true"
+ b.flagCache[key] = supported
+ return supported
+ }
+ }
+
+ if cfg.BuildX {
+ b.Showcmd(b.WorkDir, "%s || true", joinUnambiguously(cmdArgs))
+ }
+ cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
+ cmd.Dir = b.WorkDir
+ cmd.Env = append(cmd.Environ(), "LC_ALL=C")
+ out, _ := cmd.CombinedOutput()
+ // GCC says "unrecognized command line option".
+ // clang says "unknown argument".
+ // tcc says "unsupported"
+ // AIX says "not recognized"
+ // Older versions of GCC say "unrecognised debug output level".
+ // For -fsplit-stack GCC says "'-fsplit-stack' is not supported".
+ supported := !bytes.Contains(out, []byte("unrecognized")) &&
+ !bytes.Contains(out, []byte("unknown")) &&
+ !bytes.Contains(out, []byte("unrecognised")) &&
+ !bytes.Contains(out, []byte("is not supported")) &&
+ !bytes.Contains(out, []byte("not recognized")) &&
+ !bytes.Contains(out, []byte("unsupported"))
+
+ if cacheOK {
+ s := "false"
+ if supported {
+ s = "true"
+ }
+ cache.PutBytes(cache.Default(), flagID, []byte(s))
+ }
+
+ b.flagCache[key] = supported
+ return supported
+}
+
+// statString returns a string form of an os.FileInfo, for serializing and comparison.
+func statString(info os.FileInfo) string {
+ return fmt.Sprintf("stat %d %x %v %v\n", info.Size(), uint64(info.Mode()), info.ModTime(), info.IsDir())
+}
+
+// gccCompilerID returns a build cache key for the current gcc,
+// as identified by running 'compiler'.
+// The caller can use subkeys of the key.
+// Other parts of cmd/go can use the id as a hash
+// of the installed compiler version.
+func (b *Builder) gccCompilerID(compiler string) (id cache.ActionID, ok bool) {
+ if cfg.BuildN {
+ b.Showcmd(b.WorkDir, "%s || true", joinUnambiguously([]string{compiler, "--version"}))
+ return cache.ActionID{}, false
+ }
+
+ b.exec.Lock()
+ defer b.exec.Unlock()
+
+ if id, ok := b.gccCompilerIDCache[compiler]; ok {
+ return id, ok
+ }
+
+ // We hash the compiler's full path to get a cache entry key.
+ // That cache entry holds a validation description,
+ // which is of the form:
+ //
+ // filename \x00 statinfo \x00
+ // ...
+ // compiler id
+ //
+ // If os.Stat of each filename matches statinfo,
+ // then the entry is still valid, and we can use the
+ // compiler id without any further expense.
+ //
+ // Otherwise, we compute a new validation description
+ // and compiler id (below).
+ exe, err := exec.LookPath(compiler)
+ if err != nil {
+ return cache.ActionID{}, false
+ }
+
+ h := cache.NewHash("gccCompilerID")
+ fmt.Fprintf(h, "gccCompilerID %q", exe)
+ key := h.Sum()
+ data, _, err := cache.GetBytes(cache.Default(), key)
+ if err == nil && len(data) > len(id) {
+ stats := strings.Split(string(data[:len(data)-len(id)]), "\x00")
+ if len(stats)%2 != 0 {
+ goto Miss
+ }
+ for i := 0; i+2 <= len(stats); i += 2 {
+ info, err := os.Stat(stats[i])
+ if err != nil || statString(info) != stats[i+1] {
+ goto Miss
+ }
+ }
+ copy(id[:], data[len(data)-len(id):])
+ return id, true
+ Miss:
+ }
+
+ // Validation failed. Compute a new description (in buf) and compiler ID (in h).
+ // For now, there are only at most two filenames in the stat information.
+ // The first one is the compiler executable we invoke.
+ // The second is the underlying compiler as reported by -v -###
+ // (see b.gccToolID implementation in buildid.go).
+ toolID, exe2, err := b.gccToolID(compiler, "c")
+ if err != nil {
+ return cache.ActionID{}, false
+ }
+
+ exes := []string{exe, exe2}
+ str.Uniq(&exes)
+ fmt.Fprintf(h, "gccCompilerID %q %q\n", exes, toolID)
+ id = h.Sum()
+
+ var buf bytes.Buffer
+ for _, exe := range exes {
+ if exe == "" {
+ continue
+ }
+ info, err := os.Stat(exe)
+ if err != nil {
+ return cache.ActionID{}, false
+ }
+ buf.WriteString(exe)
+ buf.WriteString("\x00")
+ buf.WriteString(statString(info))
+ buf.WriteString("\x00")
+ }
+ buf.Write(id[:])
+
+ cache.PutBytes(cache.Default(), key, buf.Bytes())
+ if b.gccCompilerIDCache == nil {
+ b.gccCompilerIDCache = make(map[string]cache.ActionID)
+ }
+ b.gccCompilerIDCache[compiler] = id
+ return id, true
+}
+
+// gccArchArgs returns arguments to pass to gcc based on the architecture.
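+ // For example, it returns []string{"-m64"} for GOARCH=amd64 on non-darwin
+ // systems and []string{"-marm"} for GOARCH=arm.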
+func (b *Builder) gccArchArgs() []string {
+ switch cfg.Goarch {
+ case "386":
+ return []string{"-m32"}
+ case "amd64":
+ if cfg.Goos == "darwin" {
+ return []string{"-arch", "x86_64", "-m64"}
+ }
+ return []string{"-m64"}
+ case "arm64":
+ if cfg.Goos == "darwin" {
+ return []string{"-arch", "arm64"}
+ }
+ case "arm":
+ return []string{"-marm"} // not thumb
+ case "s390x":
+ return []string{"-m64", "-march=z196"}
+ case "mips64", "mips64le":
+ args := []string{"-mabi=64"}
+ if cfg.GOMIPS64 == "hardfloat" {
+ return append(args, "-mhard-float")
+ } else if cfg.GOMIPS64 == "softfloat" {
+ return append(args, "-msoft-float")
+ }
+ case "mips", "mipsle":
+ args := []string{"-mabi=32", "-march=mips32"}
+ if cfg.GOMIPS == "hardfloat" {
+ return append(args, "-mhard-float", "-mfp32", "-mno-odd-spreg")
+ } else if cfg.GOMIPS == "softfloat" {
+ return append(args, "-msoft-float")
+ }
+ case "loong64":
+ return []string{"-mabi=lp64d"}
+ case "ppc64":
+ if cfg.Goos == "aix" {
+ return []string{"-maix64"}
+ }
+ }
+ return nil
+}
+
+// envList returns the value of the given environment variable broken
+// into fields, using the default value when the variable is empty.
+//
+// The environment variable must be quoted correctly for
+// quoted.Split. This should be done before building
+// anything, for example, in BuildInit.
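+ //
+ // For example, with CC unset, envList("CC", "gcc") returns []string{"gcc"},
+ // and with CC="ccache clang" it returns []string{"ccache", "clang"}.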
+func envList(key, def string) []string {
+ v := cfg.Getenv(key)
+ if v == "" {
+ v = def
+ }
+ args, err := quoted.Split(v)
+ if err != nil {
+ panic(fmt.Sprintf("could not parse environment variable %s with value %q: %v", key, v, err))
+ }
+ return args
+}
+
+// CFlags returns the flags to use when invoking the C, C++ or Fortran compilers, or cgo.
+func (b *Builder) CFlags(p *load.Package) (cppflags, cflags, cxxflags, fflags, ldflags []string, err error) {
+ if cppflags, err = buildFlags("CPPFLAGS", "", p.CgoCPPFLAGS, checkCompilerFlags); err != nil {
+ return
+ }
+ if cflags, err = buildFlags("CFLAGS", defaultCFlags, p.CgoCFLAGS, checkCompilerFlags); err != nil {
+ return
+ }
+ if cxxflags, err = buildFlags("CXXFLAGS", defaultCFlags, p.CgoCXXFLAGS, checkCompilerFlags); err != nil {
+ return
+ }
+ if fflags, err = buildFlags("FFLAGS", defaultCFlags, p.CgoFFLAGS, checkCompilerFlags); err != nil {
+ return
+ }
+ if ldflags, err = buildFlags("LDFLAGS", defaultCFlags, p.CgoLDFLAGS, checkLinkerFlags); err != nil {
+ return
+ }
+
+ return
+}
+
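+ // buildFlags returns the flags for the named flag set (e.g. "CFLAGS"),
+ // combining the CGO_<name> environment value (or the given defaults when
+ // it is empty) with the per-package #cgo flags, after validating the
+ // package-supplied flags with the given check function.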
+func buildFlags(name, defaults string, fromPackage []string, check func(string, string, []string) error) ([]string, error) {
+ if err := check(name, "#cgo "+name, fromPackage); err != nil {
+ return nil, err
+ }
+ return str.StringList(envList("CGO_"+name, defaults), fromPackage), nil
+}
+
+var cgoRe = lazyregexp.New(`[/\\:]`)
+
+func (b *Builder) cgo(a *Action, cgoExe, objdir string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles, ffiles []string) (outGo, outObj []string, err error) {
+ p := a.Package
+ cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS, err := b.CFlags(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
+ cgoLDFLAGS = append(cgoLDFLAGS, pcLDFLAGS...)
+ // If we are compiling Objective-C code, then we need to link against libobjc
+ if len(mfiles) > 0 {
+ cgoLDFLAGS = append(cgoLDFLAGS, "-lobjc")
+ }
+
+ // Likewise for Fortran, except there are many Fortran compilers.
+ // Support gfortran out of the box and let others pass the correct link options
+ // via CGO_LDFLAGS
+ if len(ffiles) > 0 {
+ fc := cfg.Getenv("FC")
+ if fc == "" {
+ fc = "gfortran"
+ }
+ if strings.Contains(fc, "gfortran") {
+ cgoLDFLAGS = append(cgoLDFLAGS, "-lgfortran")
+ }
+ }
+
+ // Scrutinize CFLAGS and related for flags that might cause
+ // problems if we are using internal linking (for example, use of
+ // plugins, LTO, etc) by calling a helper routine that builds on
+ // the existing CGO flags allow-lists. If we see anything
+ // suspicious, emit a special token file "preferlinkext" (known to
+ // the linker) in the object file to signal the linker that it should not
+ // try to link internally and should revert to external linking.
+ // The token we pass is a suggestion, not a mandate; if a user is
+ // explicitly asking for a specific linkmode via the "-linkmode"
+ // flag, the token will be ignored. NB: in theory we could ditch
+ // the token approach and just pass a flag to the linker when we
+ // eventually invoke it, and the linker flag could then be
+ // documented (although coming up with a simple explanation of the
+ // flag might be challenging). For more context see issues #58619,
+ // #58620, and #58848.
+ flagSources := []string{"CGO_CFLAGS", "CGO_CXXFLAGS", "CGO_FFLAGS"}
+ flagLists := [][]string{cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS}
+ if flagsNotCompatibleWithInternalLinking(flagSources, flagLists) {
+ tokenFile := objdir + "preferlinkext"
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "echo > %s", tokenFile)
+ }
+ if !cfg.BuildN {
+ if err := os.WriteFile(tokenFile, nil, 0666); err != nil {
+ return nil, nil, err
+ }
+ }
+ outObj = append(outObj, tokenFile)
+ }
+
+ if cfg.BuildMSan {
+ cgoCFLAGS = append([]string{"-fsanitize=memory"}, cgoCFLAGS...)
+ cgoLDFLAGS = append([]string{"-fsanitize=memory"}, cgoLDFLAGS...)
+ }
+ if cfg.BuildASan {
+ cgoCFLAGS = append([]string{"-fsanitize=address"}, cgoCFLAGS...)
+ cgoLDFLAGS = append([]string{"-fsanitize=address"}, cgoLDFLAGS...)
+ }
+
+ // Allows including _cgo_export.h, as well as the user's .h files,
+ // from .[ch] files in the package.
+ cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", objdir)
+
+ // cgo
+ // TODO: CGO_FLAGS?
+ gofiles := []string{objdir + "_cgo_gotypes.go"}
+ cfiles := []string{"_cgo_export.c"}
+ for _, fn := range cgofiles {
+ f := strings.TrimSuffix(filepath.Base(fn), ".go")
+ gofiles = append(gofiles, objdir+f+".cgo1.go")
+ cfiles = append(cfiles, f+".cgo2.c")
+ }
+
+ // TODO: make cgo not depend on $GOARCH?
+
+ cgoflags := []string{}
+ if p.Standard && p.ImportPath == "runtime/cgo" {
+ cgoflags = append(cgoflags, "-import_runtime_cgo=false")
+ }
+ if p.Standard && (p.ImportPath == "runtime/race" || p.ImportPath == "runtime/msan" || p.ImportPath == "runtime/cgo" || p.ImportPath == "runtime/asan") {
+ cgoflags = append(cgoflags, "-import_syscall=false")
+ }
+
+ // Update $CGO_LDFLAGS with p.CgoLDFLAGS.
+ // These flags are recorded in the generated _cgo_gotypes.go file
+ // using //go:cgo_ldflag directives, the compiler records them in the
+ // object file for the package, and then the Go linker passes them
+ // along to the host linker. At this point in the code, cgoLDFLAGS
+ // consists of the original $CGO_LDFLAGS (unchecked) and all the
+ // flags put together from source code (checked).
+ cgoenv := b.cCompilerEnv()
+ if len(cgoLDFLAGS) > 0 {
+ flags := make([]string, len(cgoLDFLAGS))
+ for i, f := range cgoLDFLAGS {
+ flags[i] = strconv.Quote(f)
+ }
+ cgoenv = append(cgoenv, "CGO_LDFLAGS="+strings.Join(flags, " "))
+ }
+
+ if cfg.BuildToolchainName == "gccgo" {
+ if b.gccSupportsFlag([]string{BuildToolchain.compiler()}, "-fsplit-stack") {
+ cgoCFLAGS = append(cgoCFLAGS, "-fsplit-stack")
+ }
+ cgoflags = append(cgoflags, "-gccgo")
+ if pkgpath := gccgoPkgpath(p); pkgpath != "" {
+ cgoflags = append(cgoflags, "-gccgopkgpath="+pkgpath)
+ }
+ if !BuildToolchain.(gccgoToolchain).supportsCgoIncomplete(b) {
+ cgoflags = append(cgoflags, "-gccgo_define_cgoincomplete")
+ }
+ }
+
+ switch cfg.BuildBuildmode {
+ case "c-archive", "c-shared":
+ // Tell cgo that if there are any exported functions
+ // it should generate a header file that C code can
+ // #include.
+ cgoflags = append(cgoflags, "-exportheader="+objdir+"_cgo_install.h")
+ }
+
+ // Rewrite overlaid paths in cgo files.
+ // cgo adds //line and #line pragmas in generated files with these paths.
+ var trimpath []string
+ for i := range cgofiles {
+ path := mkAbs(p.Dir, cgofiles[i])
+ if opath, ok := fsys.OverlayPath(path); ok {
+ cgofiles[i] = opath
+ trimpath = append(trimpath, opath+"=>"+path)
+ }
+ }
+ if len(trimpath) > 0 {
+ cgoflags = append(cgoflags, "-trimpath", strings.Join(trimpath, ";"))
+ }
+
+ if err := b.run(a, p.Dir, p.ImportPath, cgoenv, cfg.BuildToolexec, cgoExe, "-objdir", objdir, "-importpath", p.ImportPath, cgoflags, "--", cgoCPPFLAGS, cgoCFLAGS, cgofiles); err != nil {
+ return nil, nil, err
+ }
+ outGo = append(outGo, gofiles...)
+
+ // Use sequential object file names to keep them distinct
+ // and short enough to fit in the .a header file name slots.
+ // We no longer collect them all into _all.o, and we'd like
+ // tools to see both the .o suffix and unique names, so
+ // we need to make them short enough not to be truncated
+ // in the final archive.
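+ // nextOfile returns paths of the form objdir+"_x001.o", objdir+"_x002.o", and so on.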
+ oseq := 0
+ nextOfile := func() string {
+ oseq++
+ return objdir + fmt.Sprintf("_x%03d.o", oseq)
+ }
+
+ // gcc
+ cflags := str.StringList(cgoCPPFLAGS, cgoCFLAGS)
+ for _, cfile := range cfiles {
+ ofile := nextOfile()
+ if err := b.gcc(a, p, a.Objdir, ofile, cflags, objdir+cfile); err != nil {
+ return nil, nil, err
+ }
+ outObj = append(outObj, ofile)
+ }
+
+ for _, file := range gccfiles {
+ ofile := nextOfile()
+ if err := b.gcc(a, p, a.Objdir, ofile, cflags, file); err != nil {
+ return nil, nil, err
+ }
+ outObj = append(outObj, ofile)
+ }
+
+ cxxflags := str.StringList(cgoCPPFLAGS, cgoCXXFLAGS)
+ for _, file := range gxxfiles {
+ ofile := nextOfile()
+ if err := b.gxx(a, p, a.Objdir, ofile, cxxflags, file); err != nil {
+ return nil, nil, err
+ }
+ outObj = append(outObj, ofile)
+ }
+
+ for _, file := range mfiles {
+ ofile := nextOfile()
+ if err := b.gcc(a, p, a.Objdir, ofile, cflags, file); err != nil {
+ return nil, nil, err
+ }
+ outObj = append(outObj, ofile)
+ }
+
+ fflags := str.StringList(cgoCPPFLAGS, cgoFFLAGS)
+ for _, file := range ffiles {
+ ofile := nextOfile()
+ if err := b.gfortran(a, p, a.Objdir, ofile, fflags, file); err != nil {
+ return nil, nil, err
+ }
+ outObj = append(outObj, ofile)
+ }
+
+ switch cfg.BuildToolchainName {
+ case "gc":
+ importGo := objdir + "_cgo_import.go"
+ dynOutGo, dynOutObj, err := b.dynimport(a, p, objdir, importGo, cgoExe, cflags, cgoLDFLAGS, outObj)
+ if err != nil {
+ return nil, nil, err
+ }
+ if dynOutGo != "" {
+ outGo = append(outGo, dynOutGo)
+ }
+ if dynOutObj != "" {
+ outObj = append(outObj, dynOutObj)
+ }
+
+ case "gccgo":
+ defunC := objdir + "_cgo_defun.c"
+ defunObj := objdir + "_cgo_defun.o"
+ if err := BuildToolchain.cc(b, a, defunObj, defunC); err != nil {
+ return nil, nil, err
+ }
+ outObj = append(outObj, defunObj)
+
+ default:
+ noCompiler()
+ }
+
+ // Double check the //go:cgo_ldflag comments in the generated files.
+ // The compiler only permits such comments in files whose base name
+ // starts with "_cgo_". Make sure that the comments in those files
+ // are safe. This is a backstop against people somehow smuggling
+ // such a comment into a file generated by cgo.
+ if cfg.BuildToolchainName == "gc" && !cfg.BuildN {
+ var flags []string
+ for _, f := range outGo {
+ if !strings.HasPrefix(filepath.Base(f), "_cgo_") {
+ continue
+ }
+
+ src, err := os.ReadFile(f)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ const cgoLdflag = "//go:cgo_ldflag"
+ idx := bytes.Index(src, []byte(cgoLdflag))
+ for idx >= 0 {
+ // We are looking at //go:cgo_ldflag.
+ // Find start of line.
+ start := bytes.LastIndex(src[:idx], []byte("\n"))
+ if start == -1 {
+ start = 0
+ }
+
+ // Find end of line.
+ end := bytes.Index(src[idx:], []byte("\n"))
+ if end == -1 {
+ end = len(src)
+ } else {
+ end += idx
+ }
+
+ // Check for first line comment in line.
+ // We don't worry about /* */ comments,
+ // which normally won't appear in files
+ // generated by cgo.
+ commentStart := bytes.Index(src[start:], []byte("//"))
+ commentStart += start
+ // If that line comment is //go:cgo_ldflag,
+ // it's a match.
+ if bytes.HasPrefix(src[commentStart:], []byte(cgoLdflag)) {
+ // Pull out the flag, and unquote it.
+ // This is what the compiler does.
+ flag := string(src[idx+len(cgoLdflag) : end])
+ flag = strings.TrimSpace(flag)
+ flag = strings.Trim(flag, `"`)
+ flags = append(flags, flag)
+ }
+ src = src[end:]
+ idx = bytes.Index(src, []byte(cgoLdflag))
+ }
+ }
+
+ // We expect to find the contents of cgoLDFLAGS in flags.
+ if len(cgoLDFLAGS) > 0 {
+ outer:
+ for i := range flags {
+ for j, f := range cgoLDFLAGS {
+ if f != flags[i+j] {
+ continue outer
+ }
+ }
+ flags = append(flags[:i], flags[i+len(cgoLDFLAGS):]...)
+ break
+ }
+ }
+
+ if err := checkLinkerFlags("LDFLAGS", "go:cgo_ldflag", flags); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return outGo, outObj, nil
+}
+
+// flagsNotCompatibleWithInternalLinking scans the list of cgo
+// compiler flags (C/C++/Fortran) looking for flags that might cause
+// problems if the build in question uses internal linking. The
+// primary culprits are use of plugins or use of LTO, but we err on
+ // the side of caution, supporting only those flags that are on the
+ // allow-list of flags considered safe from a security perspective. It
+ // returns true if a sensitive flag is found, false otherwise.
+func flagsNotCompatibleWithInternalLinking(sourceList []string, flagListList [][]string) bool {
+ for i := range sourceList {
+ sn := sourceList[i]
+ fll := flagListList[i]
+ if err := checkCompilerFlagsForInternalLink(sn, sn, fll); err != nil {
+ return true
+ }
+ }
+ return false
+}
+
+// dynimport creates a Go source file named importGo containing
+// //go:cgo_import_dynamic directives for each symbol or library
+// dynamically imported by the object files outObj.
+// dynOutGo, if not empty, is a new Go file to build as part of the package.
+// dynOutObj, if not empty, is a new file to add to the generated archive.
+func (b *Builder) dynimport(a *Action, p *load.Package, objdir, importGo, cgoExe string, cflags, cgoLDFLAGS, outObj []string) (dynOutGo, dynOutObj string, err error) {
+ cfile := objdir + "_cgo_main.c"
+ ofile := objdir + "_cgo_main.o"
+ if err := b.gcc(a, p, objdir, ofile, cflags, cfile); err != nil {
+ return "", "", err
+ }
+
+ // Gather .syso files from this package and all (transitive) dependencies.
+ var syso []string
+ seen := make(map[*Action]bool)
+ var gatherSyso func(*Action)
+ gatherSyso = func(a1 *Action) {
+ if seen[a1] {
+ return
+ }
+ seen[a1] = true
+ if p1 := a1.Package; p1 != nil {
+ syso = append(syso, mkAbsFiles(p1.Dir, p1.SysoFiles)...)
+ }
+ for _, a2 := range a1.Deps {
+ gatherSyso(a2)
+ }
+ }
+ gatherSyso(a)
+ sort.Strings(syso)
+ str.Uniq(&syso)
+ linkobj := str.StringList(ofile, outObj, syso)
+ dynobj := objdir + "_cgo_.o"
+
+ ldflags := cgoLDFLAGS
+ if (cfg.Goarch == "arm" && cfg.Goos == "linux") || cfg.Goos == "android" {
+ if !str.Contains(ldflags, "-no-pie") {
+ // we need to use -pie for Linux/ARM to get accurate imported sym (added in https://golang.org/cl/5989058)
+ // this seems to be outdated, but we don't want to break existing builds depending on this (Issue 45940)
+ ldflags = append(ldflags, "-pie")
+ }
+ if str.Contains(ldflags, "-pie") && str.Contains(ldflags, "-static") {
+ // -static -pie doesn't make sense, and causes link errors.
+ // Issue 26197.
+ n := make([]string, 0, len(ldflags)-1)
+ for _, flag := range ldflags {
+ if flag != "-static" {
+ n = append(n, flag)
+ }
+ }
+ ldflags = n
+ }
+ }
+ if err := b.gccld(a, p, objdir, dynobj, ldflags, linkobj); err != nil {
+ // We only need this information for internal linking.
+ // If this link fails, mark the object as requiring
+ // external linking. This link can fail for things like
+ // syso files that have unexpected dependencies.
+ // cmd/link explicitly looks for the name "dynimportfail".
+ // See issue #52863.
+ fail := objdir + "dynimportfail"
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "echo > %s", fail)
+ }
+ if !cfg.BuildN {
+ if err := os.WriteFile(fail, nil, 0666); err != nil {
+ return "", "", err
+ }
+ }
+ return "", fail, nil
+ }
+
+ // cgo -dynimport
+ var cgoflags []string
+ if p.Standard && p.ImportPath == "runtime/cgo" {
+ cgoflags = []string{"-dynlinker"} // record path to dynamic linker
+ }
+ err = b.run(a, base.Cwd(), p.ImportPath, b.cCompilerEnv(), cfg.BuildToolexec, cgoExe, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags)
+ if err != nil {
+ return "", "", err
+ }
+ return importGo, "", nil
+}
+
+// Run SWIG on all SWIG input files.
+// TODO: Don't build a shared library, once SWIG emits the necessary
+// pragmas for external linking.
+func (b *Builder) swig(a *Action, p *load.Package, objdir string, pcCFLAGS []string) (outGo, outC, outCXX []string, err error) {
+ if err := b.swigVersionCheck(); err != nil {
+ return nil, nil, nil, err
+ }
+
+ intgosize, err := b.swigIntSize(objdir)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ for _, f := range p.SwigFiles {
+ goFile, cFile, err := b.swigOne(a, p, f, objdir, pcCFLAGS, false, intgosize)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if goFile != "" {
+ outGo = append(outGo, goFile)
+ }
+ if cFile != "" {
+ outC = append(outC, cFile)
+ }
+ }
+ for _, f := range p.SwigCXXFiles {
+ goFile, cxxFile, err := b.swigOne(a, p, f, objdir, pcCFLAGS, true, intgosize)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if goFile != "" {
+ outGo = append(outGo, goFile)
+ }
+ if cxxFile != "" {
+ outCXX = append(outCXX, cxxFile)
+ }
+ }
+ return outGo, outC, outCXX, nil
+}
+
+// Make sure SWIG is new enough.
+var (
+ swigCheckOnce sync.Once
+ swigCheck error
+)
+
+func (b *Builder) swigDoVersionCheck() error {
+ out, err := b.runOut(nil, ".", nil, "swig", "-version")
+ if err != nil {
+ return err
+ }
+ re := regexp.MustCompile(`[vV]ersion +(\d+)([.]\d+)?([.]\d+)?`)
+ matches := re.FindSubmatch(out)
+ if matches == nil {
+ // Can't find version number; hope for the best.
+ return nil
+ }
+
+ major, err := strconv.Atoi(string(matches[1]))
+ if err != nil {
+ // Can't find version number; hope for the best.
+ return nil
+ }
+ const errmsg = "must have SWIG version >= 3.0.6"
+ if major < 3 {
+ return errors.New(errmsg)
+ }
+ if major > 3 {
+ // 4.0 or later
+ return nil
+ }
+
+ // We have SWIG version 3.x.
+ if len(matches[2]) > 0 {
+ minor, err := strconv.Atoi(string(matches[2][1:]))
+ if err != nil {
+ return nil
+ }
+ if minor > 0 {
+ // 3.1 or later
+ return nil
+ }
+ }
+
+ // We have SWIG version 3.0.x.
+ if len(matches[3]) > 0 {
+ patch, err := strconv.Atoi(string(matches[3][1:]))
+ if err != nil {
+ return nil
+ }
+ if patch < 6 {
+ // Before 3.0.6.
+ return errors.New(errmsg)
+ }
+ }
+
+ return nil
+}
+
+func (b *Builder) swigVersionCheck() error {
+ swigCheckOnce.Do(func() {
+ swigCheck = b.swigDoVersionCheck()
+ })
+ return swigCheck
+}
+
+// Find the value to pass for the -intgosize option to swig.
+var (
+ swigIntSizeOnce sync.Once
+ swigIntSize string
+ swigIntSizeError error
+)
+
+ // This code fails to build if the target's int type is only 32 bits wide.
+const swigIntSizeCode = `
+package main
+const i int = 1 << 32
+`
+
+// Determine the size of int on the target system for the -intgosize option
+// of swig >= 2.0.9. Run only once.
+func (b *Builder) swigDoIntSize(objdir string) (intsize string, err error) {
+ if cfg.BuildN {
+ return "$INTBITS", nil
+ }
+ src := filepath.Join(b.WorkDir, "swig_intsize.go")
+ if err = os.WriteFile(src, []byte(swigIntSizeCode), 0666); err != nil {
+ return
+ }
+ srcs := []string{src}
+
+ p := load.GoFilesPackage(context.TODO(), load.PackageOpts{}, srcs)
+
+ if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, nil, "", false, srcs); e != nil {
+ return "32", nil
+ }
+ return "64", nil
+}
+
+// Determine the size of int on the target system for the -intgosize option
+// of swig >= 2.0.9.
+func (b *Builder) swigIntSize(objdir string) (intsize string, err error) {
+ swigIntSizeOnce.Do(func() {
+ swigIntSize, swigIntSizeError = b.swigDoIntSize(objdir)
+ })
+ return swigIntSize, swigIntSizeError
+}
+
+// Run SWIG on one SWIG input file.
+func (b *Builder) swigOne(a *Action, p *load.Package, file, objdir string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) {
+ cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _, err := b.CFlags(p)
+ if err != nil {
+ return "", "", err
+ }
+
+ var cflags []string
+ if cxx {
+ cflags = str.StringList(cgoCPPFLAGS, pcCFLAGS, cgoCXXFLAGS)
+ } else {
+ cflags = str.StringList(cgoCPPFLAGS, pcCFLAGS, cgoCFLAGS)
+ }
+
+ n := 5 // length of ".swig"
+ if cxx {
+ n = 8 // length of ".swigcxx"
+ }
+ base := file[:len(file)-n]
+ goFile := base + ".go"
+ gccBase := base + "_wrap."
+ gccExt := "c"
+ if cxx {
+ gccExt = "cxx"
+ }
+
+ gccgo := cfg.BuildToolchainName == "gccgo"
+
+ // swig
+ args := []string{
+ "-go",
+ "-cgo",
+ "-intgosize", intgosize,
+ "-module", base,
+ "-o", objdir + gccBase + gccExt,
+ "-outdir", objdir,
+ }
+
+ for _, f := range cflags {
+ if len(f) > 3 && f[:2] == "-I" {
+ args = append(args, f)
+ }
+ }
+
+ if gccgo {
+ args = append(args, "-gccgo")
+ if pkgpath := gccgoPkgpath(p); pkgpath != "" {
+ args = append(args, "-go-pkgpath", pkgpath)
+ }
+ }
+ if cxx {
+ args = append(args, "-c++")
+ }
+
+ out, err := b.runOut(a, p.Dir, nil, "swig", args, file)
+ if err != nil {
+ if len(out) > 0 {
+ if bytes.Contains(out, []byte("-intgosize")) || bytes.Contains(out, []byte("-cgo")) {
+ return "", "", errors.New("must have SWIG version >= 3.0.6")
+ }
+ // swig error
+ err = formatOutput(b.WorkDir, p.Dir, p.ImportPath, p.Desc(), b.processOutput(out))
+ }
+ return "", "", err
+ }
+ if len(out) > 0 {
+ b.showOutput(a, p.Dir, p.Desc(), b.processOutput(out)) // swig warning
+ }
+
+ // If the input was x.swig, the output is x.go in the objdir.
+ // But there might be an x.go in the original dir too, and if it
+ // uses cgo as well, cgo will be processing both and will
+ // translate both into x.cgo1.go in the objdir, overwriting one.
+ // Rename x.go to _x_swig.go to avoid this problem.
+ // We ignore files in the original dir that begin with underscore
+ // so _x_swig.go cannot conflict with an original file we were
+ // going to compile.
+ goFile = objdir + goFile
+ newGoFile := objdir + "_" + base + "_swig.go"
+ if cfg.BuildX || cfg.BuildN {
+ b.Showcmd("", "mv %s %s", goFile, newGoFile)
+ }
+ if !cfg.BuildN {
+ if err := os.Rename(goFile, newGoFile); err != nil {
+ return "", "", err
+ }
+ }
+ return newGoFile, objdir + gccBase + gccExt, nil
+}
+
+// disableBuildID adjusts a linker command line to avoid creating a
+// build ID when creating an object file rather than an executable or
+// shared library. Some systems, such as Ubuntu, always add
+// --build-id to every link, but we don't want a build ID when we are
+ // producing an object file. On some of those systems a plain -r (not
+// -Wl,-r) will turn off --build-id, but clang 3.0 doesn't support a
+// plain -r. I don't know how to turn off --build-id when using clang
+// other than passing a trailing --build-id=none. So that is what we
+// do, but only on systems likely to support it, which is to say,
+// systems that normally use gold or the GNU linker.
+func (b *Builder) disableBuildID(ldflags []string) []string {
+ switch cfg.Goos {
+ case "android", "dragonfly", "linux", "netbsd":
+ ldflags = append(ldflags, "-Wl,--build-id=none")
+ }
+ return ldflags
+}
+
+// mkAbsFiles converts files into a list of absolute files,
+// assuming they were originally relative to dir,
+// and returns that new list.
+func mkAbsFiles(dir string, files []string) []string {
+ abs := make([]string, len(files))
+ for i, f := range files {
+ if !filepath.IsAbs(f) {
+ f = filepath.Join(dir, f)
+ }
+ abs[i] = f
+ }
+ return abs
+}
+
+// passLongArgsInResponseFiles modifies cmd such that, for
+ // certain programs, long arguments are passed in a "response file", a
+ // file on disk containing the arguments, one per line. An actual
+// argument starting with '@' means that the rest of the argument is
+// a filename of arguments to expand.
+//
+// See issues 18468 (Windows) and 37768 (Darwin).
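+ //
+ // When a response file is used, cmd.Args is rewritten so that its only
+ // remaining argument is "@" followed by the name of a temporary file
+ // holding the original arguments, one per line, encoded by encodeArg.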
+func passLongArgsInResponseFiles(cmd *exec.Cmd) (cleanup func()) {
+ cleanup = func() {} // no cleanup by default
+
+ var argLen int
+ for _, arg := range cmd.Args {
+ argLen += len(arg)
+ }
+
+ // If we're not approaching 32KB of args, just pass args normally.
+ // (use 30KB instead to be conservative; not sure how accounting is done)
+ if !useResponseFile(cmd.Path, argLen) {
+ return
+ }
+
+ tf, err := os.CreateTemp("", "args")
+ if err != nil {
+ log.Fatalf("error writing long arguments to response file: %v", err)
+ }
+ cleanup = func() { os.Remove(tf.Name()) }
+ var buf bytes.Buffer
+ for _, arg := range cmd.Args[1:] {
+ fmt.Fprintf(&buf, "%s\n", encodeArg(arg))
+ }
+ if _, err := tf.Write(buf.Bytes()); err != nil {
+ tf.Close()
+ cleanup()
+ log.Fatalf("error writing long arguments to response file: %v", err)
+ }
+ if err := tf.Close(); err != nil {
+ cleanup()
+ log.Fatalf("error writing long arguments to response file: %v", err)
+ }
+ cmd.Args = []string{cmd.Args[0], "@" + tf.Name()}
+ return cleanup
+}
+
+func useResponseFile(path string, argLen int) bool {
+ // Unless the program uses objabi.Flagparse, which understands
+ // response files, don't use response files.
+ // TODO: Note that other toolchains like CC are missing here for now.
+ prog := strings.TrimSuffix(filepath.Base(path), ".exe")
+ switch prog {
+ case "compile", "link", "cgo", "asm", "cover":
+ default:
+ return false
+ }
+
+ if argLen > sys.ExecArgLengthLimit {
+ return true
+ }
+
+ // On the Go build system, use response files about 10% of the
+ // time, just to exercise this codepath.
+ isBuilder := os.Getenv("GO_BUILDER_NAME") != ""
+ if isBuilder && rand.Intn(10) == 0 {
+ return true
+ }
+
+ return false
+}
+
+// encodeArg encodes an argument for response file writing.
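+ // A newline is written as the two characters `\n` and a backslash as `\\`;
+ // objabi.DecodeArg reverses this encoding.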
+func encodeArg(arg string) string {
+ // If there aren't any characters we need to reencode, fastpath out.
+ if !strings.ContainsAny(arg, "\\\n") {
+ return arg
+ }
+ var b strings.Builder
+ for _, r := range arg {
+ switch r {
+ case '\\':
+ b.WriteByte('\\')
+ b.WriteByte('\\')
+ case '\n':
+ b.WriteByte('\\')
+ b.WriteByte('n')
+ default:
+ b.WriteRune(r)
+ }
+ }
+ return b.String()
+}
diff --git a/src/cmd/go/internal/work/exec_test.go b/src/cmd/go/internal/work/exec_test.go
new file mode 100644
index 0000000..8bbf25b
--- /dev/null
+++ b/src/cmd/go/internal/work/exec_test.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "bytes"
+ "cmd/internal/objabi"
+ "cmd/internal/sys"
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+ "unicode/utf8"
+)
+
+func TestEncodeArgs(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ arg, want string
+ }{
+ {"", ""},
+ {"hello", "hello"},
+ {"hello\n", "hello\\n"},
+ {"hello\\", "hello\\\\"},
+ {"hello\nthere", "hello\\nthere"},
+ {"\\\n", "\\\\\\n"},
+ }
+ for _, test := range tests {
+ if got := encodeArg(test.arg); got != test.want {
+ t.Errorf("encodeArg(%q) = %q, want %q", test.arg, got, test.want)
+ }
+ }
+}
+
+func TestEncodeDecode(t *testing.T) {
+ t.Parallel()
+ tests := []string{
+ "",
+ "hello",
+ "hello\\there",
+ "hello\nthere",
+ "hello 中国",
+ "hello \n中\\国",
+ }
+ for _, arg := range tests {
+ if got := objabi.DecodeArg(encodeArg(arg)); got != arg {
+ t.Errorf("objabi.DecodeArg(encodeArg(%q)) = %q", arg, got)
+ }
+ }
+}
+
+func TestEncodeDecodeFuzz(t *testing.T) {
+ if testing.Short() {
+ t.Skip("fuzz test is slow")
+ }
+ t.Parallel()
+
+ nRunes := sys.ExecArgLengthLimit + 100
+ rBuffer := make([]rune, nRunes)
+ buf := bytes.NewBuffer([]byte(string(rBuffer)))
+
+ seed := time.Now().UnixNano()
+ t.Logf("rand seed: %v", seed)
+ rng := rand.New(rand.NewSource(seed))
+
+ for i := 0; i < 50; i++ {
+ // Generate a random string of runes.
+ buf.Reset()
+ for buf.Len() < sys.ExecArgLengthLimit+1 {
+ var r rune
+ for {
+ r = rune(rng.Intn(utf8.MaxRune + 1))
+ if utf8.ValidRune(r) {
+ break
+ }
+ }
+ fmt.Fprintf(buf, "%c", r)
+ }
+ arg := buf.String()
+
+ if got := objabi.DecodeArg(encodeArg(arg)); got != arg {
+ t.Errorf("[%d] objabi.DecodeArg(encodeArg(%q)) = %q [seed: %v]", i, arg, got, seed)
+ }
+ }
+}
diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go
new file mode 100644
index 0000000..26b4e0f
--- /dev/null
+++ b/src/cmd/go/internal/work/gc.go
@@ -0,0 +1,728 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "internal/platform"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/load"
+ "cmd/go/internal/str"
+ "cmd/internal/objabi"
+ "cmd/internal/quoted"
+ "crypto/sha1"
+)
+
+// Tests can override this by setting $TESTGO_TOOLCHAIN_VERSION.
+var ToolchainVersion = runtime.Version()
+
+// The 'path' used for GOROOT_FINAL when -trimpath is specified
+const trimPathGoRootFinal string = "$GOROOT"
+
+var runtimePackages = map[string]struct{}{
+ "internal/abi": struct{}{},
+ "internal/bytealg": struct{}{},
+ "internal/coverage/rtcov": struct{}{},
+ "internal/cpu": struct{}{},
+ "internal/goarch": struct{}{},
+ "internal/goos": struct{}{},
+ "runtime": struct{}{},
+ "runtime/internal/atomic": struct{}{},
+ "runtime/internal/math": struct{}{},
+ "runtime/internal/sys": struct{}{},
+ "runtime/internal/syscall": struct{}{},
+}
+
+// The Go toolchain.
+
+type gcToolchain struct{}
+
+func (gcToolchain) compiler() string {
+ return base.Tool("compile")
+}
+
+func (gcToolchain) linker() string {
+ return base.Tool("link")
+}
+
+func pkgPath(a *Action) string {
+ p := a.Package
+ ppath := p.ImportPath
+ if cfg.BuildBuildmode == "plugin" {
+ ppath = pluginPath(a)
+ } else if p.Name == "main" && !p.Internal.ForceLibrary {
+ ppath = "main"
+ }
+ return ppath
+}
+
+func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) {
+ p := a.Package
+ objdir := a.Objdir
+ if archive != "" {
+ ofile = archive
+ } else {
+ out := "_go_.o"
+ ofile = objdir + out
+ }
+
+ pkgpath := pkgPath(a)
+ defaultGcFlags := []string{"-p", pkgpath}
+ if p.Module != nil {
+ v := p.Module.GoVersion
+ if v == "" {
+ v = gover.DefaultGoModVersion
+ }
+ if allowedVersion(v) {
+ defaultGcFlags = append(defaultGcFlags, "-lang=go"+gover.Lang(v))
+ }
+ }
+ if p.Standard {
+ defaultGcFlags = append(defaultGcFlags, "-std")
+ }
+ _, compilingRuntime := runtimePackages[p.ImportPath]
+ compilingRuntime = compilingRuntime && p.Standard
+ if compilingRuntime {
+ // runtime compiles with a special gc flag to check for
+ // memory allocations that are invalid in the runtime package,
+ // and to implement some special compiler pragmas.
+ defaultGcFlags = append(defaultGcFlags, "-+")
+ }
+
+ // If we're giving the compiler the entire package (no C etc files), tell it that,
+ // so that it can give good error messages about forward declarations.
+ // Exceptions: a few standard packages have forward declarations for
+ // pieces supplied behind-the-scenes by package runtime.
+ extFiles := len(p.CgoFiles) + len(p.CFiles) + len(p.CXXFiles) + len(p.MFiles) + len(p.FFiles) + len(p.SFiles) + len(p.SysoFiles) + len(p.SwigFiles) + len(p.SwigCXXFiles)
+ if p.Standard {
+ switch p.ImportPath {
+ case "bytes", "internal/poll", "net", "os":
+ fallthrough
+ case "runtime/metrics", "runtime/pprof", "runtime/trace":
+ fallthrough
+ case "sync", "syscall", "time":
+ extFiles++
+ }
+ }
+ if extFiles == 0 {
+ defaultGcFlags = append(defaultGcFlags, "-complete")
+ }
+ if cfg.BuildContext.InstallSuffix != "" {
+ defaultGcFlags = append(defaultGcFlags, "-installsuffix", cfg.BuildContext.InstallSuffix)
+ }
+ if a.buildID != "" {
+ defaultGcFlags = append(defaultGcFlags, "-buildid", a.buildID)
+ }
+ if p.Internal.OmitDebug || cfg.Goos == "plan9" || cfg.Goarch == "wasm" {
+ defaultGcFlags = append(defaultGcFlags, "-dwarf=false")
+ }
+ if strings.HasPrefix(ToolchainVersion, "go1") && !strings.Contains(os.Args[0], "go_bootstrap") {
+ defaultGcFlags = append(defaultGcFlags, "-goversion", ToolchainVersion)
+ }
+ if p.Internal.CoverageCfg != "" {
+ defaultGcFlags = append(defaultGcFlags, "-coveragecfg="+p.Internal.CoverageCfg)
+ }
+ if p.Internal.PGOProfile != "" {
+ defaultGcFlags = append(defaultGcFlags, "-pgoprofile="+p.Internal.PGOProfile)
+ }
+ if symabis != "" {
+ defaultGcFlags = append(defaultGcFlags, "-symabis", symabis)
+ }
+
+ gcflags := str.StringList(forcedGcflags, p.Internal.Gcflags)
+ if p.Internal.FuzzInstrument {
+ gcflags = append(gcflags, fuzzInstrumentFlags()...)
+ }
+ if compilingRuntime {
+ // Remove -N, if present.
+ // It is not possible to build the runtime with no optimizations,
+ // because the compiler cannot eliminate enough write barriers.
+ for i := 0; i < len(gcflags); i++ {
+ if gcflags[i] == "-N" {
+ copy(gcflags[i:], gcflags[i+1:])
+ gcflags = gcflags[:len(gcflags)-1]
+ i--
+ }
+ }
+ }
+ // Add -c=N to use concurrent backend compilation, if possible.
+ if c := gcBackendConcurrency(gcflags); c > 1 {
+ defaultGcFlags = append(defaultGcFlags, fmt.Sprintf("-c=%d", c))
+ }
+
+ args := []any{cfg.BuildToolexec, base.Tool("compile"), "-o", ofile, "-trimpath", a.trimpath(), defaultGcFlags, gcflags}
+ if p.Internal.LocalPrefix == "" {
+ args = append(args, "-nolocalimports")
+ } else {
+ args = append(args, "-D", p.Internal.LocalPrefix)
+ }
+ if importcfg != nil {
+ if err := b.writeFile(objdir+"importcfg", importcfg); err != nil {
+ return "", nil, err
+ }
+ args = append(args, "-importcfg", objdir+"importcfg")
+ }
+ if embedcfg != nil {
+ if err := b.writeFile(objdir+"embedcfg", embedcfg); err != nil {
+ return "", nil, err
+ }
+ args = append(args, "-embedcfg", objdir+"embedcfg")
+ }
+ if ofile == archive {
+ args = append(args, "-pack")
+ }
+ if asmhdr {
+ args = append(args, "-asmhdr", objdir+"go_asm.h")
+ }
+
+ for _, f := range gofiles {
+ f := mkAbs(p.Dir, f)
+
+ // Handle overlays. Convert path names using OverlayPath
+ // so these paths can be handed directly to tools.
+ // Deleted files won't show up when scanning directories earlier,
+ // so OverlayPath will never return "" (meaning a deleted file) here.
+ // TODO(#39958): Handle cases where the package directory
+ // doesn't exist on disk (this can happen when all the package's
+ // files are in an overlay): the code expects the package directory
+ // to exist and runs some tools in that directory.
+ // TODO(#39958): Process the overlays when the
+ // gofiles, cgofiles, cfiles, sfiles, and cxxfiles variables are
+ // created in (*Builder).build. Doing that requires rewriting the
+ // code that uses those values to expect absolute paths.
+ f, _ = fsys.OverlayPath(f)
+
+ args = append(args, f)
+ }
+
+ output, err = b.runOut(a, base.Cwd(), nil, args...)
+ return ofile, output, err
+}
+
+// gcBackendConcurrency returns the backend compiler concurrency level for a package compilation.
+func gcBackendConcurrency(gcflags []string) int {
+ // First, check whether we can use -c at all for this compilation.
+ canDashC := concurrentGCBackendCompilationEnabledByDefault
+
+ switch e := os.Getenv("GO19CONCURRENTCOMPILATION"); e {
+ case "0":
+ canDashC = false
+ case "1":
+ canDashC = true
+ case "":
+ // Not set. Use default.
+ default:
+ log.Fatalf("GO19CONCURRENTCOMPILATION must be 0, 1, or unset, got %q", e)
+ }
+
+ // TODO: Test and delete these conditions.
+ if cfg.ExperimentErr != nil || cfg.Experiment.FieldTrack || cfg.Experiment.PreemptibleLoops {
+ canDashC = false
+ }
+
+ if !canDashC {
+ return 1
+ }
+
+ // Decide how many concurrent backend compilations to allow.
+ //
+ // If we allow too many, in theory we might end up with p concurrent processes,
+ // each with c concurrent backend compiles, all fighting over the same resources.
+ // However, in practice, that seems not to happen too much.
+ // Most build graphs are surprisingly serial, so p==1 for much of the build.
+ // Furthermore, concurrent backend compilation is only enabled for a part
+ // of the overall compiler execution, so c==1 for much of the build.
+ // So don't worry too much about that interaction for now.
+ //
+ // However, in practice, setting c above 4 tends not to help very much.
+ // See the analysis in CL 41192.
+ //
+ // TODO(josharian): attempt to detect whether this particular compilation
+ // is likely to be a bottleneck, e.g. when:
+ // - it has no successor packages to compile (usually package main)
+ // - all paths through the build graph pass through it
+ // - critical path scheduling says it is high priority
+ // and in such a case, set c to runtime.GOMAXPROCS(0).
+ // By default this is the same as runtime.NumCPU.
+ // We do this now when p==1.
+ // To limit parallelism, set GOMAXPROCS below numCPU; this may be useful
+ // on a low-memory builder, or if a deterministic build order is required.
+ c := runtime.GOMAXPROCS(0)
+ if cfg.BuildP == 1 {
+ // No process parallelism, do not cap compiler parallelism.
+ return c
+ }
+ // Some process parallelism. Set c to min(4, maxprocs).
+ if c > 4 {
+ c = 4
+ }
+ return c
+}
+
+// trimpath returns the -trimpath argument to use
+// when compiling the action.
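+ // The result is a semicolon-separated list of from=>to rewrites; the final
+ // element rewrites the object directory to an empty string.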
+func (a *Action) trimpath() string {
+ // Keep in sync with Builder.ccompile
+ // The trimmed paths are a little different, but we need to trim in the
+ // same situations.
+
+ // Strip the object directory entirely.
+ objdir := a.Objdir
+ if len(objdir) > 1 && objdir[len(objdir)-1] == filepath.Separator {
+ objdir = objdir[:len(objdir)-1]
+ }
+ rewrite := ""
+
+ rewriteDir := a.Package.Dir
+ if cfg.BuildTrimpath {
+ importPath := a.Package.Internal.OrigImportPath
+ if m := a.Package.Module; m != nil && m.Version != "" {
+ rewriteDir = m.Path + "@" + m.Version + strings.TrimPrefix(importPath, m.Path)
+ } else {
+ rewriteDir = importPath
+ }
+ rewrite += a.Package.Dir + "=>" + rewriteDir + ";"
+ }
+
+ // Add rewrites for overlays. The 'from' and 'to' paths in overlays don't need to have
+ // the same basename, so go from the overlay contents file path (passed to the compiler)
+ // to the path the disk path would be rewritten to.
+
+ cgoFiles := make(map[string]bool)
+ for _, f := range a.Package.CgoFiles {
+ cgoFiles[f] = true
+ }
+
+ // TODO(matloob): Higher up in the stack, when the logic for deciding when to make copies
+ // of c/c++/m/f/hfiles is consolidated, use the same logic that Build uses to determine
+ // whether to create the copies in objdir to decide whether to rewrite objdir to the
+ // package directory here.
+ var overlayNonGoRewrites string // rewrites for non-go files
+ hasCgoOverlay := false
+ if fsys.OverlayFile != "" {
+ for _, filename := range a.Package.AllFiles() {
+ path := filename
+ if !filepath.IsAbs(path) {
+ path = filepath.Join(a.Package.Dir, path)
+ }
+ base := filepath.Base(path)
+ isGo := strings.HasSuffix(filename, ".go") || strings.HasSuffix(filename, ".s")
+ isCgo := cgoFiles[filename] || !isGo
+ overlayPath, isOverlay := fsys.OverlayPath(path)
+ if isCgo && isOverlay {
+ hasCgoOverlay = true
+ }
+ if !isCgo && isOverlay {
+ rewrite += overlayPath + "=>" + filepath.Join(rewriteDir, base) + ";"
+ } else if isCgo {
+ // Generate rewrites for non-Go files copied to files in objdir.
+ if filepath.Dir(path) == a.Package.Dir {
+ // This is a file copied to objdir.
+ overlayNonGoRewrites += filepath.Join(objdir, base) + "=>" + filepath.Join(rewriteDir, base) + ";"
+ }
+ } else {
+ // Non-overlay Go files are covered by the a.Package.Dir rewrite rule above.
+ }
+ }
+ }
+ if hasCgoOverlay {
+ rewrite += overlayNonGoRewrites
+ }
+ rewrite += objdir + "=>"
+
+ return rewrite
+}
+
+func asmArgs(a *Action, p *load.Package) []any {
+ // Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files.
+ inc := filepath.Join(cfg.GOROOT, "pkg", "include")
+ pkgpath := pkgPath(a)
+ args := []any{cfg.BuildToolexec, base.Tool("asm"), "-p", pkgpath, "-trimpath", a.trimpath(), "-I", a.Objdir, "-I", inc, "-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch, forcedAsmflags, p.Internal.Asmflags}
+ if p.ImportPath == "runtime" && cfg.Goarch == "386" {
+ for _, arg := range forcedAsmflags {
+ if arg == "-dynlink" {
+ args = append(args, "-D=GOBUILDMODE_shared=1")
+ }
+ }
+ }
+ if objabi.IsRuntimePackagePath(pkgpath) {
+ args = append(args, "-compiling-runtime")
+ }
+
+ if cfg.Goarch == "386" {
+ // Define GO386_value from cfg.GO386.
+ args = append(args, "-D", "GO386_"+cfg.GO386)
+ }
+
+ if cfg.Goarch == "amd64" {
+ // Define GOAMD64_value from cfg.GOAMD64.
+ args = append(args, "-D", "GOAMD64_"+cfg.GOAMD64)
+ }
+
+ if cfg.Goarch == "mips" || cfg.Goarch == "mipsle" {
+ // Define GOMIPS_value from cfg.GOMIPS.
+ args = append(args, "-D", "GOMIPS_"+cfg.GOMIPS)
+ }
+
+ if cfg.Goarch == "mips64" || cfg.Goarch == "mips64le" {
+ // Define GOMIPS64_value from cfg.GOMIPS64.
+ args = append(args, "-D", "GOMIPS64_"+cfg.GOMIPS64)
+ }
+
+ if cfg.Goarch == "ppc64" || cfg.Goarch == "ppc64le" {
+ // Define GOPPC64_power8..N from cfg.GOPPC64.
+ // We treat each powerpc version as a superset of functionality.
+ switch cfg.GOPPC64 {
+ case "power10":
+ args = append(args, "-D", "GOPPC64_power10")
+ fallthrough
+ case "power9":
+ args = append(args, "-D", "GOPPC64_power9")
+ fallthrough
+ default: // This should always be power8.
+ args = append(args, "-D", "GOPPC64_power8")
+ }
+ }
+
+ return args
+}
+
+func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) {
+ p := a.Package
+ args := asmArgs(a, p)
+
+ var ofiles []string
+ for _, sfile := range sfiles {
+ overlayPath, _ := fsys.OverlayPath(mkAbs(p.Dir, sfile))
+ ofile := a.Objdir + sfile[:len(sfile)-len(".s")] + ".o"
+ ofiles = append(ofiles, ofile)
+ args1 := append(args, "-o", ofile, overlayPath)
+ if err := b.run(a, p.Dir, p.ImportPath, nil, args1...); err != nil {
+ return nil, err
+ }
+ }
+ return ofiles, nil
+}
+
+func (gcToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) {
+ mkSymabis := func(p *load.Package, sfiles []string, path string) error {
+ args := asmArgs(a, p)
+ args = append(args, "-gensymabis", "-o", path)
+ for _, sfile := range sfiles {
+ if p.ImportPath == "runtime/cgo" && strings.HasPrefix(sfile, "gcc_") {
+ continue
+ }
+ op, _ := fsys.OverlayPath(mkAbs(p.Dir, sfile))
+ args = append(args, op)
+ }
+
+ // Supply an empty go_asm.h as if the compiler had been run.
+ // -gensymabis parsing is lax enough that we don't need the
+ // actual definitions that would appear in go_asm.h.
+ if err := b.writeFile(a.Objdir+"go_asm.h", nil); err != nil {
+ return err
+ }
+
+ return b.run(a, p.Dir, p.ImportPath, nil, args...)
+ }
+
+ var symabis string // Only set if we actually create the file
+ p := a.Package
+ if len(sfiles) != 0 {
+ symabis = a.Objdir + "symabis"
+ if err := mkSymabis(p, sfiles, symabis); err != nil {
+ return "", err
+ }
+ }
+
+ return symabis, nil
+}
+
+// toolVerify checks that the command line args write the same output file
+// when run using newTool instead.
+// Unused now but kept around for future use.
+func toolVerify(a *Action, b *Builder, p *load.Package, newTool string, ofile string, args []any) error {
+ newArgs := make([]any, len(args))
+ copy(newArgs, args)
+ newArgs[1] = base.Tool(newTool)
+ newArgs[3] = ofile + ".new" // x.6 becomes x.6.new
+ if err := b.run(a, p.Dir, p.ImportPath, nil, newArgs...); err != nil {
+ return err
+ }
+ data1, err := os.ReadFile(ofile)
+ if err != nil {
+ return err
+ }
+ data2, err := os.ReadFile(ofile + ".new")
+ if err != nil {
+ return err
+ }
+ if !bytes.Equal(data1, data2) {
+ return fmt.Errorf("%s and %s produced different output files:\n%s\n%s", filepath.Base(args[1].(string)), newTool, strings.Join(str.StringList(args...), " "), strings.Join(str.StringList(newArgs...), " "))
+ }
+ os.Remove(ofile + ".new")
+ return nil
+}
+
+func (gcToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error {
+ var absOfiles []string
+ for _, f := range ofiles {
+ absOfiles = append(absOfiles, mkAbs(a.Objdir, f))
+ }
+ absAfile := mkAbs(a.Objdir, afile)
+
+ // The archive file should have been created by the compiler.
+ // Since that was not always the case, verify that it exists.
+ if !cfg.BuildN {
+ if _, err := os.Stat(absAfile); err != nil {
+ base.Fatalf("os.Stat of archive file failed: %v", err)
+ }
+ }
+
+ p := a.Package
+ if cfg.BuildN || cfg.BuildX {
+ cmdline := str.StringList(base.Tool("pack"), "r", absAfile, absOfiles)
+ b.Showcmd(p.Dir, "%s # internal", joinUnambiguously(cmdline))
+ }
+ if cfg.BuildN {
+ return nil
+ }
+ if err := packInternal(absAfile, absOfiles); err != nil {
+ return formatOutput(b.WorkDir, p.Dir, p.ImportPath, p.Desc(), err.Error()+"\n")
+ }
+ return nil
+}
+
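+// packInternal appends the object files ofiles to the existing archive afile,
+// writing each member with a traditional Unix ar header and padding
+// odd-sized members to an even length.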
+func packInternal(afile string, ofiles []string) error {
+ dst, err := os.OpenFile(afile, os.O_WRONLY|os.O_APPEND, 0)
+ if err != nil {
+ return err
+ }
+ defer dst.Close() // only for error returns or panics
+ w := bufio.NewWriter(dst)
+
+ for _, ofile := range ofiles {
+ src, err := os.Open(ofile)
+ if err != nil {
+ return err
+ }
+ fi, err := src.Stat()
+ if err != nil {
+ src.Close()
+ return err
+ }
+ // Note: Not using %-16.16s format because we care
+ // about bytes, not runes.
+ name := fi.Name()
+ if len(name) > 16 {
+ name = name[:16]
+ } else {
+ name += strings.Repeat(" ", 16-len(name))
+ }
+ size := fi.Size()
+ fmt.Fprintf(w, "%s%-12d%-6d%-6d%-8o%-10d`\n",
+ name, 0, 0, 0, 0644, size)
+ n, err := io.Copy(w, src)
+ src.Close()
+ if err == nil && n < size {
+ err = io.ErrUnexpectedEOF
+ } else if err == nil && n > size {
+ err = fmt.Errorf("file larger than size reported by stat")
+ }
+ if err != nil {
+ return fmt.Errorf("copying %s to %s: %v", ofile, afile, err)
+ }
+ if size&1 != 0 {
+ w.WriteByte(0)
+ }
+ }
+
+ if err := w.Flush(); err != nil {
+ return err
+ }
+ return dst.Close()
+}
+
+// setextld sets the appropriate linker flags for the specified compiler.
+func setextld(ldflags []string, compiler []string) ([]string, error) {
+ for _, f := range ldflags {
+ if f == "-extld" || strings.HasPrefix(f, "-extld=") {
+ // don't override -extld if supplied
+ return ldflags, nil
+ }
+ }
+ joined, err := quoted.Join(compiler)
+ if err != nil {
+ return nil, err
+ }
+ return append(ldflags, "-extld="+joined), nil
+}
+
+// pluginPath computes the package path for a plugin main package.
+//
+// This is typically the import path of the main package p, unless the
+// plugin is being built directly from source files. In that case we
+// combine the package build ID with the contents of the main package
+// source files. This allows us to identify two different plugins
+// built from two source files with the same name.
+func pluginPath(a *Action) string {
+ p := a.Package
+ if p.ImportPath != "command-line-arguments" {
+ return p.ImportPath
+ }
+ h := sha1.New()
+ buildID := a.buildID
+ if a.Mode == "link" {
+ // For linking, use the main package's build ID instead of
+ // the binary's build ID, so it is the same hash used in
+ // compiling and linking.
+ // When compiling, we use actionID/actionID (instead of
+ // actionID/contentID) as a temporary build ID to compute
+ // the hash. Do the same here. (See buildid.go:useCache)
+ // The build ID matters because it affects the overall hash
+ // in the plugin's pseudo-import path returned below.
+ // We need to use the same import path when compiling and linking.
+ id := strings.Split(buildID, buildIDSeparator)
+ buildID = id[1] + buildIDSeparator + id[1]
+ }
+ fmt.Fprintf(h, "build ID: %s\n", buildID)
+ for _, file := range str.StringList(p.GoFiles, p.CgoFiles, p.SFiles) {
+ data, err := os.ReadFile(filepath.Join(p.Dir, file))
+ if err != nil {
+ base.Fatalf("go: %s", err)
+ }
+ h.Write(data)
+ }
+ return fmt.Sprintf("plugin/unnamed-%x", h.Sum(nil))
+}
+
+func (gcToolchain) ld(b *Builder, root *Action, out, importcfg, mainpkg string) error {
+ cxx := len(root.Package.CXXFiles) > 0 || len(root.Package.SwigCXXFiles) > 0
+ for _, a := range root.Deps {
+ if a.Package != nil && (len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0) {
+ cxx = true
+ }
+ }
+ var ldflags []string
+ if cfg.BuildContext.InstallSuffix != "" {
+ ldflags = append(ldflags, "-installsuffix", cfg.BuildContext.InstallSuffix)
+ }
+ if root.Package.Internal.OmitDebug {
+ ldflags = append(ldflags, "-s", "-w")
+ }
+ if cfg.BuildBuildmode == "plugin" {
+ ldflags = append(ldflags, "-pluginpath", pluginPath(root))
+ }
+
+ // Store BuildID inside toolchain binaries as a unique identifier of the
+ // tool being run, for use by content-based staleness determination.
+ if root.Package.Goroot && strings.HasPrefix(root.Package.ImportPath, "cmd/") {
+ // External linking will include our build id in the external
+ // linker's build id, which will cause our build id to not
+ // match the next time the tool is built.
+ // Rely on the external build id instead.
+ if !platform.MustLinkExternal(cfg.Goos, cfg.Goarch, false) {
+ ldflags = append(ldflags, "-X=cmd/internal/objabi.buildID="+root.buildID)
+ }
+ }
+
+ // Store default GODEBUG in binaries.
+ if root.Package.DefaultGODEBUG != "" {
+ ldflags = append(ldflags, "-X=runtime.godebugDefault="+root.Package.DefaultGODEBUG)
+ }
+
+ // If the user has not specified the -extld option, then specify the
+ // appropriate linker. In case of C++ code, use the compiler named
+ // by the CXX environment variable or defaultCXX if CXX is not set.
+ // Else, use the CC environment variable and defaultCC as fallback.
+ var compiler []string
+ if cxx {
+ compiler = envList("CXX", cfg.DefaultCXX(cfg.Goos, cfg.Goarch))
+ } else {
+ compiler = envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch))
+ }
+ ldflags = append(ldflags, "-buildmode="+ldBuildmode)
+ if root.buildID != "" {
+ ldflags = append(ldflags, "-buildid="+root.buildID)
+ }
+ ldflags = append(ldflags, forcedLdflags...)
+ ldflags = append(ldflags, root.Package.Internal.Ldflags...)
+ ldflags, err := setextld(ldflags, compiler)
+ if err != nil {
+ return err
+ }
+
+ // On OS X when using external linking to build a shared library,
+ // the argument passed here to -o ends up recorded in the final
+ // shared library in the LC_ID_DYLIB load command.
+ // To avoid putting the temporary output directory name there
+ // (and making the resulting shared library useless),
+ // run the link in the output directory so that -o can name
+ // just the final path element.
+ // On Windows, the DLL file name is recorded in the PE file's
+ // export section, so do the same as on OS X.
+ // On Linux, for a shared object, at least with the Gold linker,
+ // the output file path is recorded in the .gnu.version_d section.
+ dir := "."
+ if cfg.BuildBuildmode == "c-shared" || cfg.BuildBuildmode == "plugin" {
+ dir, out = filepath.Split(out)
+ }
+
+ env := []string{}
+ if cfg.BuildTrimpath {
+ env = append(env, "GOROOT_FINAL="+trimPathGoRootFinal)
+ }
+ return b.run(root, dir, root.Package.ImportPath, env, cfg.BuildToolexec, base.Tool("link"), "-o", out, "-importcfg", importcfg, ldflags, mainpkg)
+}
+
+func (gcToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error {
+ ldflags := []string{"-installsuffix", cfg.BuildContext.InstallSuffix}
+ ldflags = append(ldflags, "-buildmode=shared")
+ ldflags = append(ldflags, forcedLdflags...)
+ ldflags = append(ldflags, root.Package.Internal.Ldflags...)
+ cxx := false
+ for _, a := range allactions {
+ if a.Package != nil && (len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0) {
+ cxx = true
+ }
+ }
+ // If the user has not specified the -extld option, then specify the
+ // appropriate linker. In case of C++ code, use the compiler named
+ // by the CXX environment variable or defaultCXX if CXX is not set.
+ // Else, use the CC environment variable and defaultCC as fallback.
+ var compiler []string
+ if cxx {
+ compiler = envList("CXX", cfg.DefaultCXX(cfg.Goos, cfg.Goarch))
+ } else {
+ compiler = envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch))
+ }
+ ldflags, err := setextld(ldflags, compiler)
+ if err != nil {
+ return err
+ }
+ for _, d := range toplevelactions {
+ if !strings.HasSuffix(d.Target, ".a") { // omit unsafe etc and actions for other shared libraries
+ continue
+ }
+ ldflags = append(ldflags, d.Package.ImportPath+"="+d.Target)
+ }
+ return b.run(root, ".", out, nil, cfg.BuildToolexec, base.Tool("link"), "-o", out, "-importcfg", importcfg, ldflags)
+}
+
+func (gcToolchain) cc(b *Builder, a *Action, ofile, cfile string) error {
+ return fmt.Errorf("%s: C source files not supported without cgo", mkAbs(a.Package.Dir, cfile))
+}
diff --git a/src/cmd/go/internal/work/gccgo.go b/src/cmd/go/internal/work/gccgo.go
new file mode 100644
index 0000000..a048b7f
--- /dev/null
+++ b/src/cmd/go/internal/work/gccgo.go
@@ -0,0 +1,677 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/load"
+ "cmd/go/internal/str"
+ "cmd/internal/pkgpath"
+)
+
+// The Gccgo toolchain.
+
+type gccgoToolchain struct{}
+
+var GccgoName, GccgoBin string
+var gccgoErr error
+
+func init() {
+ GccgoName = cfg.Getenv("GCCGO")
+ if GccgoName == "" {
+ GccgoName = "gccgo"
+ }
+ GccgoBin, gccgoErr = exec.LookPath(GccgoName)
+}
+
+func (gccgoToolchain) compiler() string {
+ checkGccgoBin()
+ return GccgoBin
+}
+
+func (gccgoToolchain) linker() string {
+ checkGccgoBin()
+ return GccgoBin
+}
+
+func (gccgoToolchain) ar() string {
+ ar := cfg.Getenv("AR")
+ if ar == "" {
+ ar = "ar"
+ }
+ return ar
+}
+
+func checkGccgoBin() {
+ if gccgoErr == nil {
+ return
+ }
+ fmt.Fprintf(os.Stderr, "cmd/go: gccgo: %s\n", gccgoErr)
+ base.SetExitStatus(2)
+ base.Exit()
+}
+
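+// gc compiles the package's Go files with gccgo, writing _go_.o into the
+// action's object directory and returning its path.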
+func (tools gccgoToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) {
+ p := a.Package
+ objdir := a.Objdir
+ out := "_go_.o"
+ ofile = objdir + out
+ gcargs := []string{"-g"}
+ gcargs = append(gcargs, b.gccArchArgs()...)
+ gcargs = append(gcargs, "-fdebug-prefix-map="+b.WorkDir+"=/tmp/go-build")
+ gcargs = append(gcargs, "-gno-record-gcc-switches")
+ if pkgpath := gccgoPkgpath(p); pkgpath != "" {
+ gcargs = append(gcargs, "-fgo-pkgpath="+pkgpath)
+ }
+ if p.Internal.LocalPrefix != "" {
+ gcargs = append(gcargs, "-fgo-relative-import-path="+p.Internal.LocalPrefix)
+ }
+
+ args := str.StringList(tools.compiler(), "-c", gcargs, "-o", ofile, forcedGccgoflags)
+ if importcfg != nil {
+ if b.gccSupportsFlag(args[:1], "-fgo-importcfg=/dev/null") {
+ if err := b.writeFile(objdir+"importcfg", importcfg); err != nil {
+ return "", nil, err
+ }
+ args = append(args, "-fgo-importcfg="+objdir+"importcfg")
+ } else {
+ root := objdir + "_importcfgroot_"
+ if err := buildImportcfgSymlinks(b, root, importcfg); err != nil {
+ return "", nil, err
+ }
+ args = append(args, "-I", root)
+ }
+ }
+ if embedcfg != nil && b.gccSupportsFlag(args[:1], "-fgo-embedcfg=/dev/null") {
+ if err := b.writeFile(objdir+"embedcfg", embedcfg); err != nil {
+ return "", nil, err
+ }
+ args = append(args, "-fgo-embedcfg="+objdir+"embedcfg")
+ }
+
+ if b.gccSupportsFlag(args[:1], "-ffile-prefix-map=a=b") {
+ if cfg.BuildTrimpath {
+ args = append(args, "-ffile-prefix-map="+base.Cwd()+"=.")
+ args = append(args, "-ffile-prefix-map="+b.WorkDir+"=/tmp/go-build")
+ }
+ if fsys.OverlayFile != "" {
+ for _, name := range gofiles {
+ absPath := mkAbs(p.Dir, name)
+ overlayPath, ok := fsys.OverlayPath(absPath)
+ if !ok {
+ continue
+ }
+ toPath := absPath
+ // gccgo only applies the last matching rule, so also handle the case where
+ // BuildTrimpath is true and the path is relative to base.Cwd().
+ if cfg.BuildTrimpath && str.HasFilePathPrefix(toPath, base.Cwd()) {
+ toPath = "." + toPath[len(base.Cwd()):]
+ }
+ args = append(args, "-ffile-prefix-map="+overlayPath+"="+toPath)
+ }
+ }
+ }
+
+ args = append(args, a.Package.Internal.Gccgoflags...)
+ for _, f := range gofiles {
+ f := mkAbs(p.Dir, f)
+ // Overlay files if necessary.
+ // See the comment on gcToolchain.gc about overlay TODOs.
+ f, _ = fsys.OverlayPath(f)
+ args = append(args, f)
+ }
+
+ output, err = b.runOut(a, p.Dir, nil, args)
+ return ofile, output, err
+}
+
+// buildImportcfgSymlinks builds in root a tree of symlinks
+// implementing the directives from importcfg.
+// This serves as a temporary transition mechanism until
+// we can depend on gccgo reading an importcfg directly.
+// (The Go 1.9 and later gc compilers already do.)
+func buildImportcfgSymlinks(b *Builder, root string, importcfg []byte) error {
+ for lineNum, line := range strings.Split(string(importcfg), "\n") {
+ lineNum++ // 1-based
+ line = strings.TrimSpace(line)
+ if line == "" {
+ continue
+ }
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+ var verb, args string
+ if i := strings.Index(line, " "); i < 0 {
+ verb = line
+ } else {
+ verb, args = line[:i], strings.TrimSpace(line[i+1:])
+ }
+ before, after, _ := strings.Cut(args, "=")
+ switch verb {
+ default:
+ base.Fatalf("importcfg:%d: unknown directive %q", lineNum, verb)
+ case "packagefile":
+ if before == "" || after == "" {
+ return fmt.Errorf(`importcfg:%d: invalid packagefile: syntax is "packagefile path=filename": %s`, lineNum, line)
+ }
+ archive := gccgoArchive(root, before)
+ if err := b.Mkdir(filepath.Dir(archive)); err != nil {
+ return err
+ }
+ if err := b.Symlink(after, archive); err != nil {
+ return err
+ }
+ case "importmap":
+ if before == "" || after == "" {
+ return fmt.Errorf(`importcfg:%d: invalid importmap: syntax is "importmap old=new": %s`, lineNum, line)
+ }
+ beforeA := gccgoArchive(root, before)
+ afterA := gccgoArchive(root, after)
+ if err := b.Mkdir(filepath.Dir(beforeA)); err != nil {
+ return err
+ }
+ if err := b.Mkdir(filepath.Dir(afterA)); err != nil {
+ return err
+ }
+ if err := b.Symlink(afterA, beforeA); err != nil {
+ return err
+ }
+ case "packageshlib":
+ return fmt.Errorf("gccgo -importcfg does not support shared libraries")
+ }
+ }
+ return nil
+}
+
+func (tools gccgoToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) {
+ p := a.Package
+ var ofiles []string
+ for _, sfile := range sfiles {
+ base := filepath.Base(sfile)
+ ofile := a.Objdir + base[:len(base)-len(".s")] + ".o"
+ ofiles = append(ofiles, ofile)
+ sfile, _ = fsys.OverlayPath(mkAbs(p.Dir, sfile))
+ defs := []string{"-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch}
+ if pkgpath := tools.gccgoCleanPkgpath(b, p); pkgpath != "" {
+ defs = append(defs, `-D`, `GOPKGPATH=`+pkgpath)
+ }
+ defs = tools.maybePIC(defs)
+ defs = append(defs, b.gccArchArgs()...)
+ err := b.run(a, p.Dir, p.ImportPath, nil, tools.compiler(), "-xassembler-with-cpp", "-I", a.Objdir, "-c", "-o", ofile, defs, sfile)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ofiles, nil
+}
+
+func (gccgoToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) {
+ return "", nil
+}
+
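+// gccgoArchive returns the path under basedir of the archive for import path
+// imp, adding the "lib" prefix to the final path element.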
+func gccgoArchive(basedir, imp string) string {
+ end := filepath.FromSlash(imp + ".a")
+ afile := filepath.Join(basedir, end)
+ // add "lib" to the final element
+ return filepath.Join(filepath.Dir(afile), "lib"+filepath.Base(afile))
+}
+
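+// pack archives the package's object files into afile using the system "ar",
+// preferring the deterministic "D" modifier when it is supported.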
+func (tools gccgoToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error {
+ p := a.Package
+ objdir := a.Objdir
+ var absOfiles []string
+ for _, f := range ofiles {
+ absOfiles = append(absOfiles, mkAbs(objdir, f))
+ }
+ var arArgs []string
+ if cfg.Goos == "aix" && cfg.Goarch == "ppc64" {
+ // AIX puts both 32-bit and 64-bit objects in the same archive.
+ // Tell the AIX "ar" command to only care about 64-bit objects.
+ arArgs = []string{"-X64"}
+ }
+ absAfile := mkAbs(objdir, afile)
+ // Try with D modifier first, then without if that fails.
+ output, err := b.runOut(a, p.Dir, nil, tools.ar(), arArgs, "rcD", absAfile, absOfiles)
+ if err != nil {
+ return b.run(a, p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rc", absAfile, absOfiles)
+ }
+
+ if len(output) > 0 {
+ // Show the output if there is any even without errors.
+ b.showOutput(a, p.Dir, p.ImportPath, b.processOutput(output))
+ }
+
+ return nil
+}
+
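+// link drives the gccgo link step: it gathers the dependency archives, cgo
+// LDFLAGS, and shared libraries, then invokes the gccgo driver to produce
+// out for the given buildmode.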
+func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string, allactions []*Action, buildmode, desc string) error {
+ // gccgo needs explicit linking with all package dependencies,
+ // and all LDFLAGS from cgo dependencies.
+ afiles := []string{}
+ shlibs := []string{}
+ ldflags := b.gccArchArgs()
+ cgoldflags := []string{}
+ usesCgo := false
+ cxx := false
+ objc := false
+ fortran := false
+ if root.Package != nil {
+ cxx = len(root.Package.CXXFiles) > 0 || len(root.Package.SwigCXXFiles) > 0
+ objc = len(root.Package.MFiles) > 0
+ fortran = len(root.Package.FFiles) > 0
+ }
+
+ readCgoFlags := func(flagsFile string) error {
+ flags, err := os.ReadFile(flagsFile)
+ if err != nil {
+ return err
+ }
+ const ldflagsPrefix = "_CGO_LDFLAGS="
+ for _, line := range strings.Split(string(flags), "\n") {
+ if strings.HasPrefix(line, ldflagsPrefix) {
+ flag := line[len(ldflagsPrefix):]
+ // Every _cgo_flags file has -g and -O2 in _CGO_LDFLAGS
+ // but they don't mean anything to the linker so filter
+ // them out.
+ if flag != "-g" && !strings.HasPrefix(flag, "-O") {
+ cgoldflags = append(cgoldflags, flag)
+ }
+ }
+ }
+ return nil
+ }
+
+ var arArgs []string
+ if cfg.Goos == "aix" && cfg.Goarch == "ppc64" {
+ // AIX puts both 32-bit and 64-bit objects in the same archive.
+ // Tell the AIX "ar" command to only care about 64-bit objects.
+ arArgs = []string{"-X64"}
+ }
+
+ newID := 0
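+ // readAndRemoveCgoFlags copies the archive into the action's objdir,
+ // extracts its _cgo_flags member to collect the recorded _CGO_LDFLAGS,
+ // and then deletes the member so it is not passed on to the linker.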
+ readAndRemoveCgoFlags := func(archive string) (string, error) {
+ newID++
+ newArchive := root.Objdir + fmt.Sprintf("_pkg%d_.a", newID)
+ if err := b.copyFile(newArchive, archive, 0666, false); err != nil {
+ return "", err
+ }
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "ar d %s _cgo_flags", newArchive)
+ if cfg.BuildN {
+ // TODO(rsc): We could do better about showing the right _cgo_flags even in -n mode.
+ // Either the archive is already built and we can read them out,
+ // or we're printing commands to build the archive and can
+ // forward the _cgo_flags directly to this step.
+ return "", nil
+ }
+ }
+ err := b.run(root, root.Objdir, desc, nil, tools.ar(), arArgs, "x", newArchive, "_cgo_flags")
+ if err != nil {
+ return "", err
+ }
+ err = b.run(root, ".", desc, nil, tools.ar(), arArgs, "d", newArchive, "_cgo_flags")
+ if err != nil {
+ return "", err
+ }
+ err = readCgoFlags(filepath.Join(root.Objdir, "_cgo_flags"))
+ if err != nil {
+ return "", err
+ }
+ return newArchive, nil
+ }
+
+ // If using -linkshared, find the shared library deps.
+ haveShlib := make(map[string]bool)
+ targetBase := filepath.Base(root.Target)
+ if cfg.BuildLinkshared {
+ for _, a := range root.Deps {
+ p := a.Package
+ if p == nil || p.Shlib == "" {
+ continue
+ }
+
+ // The .a we are linking into this .so
+ // will have its Shlib set to this .so.
+ // Don't start thinking we want to link
+ // this .so into itself.
+ base := filepath.Base(p.Shlib)
+ if base != targetBase {
+ haveShlib[base] = true
+ }
+ }
+ }
+
+ // Arrange the deps into afiles and shlibs.
+ addedShlib := make(map[string]bool)
+ for _, a := range root.Deps {
+ p := a.Package
+ if p != nil && p.Shlib != "" && haveShlib[filepath.Base(p.Shlib)] {
+ // This is a package linked into a shared
+ // library that we will put into shlibs.
+ continue
+ }
+
+ if haveShlib[filepath.Base(a.Target)] {
+ // This is a shared library we want to link against.
+ if !addedShlib[a.Target] {
+ shlibs = append(shlibs, a.Target)
+ addedShlib[a.Target] = true
+ }
+ continue
+ }
+
+ if p != nil {
+ target := a.built
+ if p.UsesCgo() || p.UsesSwig() {
+ var err error
+ target, err = readAndRemoveCgoFlags(target)
+ if err != nil {
+ continue
+ }
+ }
+
+ afiles = append(afiles, target)
+ }
+ }
+
+ for _, a := range allactions {
+ // Gather CgoLDFLAGS, but not from standard packages.
+ // The go tool can dig up runtime/cgo from GOROOT and
+ // think that it should use its CgoLDFLAGS, but gccgo
+ // doesn't use runtime/cgo.
+ if a.Package == nil {
+ continue
+ }
+ if !a.Package.Standard {
+ cgoldflags = append(cgoldflags, a.Package.CgoLDFLAGS...)
+ }
+ if len(a.Package.CgoFiles) > 0 {
+ usesCgo = true
+ }
+ if a.Package.UsesSwig() {
+ usesCgo = true
+ }
+ if len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0 {
+ cxx = true
+ }
+ if len(a.Package.MFiles) > 0 {
+ objc = true
+ }
+ if len(a.Package.FFiles) > 0 {
+ fortran = true
+ }
+ }
+
+ wholeArchive := []string{"-Wl,--whole-archive"}
+ noWholeArchive := []string{"-Wl,--no-whole-archive"}
+ if cfg.Goos == "aix" {
+ wholeArchive = nil
+ noWholeArchive = nil
+ }
+ ldflags = append(ldflags, wholeArchive...)
+ ldflags = append(ldflags, afiles...)
+ ldflags = append(ldflags, noWholeArchive...)
+
+ ldflags = append(ldflags, cgoldflags...)
+ ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...)
+ if root.Package != nil {
+ ldflags = append(ldflags, root.Package.CgoLDFLAGS...)
+ }
+ if cfg.Goos != "aix" {
+ ldflags = str.StringList("-Wl,-(", ldflags, "-Wl,-)")
+ }
+
+ if root.buildID != "" {
+ // On systems that normally use gold or the GNU linker,
+ // use the --build-id option to write a GNU build ID note.
+ switch cfg.Goos {
+ case "android", "dragonfly", "linux", "netbsd":
+ ldflags = append(ldflags, fmt.Sprintf("-Wl,--build-id=0x%x", root.buildID))
+ }
+ }
+
+ var rLibPath string
+ if cfg.Goos == "aix" {
+ rLibPath = "-Wl,-blibpath="
+ } else {
+ rLibPath = "-Wl,-rpath="
+ }
+ for _, shlib := range shlibs {
+ ldflags = append(
+ ldflags,
+ "-L"+filepath.Dir(shlib),
+ rLibPath+filepath.Dir(shlib),
+ "-l"+strings.TrimSuffix(
+ strings.TrimPrefix(filepath.Base(shlib), "lib"),
+ ".so"))
+ }
+
+ var realOut string
+ goLibBegin := str.StringList(wholeArchive, "-lgolibbegin", noWholeArchive)
+ switch buildmode {
+ case "exe":
+ if usesCgo && cfg.Goos == "linux" {
+ ldflags = append(ldflags, "-Wl,-E")
+ }
+
+ case "c-archive":
+ // Link the Go files into a single .o, and also link
+ // in -lgolibbegin.
+ //
+ // We need to use --whole-archive with -lgolibbegin
+ // because it doesn't define any symbols that will
+ // cause the contents to be pulled in; it's just
+ // initialization code.
+ //
+ // The user remains responsible for linking against
+ // -lgo -lpthread -lm in the final link. We can't use
+ // -r to pick them up because we can't combine
+ // split-stack and non-split-stack code in a single -r
+ // link, and libgo picks up non-split-stack code from
+ // libffi.
+ ldflags = append(ldflags, "-Wl,-r", "-nostdlib")
+ ldflags = append(ldflags, goLibBegin...)
+
+ if nopie := b.gccNoPie([]string{tools.linker()}); nopie != "" {
+ ldflags = append(ldflags, nopie)
+ }
+
+ // We are creating an object file, so we don't want a build ID.
+ if root.buildID == "" {
+ ldflags = b.disableBuildID(ldflags)
+ }
+
+ realOut = out
+ out = out + ".o"
+
+ case "c-shared":
+ ldflags = append(ldflags, "-shared", "-nostdlib")
+ ldflags = append(ldflags, goLibBegin...)
+ ldflags = append(ldflags, "-lgo", "-lgcc_s", "-lgcc", "-lc", "-lgcc")
+
+ case "shared":
+ if cfg.Goos != "aix" {
+ ldflags = append(ldflags, "-zdefs")
+ }
+ ldflags = append(ldflags, "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc")
+
+ default:
+ base.Fatalf("-buildmode=%s not supported for gccgo", buildmode)
+ }
+
+ switch buildmode {
+ case "exe", "c-shared":
+ if cxx {
+ ldflags = append(ldflags, "-lstdc++")
+ }
+ if objc {
+ ldflags = append(ldflags, "-lobjc")
+ }
+ if fortran {
+ fc := cfg.Getenv("FC")
+ if fc == "" {
+ fc = "gfortran"
+ }
+ // support gfortran out of the box and let others pass the correct link options
+ // via CGO_LDFLAGS
+ if strings.Contains(fc, "gfortran") {
+ ldflags = append(ldflags, "-lgfortran")
+ }
+ }
+ }
+
+ if err := b.run(root, ".", desc, nil, tools.linker(), "-o", out, ldflags, forcedGccgoflags, root.Package.Internal.Gccgoflags); err != nil {
+ return err
+ }
+
+ switch buildmode {
+ case "c-archive":
+ if err := b.run(root, ".", desc, nil, tools.ar(), arArgs, "rc", realOut, out); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (tools gccgoToolchain) ld(b *Builder, root *Action, out, importcfg, mainpkg string) error {
+ return tools.link(b, root, out, importcfg, root.Deps, ldBuildmode, root.Package.ImportPath)
+}
+
+func (tools gccgoToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error {
+ return tools.link(b, root, out, importcfg, allactions, "shared", out)
+}
+
+func (tools gccgoToolchain) cc(b *Builder, a *Action, ofile, cfile string) error {
+ p := a.Package
+ inc := filepath.Join(cfg.GOROOT, "pkg", "include")
+ cfile = mkAbs(p.Dir, cfile)
+ defs := []string{"-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch}
+ defs = append(defs, b.gccArchArgs()...)
+ if pkgpath := tools.gccgoCleanPkgpath(b, p); pkgpath != "" {
+ defs = append(defs, `-D`, `GOPKGPATH="`+pkgpath+`"`)
+ }
+ compiler := envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch))
+ if b.gccSupportsFlag(compiler, "-fsplit-stack") {
+ defs = append(defs, "-fsplit-stack")
+ }
+ defs = tools.maybePIC(defs)
+ if b.gccSupportsFlag(compiler, "-ffile-prefix-map=a=b") {
+ defs = append(defs, "-ffile-prefix-map="+base.Cwd()+"=.")
+ defs = append(defs, "-ffile-prefix-map="+b.WorkDir+"=/tmp/go-build")
+ } else if b.gccSupportsFlag(compiler, "-fdebug-prefix-map=a=b") {
+ defs = append(defs, "-fdebug-prefix-map="+b.WorkDir+"=/tmp/go-build")
+ }
+ if b.gccSupportsFlag(compiler, "-gno-record-gcc-switches") {
+ defs = append(defs, "-gno-record-gcc-switches")
+ }
+ return b.run(a, p.Dir, p.ImportPath, nil, compiler, "-Wall", "-g",
+ "-I", a.Objdir, "-I", inc, "-o", ofile, defs, "-c", cfile)
+}
+
+// maybePIC adds -fPIC to the list of arguments if needed.
+func (tools gccgoToolchain) maybePIC(args []string) []string {
+ switch cfg.BuildBuildmode {
+ case "c-shared", "shared", "plugin":
+ args = append(args, "-fPIC")
+ }
+ return args
+}
+
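+// gccgoPkgpath returns the package path to pass to gccgo's -fgo-pkgpath,
+// or the empty string for a command that is not forced to build as a library.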
+func gccgoPkgpath(p *load.Package) string {
+ if p.Internal.Build.IsCommand() && !p.Internal.ForceLibrary {
+ return ""
+ }
+ return p.ImportPath
+}
+
+var gccgoToSymbolFuncOnce sync.Once
+var gccgoToSymbolFunc func(string) string
+
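+// gccgoCleanPkgpath returns the form of p's package path that gccgo uses in
+// symbol names, computed lazily with cmd/internal/pkgpath.ToSymbolFunc.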
+func (tools gccgoToolchain) gccgoCleanPkgpath(b *Builder, p *load.Package) string {
+ gccgoToSymbolFuncOnce.Do(func() {
+ tmpdir := b.WorkDir
+ if cfg.BuildN {
+ tmpdir = os.TempDir()
+ }
+ fn, err := pkgpath.ToSymbolFunc(tools.compiler(), tmpdir)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "cmd/go: %v\n", err)
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ gccgoToSymbolFunc = fn
+ })
+
+ return gccgoToSymbolFunc(gccgoPkgpath(p))
+}
+
+var (
+ gccgoSupportsCgoIncompleteOnce sync.Once
+ gccgoSupportsCgoIncomplete bool
+)
+
+const gccgoSupportsCgoIncompleteCode = `
+package p
+
+import "runtime/cgo"
+
+type I cgo.Incomplete
+`
+
+// supportsCgoIncomplete reports whether the gccgo/GoLLVM compiler
+// being used supports cgo.Incomplete, which was added in GCC 13.
+func (tools gccgoToolchain) supportsCgoIncomplete(b *Builder) bool {
+ gccgoSupportsCgoIncompleteOnce.Do(func() {
+ fail := func(err error) {
+ fmt.Fprintf(os.Stderr, "cmd/go: %v\n", err)
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+
+ tmpdir := b.WorkDir
+ if cfg.BuildN {
+ tmpdir = os.TempDir()
+ }
+ f, err := os.CreateTemp(tmpdir, "*_gccgo_cgoincomplete.go")
+ if err != nil {
+ fail(err)
+ }
+ fn := f.Name()
+ f.Close()
+ defer os.Remove(fn)
+
+ if err := os.WriteFile(fn, []byte(gccgoSupportsCgoIncompleteCode), 0644); err != nil {
+ fail(err)
+ }
+
+ on := strings.TrimSuffix(fn, ".go") + ".o"
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd(tmpdir, "%s -c -o %s %s || true", tools.compiler(), on, fn)
+ // Since this function affects later builds,
+ // and only generates temporary files,
+ // we run the command even with -n.
+ }
+ cmd := exec.Command(tools.compiler(), "-c", "-o", on, fn)
+ cmd.Dir = tmpdir
+ var buf strings.Builder
+ cmd.Stdout = &buf
+ cmd.Stderr = &buf
+ err = cmd.Run()
+ if out := buf.String(); len(out) > 0 {
+ b.showOutput(nil, tmpdir, b.fmtcmd(tmpdir, "%s -c -o %s %s", tools.compiler(), on, fn), buf.String())
+ }
+ gccgoSupportsCgoIncomplete = err == nil
+ })
+ return gccgoSupportsCgoIncomplete
+}
diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go
new file mode 100644
index 0000000..29116cb
--- /dev/null
+++ b/src/cmd/go/internal/work/init.go
@@ -0,0 +1,424 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Build initialization (after flag parsing).
+
+package work
+
+import (
+ "bytes"
+ "cmd/go/internal/base"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/modload"
+ "cmd/internal/quoted"
+ "fmt"
+ "internal/platform"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "sync"
+)
+
+var buildInitStarted = false
+
+func BuildInit() {
+ if buildInitStarted {
+ base.Fatalf("go: internal error: work.BuildInit called more than once")
+ }
+ buildInitStarted = true
+ base.AtExit(closeBuilders)
+
+ modload.Init()
+ instrumentInit()
+ buildModeInit()
+ if err := fsys.Init(base.Cwd()); err != nil {
+ base.Fatal(err)
+ }
+
+ // Make sure -pkgdir is absolute, because we run commands
+ // in different directories.
+ if cfg.BuildPkgdir != "" && !filepath.IsAbs(cfg.BuildPkgdir) {
+ p, err := filepath.Abs(cfg.BuildPkgdir)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "go: evaluating -pkgdir: %v\n", err)
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ cfg.BuildPkgdir = p
+ }
+
+ if cfg.BuildP <= 0 {
+ base.Fatalf("go: -p must be a positive integer: %v\n", cfg.BuildP)
+ }
+
+ // Make sure CC, CXX, and FC are either absolute paths or plain command
+ // names (to be looked up in PATH).
+ for _, key := range []string{"CC", "CXX", "FC"} {
+ value := cfg.Getenv(key)
+ args, err := quoted.Split(value)
+ if err != nil {
+ base.Fatalf("go: %s environment variable could not be parsed: %v", key, err)
+ }
+ if len(args) == 0 {
+ continue
+ }
+ path := args[0]
+ if !filepath.IsAbs(path) && path != filepath.Base(path) {
+ base.Fatalf("go: %s environment variable is relative; must be absolute path: %s\n", key, path)
+ }
+ }
+
+ // Set covermode if not already set.
+ // Ensure that -race and -covermode are compatible.
+ if cfg.BuildCoverMode == "" {
+ cfg.BuildCoverMode = "set"
+ if cfg.BuildRace {
+ // Default coverage mode is atomic when -race is set.
+ cfg.BuildCoverMode = "atomic"
+ }
+ }
+ if cfg.BuildRace && cfg.BuildCoverMode != "atomic" {
+ base.Fatalf(`-covermode must be "atomic", not %q, when -race is enabled`, cfg.BuildCoverMode)
+ }
+}
+
+// fuzzInstrumentFlags returns compiler flags that enable fuzzing instrumentation
+// on supported platforms.
+//
+// On unsupported platforms, fuzzInstrumentFlags returns nil, meaning no
+// instrumentation is added. 'go test -fuzz' still works without coverage,
+// but it generates random inputs without guidance, so it's much less effective.
+func fuzzInstrumentFlags() []string {
+ if !platform.FuzzInstrumented(cfg.Goos, cfg.Goarch) {
+ return nil
+ }
+ return []string{"-d=libfuzzer"}
+}
+
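+// instrumentInit validates the -race, -msan, and -asan flags, checks that the
+// platform, compiler, and cgo configuration support the requested sanitizer,
+// and then adds the corresponding forced gc/ld flags, install suffix, and
+// tool tag.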
+func instrumentInit() {
+ if !cfg.BuildRace && !cfg.BuildMSan && !cfg.BuildASan {
+ return
+ }
+ if cfg.BuildRace && cfg.BuildMSan {
+ fmt.Fprintf(os.Stderr, "go: may not use -race and -msan simultaneously\n")
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ if cfg.BuildRace && cfg.BuildASan {
+ fmt.Fprintf(os.Stderr, "go: may not use -race and -asan simultaneously\n")
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ if cfg.BuildMSan && cfg.BuildASan {
+ fmt.Fprintf(os.Stderr, "go: may not use -msan and -asan simultaneously\n")
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ if cfg.BuildMSan && !platform.MSanSupported(cfg.Goos, cfg.Goarch) {
+ fmt.Fprintf(os.Stderr, "-msan is not supported on %s/%s\n", cfg.Goos, cfg.Goarch)
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ if cfg.BuildRace && !platform.RaceDetectorSupported(cfg.Goos, cfg.Goarch) {
+ fmt.Fprintf(os.Stderr, "-race is not supported on %s/%s\n", cfg.Goos, cfg.Goarch)
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ if cfg.BuildASan && !platform.ASanSupported(cfg.Goos, cfg.Goarch) {
+ fmt.Fprintf(os.Stderr, "-asan is not supported on %s/%s\n", cfg.Goos, cfg.Goarch)
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ // The current implementation is only compatible with the ASan library from version
+ // v7 to v9 (see the description in src/runtime/asan/asan.go). Therefore, the -asan
+ // option requires a compatible version of the ASan library, which means the gcc
+ // version must be at least 7 and the clang version at least 9; otherwise a
+ // segmentation fault will occur.
+ if cfg.BuildASan {
+ if err := compilerRequiredAsanVersion(); err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ }
+
+ mode := "race"
+ if cfg.BuildMSan {
+ mode = "msan"
+ // MSAN needs PIE on all platforms except linux/amd64.
+ // https://github.com/llvm/llvm-project/blob/llvmorg-13.0.1/clang/lib/Driver/SanitizerArgs.cpp#L621
+ if cfg.BuildBuildmode == "default" && (cfg.Goos != "linux" || cfg.Goarch != "amd64") {
+ cfg.BuildBuildmode = "pie"
+ }
+ }
+ if cfg.BuildASan {
+ mode = "asan"
+ }
+ modeFlag := "-" + mode
+
+ // Check that cgo is enabled.
+ // Note: On macOS, -race does not require cgo. -asan and -msan still do.
+ if !cfg.BuildContext.CgoEnabled && (cfg.Goos != "darwin" || cfg.BuildASan || cfg.BuildMSan) {
+ if runtime.GOOS != cfg.Goos || runtime.GOARCH != cfg.Goarch {
+ fmt.Fprintf(os.Stderr, "go: %s requires cgo\n", modeFlag)
+ } else {
+ fmt.Fprintf(os.Stderr, "go: %s requires cgo; enable cgo by setting CGO_ENABLED=1\n", modeFlag)
+ }
+
+ base.SetExitStatus(2)
+ base.Exit()
+ }
+ forcedGcflags = append(forcedGcflags, modeFlag)
+ forcedLdflags = append(forcedLdflags, modeFlag)
+
+ if cfg.BuildContext.InstallSuffix != "" {
+ cfg.BuildContext.InstallSuffix += "_"
+ }
+ cfg.BuildContext.InstallSuffix += mode
+ cfg.BuildContext.ToolTags = append(cfg.BuildContext.ToolTags, mode)
+}
+
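+// buildModeInit translates -buildmode and -linkshared into the package
+// filter, code generation flags, and linker build mode used by the rest of
+// the build, and validates the module-related flags.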
+func buildModeInit() {
+ gccgo := cfg.BuildToolchainName == "gccgo"
+ var codegenArg string
+
+ // Configure the build mode first, then verify that it is supported.
+ // That way, if the flag is completely bogus we will prefer to error out with
+ // "-buildmode=%s not supported" instead of naming the specific platform.
+
+ switch cfg.BuildBuildmode {
+ case "archive":
+ pkgsFilter = pkgsNotMain
+ case "c-archive":
+ pkgsFilter = oneMainPkg
+ if gccgo {
+ codegenArg = "-fPIC"
+ } else {
+ switch cfg.Goos {
+ case "darwin", "ios":
+ switch cfg.Goarch {
+ case "arm64":
+ codegenArg = "-shared"
+ }
+
+ case "dragonfly", "freebsd", "illumos", "linux", "netbsd", "openbsd", "solaris":
+ // Use -shared so that the result is
+ // suitable for inclusion in a PIE or
+ // shared library.
+ codegenArg = "-shared"
+ }
+ }
+ cfg.ExeSuffix = ".a"
+ ldBuildmode = "c-archive"
+ case "c-shared":
+ pkgsFilter = oneMainPkg
+ if gccgo {
+ codegenArg = "-fPIC"
+ } else {
+ switch cfg.Goos {
+ case "linux", "android", "freebsd":
+ codegenArg = "-shared"
+ case "windows":
+ // Do not add usual .exe suffix to the .dll file.
+ cfg.ExeSuffix = ""
+ }
+ }
+ ldBuildmode = "c-shared"
+ case "default":
+ ldBuildmode = "exe"
+ if platform.DefaultPIE(cfg.Goos, cfg.Goarch, cfg.BuildRace) {
+ ldBuildmode = "pie"
+ if cfg.Goos != "windows" && !gccgo {
+ codegenArg = "-shared"
+ }
+ }
+ case "exe":
+ pkgsFilter = pkgsMain
+ ldBuildmode = "exe"
+ // Set the pkgsFilter to oneMainPkg if the user passed a specific binary output
+ // and is using buildmode=exe for a better error message.
+ // See issue #20017.
+ if cfg.BuildO != "" {
+ pkgsFilter = oneMainPkg
+ }
+ case "pie":
+ if cfg.BuildRace {
+ base.Fatalf("-buildmode=pie not supported when -race is enabled")
+ }
+ if gccgo {
+ codegenArg = "-fPIE"
+ } else {
+ switch cfg.Goos {
+ case "aix", "windows":
+ default:
+ codegenArg = "-shared"
+ }
+ }
+ ldBuildmode = "pie"
+ case "shared":
+ pkgsFilter = pkgsNotMain
+ if gccgo {
+ codegenArg = "-fPIC"
+ } else {
+ codegenArg = "-dynlink"
+ }
+ if cfg.BuildO != "" {
+ base.Fatalf("-buildmode=shared and -o not supported together")
+ }
+ ldBuildmode = "shared"
+ case "plugin":
+ pkgsFilter = oneMainPkg
+ if gccgo {
+ codegenArg = "-fPIC"
+ } else {
+ codegenArg = "-dynlink"
+ }
+ cfg.ExeSuffix = ".so"
+ ldBuildmode = "plugin"
+ default:
+ base.Fatalf("buildmode=%s not supported", cfg.BuildBuildmode)
+ }
+
+ if cfg.BuildBuildmode != "default" && !platform.BuildModeSupported(cfg.BuildToolchainName, cfg.BuildBuildmode, cfg.Goos, cfg.Goarch) {
+ base.Fatalf("-buildmode=%s not supported on %s/%s\n", cfg.BuildBuildmode, cfg.Goos, cfg.Goarch)
+ }
+
+ if cfg.BuildLinkshared {
+ if !platform.BuildModeSupported(cfg.BuildToolchainName, "shared", cfg.Goos, cfg.Goarch) {
+ base.Fatalf("-linkshared not supported on %s/%s\n", cfg.Goos, cfg.Goarch)
+ }
+ if gccgo {
+ codegenArg = "-fPIC"
+ } else {
+ forcedAsmflags = append(forcedAsmflags, "-D=GOBUILDMODE_shared=1",
+ "-linkshared")
+ codegenArg = "-dynlink"
+ forcedGcflags = append(forcedGcflags, "-linkshared")
+ // TODO(mwhudson): remove -w when that gets fixed in linker.
+ forcedLdflags = append(forcedLdflags, "-linkshared", "-w")
+ }
+ }
+ if codegenArg != "" {
+ if gccgo {
+ forcedGccgoflags = append([]string{codegenArg}, forcedGccgoflags...)
+ } else {
+ forcedAsmflags = append([]string{codegenArg}, forcedAsmflags...)
+ forcedGcflags = append([]string{codegenArg}, forcedGcflags...)
+ }
+ // Don't alter InstallSuffix when modifying default codegen args.
+ if cfg.BuildBuildmode != "default" || cfg.BuildLinkshared {
+ if cfg.BuildContext.InstallSuffix != "" {
+ cfg.BuildContext.InstallSuffix += "_"
+ }
+ cfg.BuildContext.InstallSuffix += codegenArg[1:]
+ }
+ }
+
+ switch cfg.BuildMod {
+ case "":
+ // Behavior will be determined automatically, as if no flag were passed.
+ case "readonly", "vendor", "mod":
+ if !cfg.ModulesEnabled && !base.InGOFLAGS("-mod") {
+ base.Fatalf("build flag -mod=%s only valid when using modules", cfg.BuildMod)
+ }
+ default:
+ base.Fatalf("-mod=%s not supported (can be '', 'mod', 'readonly', or 'vendor')", cfg.BuildMod)
+ }
+ if !cfg.ModulesEnabled {
+ if cfg.ModCacheRW && !base.InGOFLAGS("-modcacherw") {
+ base.Fatalf("build flag -modcacherw only valid when using modules")
+ }
+ if cfg.ModFile != "" && !base.InGOFLAGS("-mod") {
+ base.Fatalf("build flag -modfile only valid when using modules")
+ }
+ }
+}
+
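+// version holds the name ("gcc", "clang", or "unknown") and the major/minor
+// version numbers of the C compiler detected by compilerVersion.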
+type version struct {
+ name string
+ major, minor int
+}
+
+var compiler struct {
+ sync.Once
+ version
+ err error
+}
+
+// compilerVersion detects the version of $(go env CC).
+// It returns a non-nil error if the compiler matches a known version schema but
+// the version could not be parsed, or if $(go env CC) could not be determined.
+func compilerVersion() (version, error) {
+ compiler.Once.Do(func() {
+ compiler.err = func() error {
+ compiler.name = "unknown"
+ cc := os.Getenv("CC")
+ out, err := exec.Command(cc, "--version").Output()
+ if err != nil {
+ // Compiler does not support "--version" flag: not Clang or GCC.
+ return err
+ }
+
+ var match [][]byte
+ if bytes.HasPrefix(out, []byte("gcc")) {
+ compiler.name = "gcc"
+ out, err := exec.Command(cc, "-v").CombinedOutput()
+ if err != nil {
+ // gcc, but does not support gcc's "-v" flag?!
+ return err
+ }
+ gccRE := regexp.MustCompile(`gcc version (\d+)\.(\d+)`)
+ match = gccRE.FindSubmatch(out)
+ } else {
+ clangRE := regexp.MustCompile(`clang version (\d+)\.(\d+)`)
+ if match = clangRE.FindSubmatch(out); len(match) > 0 {
+ compiler.name = "clang"
+ }
+ }
+
+ if len(match) < 3 {
+ return nil // "unknown"
+ }
+ if compiler.major, err = strconv.Atoi(string(match[1])); err != nil {
+ return err
+ }
+ if compiler.minor, err = strconv.Atoi(string(match[2])); err != nil {
+ return err
+ }
+ return nil
+ }()
+ })
+ return compiler.version, compiler.err
+}
+
+// compilerRequiredAsanVersion is a copy of the function defined in
+// cmd/cgo/internal/testsanitizers/cc_test.go.
+// It returns an error unless the C compiler is a version supported by ASan.
+func compilerRequiredAsanVersion() error {
+ compiler, err := compilerVersion()
+ if err != nil {
+ return fmt.Errorf("-asan: the version of $(go env CC) could not be parsed")
+ }
+
+ switch compiler.name {
+ case "gcc":
+ if runtime.GOARCH == "ppc64le" && compiler.major < 9 {
+ return fmt.Errorf("-asan is not supported with %s compiler %d.%d\n", compiler.name, compiler.major, compiler.minor)
+ }
+ if compiler.major < 7 {
+ return fmt.Errorf("-asan is not supported with %s compiler %d.%d\n", compiler.name, compiler.major, compiler.minor)
+ }
+ case "clang":
+ if compiler.major < 9 {
+ return fmt.Errorf("-asan is not supported with %s compiler %d.%d\n", compiler.name, compiler.major, compiler.minor)
+ }
+ default:
+ return fmt.Errorf("-asan: C compiler is not gcc or clang")
+ }
+ return nil
+}
diff --git a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go
new file mode 100644
index 0000000..270a34e
--- /dev/null
+++ b/src/cmd/go/internal/work/security.go
@@ -0,0 +1,334 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Checking of compiler and linker flags.
+// We must avoid flags like -fplugin=, which can allow
+// arbitrary code execution during the build.
+// Do not make changes here without carefully
+// considering the implications.
+// (That's why the code is isolated in a file named security.go.)
+//
+// Note that -Wl,foo means split foo on commas and pass to
+// the linker, so that -Wl,-foo,bar means pass -foo bar to
+// the linker. Similarly -Wa,foo for the assembler and so on.
+// If any of these are permitted, the wildcard portion must
+// disallow commas.
+//
+// Note also that GNU binutils accept any argument @foo
+// as meaning "read more flags from the file foo", so we must
+// guard against any command-line argument beginning with @,
+// even things like "-I @foo".
+// We use load.SafeArg (which is even more conservative)
+// to reject these.
+//
+// Even worse, gcc -I@foo (one arg) turns into cc1 -I @foo (two args),
+// so although gcc doesn't expand the @foo, cc1 will.
+// So out of paranoia, we reject @ at the beginning of every
+// flag argument that might be split into its own argument.
+
+package work
+
+import (
+ "fmt"
+ "internal/lazyregexp"
+ "regexp"
+ "strings"
+
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/load"
+)
+
+var re = lazyregexp.New
+
+var validCompilerFlags = []*lazyregexp.Regexp{
+ re(`-D([A-Za-z_][A-Za-z0-9_]*)(=[^@\-]*)?`),
+ re(`-U([A-Za-z_][A-Za-z0-9_]*)`),
+ re(`-F([^@\-].*)`),
+ re(`-I([^@\-].*)`),
+ re(`-O`),
+ re(`-O([^@\-].*)`),
+ re(`-W`),
+ re(`-W([^@,]+)`), // -Wall but not -Wa,-foo.
+ re(`-Wa,-mbig-obj`),
+ re(`-Wp,-D([A-Za-z_][A-Za-z0-9_]*)(=[^@,\-]*)?`),
+ re(`-Wp,-U([A-Za-z_][A-Za-z0-9_]*)`),
+ re(`-ansi`),
+ re(`-f(no-)?asynchronous-unwind-tables`),
+ re(`-f(no-)?blocks`),
+ re(`-f(no-)builtin-[a-zA-Z0-9_]*`),
+ re(`-f(no-)?common`),
+ re(`-f(no-)?constant-cfstrings`),
+ re(`-fdiagnostics-show-note-include-stack`),
+ re(`-f(no-)?eliminate-unused-debug-types`),
+ re(`-f(no-)?exceptions`),
+ re(`-f(no-)?fast-math`),
+ re(`-f(no-)?inline-functions`),
+ re(`-finput-charset=([^@\-].*)`),
+ re(`-f(no-)?fat-lto-objects`),
+ re(`-f(no-)?keep-inline-dllexport`),
+ re(`-f(no-)?lto`),
+ re(`-fmacro-backtrace-limit=(.+)`),
+ re(`-fmessage-length=(.+)`),
+ re(`-f(no-)?modules`),
+ re(`-f(no-)?objc-arc`),
+ re(`-f(no-)?objc-nonfragile-abi`),
+ re(`-f(no-)?objc-legacy-dispatch`),
+ re(`-f(no-)?omit-frame-pointer`),
+ re(`-f(no-)?openmp(-simd)?`),
+ re(`-f(no-)?permissive`),
+ re(`-f(no-)?(pic|PIC|pie|PIE)`),
+ re(`-f(no-)?plt`),
+ re(`-f(no-)?rtti`),
+ re(`-f(no-)?split-stack`),
+ re(`-f(no-)?stack-(.+)`),
+ re(`-f(no-)?strict-aliasing`),
+ re(`-f(un)signed-char`),
+ re(`-f(no-)?use-linker-plugin`), // safe if -B is not used; we don't permit -B
+ re(`-f(no-)?visibility-inlines-hidden`),
+ re(`-fsanitize=(.+)`),
+ re(`-ftemplate-depth-(.+)`),
+ re(`-fvisibility=(.+)`),
+ re(`-g([^@\-].*)?`),
+ re(`-m32`),
+ re(`-m64`),
+ re(`-m(abi|arch|cpu|fpu|tune)=([^@\-].*)`),
+ re(`-m(no-)?v?aes`),
+ re(`-marm`),
+ re(`-m(no-)?avx[0-9a-z]*`),
+ re(`-mfloat-abi=([^@\-].*)`),
+ re(`-mfpmath=[0-9a-z,+]*`),
+ re(`-m(no-)?avx[0-9a-z.]*`),
+ re(`-m(no-)?ms-bitfields`),
+ re(`-m(no-)?stack-(.+)`),
+ re(`-mmacosx-(.+)`),
+ re(`-mios-simulator-version-min=(.+)`),
+ re(`-miphoneos-version-min=(.+)`),
+ re(`-mtvos-simulator-version-min=(.+)`),
+ re(`-mtvos-version-min=(.+)`),
+ re(`-mwatchos-simulator-version-min=(.+)`),
+ re(`-mwatchos-version-min=(.+)`),
+ re(`-mnop-fun-dllimport`),
+ re(`-m(no-)?sse[0-9.]*`),
+ re(`-m(no-)?ssse3`),
+ re(`-mthumb(-interwork)?`),
+ re(`-mthreads`),
+ re(`-mwindows`),
+ re(`--param=ssp-buffer-size=[0-9]*`),
+ re(`-pedantic(-errors)?`),
+ re(`-pipe`),
+ re(`-pthread`),
+ re(`-?-std=([^@\-].*)`),
+ re(`-?-stdlib=([^@\-].*)`),
+ re(`--sysroot=([^@\-].*)`),
+ re(`-w`),
+ re(`-x([^@\-].*)`),
+ re(`-v`),
+}
+
+var validCompilerFlagsWithNextArg = []string{
+ "-arch",
+ "-D",
+ "-U",
+ "-I",
+ "-F",
+ "-framework",
+ "-include",
+ "-isysroot",
+ "-isystem",
+ "--sysroot",
+ "-target",
+ "-x",
+}
+
+var validLinkerFlags = []*lazyregexp.Regexp{
+ re(`-F([^@\-].*)`),
+ re(`-l([^@\-].*)`),
+ re(`-L([^@\-].*)`),
+ re(`-O`),
+ re(`-O([^@\-].*)`),
+ re(`-f(no-)?(pic|PIC|pie|PIE)`),
+ re(`-f(no-)?openmp(-simd)?`),
+ re(`-fsanitize=([^@\-].*)`),
+ re(`-flat_namespace`),
+ re(`-g([^@\-].*)?`),
+ re(`-headerpad_max_install_names`),
+ re(`-m(abi|arch|cpu|fpu|tune)=([^@\-].*)`),
+ re(`-mfloat-abi=([^@\-].*)`),
+ re(`-mmacosx-(.+)`),
+ re(`-mios-simulator-version-min=(.+)`),
+ re(`-miphoneos-version-min=(.+)`),
+ re(`-mthreads`),
+ re(`-mwindows`),
+ re(`-(pic|PIC|pie|PIE)`),
+ re(`-pthread`),
+ re(`-rdynamic`),
+ re(`-shared`),
+ re(`-?-static([-a-z0-9+]*)`),
+ re(`-?-stdlib=([^@\-].*)`),
+ re(`-v`),
+
+ // Note that any wildcards in -Wl need to exclude comma,
+ // since -Wl splits its argument at commas and passes
+ // them all to the linker uninterpreted. Allowing comma
+ // in a wildcard would allow tunneling arbitrary additional
+ // linker arguments through one of these.
+ re(`-Wl,--(no-)?allow-multiple-definition`),
+ re(`-Wl,--(no-)?allow-shlib-undefined`),
+ re(`-Wl,--(no-)?as-needed`),
+ re(`-Wl,-Bdynamic`),
+ re(`-Wl,-berok`),
+ re(`-Wl,-Bstatic`),
+ re(`-Wl,-Bsymbolic-functions`),
+ re(`-Wl,-O[0-9]+`),
+ re(`-Wl,-d[ny]`),
+ re(`-Wl,--disable-new-dtags`),
+ re(`-Wl,-e[=,][a-zA-Z0-9]+`),
+ re(`-Wl,--enable-new-dtags`),
+ re(`-Wl,--end-group`),
+ re(`-Wl,--(no-)?export-dynamic`),
+ re(`-Wl,-E`),
+ re(`-Wl,-framework,[^,@\-][^,]+`),
+ re(`-Wl,--hash-style=(sysv|gnu|both)`),
+ re(`-Wl,-headerpad_max_install_names`),
+ re(`-Wl,--no-undefined`),
+ re(`-Wl,-R,?([^@\-,][^,@]*$)`),
+ re(`-Wl,--just-symbols[=,]([^,@\-][^,@]+)`),
+ re(`-Wl,-rpath(-link)?[=,]([^,@\-][^,]+)`),
+ re(`-Wl,-s`),
+ re(`-Wl,-search_paths_first`),
+ re(`-Wl,-sectcreate,([^,@\-][^,]+),([^,@\-][^,]+),([^,@\-][^,]+)`),
+ re(`-Wl,--start-group`),
+ re(`-Wl,-?-static`),
+ re(`-Wl,-?-subsystem,(native|windows|console|posix|xbox)`),
+ re(`-Wl,-syslibroot[=,]([^,@\-][^,]+)`),
+ re(`-Wl,-undefined[=,]([^,@\-][^,]+)`),
+ re(`-Wl,-?-unresolved-symbols=[^,]+`),
+ re(`-Wl,--(no-)?warn-([^,]+)`),
+ re(`-Wl,-?-wrap[=,][^,@\-][^,]*`),
+ re(`-Wl,-z,(no)?execstack`),
+ re(`-Wl,-z,relro`),
+
+ re(`[a-zA-Z0-9_/].*\.(a|o|obj|dll|dylib|so|tbd)`), // direct linker inputs: x.o or libfoo.so (but not -foo.o or @foo.o)
+ re(`\./.*\.(a|o|obj|dll|dylib|so|tbd)`),
+}
+
+var validLinkerFlagsWithNextArg = []string{
+ "-arch",
+ "-F",
+ "-l",
+ "-L",
+ "-framework",
+ "-isysroot",
+ "--sysroot",
+ "-target",
+ "-Wl,-framework",
+ "-Wl,-rpath",
+ "-Wl,-R",
+ "-Wl,--just-symbols",
+ "-Wl,-undefined",
+}
+
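+// checkCompilerFlags reports an error if list contains a compiler flag that
+// is not on the allow list, subject to the CGO_<name>_ALLOW and
+// CGO_<name>_DISALLOW environment overrides.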
+func checkCompilerFlags(name, source string, list []string) error {
+ checkOverrides := true
+ return checkFlags(name, source, list, validCompilerFlags, validCompilerFlagsWithNextArg, checkOverrides)
+}
+
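+// checkLinkerFlags is the corresponding check for linker flags.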
+func checkLinkerFlags(name, source string, list []string) error {
+ checkOverrides := true
+ return checkFlags(name, source, list, validLinkerFlags, validLinkerFlagsWithNextArg, checkOverrides)
+}
+
+// checkCompilerFlagsForInternalLink returns an error if 'list'
+// contains a flag or flags that may not be fully supported by
+// internal linking (meaning that we should punt the link to the
+// external linker).
+func checkCompilerFlagsForInternalLink(name, source string, list []string) error {
+ checkOverrides := false
+ if err := checkFlags(name, source, list, validCompilerFlags, validCompilerFlagsWithNextArg, checkOverrides); err != nil {
+ return err
+ }
+ // Currently the only flag on the allow list that causes problems
+ // for the linker is "-flto"; check for it manually here.
+ for _, fl := range list {
+ if strings.HasPrefix(fl, "-flto") {
+ return fmt.Errorf("flag %q triggers external linking", fl)
+ }
+ }
+ return nil
+}
+
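+// checkFlags checks each flag in list against the valid regexps and the
+// validNext flags (which are allowed to consume the following argument).
+// When checkOverrides is true, the CGO_<name>_ALLOW and CGO_<name>_DISALLOW
+// environment variables can override the built-in rules.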
+func checkFlags(name, source string, list []string, valid []*lazyregexp.Regexp, validNext []string, checkOverrides bool) error {
+ // Let users override rules with $CGO_CFLAGS_ALLOW, $CGO_CFLAGS_DISALLOW, etc.
+ var (
+ allow *regexp.Regexp
+ disallow *regexp.Regexp
+ )
+ if checkOverrides {
+ if env := cfg.Getenv("CGO_" + name + "_ALLOW"); env != "" {
+ r, err := regexp.Compile(env)
+ if err != nil {
+ return fmt.Errorf("parsing $CGO_%s_ALLOW: %v", name, err)
+ }
+ allow = r
+ }
+ if env := cfg.Getenv("CGO_" + name + "_DISALLOW"); env != "" {
+ r, err := regexp.Compile(env)
+ if err != nil {
+ return fmt.Errorf("parsing $CGO_%s_DISALLOW: %v", name, err)
+ }
+ disallow = r
+ }
+ }
+
+Args:
+ for i := 0; i < len(list); i++ {
+ arg := list[i]
+ if disallow != nil && disallow.FindString(arg) == arg {
+ goto Bad
+ }
+ if allow != nil && allow.FindString(arg) == arg {
+ continue Args
+ }
+ for _, re := range valid {
+ if re.FindString(arg) == arg { // must be complete match
+ continue Args
+ }
+ }
+ for _, x := range validNext {
+ if arg == x {
+ if i+1 < len(list) && load.SafeArg(list[i+1]) {
+ i++
+ continue Args
+ }
+
+ // Permit -Wl,-framework -Wl,name.
+ if i+1 < len(list) &&
+ strings.HasPrefix(arg, "-Wl,") &&
+ strings.HasPrefix(list[i+1], "-Wl,") &&
+ load.SafeArg(list[i+1][4:]) &&
+ !strings.Contains(list[i+1][4:], ",") {
+ i++
+ continue Args
+ }
+
+ // Permit -I= /path, -I $SYSROOT.
+ if i+1 < len(list) && arg == "-I" {
+ if (strings.HasPrefix(list[i+1], "=") || strings.HasPrefix(list[i+1], "$SYSROOT")) &&
+ load.SafeArg(list[i+1][1:]) {
+ i++
+ continue Args
+ }
+ }
+
+ if i+1 < len(list) {
+ return fmt.Errorf("invalid flag in %s: %s %s (see https://golang.org/s/invalidflag)", source, arg, list[i+1])
+ }
+ return fmt.Errorf("invalid flag in %s: %s without argument (see https://golang.org/s/invalidflag)", source, arg)
+ }
+ }
+ Bad:
+ return fmt.Errorf("invalid flag in %s: %s", source, arg)
+ }
+ return nil
+}
diff --git a/src/cmd/go/internal/work/security_test.go b/src/cmd/go/internal/work/security_test.go
new file mode 100644
index 0000000..c05ba7b
--- /dev/null
+++ b/src/cmd/go/internal/work/security_test.go
@@ -0,0 +1,318 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "os"
+ "strings"
+ "testing"
+)
+
+var goodCompilerFlags = [][]string{
+ {"-DFOO"},
+ {"-Dfoo=bar"},
+ {"-Ufoo"},
+ {"-Ufoo1"},
+ {"-F/Qt"},
+ {"-F", "/Qt"},
+ {"-I/"},
+ {"-I/etc/passwd"},
+ {"-I."},
+ {"-O"},
+ {"-O2"},
+ {"-Osmall"},
+ {"-W"},
+ {"-Wall"},
+ {"-Wp,-Dfoo=bar"},
+ {"-Wp,-Ufoo"},
+ {"-Wp,-Dfoo1"},
+ {"-Wp,-Ufoo1"},
+ {"-flto"},
+ {"-fobjc-arc"},
+ {"-fno-objc-arc"},
+ {"-fomit-frame-pointer"},
+ {"-fno-omit-frame-pointer"},
+ {"-fpic"},
+ {"-fno-pic"},
+ {"-fPIC"},
+ {"-fno-PIC"},
+ {"-fpie"},
+ {"-fno-pie"},
+ {"-fPIE"},
+ {"-fno-PIE"},
+ {"-fsplit-stack"},
+ {"-fno-split-stack"},
+ {"-fstack-xxx"},
+ {"-fno-stack-xxx"},
+ {"-fsanitize=hands"},
+ {"-g"},
+ {"-ggdb"},
+ {"-march=souza"},
+ {"-mcpu=123"},
+ {"-mfpu=123"},
+ {"-mtune=happybirthday"},
+ {"-mstack-overflow"},
+ {"-mno-stack-overflow"},
+ {"-mmacosx-version"},
+ {"-mnop-fun-dllimport"},
+ {"-pthread"},
+ {"-std=c99"},
+ {"-xc"},
+ {"-D", "FOO"},
+ {"-D", "foo=bar"},
+ {"-I", "."},
+ {"-I", "/etc/passwd"},
+ {"-I", "世界"},
+ {"-I", "=/usr/include/libxml2"},
+ {"-I", "dir"},
+ {"-I", "$SYSROOT/dir"},
+ {"-isystem", "/usr/include/mozjs-68"},
+ {"-include", "/usr/include/mozjs-68/RequiredDefines.h"},
+ {"-framework", "Chocolate"},
+ {"-x", "c"},
+ {"-v"},
+}
+
+var badCompilerFlags = [][]string{
+ {"-D@X"},
+ {"-D-X"},
+ {"-Ufoo=bar"},
+ {"-F@dir"},
+ {"-F-dir"},
+ {"-I@dir"},
+ {"-I-dir"},
+ {"-O@1"},
+ {"-Wa,-foo"},
+ {"-W@foo"},
+ {"-Wp,-DX,-D@X"},
+ {"-Wp,-UX,-U@X"},
+ {"-g@gdb"},
+ {"-g-gdb"},
+ {"-march=@dawn"},
+ {"-march=-dawn"},
+ {"-std=@c99"},
+ {"-std=-c99"},
+ {"-x@c"},
+ {"-x-c"},
+ {"-D", "@foo"},
+ {"-D", "-foo"},
+ {"-I", "@foo"},
+ {"-I", "-foo"},
+ {"-I", "=@obj"},
+ {"-include", "@foo"},
+ {"-framework", "-Caffeine"},
+ {"-framework", "@Home"},
+ {"-x", "--c"},
+ {"-x", "@obj"},
+}
+
+func TestCheckCompilerFlags(t *testing.T) {
+ for _, f := range goodCompilerFlags {
+ if err := checkCompilerFlags("test", "test", f); err != nil {
+ t.Errorf("unexpected error for %q: %v", f, err)
+ }
+ }
+ for _, f := range badCompilerFlags {
+ if err := checkCompilerFlags("test", "test", f); err == nil {
+ t.Errorf("missing error for %q", f)
+ }
+ }
+}
+
+var goodLinkerFlags = [][]string{
+ {"-Fbar"},
+ {"-lbar"},
+ {"-Lbar"},
+ {"-fpic"},
+ {"-fno-pic"},
+ {"-fPIC"},
+ {"-fno-PIC"},
+ {"-fpie"},
+ {"-fno-pie"},
+ {"-fPIE"},
+ {"-fno-PIE"},
+ {"-fsanitize=hands"},
+ {"-g"},
+ {"-ggdb"},
+ {"-march=souza"},
+ {"-mcpu=123"},
+ {"-mfpu=123"},
+ {"-mtune=happybirthday"},
+ {"-pic"},
+ {"-pthread"},
+ {"-Wl,--hash-style=both"},
+ {"-Wl,-rpath,foo"},
+ {"-Wl,-rpath,$ORIGIN/foo"},
+ {"-Wl,-R", "/foo"},
+ {"-Wl,-R", "foo"},
+ {"-Wl,-R,foo"},
+ {"-Wl,--just-symbols=foo"},
+ {"-Wl,--just-symbols,foo"},
+ {"-Wl,--warn-error"},
+ {"-Wl,--no-warn-error"},
+ {"foo.so"},
+ {"_世界.dll"},
+ {"./x.o"},
+ {"libcgosotest.dylib"},
+ {"-F", "framework"},
+ {"-l", "."},
+ {"-l", "/etc/passwd"},
+ {"-l", "世界"},
+ {"-L", "framework"},
+ {"-framework", "Chocolate"},
+ {"-v"},
+ {"-Wl,-sectcreate,__TEXT,__info_plist,${SRCDIR}/Info.plist"},
+ {"-Wl,-framework", "-Wl,Chocolate"},
+ {"-Wl,-framework,Chocolate"},
+ {"-Wl,-unresolved-symbols=ignore-all"},
+ {"libcgotbdtest.tbd"},
+ {"./libcgotbdtest.tbd"},
+}
+
+var badLinkerFlags = [][]string{
+ {"-DFOO"},
+ {"-Dfoo=bar"},
+ {"-W"},
+ {"-Wall"},
+ {"-fobjc-arc"},
+ {"-fno-objc-arc"},
+ {"-fomit-frame-pointer"},
+ {"-fno-omit-frame-pointer"},
+ {"-fsplit-stack"},
+ {"-fno-split-stack"},
+ {"-fstack-xxx"},
+ {"-fno-stack-xxx"},
+ {"-mstack-overflow"},
+ {"-mno-stack-overflow"},
+ {"-mnop-fun-dllimport"},
+ {"-std=c99"},
+ {"-xc"},
+ {"-D", "FOO"},
+ {"-D", "foo=bar"},
+ {"-I", "FOO"},
+ {"-L", "@foo"},
+ {"-L", "-foo"},
+ {"-x", "c"},
+ {"-D@X"},
+ {"-D-X"},
+ {"-I@dir"},
+ {"-I-dir"},
+ {"-O@1"},
+ {"-Wa,-foo"},
+ {"-W@foo"},
+ {"-g@gdb"},
+ {"-g-gdb"},
+ {"-march=@dawn"},
+ {"-march=-dawn"},
+ {"-std=@c99"},
+ {"-std=-c99"},
+ {"-x@c"},
+ {"-x-c"},
+ {"-D", "@foo"},
+ {"-D", "-foo"},
+ {"-I", "@foo"},
+ {"-I", "-foo"},
+ {"-l", "@foo"},
+ {"-l", "-foo"},
+ {"-framework", "-Caffeine"},
+ {"-framework", "@Home"},
+ {"-Wl,-framework,-Caffeine"},
+ {"-Wl,-framework", "-Wl,@Home"},
+ {"-Wl,-framework", "@Home"},
+ {"-Wl,-framework,Chocolate,@Home"},
+ {"-Wl,--hash-style=foo"},
+ {"-x", "--c"},
+ {"-x", "@obj"},
+ {"-Wl,-rpath,@foo"},
+ {"-Wl,-R,foo,bar"},
+ {"-Wl,-R,@foo"},
+ {"-Wl,--just-symbols,@foo"},
+ {"../x.o"},
+ {"-Wl,-R,"},
+ {"-Wl,-O"},
+ {"-Wl,-e="},
+ {"-Wl,-e,"},
+ {"-Wl,-R,-flag"},
+}
+
+func TestCheckLinkerFlags(t *testing.T) {
+ for _, f := range goodLinkerFlags {
+ if err := checkLinkerFlags("test", "test", f); err != nil {
+ t.Errorf("unexpected error for %q: %v", f, err)
+ }
+ }
+ for _, f := range badLinkerFlags {
+ if err := checkLinkerFlags("test", "test", f); err == nil {
+ t.Errorf("missing error for %q", f)
+ }
+ }
+}
+
+func TestCheckFlagAllowDisallow(t *testing.T) {
+ if err := checkCompilerFlags("TEST", "test", []string{"-disallow"}); err == nil {
+ t.Fatalf("missing error for -disallow")
+ }
+ os.Setenv("CGO_TEST_ALLOW", "-disallo")
+ if err := checkCompilerFlags("TEST", "test", []string{"-disallow"}); err == nil {
+ t.Fatalf("missing error for -disallow with CGO_TEST_ALLOW=-disallo")
+ }
+ os.Setenv("CGO_TEST_ALLOW", "-disallow")
+ if err := checkCompilerFlags("TEST", "test", []string{"-disallow"}); err != nil {
+ t.Fatalf("unexpected error for -disallow with CGO_TEST_ALLOW=-disallow: %v", err)
+ }
+ os.Unsetenv("CGO_TEST_ALLOW")
+
+ if err := checkCompilerFlags("TEST", "test", []string{"-Wall"}); err != nil {
+ t.Fatalf("unexpected error for -Wall: %v", err)
+ }
+ os.Setenv("CGO_TEST_DISALLOW", "-Wall")
+ if err := checkCompilerFlags("TEST", "test", []string{"-Wall"}); err == nil {
+ t.Fatalf("missing error for -Wall with CGO_TEST_DISALLOW=-Wall")
+ }
+ os.Setenv("CGO_TEST_ALLOW", "-Wall") // disallow wins
+ if err := checkCompilerFlags("TEST", "test", []string{"-Wall"}); err == nil {
+ t.Fatalf("missing error for -Wall with CGO_TEST_DISALLOW=-Wall and CGO_TEST_ALLOW=-Wall")
+ }
+
+ os.Setenv("CGO_TEST_ALLOW", "-fplugin.*")
+ os.Setenv("CGO_TEST_DISALLOW", "-fplugin=lint.so")
+ if err := checkCompilerFlags("TEST", "test", []string{"-fplugin=faster.so"}); err != nil {
+ t.Fatalf("unexpected error for -fplugin=faster.so: %v", err)
+ }
+ if err := checkCompilerFlags("TEST", "test", []string{"-fplugin=lint.so"}); err == nil {
+ t.Fatalf("missing error for -fplugin=lint.so: %v", err)
+ }
+}
+
+func TestCheckCompilerFlagsForInternalLink(t *testing.T) {
+ // Any "bad" compiler flag should trigger external linking.
+ for _, f := range badCompilerFlags {
+ if err := checkCompilerFlagsForInternalLink("test", "test", f); err == nil {
+ t.Errorf("missing error for %q", f)
+ }
+ }
+
+ // All "good" compiler flags should not trigger external linking,
+ // except for anything that begins with "-flto".
+ for _, f := range goodCompilerFlags {
+ foundLTO := false
+ for _, s := range f {
+ if strings.Contains(s, "-flto") {
+ foundLTO = true
+ }
+ }
+ if err := checkCompilerFlagsForInternalLink("test", "test", f); err != nil {
+ // expect error for -flto
+ if !foundLTO {
+ t.Errorf("unexpected error for %q: %v", f, err)
+ }
+ } else {
+ // expect no error for everything else
+ if foundLTO {
+ t.Errorf("missing error for %q: %v", f, err)
+ }
+ }
+ }
+}
diff --git a/src/cmd/go/internal/work/shell_test.go b/src/cmd/go/internal/work/shell_test.go
new file mode 100644
index 0000000..24bef4e
--- /dev/null
+++ b/src/cmd/go/internal/work/shell_test.go
@@ -0,0 +1,139 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package work
+
+import (
+ "bytes"
+ "internal/testenv"
+ "strings"
+ "testing"
+ "unicode"
+)
+
+func FuzzSplitPkgConfigOutput(f *testing.F) {
+ testenv.MustHaveExecPath(f, "/bin/sh")
+
+ f.Add([]byte(`$FOO`))
+ f.Add([]byte(`\$FOO`))
+ f.Add([]byte(`${FOO}`))
+ f.Add([]byte(`\${FOO}`))
+ f.Add([]byte(`$(/bin/false)`))
+ f.Add([]byte(`\$(/bin/false)`))
+ f.Add([]byte(`$((0))`))
+ f.Add([]byte(`\$((0))`))
+ f.Add([]byte(`unescaped space`))
+ f.Add([]byte(`escaped\ space`))
+ f.Add([]byte(`"unterminated quote`))
+ f.Add([]byte(`'unterminated quote`))
+ f.Add([]byte(`unterminated escape\`))
+ f.Add([]byte(`"quote with unterminated escape\`))
+ f.Add([]byte(`'quoted "double quotes"'`))
+ f.Add([]byte(`"quoted 'single quotes'"`))
+ f.Add([]byte(`"\$0"`))
+ f.Add([]byte(`"\$\0"`))
+ f.Add([]byte(`"\$"`))
+ f.Add([]byte(`"\$ "`))
+
+ // Example positive inputs from TestSplitPkgConfigOutput.
+ // Some bare newlines have been removed so that the inputs
+ // are valid in the shell script we use for comparison.
+ f.Add([]byte(`-r:foo -L/usr/white\ space/lib -lfoo\ bar -lbar\ baz`))
+ f.Add([]byte(`-lextra\ fun\ arg\\`))
+ f.Add([]byte("\textra whitespace\r"))
+ f.Add([]byte(" \r "))
+ f.Add([]byte(`"-r:foo" "-L/usr/white space/lib" "-lfoo bar" "-lbar baz"`))
+ f.Add([]byte(`"-lextra fun arg\\"`))
+ f.Add([]byte(`" \r\n\ "`))
+ f.Add([]byte(`""`))
+ f.Add([]byte(``))
+ f.Add([]byte(`"\\"`))
+ f.Add([]byte(`"\x"`))
+ f.Add([]byte(`"\\x"`))
+ f.Add([]byte(`'\\'`))
+ f.Add([]byte(`'\x'`))
+ f.Add([]byte(`"\\x"`))
+ f.Add([]byte("\\\n"))
+ f.Add([]byte(`-fPIC -I/test/include/foo -DQUOTED='"/test/share/doc"'`))
+ f.Add([]byte(`-fPIC -I/test/include/foo -DQUOTED="/test/share/doc"`))
+ f.Add([]byte(`-fPIC -I/test/include/foo -DQUOTED=\"/test/share/doc\"`))
+ f.Add([]byte(`-fPIC -I/test/include/foo -DQUOTED='/test/share/doc'`))
+ f.Add([]byte(`-DQUOTED='/te\st/share/d\oc'`))
+ f.Add([]byte(`-Dhello=10 -Dworld=+32 -DDEFINED_FROM_PKG_CONFIG=hello\ world`))
+ f.Add([]byte(`"broken\"" \\\a "a"`))
+
+ // Example negative inputs from TestSplitPkgConfigOutput.
+ f.Add([]byte(`" \r\n `))
+ f.Add([]byte(`"-r:foo" "-L/usr/white space/lib "-lfoo bar" "-lbar baz"`))
+ f.Add([]byte(`"-lextra fun arg\\`))
+ f.Add([]byte(`broken flag\`))
+ f.Add([]byte(`extra broken flag \`))
+ f.Add([]byte(`\`))
+ f.Add([]byte(`"broken\"" "extra" \`))
+
+ f.Fuzz(func(t *testing.T, b []byte) {
+ t.Parallel()
+
+ if bytes.ContainsAny(b, "*?[#~%\x00{}!") {
+ t.Skipf("skipping %#q: contains a sometimes-quoted character", b)
+ }
+ // splitPkgConfigOutput itself rejects inputs that contain unquoted
+ // shell operator characters. (Quoted shell characters are fine.)
+
+ for _, c := range b {
+ if c > unicode.MaxASCII {
+ t.Skipf("skipping %#q: contains a non-ASCII character %q", b, c)
+ }
+ if !unicode.IsGraphic(rune(c)) && !unicode.IsSpace(rune(c)) {
+ t.Skipf("skipping %#q: contains non-graphic character %q", b, c)
+ }
+ }
+
+ args, err := splitPkgConfigOutput(b)
+ if err != nil {
+ // We haven't checked that the shell would actually reject this input too,
+ // but if splitPkgConfigOutput rejected it it's probably too dangerous to
+ // run in the script.
+ t.Logf("%#q: %v", b, err)
+ return
+ }
+ t.Logf("splitPkgConfigOutput(%#q) = %#q", b, args)
+ if len(args) == 0 {
+ t.Skipf("skipping %#q: contains no arguments", b)
+ }
+
+ var buf strings.Builder
+ for _, arg := range args {
+ buf.WriteString(arg)
+ buf.WriteString("\n")
+ }
+ wantOut := buf.String()
+
+ if strings.Count(wantOut, "\n") != len(args)+bytes.Count(b, []byte("\n")) {
+ // One of the newlines in b was treated as a delimiter and not part of an
+ // argument. Our bash test script would interpret that as a syntax error.
+ t.Skipf("skipping %#q: contains a bare newline", b)
+ }
+
+ // We use the printf shell command to echo the arguments because, per
+ // https://pubs.opengroup.org/onlinepubs/9699919799/utilities/echo.html#tag_20_37_16:
+ // “It is not possible to use echo portably across all POSIX systems unless
+ // both -n (as the first argument) and escape sequences are omitted.”
+ cmd := testenv.Command(t, "/bin/sh", "-c", "printf '%s\n' "+string(b))
+ cmd.Env = append(cmd.Environ(), "LC_ALL=POSIX", "POSIXLY_CORRECT=1")
+ cmd.Stderr = new(strings.Builder)
+ out, err := cmd.Output()
+ if err != nil {
+ t.Fatalf("%#q: %v\n%s", cmd.Args, err, cmd.Stderr)
+ }
+
+ if string(out) != wantOut {
+ t.Logf("%#q:\n%#q", cmd.Args, out)
+ t.Logf("want:\n%#q", wantOut)
+ t.Errorf("parsed args do not match")
+ }
+ })
+}
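
For orientation, a hypothetical in-package example test (not part of this
patch; it relies only on the splitPkgConfigOutput([]byte) ([]string, error)
signature used by the fuzz target above) showing the shell-style splitting
that the fuzzer cross-checks against /bin/sh:

	package work

	import (
		"reflect"
		"testing"
	)

	// Inputs follow the seed corpus above: an escaped space joins a word,
	// and double quotes group a word containing spaces.
	func TestSplitPkgConfigOutputExample(t *testing.T) {
		args, err := splitPkgConfigOutput([]byte(`-L/usr/white\ space/lib "-lfoo bar" -lbaz`))
		if err != nil {
			t.Fatal(err)
		}
		want := []string{"-L/usr/white space/lib", "-lfoo bar", "-lbaz"}
		if !reflect.DeepEqual(args, want) {
			t.Errorf("splitPkgConfigOutput = %q, want %q", args, want)
		}
	}
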
diff --git a/src/cmd/go/internal/workcmd/edit.go b/src/cmd/go/internal/workcmd/edit.go
new file mode 100644
index 0000000..8d975b0
--- /dev/null
+++ b/src/cmd/go/internal/workcmd/edit.go
@@ -0,0 +1,340 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go work edit
+
+package workcmd
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modload"
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/mod/module"
+
+ "golang.org/x/mod/modfile"
+)
+
+var cmdEdit = &base.Command{
+ UsageLine: "go work edit [editing flags] [go.work]",
+ Short: "edit go.work from tools or scripts",
+ Long: `Edit provides a command-line interface for editing go.work,
+for use primarily by tools or scripts. It only reads go.work;
+it does not look up information about the modules involved.
+If no file is specified, Edit looks for a go.work file in the current
+directory and its parent directories.
+
+The editing flags specify a sequence of editing operations.
+
+The -fmt flag reformats the go.work file without making other changes.
+This reformatting is also implied by any other modifications that use or
+rewrite the go.work file. The only time this flag is needed is if no other
+flags are specified, as in 'go work edit -fmt'.
+
+The -use=path and -dropuse=path flags
+add and drop a use directive from the go.work file's set of module directories.
+
+The -replace=old[@v]=new[@v] flag adds a replacement of the given
+module path and version pair. If the @v in old@v is omitted, a
+replacement without a version on the left side is added, which applies
+to all versions of the old module path. If the @v in new@v is omitted,
+the new path should be a local module root directory, not a module
+path. Note that -replace overrides any redundant replacements for old[@v],
+so omitting @v will drop existing replacements for specific versions.
+
+The -dropreplace=old[@v] flag drops a replacement of the given
+module path and version pair. If the @v is omitted, a replacement without
+a version on the left side is dropped.
+
+The -use, -dropuse, -replace, and -dropreplace
+editing flags may be repeated, and the changes are applied in the order given.
+
+The -go=version flag sets the expected Go language version.
+
+The -toolchain=name flag sets the Go toolchain to use.
+
+The -print flag prints the final go.work in its text format instead of
+writing it back to go.work.
+
+The -json flag prints the final go.work file in JSON format instead of
+writing it back to go.work. The JSON output corresponds to these Go types:
+
+ type GoWork struct {
+ Go string
+ Toolchain string
+ Use []Use
+ Replace []Replace
+ }
+
+ type Use struct {
+ DiskPath string
+ ModulePath string
+ }
+
+ type Replace struct {
+ Old Module
+ New Module
+ }
+
+ type Module struct {
+ Path string
+ Version string
+ }
+
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
+`,
+}
+
+var (
+ editFmt = cmdEdit.Flag.Bool("fmt", false, "")
+ editGo = cmdEdit.Flag.String("go", "", "")
+ editToolchain = cmdEdit.Flag.String("toolchain", "", "")
+ editJSON = cmdEdit.Flag.Bool("json", false, "")
+ editPrint = cmdEdit.Flag.Bool("print", false, "")
+ workedits []func(file *modfile.WorkFile) // edits specified in flags
+)
+
+type flagFunc func(string)
+
+func (f flagFunc) String() string { return "" }
+func (f flagFunc) Set(s string) error { f(s); return nil }
+
+func init() {
+ cmdEdit.Run = runEditwork // break init cycle
+
+ cmdEdit.Flag.Var(flagFunc(flagEditworkUse), "use", "")
+ cmdEdit.Flag.Var(flagFunc(flagEditworkDropUse), "dropuse", "")
+ cmdEdit.Flag.Var(flagFunc(flagEditworkReplace), "replace", "")
+ cmdEdit.Flag.Var(flagFunc(flagEditworkDropReplace), "dropreplace", "")
+ base.AddChdirFlag(&cmdEdit.Flag)
+}
+
+func runEditwork(ctx context.Context, cmd *base.Command, args []string) {
+ if *editJSON && *editPrint {
+ base.Fatalf("go: cannot use both -json and -print")
+ }
+
+ if len(args) > 1 {
+ base.Fatalf("go: 'go help work edit' accepts at most one argument")
+ }
+ var gowork string
+ if len(args) == 1 {
+ gowork = args[0]
+ } else {
+ modload.InitWorkfile()
+ gowork = modload.WorkFilePath()
+ }
+ if gowork == "" {
+ base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
+ }
+
+ if *editGo != "" && *editGo != "none" {
+ if !modfile.GoVersionRE.MatchString(*editGo) {
+ base.Fatalf(`go work: invalid -go option; expecting something like "-go %s"`, gover.Local())
+ }
+ }
+ if *editToolchain != "" && *editToolchain != "none" {
+ if !modfile.ToolchainRE.MatchString(*editToolchain) {
+ base.Fatalf(`go work: invalid -toolchain option; expecting something like "-toolchain go%s"`, gover.Local())
+ }
+ }
+
+ anyFlags := *editGo != "" ||
+ *editToolchain != "" ||
+ *editJSON ||
+ *editPrint ||
+ *editFmt ||
+ len(workedits) > 0
+
+ if !anyFlags {
+ base.Fatalf("go: no flags specified (see 'go help work edit').")
+ }
+
+ workFile, err := modload.ReadWorkFile(gowork)
+ if err != nil {
+ base.Fatalf("go: errors parsing %s:\n%s", base.ShortPath(gowork), err)
+ }
+
+ if *editGo == "none" {
+ workFile.DropGoStmt()
+ } else if *editGo != "" {
+ if err := workFile.AddGoStmt(*editGo); err != nil {
+ base.Fatalf("go: internal error: %v", err)
+ }
+ }
+ if *editToolchain == "none" {
+ workFile.DropToolchainStmt()
+ } else if *editToolchain != "" {
+ if err := workFile.AddToolchainStmt(*editToolchain); err != nil {
+ base.Fatalf("go: internal error: %v", err)
+ }
+ }
+
+ if len(workedits) > 0 {
+ for _, edit := range workedits {
+ edit(workFile)
+ }
+ }
+
+ workFile.SortBlocks()
+ workFile.Cleanup() // clean file after edits
+
+ // Note: No call to modload.UpdateWorkFile here.
+ // Edit's job is only to make the edits on the command line,
+ // not to apply the kinds of semantic changes that
+ // UpdateWorkFile does (or would eventually do, if we
+ // decide to add the module comments in go.work).
+
+ if *editJSON {
+ editPrintJSON(workFile)
+ return
+ }
+
+ if *editPrint {
+ os.Stdout.Write(modfile.Format(workFile.Syntax))
+ return
+ }
+
+ modload.WriteWorkFile(gowork, workFile)
+}
+
+// flagEditworkUse implements the -use flag.
+func flagEditworkUse(arg string) {
+ workedits = append(workedits, func(f *modfile.WorkFile) {
+ _, mf, err := modload.ReadModFile(filepath.Join(arg, "go.mod"), nil)
+ modulePath := ""
+ if err == nil {
+ modulePath = mf.Module.Mod.Path
+ }
+ if err := f.AddUse(modload.ToDirectoryPath(arg), modulePath); err != nil {
+ base.Fatalf("go: -use=%s: %v", arg, err)
+ }
+ })
+}
+
+// flagEditworkDropUse implements the -dropuse flag.
+func flagEditworkDropUse(arg string) {
+ workedits = append(workedits, func(f *modfile.WorkFile) {
+ if err := f.DropUse(modload.ToDirectoryPath(arg)); err != nil {
+ base.Fatalf("go: -dropdirectory=%s: %v", arg, err)
+ }
+ })
+}
+
+// allowedVersionArg returns whether a token may be used as a version in go.work.
+// We don't call modfile.CheckPathVersion, because that insists on versions
+// being in semver form, but here we want to allow versions like "master" or
+// "1234abcdef", which the go command will resolve the next time it runs.
+// Even so, we need to make sure the version is a valid token.
+func allowedVersionArg(arg string) bool {
+ return !modfile.MustQuote(arg)
+}
+
+// parsePathVersionOptional parses path[@version], using adj to
+// describe any errors.
+func parsePathVersionOptional(adj, arg string, allowDirPath bool) (path, version string, err error) {
+ before, after, found := strings.Cut(arg, "@")
+ if !found {
+ path = arg
+ } else {
+ path, version = strings.TrimSpace(before), strings.TrimSpace(after)
+ }
+ if err := module.CheckImportPath(path); err != nil {
+ if !allowDirPath || !modfile.IsDirectoryPath(path) {
+ return path, version, fmt.Errorf("invalid %s path: %v", adj, err)
+ }
+ }
+ if path != arg && !allowedVersionArg(version) {
+ return path, version, fmt.Errorf("invalid %s version: %q", adj, version)
+ }
+ return path, version, nil
+}
+
+// flagEditworkReplace implements the -replace flag.
+func flagEditworkReplace(arg string) {
+ before, after, found := strings.Cut(arg, "=")
+ if !found {
+ base.Fatalf("go: -replace=%s: need old[@v]=new[@w] (missing =)", arg)
+ }
+ old, new := strings.TrimSpace(before), strings.TrimSpace(after)
+ if strings.HasPrefix(new, ">") {
+ base.Fatalf("go: -replace=%s: separator between old and new is =, not =>", arg)
+ }
+ oldPath, oldVersion, err := parsePathVersionOptional("old", old, false)
+ if err != nil {
+ base.Fatalf("go: -replace=%s: %v", arg, err)
+ }
+ newPath, newVersion, err := parsePathVersionOptional("new", new, true)
+ if err != nil {
+ base.Fatalf("go: -replace=%s: %v", arg, err)
+ }
+ if newPath == new && !modfile.IsDirectoryPath(new) {
+ base.Fatalf("go: -replace=%s: unversioned new path must be local directory", arg)
+ }
+
+ workedits = append(workedits, func(f *modfile.WorkFile) {
+ if err := f.AddReplace(oldPath, oldVersion, newPath, newVersion); err != nil {
+ base.Fatalf("go: -replace=%s: %v", arg, err)
+ }
+ })
+}
+
+// flagEditworkDropReplace implements the -dropreplace flag.
+func flagEditworkDropReplace(arg string) {
+ path, version, err := parsePathVersionOptional("old", arg, true)
+ if err != nil {
+ base.Fatalf("go: -dropreplace=%s: %v", arg, err)
+ }
+ workedits = append(workedits, func(f *modfile.WorkFile) {
+ if err := f.DropReplace(path, version); err != nil {
+ base.Fatalf("go: -dropreplace=%s: %v", arg, err)
+ }
+ })
+}
+
+type replaceJSON struct {
+ Old module.Version
+ New module.Version
+}
+
+// editPrintJSON prints the -json output.
+func editPrintJSON(workFile *modfile.WorkFile) {
+ var f workfileJSON
+ if workFile.Go != nil {
+ f.Go = workFile.Go.Version
+ }
+ for _, d := range workFile.Use {
+ f.Use = append(f.Use, useJSON{DiskPath: d.Path, ModPath: d.ModulePath})
+ }
+
+ for _, r := range workFile.Replace {
+ f.Replace = append(f.Replace, replaceJSON{r.Old, r.New})
+ }
+ data, err := json.MarshalIndent(&f, "", "\t")
+ if err != nil {
+ base.Fatalf("go: internal error: %v", err)
+ }
+ data = append(data, '\n')
+ os.Stdout.Write(data)
+}
+
+// workfileJSON is the -json output data structure.
+type workfileJSON struct {
+ Go string `json:",omitempty"`
+ Use []useJSON
+ Replace []replaceJSON
+}
+
+type useJSON struct {
+ DiskPath string
+ ModPath string `json:",omitempty"`
+}
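
For reference, a standalone sketch (made-up values; the types below mirror the
unexported workfileJSON, useJSON, and replaceJSON declarations above, not a
public API) of the shape that editPrintJSON writes for -json:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// Local mirrors of the -json output types, for illustration only.
	type module struct{ Path, Version string }

	type use struct {
		DiskPath string
		ModPath  string `json:",omitempty"`
	}

	type replace struct{ Old, New module }

	type goWork struct {
		Go      string `json:",omitempty"`
		Use     []use
		Replace []replace
	}

	func main() {
		w := goWork{
			Go:  "1.21",
			Use: []use{{DiskPath: "./tools", ModPath: "example.com/tools"}},
			Replace: []replace{{
				Old: module{Path: "example.com/foo", Version: "v1.2.3"},
				New: module{Path: "../foo"},
			}},
		}
		out, _ := json.MarshalIndent(&w, "", "\t") // editPrintJSON also indents with tabs
		fmt.Println(string(out))
	}
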
diff --git a/src/cmd/go/internal/workcmd/init.go b/src/cmd/go/internal/workcmd/init.go
new file mode 100644
index 0000000..02240b8
--- /dev/null
+++ b/src/cmd/go/internal/workcmd/init.go
@@ -0,0 +1,66 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go work init
+
+package workcmd
+
+import (
+ "context"
+ "path/filepath"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modload"
+
+ "golang.org/x/mod/modfile"
+)
+
+var cmdInit = &base.Command{
+ UsageLine: "go work init [moddirs]",
+ Short: "initialize workspace file",
+ Long: `Init initializes and writes a new go.work file in the
+current directory, in effect creating a new workspace at the current
+directory.
+
+go work init optionally accepts paths to the workspace modules as
+arguments. If the argument is omitted, an empty workspace with no
+modules will be created.
+
+Each argument path is added to a use directive in the go.work file. The
+current go version will also be listed in the go.work file.
+
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
+`,
+ Run: runInit,
+}
+
+func init() {
+ base.AddChdirFlag(&cmdInit.Flag)
+ base.AddModCommonFlags(&cmdInit.Flag)
+}
+
+func runInit(ctx context.Context, cmd *base.Command, args []string) {
+ modload.InitWorkfile()
+
+ modload.ForceUseModules = true
+
+ gowork := modload.WorkFilePath()
+ if gowork == "" {
+ gowork = filepath.Join(base.Cwd(), "go.work")
+ }
+
+ if _, err := fsys.Stat(gowork); err == nil {
+ base.Fatalf("go: %s already exists", gowork)
+ }
+
+ goV := gover.Local() // Use current Go version by default
+ wf := new(modfile.WorkFile)
+ wf.Syntax = new(modfile.FileSyntax)
+ wf.AddGoStmt(goV)
+ workUse(ctx, gowork, wf, args)
+ modload.WriteWorkFile(gowork, wf)
+}
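
A standalone sketch (hypothetical paths and Go version, using only the
golang.org/x/mod/modfile calls already imported above) of the construction
that runInit and workUse perform before writing go.work to disk:

	package main

	import (
		"fmt"

		"golang.org/x/mod/modfile"
	)

	func main() {
		wf := new(modfile.WorkFile)
		wf.Syntax = new(modfile.FileSyntax)
		// Errors from the Add calls are ignored here for brevity.
		wf.AddGoStmt("1.21")     // runInit passes gover.Local()
		wf.AddUse("./hello", "") // second argument is the module path; workUse passes ""
		wf.AddUse("./tools", "")
		wf.SortBlocks()
		wf.Cleanup()
		fmt.Printf("%s", modfile.Format(wf.Syntax))
	}
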
diff --git a/src/cmd/go/internal/workcmd/sync.go b/src/cmd/go/internal/workcmd/sync.go
new file mode 100644
index 0000000..719cf76
--- /dev/null
+++ b/src/cmd/go/internal/workcmd/sync.go
@@ -0,0 +1,146 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go work sync
+
+package workcmd
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/toolchain"
+ "context"
+
+ "golang.org/x/mod/module"
+)
+
+var cmdSync = &base.Command{
+ UsageLine: "go work sync",
+ Short: "sync workspace build list to modules",
+ Long: `Sync syncs the workspace's build list back to the
+workspace's modules.
+
+The workspace's build list is the set of versions of all the
+(transitive) dependency modules used to do builds in the workspace. go
+work sync generates that build list using the Minimal Version Selection
+algorithm, and then syncs those versions back to each of the modules
+specified in the workspace (with use directives).
+
+The syncing is done by sequentially upgrading each of the dependency
+modules specified in a workspace module to the version in the build list
+if the dependency module's version is not already the same as the build
+list's version. Note that Minimal Version Selection guarantees that the
+build list's version of each module is always the same or higher than
+that in each workspace module.
+
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
+`,
+ Run: runSync,
+}
+
+func init() {
+ base.AddChdirFlag(&cmdSync.Flag)
+ base.AddModCommonFlags(&cmdSync.Flag)
+}
+
+func runSync(ctx context.Context, cmd *base.Command, args []string) {
+ modload.ForceUseModules = true
+ modload.InitWorkfile()
+ if modload.WorkFilePath() == "" {
+ base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
+ }
+
+ _, err := modload.LoadModGraph(ctx, "")
+ if err != nil {
+ toolchain.SwitchOrFatal(ctx, err)
+ }
+ mustSelectFor := map[module.Version][]module.Version{}
+
+ mms := modload.MainModules
+
+ opts := modload.PackageOpts{
+ Tags: imports.AnyTags(),
+ VendorModulesInGOROOTSrc: true,
+ ResolveMissingImports: false,
+ LoadTests: true,
+ AllowErrors: true,
+ SilencePackageErrors: true,
+ SilenceUnmatchedWarnings: true,
+ }
+ for _, m := range mms.Versions() {
+ opts.MainModule = m
+ _, pkgs := modload.LoadPackages(ctx, opts, "all")
+ opts.MainModule = module.Version{} // reset
+
+ var (
+ mustSelect []module.Version
+ inMustSelect = map[module.Version]bool{}
+ )
+ for _, pkg := range pkgs {
+ if r := modload.PackageModule(pkg); r.Version != "" && !inMustSelect[r] {
+ // r has a known version, so force that version.
+ mustSelect = append(mustSelect, r)
+ inMustSelect[r] = true
+ }
+ }
+ gover.ModSort(mustSelect) // ensure determinism
+ mustSelectFor[m] = mustSelect
+ }
+
+ workFilePath := modload.WorkFilePath() // save go.work path because EnterModule clobbers it.
+
+ var goV string
+ for _, m := range mms.Versions() {
+ if mms.ModRoot(m) == "" && m.Path == "command-line-arguments" {
+ // This is not a real module.
+ // TODO(#49228): Remove this special case once the special
+ // command-line-arguments module is gone.
+ continue
+ }
+
+ // Use EnterModule to reset the global state in modload to be in
+ // single-module mode using the modroot of m.
+ modload.EnterModule(ctx, mms.ModRoot(m))
+
+ // Edit the build list in the same way that 'go get' would if we
+ // requested the relevant module versions explicitly.
+ // TODO(#57001): Do we need a toolchain.SwitchOrFatal here,
+ // and do we need to pass a toolchain.Switcher in LoadPackages?
+ // If so, think about saving the WriteGoMods for after the loop,
+ // so we don't write some go.mods with the "before" toolchain
+ // and others with the "after" toolchain. If nothing else, that
+ // discrepancy could show up in auto-recorded toolchain lines.
+ changed, err := modload.EditBuildList(ctx, nil, mustSelectFor[m])
+ if err != nil {
+ continue
+ }
+ if changed {
+ modload.LoadPackages(ctx, modload.PackageOpts{
+ Tags: imports.AnyTags(),
+ Tidy: true,
+ VendorModulesInGOROOTSrc: true,
+ ResolveMissingImports: false,
+ LoadTests: true,
+ AllowErrors: true,
+ SilenceMissingStdImports: true,
+ SilencePackageErrors: true,
+ }, "all")
+ modload.WriteGoMod(ctx, modload.WriteOpts{})
+ }
+ goV = gover.Max(goV, modload.MainModules.GoVersion())
+ }
+
+ wf, err := modload.ReadWorkFile(workFilePath)
+ if err != nil {
+ base.Fatal(err)
+ }
+ modload.UpdateWorkGoVersion(wf, goV)
+ modload.UpdateWorkFile(wf)
+ if err := modload.WriteWorkFile(workFilePath, wf); err != nil {
+ base.Fatal(err)
+ }
+}
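
As a concrete illustration of the doc comment above (module names and versions
are hypothetical): if the workspace uses modules a and b, where a's go.mod
requires example.com/x v1.2.0 and b's requires example.com/x v1.3.0, Minimal
Version Selection puts v1.3.0 in the workspace build list, and 'go work sync'
then upgrades a's requirement so each module records the version actually used
for workspace builds:

	a/go.mod before sync:  require example.com/x v1.2.0
	a/go.mod after sync:   require example.com/x v1.3.0
	b/go.mod (unchanged):  require example.com/x v1.3.0
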
diff --git a/src/cmd/go/internal/workcmd/use.go b/src/cmd/go/internal/workcmd/use.go
new file mode 100644
index 0000000..5547711
--- /dev/null
+++ b/src/cmd/go/internal/workcmd/use.go
@@ -0,0 +1,254 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go work use
+
+package workcmd
+
+import (
+ "context"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/gover"
+ "cmd/go/internal/modload"
+ "cmd/go/internal/str"
+ "cmd/go/internal/toolchain"
+
+ "golang.org/x/mod/modfile"
+)
+
+var cmdUse = &base.Command{
+ UsageLine: "go work use [-r] [moddirs]",
+ Short: "add modules to workspace file",
+ Long: `Use provides a command-line interface for adding
+directories, optionally recursively, to a go.work file.
+
+A use directive will be added to the go.work file for each argument
+directory listed on the command line go.work file, if it exists,
+or removed from the go.work file if it does not exist.
+Use fails if any remaining use directives refer to modules that
+do not exist.
+
+Use updates the go line in go.work to specify a version at least as
+new as all the go lines in the used modules, both preexisting ones
+and newly added ones. With no arguments, this update is the only
+thing that go work use does.
+
+The -r flag searches recursively for modules in the argument
+directories, and the use command operates as if each of the directories
+were specified as arguments: namely, use directives will be added for
+directories that exist, and removed for directories that do not exist.
+
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
+`,
+}
+
+var useR = cmdUse.Flag.Bool("r", false, "")
+
+func init() {
+ cmdUse.Run = runUse // break init cycle
+
+ base.AddChdirFlag(&cmdUse.Flag)
+ base.AddModCommonFlags(&cmdUse.Flag)
+}
+
+func runUse(ctx context.Context, cmd *base.Command, args []string) {
+ modload.ForceUseModules = true
+ modload.InitWorkfile()
+ gowork := modload.WorkFilePath()
+ if gowork == "" {
+ base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
+ }
+ wf, err := modload.ReadWorkFile(gowork)
+ if err != nil {
+ base.Fatal(err)
+ }
+ workUse(ctx, gowork, wf, args)
+ modload.WriteWorkFile(gowork, wf)
+}
+
+func workUse(ctx context.Context, gowork string, wf *modfile.WorkFile, args []string) {
+ workDir := filepath.Dir(gowork) // absolute, since gowork itself is absolute
+
+ haveDirs := make(map[string][]string) // absolute → original(s)
+ for _, use := range wf.Use {
+ var abs string
+ if filepath.IsAbs(use.Path) {
+ abs = filepath.Clean(use.Path)
+ } else {
+ abs = filepath.Join(workDir, use.Path)
+ }
+ haveDirs[abs] = append(haveDirs[abs], use.Path)
+ }
+
+ // keepDirs maps each absolute path to keep to the literal string to use for
+ // that path (either an absolute or a relative path), or the empty string if
+ // all entries for the absolute path should be removed.
+ keepDirs := make(map[string]string)
+
+ var sw toolchain.Switcher
+
+ // lookDir updates the entry in keepDirs for the directory dir,
+ // which is either absolute or relative to the current working directory
+ // (not necessarily the directory containing the workfile).
+ lookDir := func(dir string) {
+ absDir, dir := pathRel(workDir, dir)
+
+ file := base.ShortPath(filepath.Join(absDir, "go.mod"))
+ fi, err := fsys.Stat(file)
+ if err != nil {
+ if os.IsNotExist(err) {
+ keepDirs[absDir] = ""
+ } else {
+ sw.Error(err)
+ }
+ return
+ }
+
+ if !fi.Mode().IsRegular() {
+ sw.Error(fmt.Errorf("%v is not a regular file", file))
+ return
+ }
+
+ if dup := keepDirs[absDir]; dup != "" && dup != dir {
+ base.Errorf(`go: already added "%s" as "%s"`, dir, dup)
+ }
+ keepDirs[absDir] = dir
+ }
+
+ for _, useDir := range args {
+ absArg, _ := pathRel(workDir, useDir)
+
+ info, err := fsys.Stat(base.ShortPath(absArg))
+ if err != nil {
+ // Errors raised from os.Stat are formatted to be more user-friendly.
+ if os.IsNotExist(err) {
+ err = fmt.Errorf("directory %v does not exist", base.ShortPath(absArg))
+ }
+ sw.Error(err)
+ continue
+ } else if !info.IsDir() {
+ sw.Error(fmt.Errorf("%s is not a directory", base.ShortPath(absArg)))
+ continue
+ }
+
+ if !*useR {
+ lookDir(useDir)
+ continue
+ }
+
+ // Add or remove entries for any subdirectories that still exist.
+ // If the root itself is a symlink to a directory,
+ // we want to follow it (see https://go.dev/issue/50807).
+ // Add a trailing separator to force that to happen.
+ fsys.Walk(str.WithFilePathSeparator(useDir), func(path string, info fs.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if !info.IsDir() {
+ if info.Mode()&fs.ModeSymlink != 0 {
+ if target, err := fsys.Stat(path); err == nil && target.IsDir() {
+ fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", base.ShortPath(path))
+ }
+ }
+ return nil
+ }
+ lookDir(path)
+ return nil
+ })
+
+ // Remove entries for subdirectories that no longer exist.
+ // Because they don't exist, they will be skipped by Walk.
+ for absDir := range haveDirs {
+ if str.HasFilePathPrefix(absDir, absArg) {
+ if _, ok := keepDirs[absDir]; !ok {
+ keepDirs[absDir] = "" // Mark for deletion.
+ }
+ }
+ }
+ }
+
+ // Update the work file.
+ for absDir, keepDir := range keepDirs {
+ nKept := 0
+ for _, dir := range haveDirs[absDir] {
+ if dir == keepDir { // (note that dir is always non-empty)
+ nKept++
+ } else {
+ wf.DropUse(dir)
+ }
+ }
+ if keepDir != "" && nKept != 1 {
+ // If we kept more than one copy, delete them all.
+ // We'll recreate a unique copy with AddUse.
+ if nKept > 1 {
+ wf.DropUse(keepDir)
+ }
+ wf.AddUse(keepDir, "")
+ }
+ }
+
+ // Read the Go versions from all the use entries, old and new (but not dropped).
+ goV := gover.FromGoWork(wf)
+ for _, use := range wf.Use {
+ if use.Path == "" { // deleted
+ continue
+ }
+ var abs string
+ if filepath.IsAbs(use.Path) {
+ abs = filepath.Clean(use.Path)
+ } else {
+ abs = filepath.Join(workDir, use.Path)
+ }
+ _, mf, err := modload.ReadModFile(base.ShortPath(filepath.Join(abs, "go.mod")), nil)
+ if err != nil {
+ sw.Error(err)
+ continue
+ }
+ goV = gover.Max(goV, gover.FromGoMod(mf))
+ }
+ sw.Switch(ctx)
+ base.ExitIfErrors()
+
+ modload.UpdateWorkGoVersion(wf, goV)
+ modload.UpdateWorkFile(wf)
+}
+
+// pathRel returns the absolute and canonical forms of dir for use in a
+// go.work file located in directory workDir.
+//
+// If dir is relative, it is interpreted relative to base.Cwd()
+// and its canonical form is relative to workDir if possible.
+// If dir is absolute or cannot be made relative to workDir,
+// its canonical form is absolute.
+//
+// Canonical absolute paths are clean.
+// Canonical relative paths are clean and slash-separated.
+func pathRel(workDir, dir string) (abs, canonical string) {
+ if filepath.IsAbs(dir) {
+ abs = filepath.Clean(dir)
+ return abs, abs
+ }
+
+ abs = filepath.Join(base.Cwd(), dir)
+ rel, err := filepath.Rel(workDir, abs)
+ if err != nil {
+ // The path can't be made relative to the go.work file,
+ // so it must be kept absolute instead.
+ return abs, abs
+ }
+
+ // Normalize relative paths to use slashes, so that checked-in go.work
+ // files with relative paths within the repo are platform-independent.
+ return abs, modload.ToDirectoryPath(rel)
+}
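
To make pathRel concrete, a few hypothetical calls, assuming Unix-style paths,
a go.work file in workDir = /home/user/ws, and a current directory of
/home/user/ws/tools:

	pathRel(workDir, "../app")      // abs = /home/user/ws/app, canonical = "./app"
	pathRel(workDir, "/opt/mod")    // abs = /opt/mod,          canonical = "/opt/mod"
	pathRel(workDir, "../../other") // abs = /home/user/other,  canonical = "../other"
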
diff --git a/src/cmd/go/internal/workcmd/work.go b/src/cmd/go/internal/workcmd/work.go
new file mode 100644
index 0000000..c99cc2a
--- /dev/null
+++ b/src/cmd/go/internal/workcmd/work.go
@@ -0,0 +1,78 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package workcmd implements the “go work” command.
+package workcmd
+
+import (
+ "cmd/go/internal/base"
+)
+
+var CmdWork = &base.Command{
+ UsageLine: "go work",
+ Short: "workspace maintenance",
+ Long: `Work provides access to operations on workspaces.
+
+Note that support for workspaces is built into many other commands, not
+just 'go work'.
+
+See 'go help modules' for information about Go's module system of which
+workspaces are a part.
+
+See https://go.dev/ref/mod#workspaces for an in-depth reference on
+workspaces.
+
+See https://go.dev/doc/tutorial/workspaces for an introductory
+tutorial on workspaces.
+
+A workspace is specified by a go.work file that specifies a set of
+module directories with the "use" directive. These modules are used as
+root modules by the go command for builds and related operations. A
+workspace that does not specify modules to be used cannot be used to do
+builds from local modules.
+
+go.work files are line-oriented. Each line holds a single directive,
+made up of a keyword followed by arguments. For example:
+
+ go 1.18
+
+ use ../foo/bar
+ use ./baz
+
+ replace example.com/foo v1.2.3 => example.com/bar v1.4.5
+
+The leading keyword can be factored out of adjacent lines to create a block,
+like in Go imports.
+
+ use (
+ ../foo/bar
+ ./baz
+ )
+
+The use directive specifies a module to be included in the workspace's
+set of main modules. The argument to the use directive is the directory
+containing the module's go.mod file.
+
+The go directive specifies the version of Go the file was written at. It
+is possible there may be future changes in the semantics of workspaces
+that could be controlled by this version, but for now the version
+specified has no effect.
+
+The replace directive has the same syntax as the replace directive in a
+go.mod file and takes precedence over replaces in go.mod files. It is
+primarily intended to override conflicting replaces in different workspace
+modules.
+
+To determine whether the go command is operating in workspace mode, use
+the "go env GOWORK" command. This will specify the workspace file being
+used.
+`,
+
+ Commands: []*base.Command{
+ cmdEdit,
+ cmdInit,
+ cmdSync,
+ cmdUse,
+ },
+}