summaryrefslogtreecommitdiffstats
path: root/src/cmd/go/internal/modindex
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 19:19:13 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 19:19:13 +0000
commitccd992355df7192993c666236047820244914598 (patch)
treef00fea65147227b7743083c6148396f74cd66935 /src/cmd/go/internal/modindex
parentInitial commit. (diff)
downloadgolang-1.21-ccd992355df7192993c666236047820244914598.tar.xz
golang-1.21-ccd992355df7192993c666236047820244914598.zip
Adding upstream version 1.21.8.upstream/1.21.8
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/cmd/go/internal/modindex')
-rw-r--r--src/cmd/go/internal/modindex/build.go950
-rw-r--r--src/cmd/go/internal/modindex/build_read.go594
-rw-r--r--src/cmd/go/internal/modindex/index_format.txt63
-rw-r--r--src/cmd/go/internal/modindex/index_test.go104
-rw-r--r--src/cmd/go/internal/modindex/read.go1037
-rw-r--r--src/cmd/go/internal/modindex/scan.go290
-rw-r--r--src/cmd/go/internal/modindex/syslist.go78
-rw-r--r--src/cmd/go/internal/modindex/syslist_test.go65
-rw-r--r--src/cmd/go/internal/modindex/testdata/ignore_non_source/a.syso1
-rw-r--r--src/cmd/go/internal/modindex/testdata/ignore_non_source/b.go0
-rw-r--r--src/cmd/go/internal/modindex/testdata/ignore_non_source/bar.json0
-rw-r--r--src/cmd/go/internal/modindex/testdata/ignore_non_source/baz.log0
-rw-r--r--src/cmd/go/internal/modindex/testdata/ignore_non_source/c.c0
-rw-r--r--src/cmd/go/internal/modindex/write.go164
14 files changed, 3346 insertions, 0 deletions
diff --git a/src/cmd/go/internal/modindex/build.go b/src/cmd/go/internal/modindex/build.go
new file mode 100644
index 0000000..b57f2f6
--- /dev/null
+++ b/src/cmd/go/internal/modindex/build.go
@@ -0,0 +1,950 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
// This file is a lightly modified copy of go/build/build.go with unused
// parts removed.
+
+package modindex
+
+import (
+ "bytes"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/str"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/build/constraint"
+ "go/token"
+ "io"
+ "io/fs"
+ "path/filepath"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
// A Context specifies the supporting context for a build.
// It is a trimmed copy of go/build.Context (see the file comment above),
// keeping only what the module index needs.
type Context struct {
	GOARCH string // target architecture
	GOOS   string // target operating system
	GOROOT string // Go root
	GOPATH string // Go paths

	// Dir is the caller's working directory, or the empty string to use
	// the current directory of the running process. In module mode, this is used
	// to locate the main module.
	//
	// If Dir is non-empty, directories passed to Import and ImportDir must
	// be absolute.
	Dir string

	CgoEnabled  bool   // whether cgo files are included
	UseAllFiles bool   // use files regardless of //go:build lines, file names
	Compiler    string // compiler to assume when computing target paths

	// The build, tool, and release tags specify build constraints
	// that should be considered satisfied when processing +build lines.
	// Clients creating a new context may customize BuildTags, which
	// defaults to empty, but it is usually an error to customize ToolTags or ReleaseTags.
	// ToolTags defaults to build tags appropriate to the current Go toolchain configuration.
	// ReleaseTags defaults to the list of Go releases the current release is compatible with.
	// BuildTags is not set for the Default build Context.
	// In addition to the BuildTags, ToolTags, and ReleaseTags, build constraints
	// consider the values of GOARCH and GOOS as satisfied tags.
	// The last element in ReleaseTags is assumed to be the current release.
	BuildTags   []string
	ToolTags    []string
	ReleaseTags []string

	// The install suffix specifies a suffix to use in the name of the installation
	// directory. By default it is empty, but custom builds that need to keep
	// their outputs separate can set InstallSuffix to do so. For example, when
	// using the race detector, the go command uses InstallSuffix = "race", so
	// that on a Linux/386 system, packages are written to a directory named
	// "linux_386_race" instead of the usual "linux_386".
	InstallSuffix string

	// By default, Import uses the operating system's file system calls
	// to read directories and files. To read from other sources,
	// callers can set the following functions. They all have default
	// behaviors that use the local file system, so clients need only set
	// the functions whose behaviors they wish to change.

	// JoinPath joins the sequence of path fragments into a single path.
	// If JoinPath is nil, Import uses filepath.Join.
	JoinPath func(elem ...string) string

	// SplitPathList splits the path list into a slice of individual paths.
	// If SplitPathList is nil, Import uses filepath.SplitList.
	SplitPathList func(list string) []string

	// IsAbsPath reports whether path is an absolute path.
	// If IsAbsPath is nil, Import uses filepath.IsAbs.
	IsAbsPath func(path string) bool

	// IsDir reports whether the path names a directory.
	// If IsDir is nil, Import calls os.Stat and uses the result's IsDir method.
	IsDir func(path string) bool

	// HasSubdir reports whether dir is lexically a subdirectory of
	// root, perhaps multiple levels below. It does not try to check
	// whether dir exists.
	// If so, HasSubdir sets rel to a slash-separated path that
	// can be joined to root to produce a path equivalent to dir.
	// If HasSubdir is nil, Import uses an implementation built on
	// filepath.EvalSymlinks.
	HasSubdir func(root, dir string) (rel string, ok bool)

	// ReadDir returns a slice of fs.FileInfo, sorted by Name,
	// describing the content of the named directory.
	// If ReadDir is nil, Import uses ioutil.ReadDir.
	ReadDir func(dir string) ([]fs.FileInfo, error)

	// OpenFile opens a file (not a directory) for reading.
	// If OpenFile is nil, Import uses os.Open.
	OpenFile func(path string) (io.ReadCloser, error)
}
+
+// joinPath calls ctxt.JoinPath (if not nil) or else filepath.Join.
+func (ctxt *Context) joinPath(elem ...string) string {
+ if f := ctxt.JoinPath; f != nil {
+ return f(elem...)
+ }
+ return filepath.Join(elem...)
+}
+
+// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList.
+func (ctxt *Context) splitPathList(s string) []string {
+ if f := ctxt.SplitPathList; f != nil {
+ return f(s)
+ }
+ return filepath.SplitList(s)
+}
+
+// isAbsPath calls ctxt.IsAbsPath (if not nil) or else filepath.IsAbs.
+func (ctxt *Context) isAbsPath(path string) bool {
+ if f := ctxt.IsAbsPath; f != nil {
+ return f(path)
+ }
+ return filepath.IsAbs(path)
+}
+
// isDir reports whether path names an existing directory.
// Unlike go/build's version of this helper, it is not a Context method and
// never consults a ctxt.IsDir hook; it always stats through cmd/go's fsys
// package.
func isDir(path string) bool {
	fi, err := fsys.Stat(path)
	return err == nil && fi.IsDir()
}
+
+// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
+// the local file system to answer the question.
+func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
+ if f := ctxt.HasSubdir; f != nil {
+ return f(root, dir)
+ }
+
+ // Try using paths we received.
+ if rel, ok = hasSubdir(root, dir); ok {
+ return
+ }
+
+ // Try expanding symlinks and comparing
+ // expanded against unexpanded and
+ // expanded against expanded.
+ rootSym, _ := filepath.EvalSymlinks(root)
+ dirSym, _ := filepath.EvalSymlinks(dir)
+
+ if rel, ok = hasSubdir(rootSym, dir); ok {
+ return
+ }
+ if rel, ok = hasSubdir(root, dirSym); ok {
+ return
+ }
+ return hasSubdir(rootSym, dirSym)
+}
+
+// hasSubdir reports if dir is within root by performing lexical analysis only.
+func hasSubdir(root, dir string) (rel string, ok bool) {
+ root = str.WithFilePathSeparator(filepath.Clean(root))
+ dir = filepath.Clean(dir)
+ if !strings.HasPrefix(dir, root) {
+ return "", false
+ }
+ return filepath.ToSlash(dir[len(root):]), true
+}
+
+// gopath returns the list of Go path directories.
+func (ctxt *Context) gopath() []string {
+ var all []string
+ for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
+ if p == "" || p == ctxt.GOROOT {
+ // Empty paths are uninteresting.
+ // If the path is the GOROOT, ignore it.
+ // People sometimes set GOPATH=$GOROOT.
+ // Do not get confused by this common mistake.
+ continue
+ }
+ if strings.HasPrefix(p, "~") {
+ // Path segments starting with ~ on Unix are almost always
+ // users who have incorrectly quoted ~ while setting GOPATH,
+ // preventing it from expanding to $HOME.
+ // The situation is made more confusing by the fact that
+ // bash allows quoted ~ in $PATH (most shells do not).
+ // Do not get confused by this, and do not try to use the path.
+ // It does not exist, and printing errors about it confuses
+ // those users even more, because they think "sure ~ exists!".
+ // The go command diagnoses this situation and prints a
+ // useful error.
+ // On Windows, ~ is used in short names, such as c:\progra~1
+ // for c:\program files.
+ continue
+ }
+ all = append(all, p)
+ }
+ return all
+}
+
// Defaults for Context.ToolTags and Context.ReleaseTags.
// NOTE(review): never assigned in this file; presumably populated elsewhere
// in the package — confirm before relying on them.
var defaultToolTags, defaultReleaseTags []string
+
// NoGoError is the error used by Import to describe a directory
// containing no buildable Go source files. (It may still contain
// test files, files hidden by build tags, and so on.)
type NoGoError struct {
	Dir string
}

// Error implements the error interface.
func (e *NoGoError) Error() string {
	const msg = "no buildable Go source files in "
	return msg + e.Dir
}
+
// MultiplePackageError describes a directory containing
// multiple buildable Go source files for multiple packages.
type MultiplePackageError struct {
	Dir      string   // directory containing files
	Packages []string // package names found
	Files    []string // corresponding files: Files[i] declares package Packages[i]
}

// Error implements the error interface. Only the first two package/file
// pairs appear in the message, for compatibility with go/build's wording.
func (e *MultiplePackageError) Error() string {
	return fmt.Sprintf("found packages %s (%s) and %s (%s) in %s",
		e.Packages[0], e.Files[0], e.Packages[1], e.Files[1], e.Dir)
}
+
// nameExt returns the file extension of name including the leading dot,
// or the empty string when name contains no dot.
func nameExt(name string) string {
	if dot := strings.LastIndexByte(name, '.'); dot >= 0 {
		return name[dot:]
	}
	return ""
}
+
+func fileListForExt(p *build.Package, ext string) *[]string {
+ switch ext {
+ case ".c":
+ return &p.CFiles
+ case ".cc", ".cpp", ".cxx":
+ return &p.CXXFiles
+ case ".m":
+ return &p.MFiles
+ case ".h", ".hh", ".hpp", ".hxx":
+ return &p.HFiles
+ case ".f", ".F", ".for", ".f90":
+ return &p.FFiles
+ case ".s", ".S", ".sx":
+ return &p.SFiles
+ case ".swig":
+ return &p.SwigFiles
+ case ".swigcxx":
+ return &p.SwigCXXFiles
+ case ".syso":
+ return &p.SysoFiles
+ }
+ return nil
+}
+
// errNoModules mirrors go/build's sentinel for "not using modules".
// NOTE(review): not referenced anywhere in this file; kept from the copy.
var errNoModules = errors.New("not using modules")
+
// findImportComment scans the beginning of a Go source file for an import
// comment — a // or /* */ comment containing `import "path"` on the same
// line as the package clause. It returns the comment's argument text and
// the 1-based line it appears on, or ("", 0) if none is found.
func findImportComment(data []byte) (s string, line int) {
	// expect keyword package
	word, data := parseWord(data)
	if string(word) != "package" {
		return "", 0
	}

	// expect package name
	_, data = parseWord(data)

	// now ready for import comment, a // or /* */ comment
	// beginning and ending on the current line.
	for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') {
		data = data[1:]
	}

	var comment []byte
	switch {
	case bytes.HasPrefix(data, slashSlash):
		comment, _, _ = bytes.Cut(data[2:], newline)
	case bytes.HasPrefix(data, slashStar):
		var ok bool
		comment, _, ok = bytes.Cut(data[2:], starSlash)
		if !ok {
			// malformed comment
			return "", 0
		}
		if bytes.Contains(comment, newline) {
			return "", 0
		}
	}
	comment = bytes.TrimSpace(comment)

	// split comment into `import`, `"pkg"`
	word, arg := parseWord(comment)
	if string(word) != "import" {
		return "", 0
	}

	// arg aliases data's backing array, so comparing spare capacities
	// recovers arg's byte offset within data; counting newlines up to that
	// offset gives the line number of the import comment.
	line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline)
	return strings.TrimSpace(string(arg)), line
}
+
// Comment delimiter tokens, kept as []byte so the scanners below avoid
// per-call string conversions.
var (
	slashSlash = []byte("//")
	slashStar  = []byte("/*")
	starSlash  = []byte("*/")
	newline    = []byte("\n")
)
+
+// skipSpaceOrComment returns data with any leading spaces or comments removed.
+func skipSpaceOrComment(data []byte) []byte {
+ for len(data) > 0 {
+ switch data[0] {
+ case ' ', '\t', '\r', '\n':
+ data = data[1:]
+ continue
+ case '/':
+ if bytes.HasPrefix(data, slashSlash) {
+ i := bytes.Index(data, newline)
+ if i < 0 {
+ return nil
+ }
+ data = data[i+1:]
+ continue
+ }
+ if bytes.HasPrefix(data, slashStar) {
+ data = data[2:]
+ i := bytes.Index(data, starSlash)
+ if i < 0 {
+ return nil
+ }
+ data = data[i+2:]
+ continue
+ }
+ }
+ break
+ }
+ return data
+}
+
+// parseWord skips any leading spaces or comments in data
+// and then parses the beginning of data as an identifier or keyword,
+// returning that word and what remains after the word.
+func parseWord(data []byte) (word, rest []byte) {
+ data = skipSpaceOrComment(data)
+
+ // Parse past leading word characters.
+ rest = data
+ for {
+ r, size := utf8.DecodeRune(rest)
+ if unicode.IsLetter(r) || '0' <= r && r <= '9' || r == '_' {
+ rest = rest[size:]
+ continue
+ }
+ break
+ }
+
+ word = data[:len(data)-len(rest)]
+ if len(word) == 0 {
+ return nil, nil
+ }
+
+ return word, rest
+}
+
// dummyPkg gives callers of fileListForExt a throwaway *build.Package when
// they only want to know whether an extension is recognized (see getFileInfo).
var dummyPkg build.Package
+
// fileInfo records information learned about a file included in a build.
type fileInfo struct {
	name       string // full name including dir
	header     []byte // file header through the end of imports, as read by readGoInfo/readComments
	fset       *token.FileSet
	parsed     *ast.File
	parseErr   error
	imports    []fileImport
	embeds     []fileEmbed
	directives []build.Directive

	// Additional fields added to go/build's fileinfo for the purposes of the modindex package.
	binaryOnly           bool     // file carries //go:binary-only-package (cleared again for _test.go files)
	goBuildConstraint    string   // text of the file's //go:build line, if any (see getConstraints)
	plusBuildConstraints []string // text of the file's // +build lines, if any
}
+
// fileImport records one import declaration found in a file.
type fileImport struct {
	path string            // import path
	pos  token.Pos         // source position of the import
	doc  *ast.CommentGroup // doc comment attached to the import, if any
}

// fileEmbed records one //go:embed pattern and the position it appeared at.
type fileEmbed struct {
	pattern string
	pos     token.Position
}

// errNonSource is returned by getFileInfo for files that are neither source
// nor object files and should not even be added to IgnoredFiles.
var errNonSource = errors.New("non source file")
+
// getFileInfo extracts the information needed from each go file for the module
// index.
//
// If name denotes a Go program, getFileInfo reads until the end of the
// imports and returns that section of the file in the fileInfo's header field,
// even though it only considers text until the first non-comment
// for +build lines.
//
// getFileInfo returns (nil, nil) for files whose names mark them as always
// ignored (leading _ or .), and errNonSource if the file is not a source or
// object file and shouldn't even be added to IgnoredFiles.
func getFileInfo(dir, name string, fset *token.FileSet) (*fileInfo, error) {
	if strings.HasPrefix(name, "_") ||
		strings.HasPrefix(name, ".") {
		return nil, nil
	}

	// A dotless name gets an empty extension (not errNonSource by itself;
	// the fileListForExt check below decides).
	i := strings.LastIndex(name, ".")
	if i < 0 {
		i = len(name)
	}
	ext := name[i:]

	if ext != ".go" && fileListForExt(&dummyPkg, ext) == nil {
		// skip: not a source or object extension we recognize
		return nil, errNonSource
	}

	info := &fileInfo{name: filepath.Join(dir, name), fset: fset}
	if ext == ".syso" {
		// binary, no reading
		return info, nil
	}

	f, err := fsys.Open(info.name)
	if err != nil {
		return nil, err
	}

	// TODO(matloob) should we decide whether to ignore binary only here or earlier
	// when we create the index file?
	var ignoreBinaryOnly bool
	if strings.HasSuffix(name, ".go") {
		err = readGoInfo(f, info)
		if strings.HasSuffix(name, "_test.go") {
			ignoreBinaryOnly = true // ignore //go:binary-only-package comments in _test.go files
		}
	} else {
		// Non-Go source (e.g. .c, .s): only the leading comment block matters.
		info.header, err = readComments(f)
	}
	f.Close()
	if err != nil {
		return nil, fmt.Errorf("read %s: %v", info.name, err)
	}

	// Look for +build comments to accept or reject the file.
	info.goBuildConstraint, info.plusBuildConstraints, info.binaryOnly, err = getConstraints(info.header)
	if err != nil {
		return nil, fmt.Errorf("%s: %v", name, err)
	}

	if ignoreBinaryOnly && info.binaryOnly {
		info.binaryOnly = false // override info.binaryOnly
	}

	return info, nil
}
+
+func cleanDecls(m map[string][]token.Position) ([]string, map[string][]token.Position) {
+ all := make([]string, 0, len(m))
+ for path := range m {
+ all = append(all, path)
+ }
+ sort.Strings(all)
+ return all, m
+}
+
// Tokens used by the file-header scanners below, plus the error reported
// when a file contains more than one //go:build line.
var (
	bSlashSlash = []byte(slashSlash)
	bStarSlash  = []byte(starSlash)
	bSlashStar  = []byte(slashStar)
	bPlusBuild  = []byte("+build")

	goBuildComment = []byte("//go:build")

	errMultipleGoBuild = errors.New("multiple //go:build comments")
)
+
+func isGoBuildComment(line []byte) bool {
+ if !bytes.HasPrefix(line, goBuildComment) {
+ return false
+ }
+ line = bytes.TrimSpace(line)
+ rest := line[len(goBuildComment):]
+ return len(rest) == 0 || len(bytes.TrimSpace(rest)) < len(rest)
+}
+
// Special comment denoting a binary-only package.
// See https://golang.org/design/2775-binary-only-packages
// for more about the design of binary-only packages.
// parseFileHeader sets its sawBinaryOnly result when this comment appears
// outside a /* */ block in the file header.
var binaryOnlyComment = []byte("//go:binary-only-package")
+
+func getConstraints(content []byte) (goBuild string, plusBuild []string, binaryOnly bool, err error) {
+ // Identify leading run of // comments and blank lines,
+ // which must be followed by a blank line.
+ // Also identify any //go:build comments.
+ content, goBuildBytes, sawBinaryOnly, err := parseFileHeader(content)
+ if err != nil {
+ return "", nil, false, err
+ }
+
+ // If //go:build line is present, it controls, so no need to look for +build .
+ // Otherwise, get plusBuild constraints.
+ if goBuildBytes == nil {
+ p := content
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if !bytes.HasPrefix(line, bSlashSlash) || !bytes.Contains(line, bPlusBuild) {
+ continue
+ }
+ text := string(line)
+ if !constraint.IsPlusBuild(text) {
+ continue
+ }
+ plusBuild = append(plusBuild, text)
+ }
+ }
+
+ return string(goBuildBytes), plusBuild, sawBinaryOnly, nil
+}
+
// parseFileHeader scans the leading run of // comments and blank lines of a
// Go source file. It returns that header (trimmed at the last blank line
// before the first non-blank, non-// line — the region where // +build lines
// may legally appear), the single //go:build line if one is present
// (errMultipleGoBuild if there are several), and whether the
// //go:binary-only-package comment was seen outside a /* */ block.
func parseFileHeader(content []byte) (trimmed, goBuild []byte, sawBinaryOnly bool, err error) {
	end := 0
	p := content
	ended := false       // found non-blank, non-// line, so stopped accepting // +build lines
	inSlashStar := false // in /* */ comment

Lines:
	for len(p) > 0 {
		// Take the next line (without its newline); the final line may lack one.
		line := p
		if i := bytes.IndexByte(line, '\n'); i >= 0 {
			line, p = line[:i], p[i+1:]
		} else {
			p = p[len(p):]
		}
		line = bytes.TrimSpace(line)
		if len(line) == 0 && !ended { // Blank line
			// Remember position of most recent blank line.
			// When we find the first non-blank, non-// line,
			// this "end" position marks the latest file position
			// where a // +build line can appear.
			// (It must appear _before_ a blank line before the non-blank, non-// line.
			// Yes, that's confusing, which is part of why we moved to //go:build lines.)
			// Note that ended==false here means that inSlashStar==false,
			// since seeing a /* would have set ended==true.
			end = len(content) - len(p)
			continue Lines
		}
		if !bytes.HasPrefix(line, slashSlash) { // Not comment line
			ended = true
		}

		if !inSlashStar && isGoBuildComment(line) {
			if goBuild != nil {
				return nil, nil, false, errMultipleGoBuild
			}
			goBuild = line
		}
		if !inSlashStar && bytes.Equal(line, binaryOnlyComment) {
			sawBinaryOnly = true
		}

	Comments:
		for len(line) > 0 {
			if inSlashStar {
				if i := bytes.Index(line, starSlash); i >= 0 {
					inSlashStar = false
					line = bytes.TrimSpace(line[i+len(starSlash):])
					continue Comments
				}
				continue Lines
			}
			if bytes.HasPrefix(line, bSlashSlash) {
				continue Lines
			}
			if bytes.HasPrefix(line, bSlashStar) {
				inSlashStar = true
				line = bytes.TrimSpace(line[len(bSlashStar):])
				continue Comments
			}
			// Found non-comment text.
			break Lines
		}
	}

	return content[:end], goBuild, sawBinaryOnly, nil
}
+
// saveCgo saves the information from the #cgo lines in the import "C" comment.
// These lines set CFLAGS, CPPFLAGS, CXXFLAGS and LDFLAGS and pkg-config directives
// that affect the way cgo's C code is built.
//
// filename is used only in error messages; text is the full import "C" doc
// comment. Recognized flags are appended to the corresponding di.Cgo* fields;
// a malformed line produces an error.
func (ctxt *Context) saveCgo(filename string, di *build.Package, text string) error {
	for _, line := range strings.Split(text, "\n") {
		orig := line

		// Line is
		//	#cgo [GOOS/GOARCH...] LDFLAGS: stuff
		//
		line = strings.TrimSpace(line)
		if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
			continue
		}

		// Split at colon.
		line, argstr, ok := strings.Cut(strings.TrimSpace(line[4:]), ":")
		if !ok {
			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
		}

		// Parse GOOS/GOARCH stuff.
		f := strings.Fields(line)
		if len(f) < 1 {
			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
		}

		// A line with build conditions applies only if at least one matches.
		cond, verb := f[:len(f)-1], f[len(f)-1]
		if len(cond) > 0 {
			ok := false
			for _, c := range cond {
				if ctxt.matchAuto(c, nil) {
					ok = true
					break
				}
			}
			if !ok {
				continue
			}
		}

		args, err := splitQuoted(argstr)
		if err != nil {
			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
		}
		// Expand ${SRCDIR} and reject shell-unsafe arguments.
		for i, arg := range args {
			if arg, ok = expandSrcDir(arg, di.Dir); !ok {
				return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg)
			}
			args[i] = arg
		}

		switch verb {
		case "CFLAGS", "CPPFLAGS", "CXXFLAGS", "FFLAGS", "LDFLAGS":
			// Change relative paths to absolute.
			ctxt.makePathsAbsolute(args, di.Dir)
		}

		switch verb {
		case "CFLAGS":
			di.CgoCFLAGS = append(di.CgoCFLAGS, args...)
		case "CPPFLAGS":
			di.CgoCPPFLAGS = append(di.CgoCPPFLAGS, args...)
		case "CXXFLAGS":
			di.CgoCXXFLAGS = append(di.CgoCXXFLAGS, args...)
		case "FFLAGS":
			di.CgoFFLAGS = append(di.CgoFFLAGS, args...)
		case "LDFLAGS":
			di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...)
		case "pkg-config":
			di.CgoPkgConfig = append(di.CgoPkgConfig, args...)
		default:
			return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig)
		}
	}
	return nil
}
+
+// expandSrcDir expands any occurrence of ${SRCDIR}, making sure
+// the result is safe for the shell.
+func expandSrcDir(str string, srcdir string) (string, bool) {
+ // "\" delimited paths cause safeCgoName to fail
+ // so convert native paths with a different delimiter
+ // to "/" before starting (eg: on windows).
+ srcdir = filepath.ToSlash(srcdir)
+
+ chunks := strings.Split(str, "${SRCDIR}")
+ if len(chunks) < 2 {
+ return str, safeCgoName(str)
+ }
+ ok := true
+ for _, chunk := range chunks {
+ ok = ok && (chunk == "" || safeCgoName(chunk))
+ }
+ ok = ok && (srcdir == "" || safeCgoName(srcdir))
+ res := strings.Join(chunks, srcdir)
+ return res, ok && res != ""
+}
+
+// makePathsAbsolute looks for compiler options that take paths and
+// makes them absolute. We do this because through the 1.8 release we
+// ran the compiler in the package directory, so any relative -I or -L
+// options would be relative to that directory. In 1.9 we changed to
+// running the compiler in the build directory, to get consistent
+// build results (issue #19964). To keep builds working, we change any
+// relative -I or -L options to be absolute.
+//
+// Using filepath.IsAbs and filepath.Join here means the results will be
+// different on different systems, but that's OK: -I and -L options are
+// inherently system-dependent.
+func (ctxt *Context) makePathsAbsolute(args []string, srcDir string) {
+ nextPath := false
+ for i, arg := range args {
+ if nextPath {
+ if !filepath.IsAbs(arg) {
+ args[i] = filepath.Join(srcDir, arg)
+ }
+ nextPath = false
+ } else if strings.HasPrefix(arg, "-I") || strings.HasPrefix(arg, "-L") {
+ if len(arg) == 2 {
+ nextPath = true
+ } else {
+ if !filepath.IsAbs(arg[2:]) {
+ args[i] = arg[:2] + filepath.Join(srcDir, arg[2:])
+ }
+ }
+ }
+ }
+}
+
// NOTE: $ is not safe for the shell, but it is allowed here because of linker options like -Wl,$ORIGIN.
// We never pass these arguments to a shell (just to programs we construct argv for), so this should be okay.
// See golang.org/issue/6038.
// The @ is for OS X. See golang.org/issue/13720.
// The % is for Jenkins. See golang.org/issue/16959.
// The ! is because module paths may use them. See golang.org/issue/26716.
// The ~ and ^ are for sr.ht. See golang.org/issue/32260.
const safeString = "+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:$@%! ~^"

// safeCgoName reports whether s is non-empty and every ASCII byte in it is
// listed in safeString. Bytes >= utf8.RuneSelf (non-ASCII UTF-8) are accepted.
func safeCgoName(s string) bool {
	if len(s) == 0 {
		return false
	}
	for _, c := range []byte(s) {
		if c < utf8.RuneSelf && strings.IndexByte(safeString, c) < 0 {
			return false
		}
	}
	return true
}
+
// splitQuoted splits the string s around each instance of one or more consecutive
// white space characters while taking into account quotes and escaping, and
// returns an array of substrings of s or an empty list if s contains only white space.
// Single quotes and double quotes are recognized to prevent splitting within the
// quoted region, and are removed from the resulting substrings. If a quote in s
// isn't closed err will be set and r will have the unclosed argument as the
// last element. The backslash is used for escaping.
//
// For example, the following string:
//
//	a b:"c d" 'e''f' "g\""
//
// Would be parsed as:
//
//	[]string{"a", "b:c d", "ef", `g"`}
func splitQuoted(s string) (r []string, err error) {
	var args []string
	arg := make([]rune, 0, len(s)) // scratch for the argument being built
	escaped := false               // previous rune was an unconsumed backslash
	quoted := false                // current argument saw quotes (so "" still counts)
	quote := '\x00'                // active quote character, or NUL when unquoted

	// flush ends the current argument, keeping empty arguments only when
	// they were explicitly quoted.
	flush := func() {
		if quoted || len(arg) > 0 {
			args = append(args, string(arg))
			arg = arg[:0]
			quoted = false
		}
	}

	// Note: c (not "rune") — the original shadowed the builtin rune type.
	for _, c := range s {
		switch {
		case escaped:
			escaped = false
		case c == '\\':
			escaped = true
			continue
		case quote != '\x00':
			if c == quote {
				quote = '\x00'
				continue
			}
		case c == '"' || c == '\'':
			quoted = true
			quote = c
			continue
		case unicode.IsSpace(c):
			flush()
			continue
		}
		arg = append(arg, c)
	}
	flush()

	if quote != '\x00' {
		err = errors.New("unclosed quote")
	} else if escaped {
		err = errors.New("unfinished escaping")
	}
	return args, err
}
+
+// matchAuto interprets text as either a +build or //go:build expression (whichever works),
+// reporting whether the expression matches the build context.
+//
+// matchAuto is only used for testing of tag evaluation
+// and in #cgo lines, which accept either syntax.
+func (ctxt *Context) matchAuto(text string, allTags map[string]bool) bool {
+ if strings.ContainsAny(text, "&|()") {
+ text = "//go:build " + text
+ } else {
+ text = "// +build " + text
+ }
+ x, err := constraint.Parse(text)
+ if err != nil {
+ return false
+ }
+ return ctxt.eval(x, allTags)
+}
+
// eval evaluates the build-constraint expression x against this context,
// resolving each tag through ctxt.matchTag (which also records consulted
// tags in allTags).
func (ctxt *Context) eval(x constraint.Expr, allTags map[string]bool) bool {
	return x.Eval(func(tag string) bool { return ctxt.matchTag(tag, allTags) })
}
+
+// matchTag reports whether the name is one of:
+//
+// cgo (if cgo is enabled)
+// $GOOS
+// $GOARCH
+// boringcrypto
+// ctxt.Compiler
+// linux (if GOOS == android)
+// solaris (if GOOS == illumos)
+// tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags)
+//
+// It records all consulted tags in allTags.
+func (ctxt *Context) matchTag(name string, allTags map[string]bool) bool {
+ if allTags != nil {
+ allTags[name] = true
+ }
+
+ // special tags
+ if ctxt.CgoEnabled && name == "cgo" {
+ return true
+ }
+ if name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler {
+ return true
+ }
+ if ctxt.GOOS == "android" && name == "linux" {
+ return true
+ }
+ if ctxt.GOOS == "illumos" && name == "solaris" {
+ return true
+ }
+ if ctxt.GOOS == "ios" && name == "darwin" {
+ return true
+ }
+ if name == "unix" && unixOS[ctxt.GOOS] {
+ return true
+ }
+ if name == "boringcrypto" {
+ name = "goexperiment.boringcrypto" // boringcrypto is an old name for goexperiment.boringcrypto
+ }
+
+ // other tags
+ for _, tag := range ctxt.BuildTags {
+ if tag == name {
+ return true
+ }
+ }
+ for _, tag := range ctxt.ToolTags {
+ if tag == name {
+ return true
+ }
+ }
+ for _, tag := range ctxt.ReleaseTags {
+ if tag == name {
+ return true
+ }
+ }
+
+ return false
+}
+
// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH
// suffix which does not match the current system.
// The recognized name formats are:
//
//	name_$(GOOS).*
//	name_$(GOARCH).*
//	name_$(GOOS)_$(GOARCH).*
//	name_$(GOOS)_test.*
//	name_$(GOARCH)_test.*
//	name_$(GOOS)_$(GOARCH)_test.*
//
// Exceptions:
// if GOOS=android, then files with GOOS=linux are also matched.
// if GOOS=illumos, then files with GOOS=solaris are also matched.
// if GOOS=ios, then files with GOOS=darwin are also matched.
//
// knownOS and knownArch are package-level sets; presumably defined in
// syslist.go alongside unixOS.
func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool {
	// Strip everything from the first dot onward (the extension(s)).
	name, _, _ = strings.Cut(name, ".")

	// Before Go 1.4, a file called "linux.go" would be equivalent to having a
	// build tag "linux" in that file. For Go 1.4 and beyond, we require this
	// auto-tagging to apply only to files with a non-empty prefix, so
	// "foo_linux.go" is tagged but "linux.go" is not. This allows new operating
	// systems, such as android, to arrive without breaking existing code with
	// innocuous source code in "android.go". The easiest fix: cut everything
	// in the name before the initial _.
	i := strings.Index(name, "_")
	if i < 0 {
		return true
	}
	name = name[i:] // ignore everything before first _

	l := strings.Split(name, "_")
	if n := len(l); n > 0 && l[n-1] == "test" {
		// Trailing _test does not participate in OS/arch matching.
		l = l[:n-1]
	}
	n := len(l)
	if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] {
		if allTags != nil {
			// In case we short-circuit on l[n-1].
			allTags[l[n-2]] = true
		}
		return ctxt.matchTag(l[n-1], allTags) && ctxt.matchTag(l[n-2], allTags)
	}
	if n >= 1 && (knownOS[l[n-1]] || knownArch[l[n-1]]) {
		return ctxt.matchTag(l[n-1], allTags)
	}
	return true
}
diff --git a/src/cmd/go/internal/modindex/build_read.go b/src/cmd/go/internal/modindex/build_read.go
new file mode 100644
index 0000000..9137200
--- /dev/null
+++ b/src/cmd/go/internal/modindex/build_read.go
@@ -0,0 +1,594 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a lightly modified copy go/build/read.go with unused parts
+// removed.
+
+package modindex
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type importReader struct {
+ b *bufio.Reader
+ buf []byte
+ peek byte
+ err error
+ eof bool
+ nerr int
+ pos token.Position
+}
+
+var bom = []byte{0xef, 0xbb, 0xbf}
+
+// newImportReader returns an importReader reading from r, with position
+// information reported as if the input came from a file named name.
+func newImportReader(name string, r io.Reader) *importReader {
+	b := bufio.NewReader(r)
+	// Remove leading UTF-8 BOM.
+	// Per https://golang.org/ref/spec#Source_code_representation:
+	// a compiler may ignore a UTF-8-encoded byte order mark (U+FEFF)
+	// if it is the first Unicode code point in the source text.
+	if leadingBytes, err := b.Peek(3); err == nil && bytes.Equal(leadingBytes, bom) {
+		b.Discard(3)
+	}
+	return &importReader{
+		b: b,
+		pos: token.Position{
+			Filename: name,
+			Line:     1,
+			Column:   1,
+		},
+	}
+}
+
+// isIdent reports whether c may appear in a Go identifier.
+// Any non-ASCII byte is optimistically treated as an identifier byte;
+// precise validation is left to the parser.
+func isIdent(c byte) bool {
+	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf
+}
+
+var (
+ errSyntax = errors.New("syntax error")
+ errNUL = errors.New("unexpected NUL in input")
+)
+
+// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
+func (r *importReader) syntaxError() {
+ if r.err == nil {
+ r.err = errSyntax
+ }
+}
+
+// readByte reads the next byte from the input, saves it in buf, and returns it.
+// If an error occurs, readByte records the error in r.err and returns 0.
+func (r *importReader) readByte() byte {
+ c, err := r.b.ReadByte()
+ if err == nil {
+ r.buf = append(r.buf, c)
+ if c == 0 {
+ err = errNUL
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ r.eof = true
+ } else if r.err == nil {
+ r.err = err
+ }
+ c = 0
+ }
+ return c
+}
+
+// readByteNoBuf is like readByte but doesn't buffer the byte.
+// It exhausts r.buf before reading from r.b.
+func (r *importReader) readByteNoBuf() byte {
+	var c byte
+	var err error
+	if len(r.buf) > 0 {
+		// Re-deliver bytes that readByte already buffered.
+		c = r.buf[0]
+		r.buf = r.buf[1:]
+	} else {
+		c, err = r.b.ReadByte()
+		if err == nil && c == 0 {
+			err = errNUL
+		}
+	}
+
+	if err != nil {
+		if err == io.EOF {
+			r.eof = true
+		} else if r.err == nil {
+			r.err = err
+		}
+		return 0
+	}
+	// Unlike readByte, this path maintains r.pos, which the //go:embed
+	// scan reads to report pattern positions.
+	r.pos.Offset++
+	if c == '\n' {
+		r.pos.Line++
+		r.pos.Column = 1
+	} else {
+		r.pos.Column++
+	}
+	return c
+}
+
+// peekByte returns the next byte from the input reader but does not advance beyond it.
+// If skipSpace is set, peekByte skips leading spaces and comments.
+func (r *importReader) peekByte(skipSpace bool) byte {
+	if r.err != nil {
+		// Defensive guard: if a caller keeps peeking after an error,
+		// fail loudly instead of spinning forever.
+		if r.nerr++; r.nerr > 10000 {
+			panic("go/build: import reader looping")
+		}
+		return 0
+	}
+
+	// Use r.peek as first input byte.
+	// Don't just return r.peek here: it might have been left by peekByte(false)
+	// and this might be peekByte(true).
+	c := r.peek
+	if c == 0 {
+		c = r.readByte()
+	}
+	for r.err == nil && !r.eof {
+		if skipSpace {
+			// For the purposes of this reader, semicolons are never necessary to
+			// understand the input and are treated as spaces.
+			switch c {
+			case ' ', '\f', '\t', '\r', '\n', ';':
+				c = r.readByte()
+				continue
+
+			case '/':
+				c = r.readByte()
+				if c == '/' {
+					// Line comment: skip to end of line.
+					for c != '\n' && r.err == nil && !r.eof {
+						c = r.readByte()
+					}
+				} else if c == '*' {
+					// General comment: skip to the closing "*/",
+					// tracking the last two bytes seen.
+					var c1 byte
+					for (c != '*' || c1 != '/') && r.err == nil {
+						if r.eof {
+							r.syntaxError()
+						}
+						c, c1 = c1, r.readByte()
+					}
+				} else {
+					// A lone '/' cannot start anything valid here.
+					r.syntaxError()
+				}
+				c = r.readByte()
+				continue
+			}
+		}
+		break
+	}
+	r.peek = c
+	return r.peek
+}
+
+// nextByte is like peekByte but advances beyond the returned byte.
+func (r *importReader) nextByte(skipSpace bool) byte {
+ c := r.peekByte(skipSpace)
+ r.peek = 0
+ return c
+}
+
+var goEmbed = []byte("go:embed")
+
+// findEmbed advances the input reader to the next //go:embed comment.
+// It reports whether it found a comment.
+// (Otherwise it found an error or EOF.)
+func (r *importReader) findEmbed(first bool) bool {
+	// The import block scan stopped after a non-space character,
+	// so the reader is not at the start of a line on the first call.
+	// After that, each //go:embed extraction leaves the reader
+	// at the end of a line.
+	startLine := !first
+	var c byte
+	for r.err == nil && !r.eof {
+		c = r.readByteNoBuf()
+	Reswitch:
+		switch c {
+		default:
+			startLine = false
+
+		case '\n':
+			startLine = true
+
+		case ' ', '\t':
+			// leave startLine alone
+
+		case '"':
+			// Interpreted string literal: skip it, honoring backslash
+			// escapes, so "//go:embed" inside a string is not mistaken
+			// for a directive.
+			startLine = false
+			for r.err == nil {
+				if r.eof {
+					r.syntaxError()
+				}
+				c = r.readByteNoBuf()
+				if c == '\\' {
+					r.readByteNoBuf()
+					if r.err != nil {
+						r.syntaxError()
+						return false
+					}
+					continue
+				}
+				if c == '"' {
+					c = r.readByteNoBuf()
+					goto Reswitch
+				}
+			}
+			goto Reswitch
+
+		case '`':
+			// Raw string literal: skip to the closing backquote; no escapes.
+			startLine = false
+			for r.err == nil {
+				if r.eof {
+					r.syntaxError()
+				}
+				c = r.readByteNoBuf()
+				if c == '`' {
+					c = r.readByteNoBuf()
+					goto Reswitch
+				}
+			}
+
+		case '\'':
+			// Rune literal: skip it, honoring backslash escapes.
+			startLine = false
+			for r.err == nil {
+				if r.eof {
+					r.syntaxError()
+				}
+				c = r.readByteNoBuf()
+				if c == '\\' {
+					r.readByteNoBuf()
+					if r.err != nil {
+						r.syntaxError()
+						return false
+					}
+					continue
+				}
+				if c == '\'' {
+					c = r.readByteNoBuf()
+					goto Reswitch
+				}
+			}
+
+		case '/':
+			c = r.readByteNoBuf()
+			switch c {
+			default:
+				startLine = false
+				goto Reswitch
+
+			case '*':
+				// General comment: skip to the closing "*/".
+				var c1 byte
+				for (c != '*' || c1 != '/') && r.err == nil {
+					if r.eof {
+						r.syntaxError()
+					}
+					c, c1 = c1, r.readByteNoBuf()
+				}
+				startLine = false
+
+			case '/':
+				// A //go:embed directive only counts at the start of a line.
+				if startLine {
+					// Try to read this as a //go:embed comment.
+					for i := range goEmbed {
+						c = r.readByteNoBuf()
+						if c != goEmbed[i] {
+							goto SkipSlashSlash
+						}
+					}
+					c = r.readByteNoBuf()
+					if c == ' ' || c == '\t' {
+						// Found one!
+						return true
+					}
+				}
+			SkipSlashSlash:
+				for c != '\n' && r.err == nil && !r.eof {
+					c = r.readByteNoBuf()
+				}
+				startLine = true
+			}
+		}
+	}
+	return false
+}
+
+// readKeyword reads the given keyword from the input.
+// If the keyword is not present, readKeyword records a syntax error.
+func (r *importReader) readKeyword(kw string) {
+ r.peekByte(true)
+ for i := 0; i < len(kw); i++ {
+ if r.nextByte(false) != kw[i] {
+ r.syntaxError()
+ return
+ }
+ }
+ if isIdent(r.peekByte(false)) {
+ r.syntaxError()
+ }
+}
+
+// readIdent reads an identifier from the input.
+// If an identifier is not present, readIdent records a syntax error.
+func (r *importReader) readIdent() {
+ c := r.peekByte(true)
+ if !isIdent(c) {
+ r.syntaxError()
+ return
+ }
+ for isIdent(r.peekByte(false)) {
+ r.peek = 0
+ }
+}
+
+// readString reads a quoted string literal from the input.
+// If a string literal is not present, readString records a syntax error.
+func (r *importReader) readString() {
+	switch r.nextByte(true) {
+	case '`':
+		// Raw string: runs to the closing backquote; no escapes.
+		for r.err == nil {
+			if r.nextByte(false) == '`' {
+				break
+			}
+			if r.eof {
+				r.syntaxError()
+			}
+		}
+	case '"':
+		// Interpreted string: may contain escapes and must not span lines.
+		for r.err == nil {
+			c := r.nextByte(false)
+			if c == '"' {
+				break
+			}
+			if r.eof || c == '\n' {
+				r.syntaxError()
+			}
+			if c == '\\' {
+				r.nextByte(false)
+			}
+		}
+	default:
+		r.syntaxError()
+	}
+}
+
+// readImport reads an import clause - optional identifier followed by quoted string -
+// from the input.
+func (r *importReader) readImport() {
+ c := r.peekByte(true)
+ if c == '.' {
+ r.peek = 0
+ } else if isIdent(c) {
+ r.readIdent()
+ }
+ r.readString()
+}
+
+// readComments is like io.ReadAll, except that it only reads the leading
+// block of comments in the file.
+func readComments(f io.Reader) ([]byte, error) {
+	r := newImportReader("", f)
+	// Skipping space also skips comments, buffering them into r.buf.
+	r.peekByte(true)
+	if r.err == nil && !r.eof {
+		// Didn't reach EOF, so must have found a non-space byte. Remove it.
+		r.buf = r.buf[:len(r.buf)-1]
+	}
+	return r.buf, r.err
+}
+
+// readGoInfo expects a Go file as input and reads the file up to and including the import section.
+// It records what it learned in *info.
+// If info.fset is non-nil, readGoInfo parses the file and sets info.parsed, info.parseErr,
+// info.imports and info.embeds.
+//
+// It only returns an error if there are problems reading the file,
+// not for syntax errors in the file itself.
+func readGoInfo(f io.Reader, info *fileInfo) error {
+	r := newImportReader(info.name, f)
+
+	// Scan the "package <name>" clause and all import declarations,
+	// buffering the raw bytes read into r.buf.
+	r.readKeyword("package")
+	r.readIdent()
+	for r.peekByte(true) == 'i' {
+		r.readKeyword("import")
+		if r.peekByte(true) == '(' {
+			// Factored import block: import ( ... )
+			r.nextByte(false)
+			for r.peekByte(true) != ')' && r.err == nil {
+				r.readImport()
+			}
+			r.nextByte(false)
+		} else {
+			r.readImport()
+		}
+	}
+
+	info.header = r.buf
+
+	// If we stopped successfully before EOF, we read a byte that told us we were done.
+	// Return all but that last byte, which would cause a syntax error if we let it through.
+	if r.err == nil && !r.eof {
+		info.header = r.buf[:len(r.buf)-1]
+	}
+
+	// If we stopped for a syntax error, consume the whole file so that
+	// we are sure we don't change the errors that go/parser returns.
+	if r.err == errSyntax {
+		r.err = nil
+		for r.err == nil && !r.eof {
+			r.readByte()
+		}
+		info.header = r.buf
+	}
+	if r.err != nil {
+		return r.err
+	}
+
+	if info.fset == nil {
+		return nil
+	}
+
+	// Parse file header & record imports.
+	info.parsed, info.parseErr = parser.ParseFile(info.fset, info.name, info.header, parser.ImportsOnly|parser.ParseComments)
+	if info.parseErr != nil {
+		return nil
+	}
+
+	hasEmbed := false
+	for _, decl := range info.parsed.Decls {
+		d, ok := decl.(*ast.GenDecl)
+		if !ok {
+			continue
+		}
+		for _, dspec := range d.Specs {
+			spec, ok := dspec.(*ast.ImportSpec)
+			if !ok {
+				continue
+			}
+			quoted := spec.Path.Value
+			path, err := strconv.Unquote(quoted)
+			if err != nil {
+				return fmt.Errorf("parser returned invalid quoted string: <%s>", quoted)
+			}
+			if path == "embed" {
+				hasEmbed = true
+			}
+
+			// Attach the doc comment: the spec's own comment, or the
+			// declaration's comment when it covers exactly this one import.
+			doc := spec.Doc
+			if doc == nil && len(d.Specs) == 1 {
+				doc = d.Doc
+			}
+			info.imports = append(info.imports, fileImport{path, spec.Pos(), doc})
+		}
+	}
+
+	// Extract directives.
+	// Only //go: comments appearing before the package clause are recorded.
+	for _, group := range info.parsed.Comments {
+		if group.Pos() >= info.parsed.Package {
+			break
+		}
+		for _, c := range group.List {
+			if strings.HasPrefix(c.Text, "//go:") {
+				info.directives = append(info.directives, build.Directive{Text: c.Text, Pos: info.fset.Position(c.Slash)})
+			}
+		}
+	}
+
+	// If the file imports "embed",
+	// we have to look for //go:embed comments
+	// in the remainder of the file.
+	// The compiler will enforce the mapping of comments to
+	// declared variables. We just need to know the patterns.
+	// If there were //go:embed comments earlier in the file
+	// (near the package statement or imports), the compiler
+	// will reject them. They can be (and have already been) ignored.
+	if hasEmbed {
+		var line []byte
+		for first := true; r.findEmbed(first); first = false {
+			line = line[:0]
+			// Record the position of the first byte after "//go:embed ".
+			pos := r.pos
+			for {
+				c := r.readByteNoBuf()
+				if c == '\n' || r.err != nil || r.eof {
+					break
+				}
+				line = append(line, c)
+			}
+			// Add args if line is well-formed.
+			// Ignore badly-formed lines - the compiler will report them when it finds them,
+			// and we can pretend they are not there to help go list succeed with what it knows.
+			embs, err := parseGoEmbed(string(line), pos)
+			if err == nil {
+				info.embeds = append(info.embeds, embs...)
+			}
+		}
+	}
+
+	return nil
+}
+
+// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
+// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
+// This is based on a similar function in cmd/compile/internal/gc/noder.go;
+// this version calculates position information as well.
+func parseGoEmbed(args string, pos token.Position) ([]fileEmbed, error) {
+	// trimBytes drops the first n bytes of args and advances pos past them.
+	trimBytes := func(n int) {
+		pos.Offset += n
+		pos.Column += utf8.RuneCountInString(args[:n])
+		args = args[n:]
+	}
+	trimSpace := func() {
+		trim := strings.TrimLeftFunc(args, unicode.IsSpace)
+		trimBytes(len(args) - len(trim))
+	}
+
+	var list []fileEmbed
+	for trimSpace(); args != ""; trimSpace() {
+		var path string
+		pathPos := pos
+	Switch:
+		switch args[0] {
+		default:
+			// Bare pattern: runs to the next space (or end of line).
+			i := len(args)
+			for j, c := range args {
+				if unicode.IsSpace(c) {
+					i = j
+					break
+				}
+			}
+			path = args[:i]
+			trimBytes(i)
+
+		case '`':
+			// Back-quoted pattern: no escapes, ends at the next backquote.
+			var ok bool
+			path, _, ok = strings.Cut(args[1:], "`")
+			if !ok {
+				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+			}
+			trimBytes(1 + len(path) + 1)
+
+		case '"':
+			// Double-quoted pattern: scan past escapes to the closing quote,
+			// then let strconv.Unquote interpret the escape sequences.
+			i := 1
+			for ; i < len(args); i++ {
+				if args[i] == '\\' {
+					i++
+					continue
+				}
+				if args[i] == '"' {
+					q, err := strconv.Unquote(args[:i+1])
+					if err != nil {
+						return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
+					}
+					path = q
+					trimBytes(i + 1)
+					break Switch
+				}
+			}
+			if i >= len(args) {
+				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+			}
+		}
+
+		// Each pattern must be followed by a space or end of line.
+		if args != "" {
+			r, _ := utf8.DecodeRuneInString(args)
+			if !unicode.IsSpace(r) {
+				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+			}
+		}
+		list = append(list, fileEmbed{path, pathPos})
+	}
+	return list, nil
+}
diff --git a/src/cmd/go/internal/modindex/index_format.txt b/src/cmd/go/internal/modindex/index_format.txt
new file mode 100644
index 0000000..8b1d2c6
--- /dev/null
+++ b/src/cmd/go/internal/modindex/index_format.txt
@@ -0,0 +1,63 @@
+This file documents the index format that is read and written by this package.
+The index format is an encoding of a series of RawPackage structs.
+
+Field names refer to fields on RawPackage and rawFile.
+The file uses little endian encoding for the uint32s.
+Strings are written into the string table at the end of the file.
+Each string is prefixed with a uvarint-encoded length.
+Bools are written as uint32s: 0 for false and 1 for true.
+
+The following is the format for a full module:
+
+“go index v2\n”
+str uint32 - offset of string table
+n uint32 - number of packages
+for each rawPackage:
+ dirname - string offset
+ package - offset where package begins
+for each rawPackage:
+ error uint32 - string offset // error is produced by fsys.ReadDir or fmt.Errorf
+ dir uint32 - string offset (directory path relative to module root)
+ len(sourceFiles) uint32
+ sourceFiles [n]uint32 - offset to source file (relative to start of index file)
+ for each sourceFile:
+ error - string offset // error is either produced by fmt.Errorf,errors.New or is io.EOF
+ parseError - string offset // if non-empty, a json-encoded parseError struct (see below). Is either produced by io.ReadAll,os.ReadFile,errors.New or is scanner.Error,scanner.ErrorList
+ synopsis - string offset
+ name - string offset
+ pkgName - string offset
+		ignoreFile - uint32 bool // report the file in Ignored(Go|Other)Files because there was an error reading it or parsing its build constraints.
+ binaryOnly uint32 bool
+ cgoDirectives string offset // the #cgo directive lines in the comment on import "C"
+ goBuildConstraint - string offset
+ len(plusBuildConstraints) - uint32
+ plusBuildConstraints - [n]uint32 (string offsets)
+ len(imports) uint32
+ for each rawImport:
+ path - string offset
+ position - file, offset, line, column - uint32
+ len(embeds) uint32
+ for each embed:
+ pattern - string offset
+ position - file, offset, line, column - uint32
+ len(directives) uint32
+ for each directive:
+ text - string offset
+ position - file, offset, line, column - uint32
+[string table]
+0xFF (marker)
+
+The following is the format for a single indexed package:
+
+“go index v0\n”
+str uint32 - offset of string table
+for the single RawPackage:
+ [same RawPackage format as above]
+[string table]
+
+The following is the definition of the json-serialized parseError struct:
+
+type parseError struct {
+ ErrorList *scanner.ErrorList // non-nil if the error was an ErrorList, nil otherwise
+ ErrorString string // non-empty for all other cases
+}
diff --git a/src/cmd/go/internal/modindex/index_test.go b/src/cmd/go/internal/modindex/index_test.go
new file mode 100644
index 0000000..6bc62f3
--- /dev/null
+++ b/src/cmd/go/internal/modindex/index_test.go
@@ -0,0 +1,104 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "go/build"
+ "internal/diff"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "testing"
+)
+
+// init configures the package for testing: corrupt-index failures panic
+// instead of calling base.Fatalf, and the index is force-enabled.
+func init() {
+	isTest = true
+	enabled = true // to allow GODEBUG=goindex=0 go test, when things are very broken
+}
+
+// TestIndex encodes indexes for a handful of GOROOT packages and checks
+// that Import through the index matches a direct build.Default.Import
+// of the same directory exactly.
+func TestIndex(t *testing.T) {
+	src := filepath.Join(runtime.GOROOT(), "src")
+	// checkPkg asserts that importing pkg via index m yields the same
+	// *build.Package as go/build produces without the index.
+	checkPkg := func(t *testing.T, m *Module, pkg string, data []byte) {
+		p := m.Package(pkg)
+		bp, err := p.Import(build.Default, build.ImportComment)
+		if err != nil {
+			t.Fatal(err)
+		}
+		bp1, err := build.Default.Import(".", filepath.Join(src, pkg), build.ImportComment)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if !reflect.DeepEqual(bp, bp1) {
+			t.Errorf("mismatch")
+			t.Logf("index:\n%s", hex.Dump(data))
+
+			// Render both packages as indented JSON so the logged diff is readable.
+			js, err := json.MarshalIndent(bp, "", "\t")
+			if err != nil {
+				t.Fatal(err)
+			}
+			js1, err := json.MarshalIndent(bp1, "", "\t")
+			if err != nil {
+				t.Fatal(err)
+			}
+			t.Logf("diff:\n%s", diff.Diff("index", js, "correct", js1))
+			t.FailNow()
+		}
+	}
+
+	// Check packages in increasing complexity, one at a time.
+	pkgs := []string{
+		"crypto",
+		"encoding",
+		"unsafe",
+		"encoding/json",
+		"runtime",
+		"net",
+	}
+	var raws []*rawPackage
+	for _, pkg := range pkgs {
+		raw := importRaw(src, pkg)
+		raws = append(raws, raw)
+		t.Run(pkg, func(t *testing.T) {
+			data := encodeModuleBytes([]*rawPackage{raw})
+			m, err := fromBytes(src, data)
+			if err != nil {
+				t.Fatal(err)
+			}
+			checkPkg(t, m, pkg, data)
+		})
+	}
+
+	// Check that a multi-package index works too.
+	t.Run("all", func(t *testing.T) {
+		data := encodeModuleBytes(raws)
+		m, err := fromBytes(src, data)
+		if err != nil {
+			t.Fatal(err)
+		}
+		for _, pkg := range pkgs {
+			checkPkg(t, m, pkg, data)
+		}
+	})
+}
+
+// TestImportRaw_IgnoreNonGo checks that importRaw records only source-like
+// files from testdata/ignore_non_source (a.syso, b.go, c.c) and skips
+// non-source files such as bar.json and baz.log.
+func TestImportRaw_IgnoreNonGo(t *testing.T) {
+	path := filepath.Join("testdata", "ignore_non_source")
+	p := importRaw(path, ".")
+
+	wantFiles := []string{"a.syso", "b.go", "c.c"}
+
+	var gotFiles []string
+	for i := range p.sourceFiles {
+		gotFiles = append(gotFiles, p.sourceFiles[i].name)
+	}
+
+	if !reflect.DeepEqual(gotFiles, wantFiles) {
+		t.Errorf("names of files in importRaw(testdata/ignore_non_source): got %v; want %v",
+			gotFiles, wantFiles)
+	}
+}
diff --git a/src/cmd/go/internal/modindex/read.go b/src/cmd/go/internal/modindex/read.go
new file mode 100644
index 0000000..83d5faf
--- /dev/null
+++ b/src/cmd/go/internal/modindex/read.go
@@ -0,0 +1,1037 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "go/build"
+ "go/build/constraint"
+ "go/token"
+ "internal/godebug"
+ "internal/goroot"
+ "path"
+ "path/filepath"
+ "runtime"
+ "runtime/debug"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+ "unsafe"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cache"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/par"
+ "cmd/go/internal/str"
+)
+
+// enabled is used to flag off the behavior of the module index on tip.
+// It will be removed before the release.
+// TODO(matloob): Remove enabled once we have more confidence on the
+// module index.
+var enabled = godebug.New("#goindex").Value() != "0"
+
+// Module represents an encoded module index file. It is used to
+// do the equivalent of build.Import of packages in the module and answer other
+// questions based on the index file's data.
+type Module struct {
+ modroot string
+ d *decoder
+ n int // number of packages
+}
+
+// moduleHash returns an ActionID corresponding to the state of the module
+// located at filesystem path modroot.
+func moduleHash(modroot string, ismodcache bool) (cache.ActionID, error) {
+	// We expect modules stored within the module cache to be checksummed and
+	// immutable, and we expect released modules within GOROOT to change only
+	// infrequently (when the Go version changes).
+	if !ismodcache {
+		// The contents of this module may change over time. We don't want to pay
+		// the cost to detect changes and re-index whenever they occur, so just
+		// don't index it at all.
+		//
+		// Note that this is true even for modules in GOROOT/src: non-release builds
+		// of the Go toolchain may have arbitrary development changes on top of the
+		// commit reported by runtime.Version, or could be completely artificial due
+		// to lacking a `git` binary (like "devel gomote.XXXXX", as synthesized by
+		// "gomote push" as of 2022-06-15). (Release builds shouldn't have
+		// modifications, but we don't want to use a behavior for releases that we
+		// haven't tested during development.)
+		return cache.ActionID{}, ErrNotIndexed
+	}
+
+	// Module-cache contents are immutable, so hashing the identity
+	// (toolchain version, index version, module root) is sufficient.
+	h := cache.NewHash("moduleIndex")
+	// TODO(bcmills): Since modules in the index are checksummed, we could
+	// probably improve the cache hit rate by keying off of the module
+	// path@version (perhaps including the checksum?) instead of the module root
+	// directory.
+	fmt.Fprintf(h, "module index %s %s %v\n", runtime.Version(), indexVersion, modroot)
+	return h.Sum(), nil
+}
+
+const modTimeCutoff = 2 * time.Second
+
+// dirHash returns an ActionID corresponding to the state of the package
+// located at filesystem path pkgdir.
+func dirHash(modroot, pkgdir string) (cache.ActionID, error) {
+	h := cache.NewHash("moduleIndex")
+	fmt.Fprintf(h, "modroot %s\n", modroot)
+	fmt.Fprintf(h, "package %s %s %v\n", runtime.Version(), indexVersion, pkgdir)
+	entries, err := fsys.ReadDir(pkgdir)
+	if err != nil {
+		// pkgdir might not be a directory. give up on hashing.
+		return cache.ActionID{}, ErrNotIndexed
+	}
+	cutoff := time.Now().Add(-modTimeCutoff)
+	for _, info := range entries {
+		if info.IsDir() {
+			continue
+		}
+
+		// Only regular files can be summarized by the (name, mtime, size)
+		// line hashed below; anything else makes the directory unindexable.
+		if !info.Mode().IsRegular() {
+			return cache.ActionID{}, ErrNotIndexed
+		}
+		// To avoid problems for very recent files where a new
+		// write might not change the mtime due to file system
+		// mtime precision, reject caching if a file was read that
+		// is less than modTimeCutoff old.
+		//
+		// This is the same strategy used for hashing test inputs.
+		// See hashOpen in cmd/go/internal/test/test.go for the
+		// corresponding code.
+		if info.ModTime().After(cutoff) {
+			return cache.ActionID{}, ErrNotIndexed
+		}
+
+		fmt.Fprintf(h, "file %v %v %v\n", info.Name(), info.ModTime(), info.Size())
+	}
+	return h.Sum(), nil
+}
+
+var ErrNotIndexed = errors.New("not in module index")
+
+var (
+ errDisabled = fmt.Errorf("%w: module indexing disabled", ErrNotIndexed)
+ errNotFromModuleCache = fmt.Errorf("%w: not from module cache", ErrNotIndexed)
+)
+
+// GetPackage returns the IndexPackage for the package at the given path.
+// It will return ErrNotIndexed if the directory should be read without
+// using the index, for instance because the index is disabled, or the package
+// is not in a module.
+func GetPackage(modroot, pkgdir string) (*IndexPackage, error) {
+	mi, err := GetModule(modroot)
+	if err == nil {
+		return mi.Package(relPath(pkgdir, modroot)), nil
+	}
+	if !errors.Is(err, errNotFromModuleCache) {
+		return nil, err
+	}
+	if cfg.BuildContext.Compiler == "gccgo" && str.HasPathPrefix(modroot, cfg.GOROOTsrc) {
+		return nil, err // gccgo has no sources for GOROOT packages.
+	}
+	// Not a module-cache module: fall back to a per-directory index.
+	return openIndexPackage(modroot, pkgdir)
+}
+
+// GetModule returns the Module for the given modroot.
+// It will return ErrNotIndexed if the directory should be read without
+// using the index, for instance because the index is disabled, or the
+// module is not stored in the module cache.
+func GetModule(modroot string) (*Module, error) {
+	if !enabled || cache.DefaultDir() == "off" {
+		return nil, errDisabled
+	}
+	if modroot == "" {
+		panic("modindex.GetModule called with empty modroot")
+	}
+	if cfg.BuildMod == "vendor" {
+		// Even if the main module is in the module cache,
+		// its vendored dependencies are not loaded from their
+		// usual cached locations.
+		return nil, errNotFromModuleCache
+	}
+	modroot = filepath.Clean(modroot)
+	// Only modules inside the module cache (and outside GOROOT/src) are indexed.
+	if str.HasFilePathPrefix(modroot, cfg.GOROOTsrc) || !str.HasFilePathPrefix(modroot, cfg.GOMODCACHE) {
+		return nil, errNotFromModuleCache
+	}
+	return openIndexModule(modroot, true)
+}
+
+var mcache par.ErrCache[string, *Module]
+
+// openIndexModule returns the module index for modroot.
+// It will return ErrNotIndexed if the module can not be read
+// using the index because it contains symlinks.
+func openIndexModule(modroot string, ismodcache bool) (*Module, error) {
+	// mcache deduplicates concurrent and repeated requests for the same modroot.
+	return mcache.Do(modroot, func() (*Module, error) {
+		fsys.Trace("openIndexModule", modroot)
+		id, err := moduleHash(modroot, ismodcache)
+		if err != nil {
+			return nil, err
+		}
+		data, _, err := cache.GetMmap(cache.Default(), id)
+		if err != nil {
+			// Couldn't read from modindex. Assume we couldn't read from
+			// the index because the module hasn't been indexed yet.
+			data, err = indexModule(modroot)
+			if err != nil {
+				return nil, err
+			}
+			if err = cache.PutBytes(cache.Default(), id, data); err != nil {
+				return nil, err
+			}
+		}
+		mi, err := fromBytes(modroot, data)
+		if err != nil {
+			return nil, err
+		}
+		return mi, nil
+	})
+}
+
+var pcache par.ErrCache[[2]string, *IndexPackage]
+
+// openIndexPackage returns the per-directory index for pkgdir within modroot,
+// building it (and storing it in the build cache) on first use.
+func openIndexPackage(modroot, pkgdir string) (*IndexPackage, error) {
+	// pcache deduplicates concurrent and repeated requests
+	// for the same (modroot, pkgdir) pair.
+	return pcache.Do([2]string{modroot, pkgdir}, func() (*IndexPackage, error) {
+		fsys.Trace("openIndexPackage", pkgdir)
+		id, err := dirHash(modroot, pkgdir)
+		if err != nil {
+			return nil, err
+		}
+		data, _, err := cache.GetMmap(cache.Default(), id)
+		if err != nil {
+			// Couldn't read from index. Assume we couldn't read from
+			// the index because the package hasn't been indexed yet.
+			data = indexPackage(modroot, pkgdir)
+			if err = cache.PutBytes(cache.Default(), id, data); err != nil {
+				return nil, err
+			}
+		}
+		pkg, err := packageFromBytes(modroot, data)
+		if err != nil {
+			return nil, err
+		}
+		return pkg, nil
+	})
+}
+
+var errCorrupt = errors.New("corrupt index")
+
+// protect marks the start of a large section of code that accesses the index.
+// It returns the previous panic-on-fault setting, to be passed to unprotect.
+// It should be used as:
+//
+//	defer unprotect(protect(), &err)
+//
+// It should not be used for trivial accesses which would be
+// dwarfed by the overhead of the defer.
+func protect() bool {
+	return debug.SetPanicOnFault(true)
+}
+
+var isTest = false
+
+// unprotect marks the end of a large section of code that accesses the index.
+// It should be used as:
+//
+//	defer unprotect(protect(), &err)
+//
+// unprotect looks for panics due to errCorrupt or bad mmap accesses.
+// When it finds them, it adds explanatory text, consumes the panic, and sets *errp instead.
+// If errp is nil, unprotect adds the explanatory text but then calls base.Fatalf.
+func unprotect(old bool, errp *error) {
+	// SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
+	// that all its errors satisfy this interface, we'll only check for these errors so that
+	// we don't suppress panics that could have been produced from other sources.
+	type addrer interface {
+		Addr() uintptr
+	}
+
+	// Restore the panic-on-fault setting saved by protect.
+	debug.SetPanicOnFault(old)
+
+	if e := recover(); e != nil {
+		if _, ok := e.(addrer); ok || e == errCorrupt {
+			// This panic was almost certainly caused by SetPanicOnFault or our panic(errCorrupt).
+			err := fmt.Errorf("error reading module index: %v", e)
+			if errp != nil {
+				*errp = err
+				return
+			}
+			if isTest {
+				panic(err)
+			}
+			base.Fatalf("%v", err)
+		}
+		// The panic was likely not caused by SetPanicOnFault.
+		panic(e)
+	}
+}
+
+// fromBytes returns a *Module given the encoded representation.
+func fromBytes(moddir string, data []byte) (m *Module, err error) {
+	if !enabled {
+		panic("use of index")
+	}
+
+	// Any mmap fault or panic(errCorrupt) below is converted into err.
+	defer unprotect(protect(), &err)
+
+	if !bytes.HasPrefix(data, []byte(indexVersion+"\n")) {
+		return nil, errCorrupt
+	}
+
+	const hdr = len(indexVersion + "\n")
+	d := &decoder{data: data}
+	// str is the offset of the string table; everything before it is the
+	// fixed-layout package data.
+	str := d.intAt(hdr)
+	if str < hdr+8 || len(d.data) < str {
+		return nil, errCorrupt
+	}
+	d.data, d.str = data[:str], d.data[str:]
+	// Check that string table looks valid.
+	// First string is empty string (length 0),
+	// and we leave a marker byte 0xFF at the end
+	// just to make sure that the file is not truncated.
+	if len(d.str) == 0 || d.str[0] != 0 || d.str[len(d.str)-1] != 0xFF {
+		return nil, errCorrupt
+	}
+
+	// Package count; bounded so the 8-byte-per-package table fits in d.data.
+	n := d.intAt(hdr + 4)
+	if n < 0 || n > (len(d.data)-8)/8 {
+		return nil, errCorrupt
+	}
+
+	m = &Module{
+		moddir,
+		d,
+		n,
+	}
+	return m, nil
+}
+
+// packageFromBytes returns a *IndexPackage given the encoded representation.
+func packageFromBytes(modroot string, data []byte) (p *IndexPackage, err error) {
+	m, err := fromBytes(modroot, data)
+	if err != nil {
+		return nil, err
+	}
+	// A per-package index must contain exactly one package.
+	if m.n != 1 {
+		return nil, fmt.Errorf("corrupt single-package index")
+	}
+	return m.pkg(0), nil
+}
+
+// pkgDir returns the dir string of the i'th package in the index.
+func (m *Module) pkgDir(i int) string {
+	if i < 0 || i >= m.n {
+		panic(errCorrupt)
+	}
+	// The package table starts after the header (indexVersion+"\n", 12 bytes
+	// for "go index v2") and two uint32s; each entry is 8 bytes, beginning
+	// with the dir-string offset. See index_format.txt.
+	return m.d.stringAt(12 + 8 + 8*i)
+}
+
+// pkgOff returns the offset of the data for the i'th package in the index.
+func (m *Module) pkgOff(i int) int {
+	if i < 0 || i >= m.n {
+		panic(errCorrupt)
+	}
+	// +4 selects the second uint32 of the 8-byte table entry,
+	// the package-data offset (the first is the dir-string offset).
+	return m.d.intAt(12 + 8 + 8*i + 4)
+}
+
+// Walk calls f for each package in the index, passing the path to that package relative to the module root.
+func (m *Module) Walk(f func(path string)) {
+	// Guard mmap faults / corrupt-index panics for the whole iteration.
+	defer unprotect(protect(), nil)
+	for i := 0; i < m.n; i++ {
+		f(m.pkgDir(i))
+	}
+}
+
+// relPath returns the path relative to the module's root.
+// Both arguments are cleaned before trimming so that equivalent
+// spellings of the same path compare equal.
+func relPath(path, modroot string) string {
+	return str.TrimFilePathPrefix(filepath.Clean(path), filepath.Clean(modroot))
+}
+
+var installgorootAll = godebug.New("installgoroot").Value() == "all"
+
// Import is the equivalent of build.Import given the information in Module.
func (rp *IndexPackage) Import(bctxt build.Context, mode build.ImportMode) (p *build.Package, err error) {
	// protect/unprotect convert panics from reading a corrupt index into err.
	defer unprotect(protect(), &err)

	ctxt := (*Context)(&bctxt)

	p = &build.Package{}

	p.ImportPath = "."
	p.Dir = filepath.Join(rp.modroot, rp.dir)

	var pkgerr error
	switch ctxt.Compiler {
	case "gccgo", "gc":
	default:
		// Save error for end of function.
		pkgerr = fmt.Errorf("import %q: unknown compiler %q", p.Dir, ctxt.Compiler)
	}

	if p.Dir == "" {
		return p, fmt.Errorf("import %q: import of unknown directory", p.Dir)
	}

	// goroot and gopath
	inTestdata := func(sub string) bool {
		return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || str.HasPathPrefix(sub, "testdata")
	}
	var pkga string
	if !inTestdata(rp.dir) {
		// In build.go, p.Root should only be set in the non-local-import case, or in
		// GOROOT or GOPATH. Since module mode only calls Import with path set to "."
		// and the module index doesn't apply outside modules, the GOROOT case is
		// the only case where p.Root needs to be set.
		if ctxt.GOROOT != "" && str.HasFilePathPrefix(p.Dir, cfg.GOROOTsrc) && p.Dir != cfg.GOROOTsrc {
			p.Root = ctxt.GOROOT
			p.Goroot = true
			modprefix := str.TrimFilePathPrefix(rp.modroot, cfg.GOROOTsrc)
			p.ImportPath = rp.dir
			if modprefix != "" {
				p.ImportPath = filepath.Join(modprefix, p.ImportPath)
			}

			// Set GOROOT-specific fields (sometimes for modules in a GOPATH directory).
			// The fields set below (SrcRoot, PkgRoot, BinDir, PkgTargetRoot, and PkgObj)
			// are only set in build.Import if p.Root != "".
			var pkgtargetroot string
			suffix := ""
			if ctxt.InstallSuffix != "" {
				suffix = "_" + ctxt.InstallSuffix
			}
			switch ctxt.Compiler {
			case "gccgo":
				pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
				dir, elem := path.Split(p.ImportPath)
				pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a"
			case "gc":
				pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
				pkga = pkgtargetroot + "/" + p.ImportPath + ".a"
			}
			p.SrcRoot = ctxt.joinPath(p.Root, "src")
			p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
			p.BinDir = ctxt.joinPath(p.Root, "bin")
			if pkga != "" {
				// Always set PkgTargetRoot. It might be used when building in shared
				// mode.
				p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot)

				// Set the install target if applicable.
				if !p.Goroot || (installgorootAll && p.ImportPath != "unsafe" && p.ImportPath != "builtin") {
					p.PkgObj = ctxt.joinPath(p.Root, pkga)
				}
			}
		}
	}

	if rp.error != nil {
		// gccgo has synthesized packages (e.g. "unsafe") with no source in
		// GOROOT; a missing GOROOT package is not an error for it.
		if errors.Is(rp.error, errCannotFindPackage) && ctxt.Compiler == "gccgo" && p.Goroot {
			return p, nil
		}
		return p, rp.error
	}

	if mode&build.FindOnly != 0 {
		return p, pkgerr
	}

	// We need to do a second round of bad file processing.
	var badGoError error
	badGoFiles := make(map[string]bool)
	badGoFile := func(name string, err error) {
		// Keep only the first error, but record every bad file once.
		if badGoError == nil {
			badGoError = err
		}
		if !badGoFiles[name] {
			p.InvalidGoFiles = append(p.InvalidGoFiles, name)
			badGoFiles[name] = true
		}
	}

	var Sfiles []string // files with ".S"(capital S)/.sx(capital s equivalent for case insensitive filesystems)
	var firstFile string
	embedPos := make(map[string][]token.Position)
	testEmbedPos := make(map[string][]token.Position)
	xTestEmbedPos := make(map[string][]token.Position)
	importPos := make(map[string][]token.Position)
	testImportPos := make(map[string][]token.Position)
	xTestImportPos := make(map[string][]token.Position)
	allTags := make(map[string]bool)
	for _, tf := range rp.sourceFiles {
		name := tf.name()
		// Check errors for go files and call badGoFiles to put them in
		// InvalidGoFiles if they do have an error.
		if strings.HasSuffix(name, ".go") {
			if error := tf.error(); error != "" {
				badGoFile(name, errors.New(tf.error()))
				continue
			} else if parseError := tf.parseError(); parseError != "" {
				badGoFile(name, parseErrorFromString(tf.parseError()))
				// Fall through: we still want to list files with parse errors.
			}
		}

		// Decide whether this file is selected by the build configuration:
		// filename GOOS/GOARCH suffix first, then //go:build (authoritative
		// when present), then legacy // +build lines.
		var shouldBuild = true
		if !ctxt.goodOSArchFile(name, allTags) && !ctxt.UseAllFiles {
			shouldBuild = false
		} else if goBuildConstraint := tf.goBuildConstraint(); goBuildConstraint != "" {
			x, err := constraint.Parse(goBuildConstraint)
			if err != nil {
				return p, fmt.Errorf("%s: parsing //go:build line: %v", name, err)
			}
			shouldBuild = ctxt.eval(x, allTags)
		} else if plusBuildConstraints := tf.plusBuildConstraints(); len(plusBuildConstraints) > 0 {
			for _, text := range plusBuildConstraints {
				if x, err := constraint.Parse(text); err == nil {
					if !ctxt.eval(x, allTags) {
						shouldBuild = false
					}
				}
			}
		}

		ext := nameExt(name)
		if !shouldBuild || tf.ignoreFile() {
			if ext == ".go" {
				p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
			} else if fileListForExt(p, ext) != nil {
				p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, name)
			}
			continue
		}

		// Going to save the file. For non-Go files, can stop here.
		switch ext {
		case ".go":
			// keep going
		case ".S", ".sx":
			// special case for cgo, handled at end
			Sfiles = append(Sfiles, name)
			continue
		default:
			if list := fileListForExt(p, ext); list != nil {
				*list = append(*list, name)
			}
			continue
		}

		pkg := tf.pkgName()
		if pkg == "documentation" {
			p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
			continue
		}
		isTest := strings.HasSuffix(name, "_test.go")
		isXTest := false
		if isTest && strings.HasSuffix(tf.pkgName(), "_test") && p.Name != tf.pkgName() {
			isXTest = true
			pkg = pkg[:len(pkg)-len("_test")]
		}

		if !isTest && tf.binaryOnly() {
			p.BinaryOnly = true
		}

		if p.Name == "" {
			p.Name = pkg
			firstFile = name
		} else if pkg != p.Name {
			// TODO(#45999): The choice of p.Name is arbitrary based on file iteration
			// order. Instead of resolving p.Name arbitrarily, we should clear out the
			// existing Name and mark the existing files as also invalid.
			badGoFile(name, &MultiplePackageError{
				Dir:      p.Dir,
				Packages: []string{p.Name, pkg},
				Files:    []string{firstFile, name},
			})
		}
		// Grab the first package comment as docs, provided it is not from a test file.
		if p.Doc == "" && !isTest && !isXTest {
			if synopsis := tf.synopsis(); synopsis != "" {
				p.Doc = synopsis
			}
		}

		// Record Imports and information about cgo.
		isCgo := false
		imports := tf.imports()
		for _, imp := range imports {
			if imp.path == "C" {
				if isTest {
					badGoFile(name, fmt.Errorf("use of cgo in test %s not supported", name))
					continue
				}
				isCgo = true
			}
		}
		if directives := tf.cgoDirectives(); directives != "" {
			if err := ctxt.saveCgo(name, p, directives); err != nil {
				badGoFile(name, err)
			}
		}

		// Route this file's name, imports, embeds, and directives to the
		// destination set for its category (cgo / external test / test / plain).
		var fileList *[]string
		var importMap, embedMap map[string][]token.Position
		var directives *[]build.Directive
		switch {
		case isCgo:
			allTags["cgo"] = true
			if ctxt.CgoEnabled {
				fileList = &p.CgoFiles
				importMap = importPos
				embedMap = embedPos
				directives = &p.Directives
			} else {
				// Ignore Imports and Embeds from cgo files if cgo is disabled.
				fileList = &p.IgnoredGoFiles
			}
		case isXTest:
			fileList = &p.XTestGoFiles
			importMap = xTestImportPos
			embedMap = xTestEmbedPos
			directives = &p.XTestDirectives
		case isTest:
			fileList = &p.TestGoFiles
			importMap = testImportPos
			embedMap = testEmbedPos
			directives = &p.TestDirectives
		default:
			fileList = &p.GoFiles
			importMap = importPos
			embedMap = embedPos
			directives = &p.Directives
		}
		*fileList = append(*fileList, name)
		if importMap != nil {
			for _, imp := range imports {
				importMap[imp.path] = append(importMap[imp.path], imp.position)
			}
		}
		if embedMap != nil {
			for _, e := range tf.embeds() {
				embedMap[e.pattern] = append(embedMap[e.pattern], e.position)
			}
		}
		if directives != nil {
			*directives = append(*directives, tf.directives()...)
		}
	}

	p.EmbedPatterns, p.EmbedPatternPos = cleanDecls(embedPos)
	p.TestEmbedPatterns, p.TestEmbedPatternPos = cleanDecls(testEmbedPos)
	p.XTestEmbedPatterns, p.XTestEmbedPatternPos = cleanDecls(xTestEmbedPos)

	p.Imports, p.ImportPos = cleanDecls(importPos)
	p.TestImports, p.TestImportPos = cleanDecls(testImportPos)
	p.XTestImports, p.XTestImportPos = cleanDecls(xTestImportPos)

	for tag := range allTags {
		p.AllTags = append(p.AllTags, tag)
	}
	sort.Strings(p.AllTags)

	// .S/.sx files are assembled only when the package actually uses cgo;
	// otherwise they are ignored.
	if len(p.CgoFiles) > 0 {
		p.SFiles = append(p.SFiles, Sfiles...)
		sort.Strings(p.SFiles)
	} else {
		p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, Sfiles...)
		sort.Strings(p.IgnoredOtherFiles)
	}

	if badGoError != nil {
		return p, badGoError
	}
	if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
		return p, &build.NoGoError{Dir: p.Dir}
	}
	return p, pkgerr
}
+
+// IsStandardPackage reports whether path is a standard package
+// for the goroot and compiler using the module index if possible,
+// and otherwise falling back to internal/goroot.IsStandardPackage
+func IsStandardPackage(goroot_, compiler, path string) bool {
+ if !enabled || compiler != "gc" {
+ return goroot.IsStandardPackage(goroot_, compiler, path)
+ }
+
+ reldir := filepath.FromSlash(path) // relative dir path in module index for package
+ modroot := filepath.Join(goroot_, "src")
+ if str.HasFilePathPrefix(reldir, "cmd") {
+ reldir = str.TrimFilePathPrefix(reldir, "cmd")
+ modroot = filepath.Join(modroot, "cmd")
+ }
+ if _, err := GetPackage(modroot, filepath.Join(modroot, reldir)); err == nil {
+ // Note that goroot.IsStandardPackage doesn't check that the directory
+ // actually contains any go files-- merely that it exists. GetPackage
+ // returning a nil error is enough for us to know the directory exists.
+ return true
+ } else if errors.Is(err, ErrNotIndexed) {
+ // Fall back because package isn't indexable. (Probably because
+ // a file was modified recently)
+ return goroot.IsStandardPackage(goroot_, compiler, path)
+ }
+ return false
+}
+
+// IsDirWithGoFiles is the equivalent of fsys.IsDirWithGoFiles using the information in the index.
+func (rp *IndexPackage) IsDirWithGoFiles() (_ bool, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("error reading module index: %v", e)
+ }
+ }()
+ for _, sf := range rp.sourceFiles {
+ if strings.HasSuffix(sf.name(), ".go") {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
// ScanDir implements imports.ScanDir using the information in the index.
// It returns the sorted sets of import paths used by the package's build-
// selected non-test and test Go files, or imports.ErrNoGo if no file survives
// the filters. Panics from reading a corrupt index are converted to err.
func (rp *IndexPackage) ScanDir(tags map[string]bool) (sortedImports []string, sortedTestImports []string, err error) {
	// TODO(matloob) dir should eventually be relative to indexed directory
	// TODO(matloob): skip reading raw package and jump straight to data we need?

	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("error reading module index: %v", e)
		}
	}()

	imports_ := make(map[string]bool)
	testImports := make(map[string]bool)
	numFiles := 0

Files:
	for _, sf := range rp.sourceFiles {
		name := sf.name()
		// Skip hidden/underscore files, non-Go files, and files whose
		// GOOS/GOARCH filename suffix doesn't match the requested tags.
		if strings.HasPrefix(name, "_") || strings.HasPrefix(name, ".") || !strings.HasSuffix(name, ".go") || !imports.MatchFile(name, tags) {
			continue
		}

		// The following section exists for backwards compatibility reasons:
		// scanDir ignores files with import "C" when collecting the list
		// of imports unless the "cgo" tag is provided. The following comment
		// is copied from the original.
		//
		// import "C" is implicit requirement of cgo tag.
		// When listing files on the command line (explicitFiles=true)
		// we do not apply build tag filtering but we still do apply
		// cgo filtering, so no explicitFiles check here.
		// Why? Because we always have, and it's not worth breaking
		// that behavior now.
		imps := sf.imports() // TODO(matloob): directly read import paths to avoid the extra strings?
		for _, imp := range imps {
			if imp.path == "C" && !tags["cgo"] && !tags["*"] {
				continue Files
			}
		}

		if !shouldBuild(sf, tags) {
			continue
		}
		numFiles++
		// Test files contribute to testImports, all others to imports_.
		m := imports_
		if strings.HasSuffix(name, "_test.go") {
			m = testImports
		}
		for _, p := range imps {
			m[p.path] = true
		}
	}
	if numFiles == 0 {
		return nil, nil, imports.ErrNoGo
	}
	return keys(imports_), keys(testImports), nil
}
+
// keys returns the keys of m as a sorted slice.
func keys(m map[string]bool) []string {
	out := make([]string, 0, len(m))
	for name := range m {
		out = append(out, name)
	}
	sort.Sort(sort.StringSlice(out))
	return out
}
+
+// implements imports.ShouldBuild in terms of an index sourcefile.
+func shouldBuild(sf *sourceFile, tags map[string]bool) bool {
+ if goBuildConstraint := sf.goBuildConstraint(); goBuildConstraint != "" {
+ x, err := constraint.Parse(goBuildConstraint)
+ if err != nil {
+ return false
+ }
+ return imports.Eval(x, tags, true)
+ }
+
+ plusBuildConstraints := sf.plusBuildConstraints()
+ for _, text := range plusBuildConstraints {
+ if x, err := constraint.Parse(text); err == nil {
+ if !imports.Eval(x, tags, true) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
// IndexPackage holds the information needed to access information in the
// index needed to load a package in a specific directory.
type IndexPackage struct {
	error error  // non-nil if the package could not be read from the index
	dir   string // directory of the package relative to the modroot

	modroot string // absolute path of the module root containing the package

	// Source files
	sourceFiles []*sourceFile
}
+
+var errCannotFindPackage = errors.New("cannot find package")
+
// Package finds the package with the given path (relative to the module root).
// If the package does not exist, Package returns an IndexPackage that will return an
// appropriate error from its methods.
func (m *Module) Package(path string) *IndexPackage {
	defer unprotect(protect(), nil)

	// Binary-search the index's sorted package-directory table for path.
	i, ok := sort.Find(m.n, func(i int) int {
		return strings.Compare(path, m.pkgDir(i))
	})
	if !ok {
		return &IndexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(m.modroot, path))}
	}
	return m.pkg(i)
}
+
+// pkg returns the i'th IndexPackage in m.
+func (m *Module) pkg(i int) *IndexPackage {
+ r := m.d.readAt(m.pkgOff(i))
+ p := new(IndexPackage)
+ if errstr := r.string(); errstr != "" {
+ p.error = errors.New(errstr)
+ }
+ p.dir = r.string()
+ p.sourceFiles = make([]*sourceFile, r.int())
+ for i := range p.sourceFiles {
+ p.sourceFiles[i] = &sourceFile{
+ d: m.d,
+ pos: r.int(),
+ }
+ }
+ p.modroot = m.modroot
+ return p
+}
+
// sourceFile represents the information of a given source file in the module index.
type sourceFile struct {
	d               *decoder    // encoding of this source file
	pos             int         // start of sourceFile encoding in d
	onceReadImports sync.Once   // guards the lazy decode in imports()
	savedImports    []rawImport // saved imports so that they're only read once
}
+
// Offsets for fields in the sourceFile.
// Every fixed field of the encoding is a 4-byte uint32, so the i'th field
// lives at sf.pos + 4*i; the 4*iota below encodes exactly that layout.
// Variable-length data (plus-build strings, imports, embeds, directives)
// follows sourceFileNumPlusBuildConstraints.
const (
	sourceFileError = 4 * iota
	sourceFileParseError
	sourceFileSynopsis
	sourceFileName
	sourceFilePkgName
	sourceFileIgnoreFile
	sourceFileBinaryOnly
	sourceFileCgoDirectives
	sourceFileGoBuildConstraint
	sourceFileNumPlusBuildConstraints
)
+
// error returns the non-parse error string recorded for the file ("" if none).
func (sf *sourceFile) error() string {
	return sf.d.stringAt(sf.pos + sourceFileError)
}
// parseError returns the JSON-encoded parse error for the file ("" if none);
// decode it with parseErrorFromString.
func (sf *sourceFile) parseError() string {
	return sf.d.stringAt(sf.pos + sourceFileParseError)
}
// synopsis returns the doc.Synopsis of the file's package comment ("" if none).
func (sf *sourceFile) synopsis() string {
	return sf.d.stringAt(sf.pos + sourceFileSynopsis)
}
// name returns the file's base name.
func (sf *sourceFile) name() string {
	return sf.d.stringAt(sf.pos + sourceFileName)
}
// pkgName returns the package name declared in the file's package clause.
func (sf *sourceFile) pkgName() string {
	return sf.d.stringAt(sf.pos + sourceFilePkgName)
}
// ignoreFile reports whether the file should always be ignored.
func (sf *sourceFile) ignoreFile() bool {
	return sf.d.boolAt(sf.pos + sourceFileIgnoreFile)
}
// binaryOnly reports whether the file carries a //go:binary-only-package comment.
func (sf *sourceFile) binaryOnly() bool {
	return sf.d.boolAt(sf.pos + sourceFileBinaryOnly)
}
// cgoDirectives returns the newline-joined #cgo lines from the import "C" comment.
func (sf *sourceFile) cgoDirectives() string {
	return sf.d.stringAt(sf.pos + sourceFileCgoDirectives)
}
// goBuildConstraint returns the file's //go:build expression text ("" if none).
func (sf *sourceFile) goBuildConstraint() string {
	return sf.d.stringAt(sf.pos + sourceFileGoBuildConstraint)
}
+
+func (sf *sourceFile) plusBuildConstraints() []string {
+ pos := sf.pos + sourceFileNumPlusBuildConstraints
+ n := sf.d.intAt(pos)
+ pos += 4
+ ret := make([]string, n)
+ for i := 0; i < n; i++ {
+ ret[i] = sf.d.stringAt(pos)
+ pos += 4
+ }
+ return ret
+}
+
// importsOffset returns the offset in d of the file's import section,
// which begins immediately after the plus-build constraint list.
func (sf *sourceFile) importsOffset() int {
	pos := sf.pos + sourceFileNumPlusBuildConstraints
	n := sf.d.intAt(pos)
	// each build constraint is 1 uint32
	return pos + 4 + n*4
}
+
// embedsOffset returns the offset in d of the file's embed section,
// which begins immediately after the import list.
func (sf *sourceFile) embedsOffset() int {
	pos := sf.importsOffset()
	n := sf.d.intAt(pos)
	// each import is 5 uint32s (string + tokpos)
	return pos + 4 + n*(4*5)
}
+
// directivesOffset returns the offset in d of the file's directive section,
// which begins immediately after the embed list.
func (sf *sourceFile) directivesOffset() int {
	pos := sf.embedsOffset()
	n := sf.d.intAt(pos)
	// each embed is 5 uint32s (string + tokpos)
	return pos + 4 + n*(4*5)
}
+
// imports decodes and returns the file's import list. The decode runs at
// most once (guarded by onceReadImports); later calls return the cached slice.
func (sf *sourceFile) imports() []rawImport {
	sf.onceReadImports.Do(func() {
		importsOffset := sf.importsOffset()
		r := sf.d.readAt(importsOffset)
		numImports := r.int()
		ret := make([]rawImport, numImports)
		for i := 0; i < numImports; i++ {
			ret[i] = rawImport{r.string(), r.tokpos()}
		}
		sf.savedImports = ret
	})
	return sf.savedImports
}
+
// embeds decodes and returns the file's //go:embed patterns with their positions.
// Unlike imports, the result is not cached.
func (sf *sourceFile) embeds() []embed {
	embedsOffset := sf.embedsOffset()
	r := sf.d.readAt(embedsOffset)
	numEmbeds := r.int()
	ret := make([]embed, numEmbeds)
	for i := range ret {
		ret[i] = embed{r.string(), r.tokpos()}
	}
	return ret
}
+
// directives decodes and returns the file's //go: directive comments
// with their positions.
func (sf *sourceFile) directives() []build.Directive {
	directivesOffset := sf.directivesOffset()
	r := sf.d.readAt(directivesOffset)
	numDirectives := r.int()
	ret := make([]build.Directive, numDirectives)
	for i := range ret {
		ret[i] = build.Directive{Text: r.string(), Pos: r.tokpos()}
	}
	return ret
}
+
// asString converts b to a string without copying: the string aliases b's
// backing array, so b must not be modified afterwards.
func asString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}
+
// A decoder helps decode the index format.
type decoder struct {
	data []byte // data after header
	str  []byte // string table
}
+
+// intAt returns the int at the given offset in d.data.
+func (d *decoder) intAt(off int) int {
+ if off < 0 || len(d.data)-off < 4 {
+ panic(errCorrupt)
+ }
+ i := binary.LittleEndian.Uint32(d.data[off : off+4])
+ if int32(i)>>31 != 0 {
+ panic(errCorrupt)
+ }
+ return int(i)
+}
+
// boolAt returns the bool at the given offset in d.data
// (encoded as a uint32; nonzero means true).
func (d *decoder) boolAt(off int) bool {
	return d.intAt(off) != 0
}
+
// stringAt returns the string pointed at by the int at the given offset in d.data.
func (d *decoder) stringAt(off int) string {
	return d.stringTableAt(d.intAt(off))
}
+
+// stringTableAt returns the string at the given offset in the string table d.str.
+func (d *decoder) stringTableAt(off int) string {
+ if off < 0 || off >= len(d.str) {
+ panic(errCorrupt)
+ }
+ s := d.str[off:]
+ v, n := binary.Uvarint(s)
+ if n <= 0 || v > uint64(len(s[n:])) {
+ panic(errCorrupt)
+ }
+ return asString(s[n : n+int(v)])
+}
+
// A reader reads sequential fields from a section of the index format,
// advancing pos as it goes.
type reader struct {
	d   *decoder
	pos int
}
+
// readAt returns a reader starting at the given position in d.
func (d *decoder) readAt(pos int) *reader {
	return &reader{d, pos}
}
+
// int reads the next int and advances past its 4 bytes.
func (r *reader) int() int {
	i := r.d.intAt(r.pos)
	r.pos += 4
	return i
}
+
// string reads the next string: a 4-byte string-table offset resolved
// through d.str.
func (r *reader) string() string {
	return r.d.stringTableAt(r.int())
}
+
// bool reads the next bool (a uint32; nonzero means true).
func (r *reader) bool() bool {
	return r.int() != 0
}
+
// tokpos reads the next token.Position: a filename string followed by
// offset, line, and column ints (4 uint32s total after the string ref).
func (r *reader) tokpos() token.Position {
	return token.Position{
		Filename: r.string(),
		Offset:   r.int(),
		Line:     r.int(),
		Column:   r.int(),
	}
}
diff --git a/src/cmd/go/internal/modindex/scan.go b/src/cmd/go/internal/modindex/scan.go
new file mode 100644
index 0000000..6ca73e2
--- /dev/null
+++ b/src/cmd/go/internal/modindex/scan.go
@@ -0,0 +1,290 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/str"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "go/build"
+ "go/doc"
+ "go/scanner"
+ "go/token"
+ "io/fs"
+ "path/filepath"
+ "strings"
+)
+
+// moduleWalkErr returns filepath.SkipDir if the directory isn't relevant
+// when indexing a module or generating a filehash, ErrNotIndexed,
+// if the module shouldn't be indexed, and nil otherwise.
+func moduleWalkErr(root string, path string, info fs.FileInfo, err error) error {
+ if err != nil {
+ return ErrNotIndexed
+ }
+ // stop at module boundaries
+ if info.IsDir() && path != root {
+ if fi, err := fsys.Stat(filepath.Join(path, "go.mod")); err == nil && !fi.IsDir() {
+ return filepath.SkipDir
+ }
+ }
+ if info.Mode()&fs.ModeSymlink != 0 {
+ if target, err := fsys.Stat(path); err == nil && target.IsDir() {
+ // return an error to make the module hash invalid.
+ // Symlink directories in modules are tricky, so we won't index
+ // modules that contain them.
+ // TODO(matloob): perhaps don't return this error if the symlink leads to
+ // a directory with a go.mod file.
+ return ErrNotIndexed
+ }
+ }
+ return nil
+}
+
// indexModule indexes the module at the given directory and returns its
// encoded representation. It returns ErrNotIndexed if the module can't
// be indexed because it contains symlinks.
func indexModule(modroot string) ([]byte, error) {
	fsys.Trace("indexModule", modroot)
	var packages []*rawPackage

	// If the root itself is a symlink to a directory,
	// we want to follow it (see https://go.dev/issue/50807).
	// Add a trailing separator to force that to happen.
	root := str.WithFilePathSeparator(modroot)
	err := fsys.Walk(root, func(path string, info fs.FileInfo, err error) error {
		if err := moduleWalkErr(root, path, info, err); err != nil {
			return err
		}

		// Only directories become package entries.
		if !info.IsDir() {
			return nil
		}
		if !strings.HasPrefix(path, root) {
			panic(fmt.Errorf("path %v in walk doesn't have modroot %v as prefix", path, modroot))
		}
		// rel is the package directory relative to the module root
		// ("" for the root package itself).
		rel := path[len(root):]
		packages = append(packages, importRaw(modroot, rel))
		return nil
	})
	if err != nil {
		return nil, err
	}
	return encodeModuleBytes(packages), nil
}
+
// indexPackage indexes the single package at the given directory and returns
// its encoded representation (a one-package module encoding).
// Unlike indexModule it cannot fail: errors are recorded inside the
// encoded rawPackage rather than returned.
func indexPackage(modroot, pkgdir string) []byte {
	fsys.Trace("indexPackage", pkgdir)
	p := importRaw(modroot, relPath(pkgdir, modroot))
	return encodePackageBytes(p)
}
+
// rawPackage holds the information from each package that's needed to
// fill a build.Package once the context is available.
type rawPackage struct {
	error string // error encountered reading the package directory, if any
	dir   string // directory containing package sources, relative to the module root

	// Source files
	sourceFiles []*rawFile
}
+
// parseError is the JSON-serializable form of a file's parse error.
// Exactly one field is set: ErrorList for scanner.ErrorList errors
// (preserving their structure), ErrorString for everything else.
type parseError struct {
	ErrorList   *scanner.ErrorList
	ErrorString string
}
+
+// parseErrorToString converts the error from parsing the file into a string
+// representation. A nil error is converted to an empty string, and all other
+// errors are converted to a JSON-marshalled parseError struct, with ErrorList
+// set for errors of type scanner.ErrorList, and ErrorString set to the error's
+// string representation for all other errors.
+func parseErrorToString(err error) string {
+ if err == nil {
+ return ""
+ }
+ var p parseError
+ if e, ok := err.(scanner.ErrorList); ok {
+ p.ErrorList = &e
+ } else {
+ p.ErrorString = e.Error()
+ }
+ s, err := json.Marshal(p)
+ if err != nil {
+ panic(err) // This should be impossible because scanner.Error contains only strings and ints.
+ }
+ return string(s)
+}
+
// parseErrorFromString converts a string produced by parseErrorToString back
// to an error. An empty string is converted to a nil error, and all
// other strings are expected to be JSON-marshalled parseError structs.
// The two functions are meant to preserve the structure of an
// error of type scanner.ErrorList in a round trip, but may not preserve the
// structure of other errors.
func parseErrorFromString(s string) error {
	if s == "" {
		return nil
	}
	var p parseError
	if err := json.Unmarshal([]byte(s), &p); err != nil {
		// A malformed entry means the index itself is corrupt; abort rather
		// than return a misleading error.
		base.Fatalf(`go: invalid parse error value in index: %q. This indicates a corrupted index. Run "go clean -cache" to reset the module cache.`, s)
	}
	if p.ErrorList != nil {
		return *p.ErrorList
	}
	return errors.New(p.ErrorString)
}
+
// rawFile is the struct representation of the file holding all
// information in its fields.
type rawFile struct {
	error      string // non-parse error reading the file, if any
	parseError string // parse error, encoded by parseErrorToString

	name                 string
	synopsis             string // doc.Synopsis of package comment... Compute synopsis on all of these?
	pkgName              string
	ignoreFile           bool   // starts with _ or . or should otherwise always be ignored
	binaryOnly           bool   // cannot be rebuilt from source (has //go:binary-only-package comment)
	cgoDirectives        string // the #cgo directive lines in the comment on import "C"
	goBuildConstraint    string
	plusBuildConstraints []string
	imports              []rawImport
	embeds               []embed
	directives           []build.Directive
}
+
// rawImport records one import declaration: its quoted path and
// the source position of the declaration.
type rawImport struct {
	path     string
	position token.Position
}
+
// embed records one //go:embed pattern and the position of its directive.
type embed struct {
	pattern  string
	position token.Position
}
+
// importRaw fills the rawPackage from the package files in srcDir.
// dir is the package's path relative to the modroot.
// Errors are recorded in the returned rawPackage (or per-file in its
// sourceFiles) rather than returned.
func importRaw(modroot, reldir string) *rawPackage {
	p := &rawPackage{
		dir: reldir,
	}

	absdir := filepath.Join(modroot, reldir)

	// We still haven't checked
	// that p.dir directory exists. This is the right time to do that check.
	// We can't do it earlier, because we want to gather partial information for the
	// non-nil *build.Package returned when an error occurs.
	// We need to do this before we return early on FindOnly flag.
	if !isDir(absdir) {
		// package was not found
		p.error = fmt.Errorf("cannot find package in:\n\t%s", absdir).Error()
		return p
	}

	entries, err := fsys.ReadDir(absdir)
	if err != nil {
		p.error = err.Error()
		return p
	}

	fset := token.NewFileSet()
	for _, d := range entries {
		if d.IsDir() {
			continue
		}
		if d.Mode()&fs.ModeSymlink != 0 {
			if isDir(filepath.Join(absdir, d.Name())) {
				// Symlinks to directories are not source files.
				continue
			}
		}

		name := d.Name()
		ext := nameExt(name)

		// Underscore- and dot-prefixed files are never source files.
		if strings.HasPrefix(name, "_") || strings.HasPrefix(name, ".") {
			continue
		}
		info, err := getFileInfo(absdir, name, fset)
		if err == errNonSource {
			// not a source or object file. completely ignore in the index
			continue
		} else if err != nil {
			p.sourceFiles = append(p.sourceFiles, &rawFile{name: name, error: err.Error()})
			continue
		} else if info == nil {
			// nil info with nil error: record the file as always-ignored.
			p.sourceFiles = append(p.sourceFiles, &rawFile{name: name, ignoreFile: true})
			continue
		}
		rf := &rawFile{
			name:                 name,
			goBuildConstraint:    info.goBuildConstraint,
			plusBuildConstraints: info.plusBuildConstraints,
			binaryOnly:           info.binaryOnly,
			directives:           info.directives,
		}
		if info.parsed != nil {
			rf.pkgName = info.parsed.Name.Name
		}

		// Going to save the file. For non-Go files, can stop here.
		p.sourceFiles = append(p.sourceFiles, rf)
		if ext != ".go" {
			continue
		}

		if info.parseErr != nil {
			rf.parseError = parseErrorToString(info.parseErr)
			// Fall through: we might still have a partial AST in info.Parsed,
			// and we want to list files with parse errors anyway.
		}

		if info.parsed != nil && info.parsed.Doc != nil {
			rf.synopsis = doc.Synopsis(info.parsed.Doc.Text())
		}

		// Collect imports; the comment on import "C" carries #cgo directives.
		var cgoDirectives []string
		for _, imp := range info.imports {
			if imp.path == "C" {
				cgoDirectives = append(cgoDirectives, extractCgoDirectives(imp.doc.Text())...)
			}
			rf.imports = append(rf.imports, rawImport{path: imp.path, position: fset.Position(imp.pos)})
		}
		rf.cgoDirectives = strings.Join(cgoDirectives, "\n")
		for _, emb := range info.embeds {
			rf.embeds = append(rf.embeds, embed{emb.pattern, emb.pos})
		}

	}
	return p
}
+
// extractCgoDirectives filters only the lines containing #cgo directives from
// the input, which is the comment on import "C". A directive line looks like
//
//	#cgo [GOOS/GOARCH...] LDFLAGS: stuff
//
// i.e. "#cgo" followed by a space or tab. Returned lines are whitespace-trimmed.
func extractCgoDirectives(doc string) []string {
	var directives []string
	for _, raw := range strings.Split(doc, "\n") {
		trimmed := strings.TrimSpace(raw)
		if !strings.HasPrefix(trimmed, "#cgo") {
			continue
		}
		rest := trimmed[len("#cgo"):]
		// "#cgo" must be followed by a space or tab, not end-of-line
		// or another character.
		if rest == "" || (rest[0] != ' ' && rest[0] != '\t') {
			continue
		}
		directives = append(directives, trimmed)
	}
	return directives
}
diff --git a/src/cmd/go/internal/modindex/syslist.go b/src/cmd/go/internal/modindex/syslist.go
new file mode 100644
index 0000000..41adcc5
--- /dev/null
+++ b/src/cmd/go/internal/modindex/syslist.go
@@ -0,0 +1,78 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a lightly modified copy go/build/syslist_test.go.
+
+package modindex
+
// knownOS is the list of past, present, and future known GOOS values.
// Do not remove from this list, as it is used for filename matching
// (e.g. file_linux.go is only built on linux).
// If you add an entry to this list, look at unixOS, below.
var knownOS = map[string]bool{
	"aix":       true,
	"android":   true,
	"darwin":    true,
	"dragonfly": true,
	"freebsd":   true,
	"hurd":      true,
	"illumos":   true,
	"ios":       true,
	"js":        true,
	"linux":     true,
	"nacl":      true,
	"netbsd":    true,
	"openbsd":   true,
	"plan9":     true,
	"solaris":   true,
	"wasip1":    true,
	"windows":   true,
	"zos":       true,
}
+
// unixOS is the set of GOOS values matched by the "unix" build tag.
// This is not used for filename matching.
// This list also appears in cmd/dist/build.go; keep the two in sync.
var unixOS = map[string]bool{
	"aix":       true,
	"android":   true,
	"darwin":    true,
	"dragonfly": true,
	"freebsd":   true,
	"hurd":      true,
	"illumos":   true,
	"ios":       true,
	"linux":     true,
	"netbsd":    true,
	"openbsd":   true,
	"solaris":   true,
}
+
// knownArch is the list of past, present, and future known GOARCH values.
// Do not remove from this list, as it is used for filename matching
// (e.g. file_amd64.go is only built for amd64).
var knownArch = map[string]bool{
	"386":         true,
	"amd64":       true,
	"amd64p32":    true,
	"arm":         true,
	"armbe":       true,
	"arm64":       true,
	"arm64be":     true,
	"loong64":     true,
	"mips":        true,
	"mipsle":      true,
	"mips64":      true,
	"mips64le":    true,
	"mips64p32":   true,
	"mips64p32le": true,
	"ppc":         true,
	"ppc64":       true,
	"ppc64le":     true,
	"riscv":       true,
	"riscv64":     true,
	"s390":        true,
	"s390x":       true,
	"sparc":       true,
	"sparc64":     true,
	"wasm":        true,
}
diff --git a/src/cmd/go/internal/modindex/syslist_test.go b/src/cmd/go/internal/modindex/syslist_test.go
new file mode 100644
index 0000000..1a61562
--- /dev/null
+++ b/src/cmd/go/internal/modindex/syslist_test.go
@@ -0,0 +1,65 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a lightly modified copy go/build/syslist_test.go.
+
+package modindex
+
+import (
+ "go/build"
+ "runtime"
+ "testing"
+)
+
// The host GOOS/GOARCH plus one deliberately different value of each,
// used to exercise both matching and non-matching filename suffixes.
var (
	thisOS    = runtime.GOOS
	thisArch  = runtime.GOARCH
	otherOS   = anotherOS()
	otherArch = anotherArch()
)
+
+func anotherOS() string {
+ if thisOS != "darwin" && thisOS != "ios" {
+ return "darwin"
+ }
+ return "linux"
+}
+
+func anotherArch() string {
+ if thisArch != "amd64" {
+ return "amd64"
+ }
+ return "386"
+}
+
// GoodFileTest pairs a candidate filename with whether goodOSArchFile
// should accept it for the current GOOS/GOARCH.
type GoodFileTest struct {
	name   string
	result bool
}
+
// tests covers the filename-suffix matrix: no suffix, arch-only, OS-only,
// OS_arch combinations, and a "foo" segment that is not an OS/arch name.
var tests = []GoodFileTest{
	{"file.go", true},
	{"file.c", true},
	{"file_foo.go", true},
	{"file_" + thisArch + ".go", true},
	{"file_" + otherArch + ".go", false},
	{"file_" + thisOS + ".go", true},
	{"file_" + otherOS + ".go", false},
	{"file_" + thisOS + "_" + thisArch + ".go", true},
	{"file_" + otherOS + "_" + thisArch + ".go", false},
	{"file_" + thisOS + "_" + otherArch + ".go", false},
	{"file_" + otherOS + "_" + otherArch + ".go", false},
	{"file_foo_" + thisArch + ".go", true},
	{"file_foo_" + otherArch + ".go", false},
	{"file_" + thisOS + ".c", true},
	{"file_" + otherOS + ".c", false},
}
+
// TestGoodOSArch checks goodOSArchFile against the filename matrix above
// using the default build context.
func TestGoodOSArch(t *testing.T) {
	for _, test := range tests {
		if (*Context)(&build.Default).goodOSArchFile(test.name, make(map[string]bool)) != test.result {
			t.Fatalf("goodOSArchFile(%q) != %v", test.name, test.result)
		}
	}
}
diff --git a/src/cmd/go/internal/modindex/testdata/ignore_non_source/a.syso b/src/cmd/go/internal/modindex/testdata/ignore_non_source/a.syso
new file mode 100644
index 0000000..9527d05
--- /dev/null
+++ b/src/cmd/go/internal/modindex/testdata/ignore_non_source/a.syso
@@ -0,0 +1 @@
+package ignore_non_source
diff --git a/src/cmd/go/internal/modindex/testdata/ignore_non_source/b.go b/src/cmd/go/internal/modindex/testdata/ignore_non_source/b.go
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/cmd/go/internal/modindex/testdata/ignore_non_source/b.go
diff --git a/src/cmd/go/internal/modindex/testdata/ignore_non_source/bar.json b/src/cmd/go/internal/modindex/testdata/ignore_non_source/bar.json
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/cmd/go/internal/modindex/testdata/ignore_non_source/bar.json
diff --git a/src/cmd/go/internal/modindex/testdata/ignore_non_source/baz.log b/src/cmd/go/internal/modindex/testdata/ignore_non_source/baz.log
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/cmd/go/internal/modindex/testdata/ignore_non_source/baz.log
diff --git a/src/cmd/go/internal/modindex/testdata/ignore_non_source/c.c b/src/cmd/go/internal/modindex/testdata/ignore_non_source/c.c
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/cmd/go/internal/modindex/testdata/ignore_non_source/c.c
diff --git a/src/cmd/go/internal/modindex/write.go b/src/cmd/go/internal/modindex/write.go
new file mode 100644
index 0000000..cd18ad9
--- /dev/null
+++ b/src/cmd/go/internal/modindex/write.go
@@ -0,0 +1,164 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "cmd/go/internal/base"
+ "encoding/binary"
+ "go/token"
+ "sort"
+)
+
+const indexVersion = "go index v2" // 11 bytes (plus \n), to align uint32s in index
+
+// encodeModuleBytes produces the encoded representation of the module index.
+// encodeModuleBytes may modify the packages slice.
+func encodeModuleBytes(packages []*rawPackage) []byte {
+	enc := newEncoder()
+	enc.Bytes([]byte(indexVersion + "\n"))
+	// Reserve a uint32 slot for the string table offset; it is
+	// backfilled once all package data has been written.
+	stringTableOffsetPos := enc.Pos()
+	enc.Uint32(0)
+	sort.Slice(packages, func(i, j int) bool {
+		return packages[i].dir < packages[j].dir
+	})
+	enc.Int(len(packages))
+	// First pass: a (dir, offset) pair per package, offsets zero for now.
+	packagesPos := enc.Pos()
+	for _, pkg := range packages {
+		enc.String(pkg.dir)
+		enc.Int(0)
+	}
+	// Second pass: encode each package and backfill its offset slot.
+	for i, pkg := range packages {
+		enc.IntAt(enc.Pos(), packagesPos+8*i+4)
+		encodePackage(enc, pkg)
+	}
+	enc.IntAt(enc.Pos(), stringTableOffsetPos)
+	enc.Bytes(enc.stringTable)
+	enc.Bytes([]byte{0xFF}) // end of string table marker
+	return enc.b
+}
+
+// encodePackageBytes returns the index encoding of a single package,
+// represented as a one-package module.
+func encodePackageBytes(p *rawPackage) []byte {
+	pkgs := []*rawPackage{p}
+	return encodeModuleBytes(pkgs)
+}
+
+// encodePackage writes the encoding of p: its error, directory, and an
+// offset table followed by the contents of each of its source files.
+func encodePackage(e *encoder, p *rawPackage) {
+	e.String(p.error)
+	e.String(p.dir)
+	n := len(p.sourceFiles)
+	e.Int(n) // number of source files
+	// Reserve one uint32 offset slot per file; each is backfilled
+	// just before the corresponding file is encoded.
+	offsetsPos := e.Pos()
+	for i := 0; i < n; i++ {
+		e.Int(0)
+	}
+	for i, f := range p.sourceFiles {
+		e.IntAt(e.Pos(), offsetsPos+4*i)
+		encodeFile(e, f)
+	}
+}
+
+// encodeFile writes the encoding of a single source file: its string
+// fields and flags, then the length-prefixed lists of build
+// constraints, imports, embeds, and directives.
+func encodeFile(e *encoder, f *rawFile) {
+	for _, s := range []string{f.error, f.parseError, f.synopsis, f.name, f.pkgName} {
+		e.String(s)
+	}
+	e.Bool(f.ignoreFile)
+	e.Bool(f.binaryOnly)
+	e.String(f.cgoDirectives)
+	e.String(f.goBuildConstraint)
+
+	e.Int(len(f.plusBuildConstraints))
+	for _, c := range f.plusBuildConstraints {
+		e.String(c)
+	}
+
+	e.Int(len(f.imports))
+	for _, imp := range f.imports {
+		e.String(imp.path)
+		e.Position(imp.position)
+	}
+
+	e.Int(len(f.embeds))
+	for _, em := range f.embeds {
+		e.String(em.pattern)
+		e.Position(em.position)
+	}
+
+	e.Int(len(f.directives))
+	for _, dir := range f.directives {
+		e.String(dir.Text)
+		e.Position(dir.Pos)
+	}
+}
+
+// newEncoder returns an encoder whose string table already contains
+// the empty string at offset 0, so "" always encodes as offset zero.
+func newEncoder() *encoder {
+	enc := &encoder{strings: map[string]int{}}
+	enc.strings[""] = 0
+	enc.stringTable = append(enc.stringTable, 0)
+	return enc
+}
+
+// Position writes position as its filename (interned in the string
+// table) followed by its offset, line, and column.
+func (e *encoder) Position(position token.Position) {
+	e.String(position.Filename)
+	for _, n := range []int{position.Offset, position.Line, position.Column} {
+		e.Int(n)
+	}
+}
+
+// An encoder accumulates the raw index bytes in b, interning every
+// string into stringTable; strings maps each interned string to its
+// offset in that table so duplicates are written once.
+type encoder struct {
+	b           []byte         // encoded index output
+	stringTable []byte         // uvarint-length-prefixed string data
+	strings     map[string]int // string -> offset in stringTable
+}
+
+// Pos reports the current length of the encoded output, i.e. the
+// offset at which the next write will land.
+func (e *encoder) Pos() int {
+	return len(e.b)
+}
+
+// Bytes appends b verbatim to the encoded output.
+func (e *encoder) Bytes(b []byte) {
+	e.b = append(e.b, b...)
+}
+
+// String writes the string-table offset of s to the output, adding s
+// to the table (uvarint length followed by its bytes) on first use.
+func (e *encoder) String(s string) {
+	off, seen := e.strings[s]
+	if !seen {
+		off = len(e.stringTable)
+		e.strings[s] = off
+		e.stringTable = binary.AppendUvarint(e.stringTable, uint64(len(s)))
+		e.stringTable = append(e.stringTable, s...)
+	}
+	e.Int(off)
+}
+
+func (e *encoder) Bool(b bool) {
+ if b {
+ e.Uint32(1)
+ } else {
+ e.Uint32(0)
+ }
+}
+
+// Uint32 appends n to the output in little-endian byte order.
+func (e *encoder) Uint32(n uint32) {
+	e.b = binary.LittleEndian.AppendUint32(e.b, n)
+}
+
+// Int encodes n. Note that all ints are written to the index as uint32s,
+// and to avoid problems on 32-bit systems we require fitting into a 32-bit int.
+func (e *encoder) Int(n int) {
+	if n != int(int32(n)) || n < 0 {
+		base.Fatalf("go: attempting to write an int to the index that overflows int32")
+	}
+	e.Uint32(uint32(n))
+}
+
+// IntAt overwrites the four bytes at offset at with n, backfilling an
+// offset slot that was reserved earlier in the output. Like Int, the
+// value must fit in an int32.
+func (e *encoder) IntAt(n int, at int) {
+	if n != int(int32(n)) || n < 0 {
+		base.Fatalf("go: attempting to write an int to the index that overflows int32")
+	}
+	binary.LittleEndian.PutUint32(e.b[at:], uint32(n))
+}