author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-16 19:23:18 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-16 19:23:18 +0000
commit    43a123c1ae6613b3efeed291fa552ecd909d3acf (patch)
tree      fd92518b7024bc74031f78a1cf9e454b65e73665 /src/cmd/covdata
parent    Initial commit. (diff)
Adding upstream version 1.20.14.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/cmd/covdata')
-rw-r--r--  src/cmd/covdata/argsmerge.go           |  64
-rw-r--r--  src/cmd/covdata/covdata.go             | 224
-rw-r--r--  src/cmd/covdata/doc.go                 |  84
-rw-r--r--  src/cmd/covdata/dump.go                | 349
-rw-r--r--  src/cmd/covdata/export_test.go         |   7
-rw-r--r--  src/cmd/covdata/merge.go               | 109
-rw-r--r--  src/cmd/covdata/metamerge.go           | 433
-rw-r--r--  src/cmd/covdata/subtractintersect.go   | 196
-rw-r--r--  src/cmd/covdata/testdata/dep.go        |  17
-rw-r--r--  src/cmd/covdata/testdata/prog1.go      |  48
-rw-r--r--  src/cmd/covdata/testdata/prog2.go      |  29
-rw-r--r--  src/cmd/covdata/tool_test.go           | 944
12 files changed, 2504 insertions, 0 deletions
diff --git a/src/cmd/covdata/argsmerge.go b/src/cmd/covdata/argsmerge.go
new file mode 100644
index 0000000..8815a4a
--- /dev/null
+++ b/src/cmd/covdata/argsmerge.go
@@ -0,0 +1,64 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+type argvalues struct {
+ osargs []string
+ goos string
+ goarch string
+}
+
+type argstate struct {
+ state argvalues
+ initialized bool
+}
+
+func ssleq(s1 []string, s2 []string) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *argstate) Merge(state argvalues) {
+ if !a.initialized {
+ a.state = state
+ a.initialized = true
+ return
+ }
+ if !ssleq(a.state.osargs, state.osargs) {
+ a.state.osargs = nil
+ }
+ if state.goos != a.state.goos {
+ a.state.goos = ""
+ }
+ if state.goarch != a.state.goarch {
+ a.state.goarch = ""
+ }
+}
+
+func (a *argstate) ArgsSummary() map[string]string {
+ m := make(map[string]string)
+ if len(a.state.osargs) != 0 {
+ m["argc"] = fmt.Sprintf("%d", len(a.state.osargs))
+ for k, a := range a.state.osargs {
+ m[fmt.Sprintf("argv%d", k)] = a
+ }
+ }
+ if a.state.goos != "" {
+ m["GOOS"] = a.state.goos
+ }
+ if a.state.goarch != "" {
+ m["GOARCH"] = a.state.goarch
+ }
+ return m
+}
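The Merge/ArgsSummary pair above implements a keep-only-if-identical policy across counter data files: os.Args, GOOS, and GOARCH survive into the summary only when every input agrees on them. A minimal sketch of that behavior, assuming it lives in the same package (exampleArgMerge and its argument values are hypothetical):

package main

import "fmt"

// exampleArgMerge is a hypothetical driver (not part of the tool)
// showing the policy implemented by argstate.Merge: fields that agree
// across every counter data file are kept, fields that differ are
// dropped from the summary.
func exampleArgMerge() {
	var a argstate
	a.Merge(argvalues{osargs: []string{"./myapp", "-x"}, goos: "linux", goarch: "amd64"})
	a.Merge(argvalues{osargs: []string{"./myapp", "-x"}, goos: "linux", goarch: "arm64"})

	// os.Args and GOOS matched across both runs; GOARCH did not, so it
	// is omitted. Expected summary: argc=2, argv0=./myapp, argv1=-x, GOOS=linux.
	fmt.Println(a.ArgsSummary())
}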
diff --git a/src/cmd/covdata/covdata.go b/src/cmd/covdata/covdata.go
new file mode 100644
index 0000000..95bc30d
--- /dev/null
+++ b/src/cmd/covdata/covdata.go
@@ -0,0 +1,224 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/cov"
+ "cmd/internal/pkgpattern"
+ "flag"
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "strings"
+)
+
+var verbflag = flag.Int("v", 0, "Verbose trace output level")
+var hflag = flag.Bool("h", false, "Panic on fatal errors (for stack trace)")
+var hwflag = flag.Bool("hw", false, "Panic on warnings (for stack trace)")
+var indirsflag = flag.String("i", "", "Input dirs to examine (comma separated)")
+var pkgpatflag = flag.String("pkg", "", "Restrict output to package(s) matching specified package pattern.")
+var cpuprofileflag = flag.String("cpuprofile", "", "Write CPU profile to specified file")
+var memprofileflag = flag.String("memprofile", "", "Write memory profile to specified file")
+var memprofilerateflag = flag.Int("memprofilerate", 0, "Set memprofile sampling rate to value")
+
+var matchpkg func(name string) bool
+
+var atExitFuncs []func()
+
+func atExit(f func()) {
+ atExitFuncs = append(atExitFuncs, f)
+}
+
+func Exit(code int) {
+ for i := len(atExitFuncs) - 1; i >= 0; i-- {
+ f := atExitFuncs[i]
+ atExitFuncs = atExitFuncs[:i]
+ f()
+ }
+ os.Exit(code)
+}
+
+func dbgtrace(vlevel int, s string, a ...interface{}) {
+ if *verbflag >= vlevel {
+ fmt.Printf(s, a...)
+ fmt.Printf("\n")
+ }
+}
+
+func warn(s string, a ...interface{}) {
+ fmt.Fprintf(os.Stderr, "warning: ")
+ fmt.Fprintf(os.Stderr, s, a...)
+ fmt.Fprintf(os.Stderr, "\n")
+ if *hwflag {
+ panic("unexpected warning")
+ }
+}
+
+func fatal(s string, a ...interface{}) {
+ fmt.Fprintf(os.Stderr, "error: ")
+ fmt.Fprintf(os.Stderr, s, a...)
+ fmt.Fprintf(os.Stderr, "\n")
+ if *hflag {
+ panic("fatal error")
+ }
+ Exit(1)
+}
+
+func usage(msg string) {
+ if len(msg) > 0 {
+ fmt.Fprintf(os.Stderr, "error: %s\n", msg)
+ }
+ fmt.Fprintf(os.Stderr, "usage: go tool covdata [command]\n")
+ fmt.Fprintf(os.Stderr, `
+Commands are:
+
+textfmt convert coverage data to textual format
+percent output total percentage of statements covered
+pkglist output list of package import paths
+func output coverage profile information for each function
+merge merge data files together
+subtract subtract one set of data files from another set
+intersect generate intersection of two sets of data files
+debugdump dump data in human-readable format for debugging purposes
+`)
+ fmt.Fprintf(os.Stderr, "\nFor help on a specific subcommand, try:\n")
+ fmt.Fprintf(os.Stderr, "\ngo tool covdata <cmd> -help\n")
+ Exit(2)
+}
+
+type covOperation interface {
+ cov.CovDataVisitor
+ Setup()
+ Usage(string)
+}
+
+// Modes of operation.
+const (
+ funcMode = "func"
+ mergeMode = "merge"
+ intersectMode = "intersect"
+ subtractMode = "subtract"
+ percentMode = "percent"
+ pkglistMode = "pkglist"
+ textfmtMode = "textfmt"
+ debugDumpMode = "debugdump"
+)
+
+func main() {
+ // First argument should be mode/subcommand.
+ if len(os.Args) < 2 {
+ usage("missing command selector")
+ }
+
+ // Select mode
+ var op covOperation
+ cmd := os.Args[1]
+ switch cmd {
+ case mergeMode:
+ op = makeMergeOp()
+ case debugDumpMode:
+ op = makeDumpOp(debugDumpMode)
+ case textfmtMode:
+ op = makeDumpOp(textfmtMode)
+ case percentMode:
+ op = makeDumpOp(percentMode)
+ case funcMode:
+ op = makeDumpOp(funcMode)
+ case pkglistMode:
+ op = makeDumpOp(pkglistMode)
+ case subtractMode:
+ op = makeSubtractIntersectOp(subtractMode)
+ case intersectMode:
+ op = makeSubtractIntersectOp(intersectMode)
+ default:
+ usage(fmt.Sprintf("unknown command selector %q", cmd))
+ }
+
+ // Edit out command selector, then parse flags.
+ os.Args = append(os.Args[:1], os.Args[2:]...)
+ flag.Usage = func() {
+ op.Usage("")
+ }
+ flag.Parse()
+
+ // Mode-independent flag setup
+ dbgtrace(1, "starting mode-independent setup")
+ if flag.NArg() != 0 {
+ op.Usage("unknown extra arguments")
+ }
+ if *pkgpatflag != "" {
+ pats := strings.Split(*pkgpatflag, ",")
+ matchers := []func(name string) bool{}
+ for _, p := range pats {
+ if p == "" {
+ continue
+ }
+ f := pkgpattern.MatchSimplePattern(p)
+ matchers = append(matchers, f)
+ }
+ matchpkg = func(name string) bool {
+ for _, f := range matchers {
+ if f(name) {
+ return true
+ }
+ }
+ return false
+ }
+ }
+ if *cpuprofileflag != "" {
+ f, err := os.Create(*cpuprofileflag)
+ if err != nil {
+ fatal("%v", err)
+ }
+ if err := pprof.StartCPUProfile(f); err != nil {
+ fatal("%v", err)
+ }
+ atExit(pprof.StopCPUProfile)
+ }
+ if *memprofileflag != "" {
+ if *memprofilerateflag != 0 {
+ runtime.MemProfileRate = *memprofilerateflag
+ }
+ f, err := os.Create(*memprofileflag)
+ if err != nil {
+ fatal("%v", err)
+ }
+ atExit(func() {
+ runtime.GC()
+ const writeLegacyFormat = 1
+ if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
+ fatal("%v", err)
+ }
+ })
+ } else {
+ // Not doing memory profiling; disable it entirely.
+ runtime.MemProfileRate = 0
+ }
+
+ // Mode-dependent setup.
+ op.Setup()
+
+ // ... off and running now.
+ dbgtrace(1, "starting perform")
+
+ indirs := strings.Split(*indirsflag, ",")
+ vis := cov.CovDataVisitor(op)
+ var flags cov.CovDataReaderFlags
+ if *hflag {
+ flags |= cov.PanicOnError
+ }
+ if *hwflag {
+ flags |= cov.PanicOnWarning
+ }
+ reader := cov.MakeCovDataReader(vis, indirs, *verbflag, flags, matchpkg)
+ st := 0
+ if err := reader.Visit(); err != nil {
+ fmt.Fprintf(os.Stderr, "error: %v\n", err)
+ st = 1
+ }
+ dbgtrace(1, "leaving main")
+ Exit(st)
+}
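Because fatal errors and normal termination go through Exit rather than returning from main, cleanup such as pprof.StopCPUProfile and the heap-profile flush is registered with atExit above and runs in LIFO order. A small hypothetical sketch of that ordering (exampleAtExit is not part of the tool):

package main

import "fmt"

// exampleAtExit illustrates the atExit/Exit pattern used by covdata:
// registered callbacks run last-in-first-out just before the process
// exits, which is how profile writers get flushed even though Exit
// bypasses normal defer unwinding.
func exampleAtExit() {
	atExit(func() { fmt.Println("registered first, runs last") })
	atExit(func() { fmt.Println("registered last, runs first") })
	Exit(0) // prints the second message, then the first, then exits
}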
diff --git a/src/cmd/covdata/doc.go b/src/cmd/covdata/doc.go
new file mode 100644
index 0000000..924a742
--- /dev/null
+++ b/src/cmd/covdata/doc.go
@@ -0,0 +1,84 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+//
+// Covdata is a program for manipulating and generating reports
+// from 2nd-generation coverage testing output files, those produced
+// from running applications or integration tests. E.g.
+//
+// $ mkdir ./profiledir
+// $ go build -cover -o myapp.exe .
+// $ GOCOVERDIR=./profiledir ./myapp.exe <arguments>
+// $ ls ./profiledir
+// covcounters.cce1b350af34b6d0fb59cc1725f0ee27.821598.1663006712821344241
+// covmeta.cce1b350af34b6d0fb59cc1725f0ee27
+// $
+//
+// Run covdata via "go tool covdata <mode>", where 'mode' is a subcommand
+// selecting a specific reporting, merging, or data manipulation operation.
+// Descriptions of the various modes are below (run "go tool covdata <mode>
+// -help" for specifics on the usage of a given mode):
+//
+// 1. Report percent of statements covered in each profiled package
+//
+// $ go tool covdata percent -i=profiledir
+// cov-example/p coverage: 41.1% of statements
+// main coverage: 87.5% of statements
+// $
+//
+//
+// 2. Report import paths of packages profiled
+//
+// $ go tool covdata pkglist -i=profiledir
+// cov-example/p
+// main
+// $
+//
+// 3. Report percent statements covered by function:
+//
+// $ go tool covdata func -i=profiledir
+// cov-example/p/p.go:12: emptyFn 0.0%
+// cov-example/p/p.go:32: Small 100.0%
+// cov-example/p/p.go:47: Medium 90.9%
+// ...
+// $
+//
+// 4. Convert coverage data to legacy textual format:
+//
+// $ go tool covdata textfmt -i=profiledir -o=cov.txt
+// $ head cov.txt
+// mode: set
+// cov-example/p/p.go:12.22,13.2 0 0
+// cov-example/p/p.go:15.31,16.2 1 0
+// cov-example/p/p.go:16.3,18.3 0 0
+// cov-example/p/p.go:19.3,21.3 0 0
+// ...
+// $ go tool cover -html=cov.txt
+// $
+//
+// 5. Merge profiles together:
+//
+// $ go tool covdata merge -i=indir1,indir2 -o=outdir -modpaths=github.com/go-delve/delve
+// $
+//
+// 6. Subtract one profile from another
+//
+// $ go tool covdata subtract -i=indir1,indir2 -o=outdir
+// $
+//
+// 7. Intersect profiles
+//
+// $ go tool covdata intersect -i=indir1,indir2 -o=outdir
+// $
+//
+// 8. Dump a profile for debugging purposes.
+//
+// $ go tool covdata debugdump -i=indir
+// <human readable output>
+// $
+//
+*/
+
+package main
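The shell workflow in the doc comment can also be driven from Go when profiles need to be collected programmatically. A sketch using os/exec (the binary name and profile directory are hypothetical; only the GOCOVERDIR convention comes from the documentation above):

package main

import (
	"os"
	"os/exec"
)

// runInstrumented runs a coverage-instrumented binary with GOCOVERDIR
// pointing at a profile directory, so covcounters.* and covmeta.*
// files land there for later processing with "go tool covdata".
func runInstrumented() error {
	if err := os.MkdirAll("./profiledir", 0777); err != nil {
		return err
	}
	cmd := exec.Command("./myapp.exe", "arg1", "arg2")
	cmd.Env = append(os.Environ(), "GOCOVERDIR=./profiledir")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}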
diff --git a/src/cmd/covdata/dump.go b/src/cmd/covdata/dump.go
new file mode 100644
index 0000000..6226717
--- /dev/null
+++ b/src/cmd/covdata/dump.go
@@ -0,0 +1,349 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This file contains functions and APIs to support the "go tool
+// covdata" sub-commands that relate to dumping text format summaries
+// and reports: "pkglist", "func", "debugdump", "percent", and
+// "textfmt".
+
+import (
+ "flag"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/calloc"
+ "internal/coverage/cformat"
+ "internal/coverage/cmerge"
+ "internal/coverage/decodecounter"
+ "internal/coverage/decodemeta"
+ "internal/coverage/pods"
+ "os"
+ "sort"
+ "strings"
+)
+
+var textfmtoutflag *string
+var liveflag *bool
+
+func makeDumpOp(cmd string) covOperation {
+ if cmd == textfmtMode || cmd == percentMode {
+ textfmtoutflag = flag.String("o", "", "Output text format to file")
+ }
+ if cmd == debugDumpMode {
+ liveflag = flag.Bool("live", false, "Select only live (executed) functions for dump output.")
+ }
+ d := &dstate{
+ cmd: cmd,
+ cm: &cmerge.Merger{},
+ }
+ if d.cmd == pkglistMode {
+ d.pkgpaths = make(map[string]struct{})
+ }
+ return d
+}
+
+// dstate encapsulates state and provides methods for implementing
+// various dump operations. Specifically, dstate implements the
+// CovDataVisitor interface, and is designed to be used in
+// concert with the CovDataReader utility, which abstracts away most
+// of the grubby details of reading coverage data files.
+type dstate struct {
+ // for batch allocation of counter arrays
+ calloc.BatchCounterAlloc
+
+ // counter merging state + methods
+ cm *cmerge.Merger
+
+ // counter data formatting helper
+ format *cformat.Formatter
+
+ // 'mm' stores values read from a counter data file; the pkfunc key
+ // is a pkgid/funcid pair that uniquely identifies a function in
+ // the instrumented application.
+ mm map[pkfunc]decodecounter.FuncPayload
+
+ // pkm maps package ID to the number of functions in the package
+ // with that ID. It is used to report inconsistencies in counter
+ // data (for example, a counter data entry with pkgid=N funcid=10
+ // where package N only has 3 functions).
+ pkm map[uint32]uint32
+
+ // pkgpaths records all package import paths encountered while
+ // visiting coverage data files (used to implement the "pkglist"
+ // subcommand).
+ pkgpaths map[string]struct{}
+
+ // Current package name and import path.
+ pkgName string
+ pkgImportPath string
+
+ // Module path for current package (may be empty).
+ modulePath string
+
+ // Dump subcommand (ex: "textfmt", "debugdump", etc).
+ cmd string
+
+ // File to which we will write text format output, if enabled.
+ textfmtoutf *os.File
+
+ // Total and covered statements (used by "debugdump" subcommand).
+ totalStmts, coveredStmts int
+
+ // Records whether preamble has been emitted for current pkg
+ // (used when in "debugdump" mode)
+ preambleEmitted bool
+}
+
+func (d *dstate) Usage(msg string) {
+ if len(msg) > 0 {
+ fmt.Fprintf(os.Stderr, "error: %s\n", msg)
+ }
+ fmt.Fprintf(os.Stderr, "usage: go tool covdata %s -i=<directories>\n\n", d.cmd)
+ flag.PrintDefaults()
+ fmt.Fprintf(os.Stderr, "\nExamples:\n\n")
+ switch d.cmd {
+ case pkglistMode:
+ fmt.Fprintf(os.Stderr, " go tool covdata pkglist -i=dir1,dir2\n\n")
+ fmt.Fprintf(os.Stderr, " \treads coverage data files from dir1+dirs2\n")
+ fmt.Fprintf(os.Stderr, " \tand writes out a list of the import paths\n")
+ fmt.Fprintf(os.Stderr, " \tof all compiled packages.\n")
+ case textfmtMode:
+ fmt.Fprintf(os.Stderr, " go tool covdata textfmt -i=dir1,dir2 -o=out.txt\n\n")
+ fmt.Fprintf(os.Stderr, " \tmerges data from input directories dir1+dir2\n")
+ fmt.Fprintf(os.Stderr, " \tand emits text format into file 'out.txt'\n")
+ case percentMode:
+ fmt.Fprintf(os.Stderr, " go tool covdata percent -i=dir1,dir2\n\n")
+ fmt.Fprintf(os.Stderr, " \tmerges data from input directories dir1+dir2\n")
+ fmt.Fprintf(os.Stderr, " \tand emits percentage of statements covered\n\n")
+ case funcMode:
+ fmt.Fprintf(os.Stderr, " go tool covdata func -i=dir1,dir2\n\n")
+ fmt.Fprintf(os.Stderr, " \treads coverage data files from dir1+dirs2\n")
+ fmt.Fprintf(os.Stderr, " \tand writes out coverage profile data for\n")
+ fmt.Fprintf(os.Stderr, " \teach function.\n")
+ case debugDumpMode:
+ fmt.Fprintf(os.Stderr, " go tool covdata debugdump [flags] -i=dir1,dir2\n\n")
+ fmt.Fprintf(os.Stderr, " \treads coverage data from dir1+dir2 and dumps\n")
+ fmt.Fprintf(os.Stderr, " \tcontents in human-readable form to stdout, for\n")
+ fmt.Fprintf(os.Stderr, " \tdebugging purposes.\n")
+ default:
+ panic("unexpected")
+ }
+ Exit(2)
+}
+
+// Setup is called once at program startup time to vet flag values
+// and do any necessary setup operations.
+func (d *dstate) Setup() {
+ if *indirsflag == "" {
+ d.Usage("select input directories with '-i' option")
+ }
+ if d.cmd == textfmtMode || (d.cmd == percentMode && *textfmtoutflag != "") {
+ if *textfmtoutflag == "" {
+ d.Usage("select output file name with '-o' option")
+ }
+ var err error
+ d.textfmtoutf, err = os.Create(*textfmtoutflag)
+ if err != nil {
+ d.Usage(fmt.Sprintf("unable to open textfmt output file %q: %v", *textfmtoutflag, err))
+ }
+ }
+ if d.cmd == debugDumpMode {
+ fmt.Printf("/* WARNING: the format of this dump is not stable and is\n")
+ fmt.Printf(" * expected to change from one Go release to the next.\n")
+ fmt.Printf(" *\n")
+ fmt.Printf(" * produced by:\n")
+ args := append([]string{os.Args[0]}, debugDumpMode)
+ args = append(args, os.Args[1:]...)
+ fmt.Printf(" *\t%s\n", strings.Join(args, " "))
+ fmt.Printf(" */\n")
+ }
+}
+
+func (d *dstate) BeginPod(p pods.Pod) {
+ d.mm = make(map[pkfunc]decodecounter.FuncPayload)
+}
+
+func (d *dstate) EndPod(p pods.Pod) {
+ if d.cmd == debugDumpMode {
+ d.cm.ResetModeAndGranularity()
+ }
+}
+
+func (d *dstate) BeginCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
+ dbgtrace(2, "visit counter data file %s dirIdx %d", cdf, dirIdx)
+ if d.cmd == debugDumpMode {
+ fmt.Printf("data file %s", cdf)
+ if cdr.Goos() != "" {
+ fmt.Printf(" GOOS=%s", cdr.Goos())
+ }
+ if cdr.Goarch() != "" {
+ fmt.Printf(" GOARCH=%s", cdr.Goarch())
+ }
+ if len(cdr.OsArgs()) != 0 {
+ fmt.Printf(" program args: %+v\n", cdr.OsArgs())
+ }
+ fmt.Printf("\n")
+ }
+}
+
+func (d *dstate) EndCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
+}
+
+func (d *dstate) VisitFuncCounterData(data decodecounter.FuncPayload) {
+ if nf, ok := d.pkm[data.PkgIdx]; !ok || data.FuncIdx > nf {
+ warn("func payload inconsistency: id [p=%d,f=%d] nf=%d len(ctrs)=%d in VisitFuncCounterData, ignored", data.PkgIdx, data.FuncIdx, nf, len(data.Counters))
+ return
+ }
+ key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx}
+ val, found := d.mm[key]
+
+ dbgtrace(5, "ctr visit pk=%d fid=%d found=%v len(val.ctrs)=%d len(data.ctrs)=%d", data.PkgIdx, data.FuncIdx, found, len(val.Counters), len(data.Counters))
+
+ if len(val.Counters) < len(data.Counters) {
+ t := val.Counters
+ val.Counters = d.AllocateCounters(len(data.Counters))
+ copy(val.Counters, t)
+ }
+ err, overflow := d.cm.MergeCounters(val.Counters, data.Counters)
+ if err != nil {
+ fatal("%v", err)
+ }
+ if overflow {
+ warn("uint32 overflow during counter merge")
+ }
+ d.mm[key] = val
+}
+
+func (d *dstate) EndCounters() {
+}
+
+func (d *dstate) VisitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) {
+ newgran := mfr.CounterGranularity()
+ newmode := mfr.CounterMode()
+ if err := d.cm.SetModeAndGranularity(mdf, newmode, newgran); err != nil {
+ fatal("%v", err)
+ }
+ if d.cmd == debugDumpMode {
+ fmt.Printf("Cover mode: %s\n", newmode.String())
+ fmt.Printf("Cover granularity: %s\n", newgran.String())
+ }
+ if d.format == nil {
+ d.format = cformat.NewFormatter(mfr.CounterMode())
+ }
+
+ // To provide an additional layer of checking when reading counter
+ // data, walk the meta-data file to determine the set of legal
+ // package/function combinations. This will help catch bugs in the
+ // counter file reader.
+ d.pkm = make(map[uint32]uint32)
+ np := uint32(mfr.NumPackages())
+ payload := []byte{}
+ for pkIdx := uint32(0); pkIdx < np; pkIdx++ {
+ var pd *decodemeta.CoverageMetaDataDecoder
+ var err error
+ pd, payload, err = mfr.GetPackageDecoder(pkIdx, payload)
+ if err != nil {
+ fatal("reading pkg %d from meta-file %s: %s", pkIdx, mdf, err)
+ }
+ d.pkm[pkIdx] = pd.NumFuncs()
+ }
+}
+
+func (d *dstate) BeginPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
+ d.preambleEmitted = false
+ d.pkgImportPath = pd.PackagePath()
+ d.pkgName = pd.PackageName()
+ d.modulePath = pd.ModulePath()
+ if d.cmd == pkglistMode {
+ d.pkgpaths[d.pkgImportPath] = struct{}{}
+ }
+ d.format.SetPackage(pd.PackagePath())
+}
+
+func (d *dstate) EndPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
+}
+
+func (d *dstate) VisitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc) {
+ var counters []uint32
+ key := pkfunc{pk: pkgIdx, fcn: fnIdx}
+ v, haveCounters := d.mm[key]
+
+ dbgtrace(5, "meta visit pk=%d fid=%d fname=%s file=%s found=%v len(val.ctrs)=%d", pkgIdx, fnIdx, fd.Funcname, fd.Srcfile, haveCounters, len(v.Counters))
+
+ suppressOutput := false
+ if haveCounters {
+ counters = v.Counters
+ } else if d.cmd == debugDumpMode && *liveflag {
+ suppressOutput = true
+ }
+
+ if d.cmd == debugDumpMode && !suppressOutput {
+ if !d.preambleEmitted {
+ fmt.Printf("\nPackage path: %s\n", d.pkgImportPath)
+ fmt.Printf("Package name: %s\n", d.pkgName)
+ fmt.Printf("Module path: %s\n", d.modulePath)
+ d.preambleEmitted = true
+ }
+ fmt.Printf("\nFunc: %s\n", fd.Funcname)
+ fmt.Printf("Srcfile: %s\n", fd.Srcfile)
+ fmt.Printf("Literal: %v\n", fd.Lit)
+ }
+ for i := 0; i < len(fd.Units); i++ {
+ u := fd.Units[i]
+ var count uint32
+ if counters != nil {
+ count = counters[i]
+ }
+ d.format.AddUnit(fd.Srcfile, fd.Funcname, fd.Lit, u, count)
+ if d.cmd == debugDumpMode && !suppressOutput {
+ fmt.Printf("%d: L%d:C%d -- L%d:C%d ",
+ i, u.StLine, u.StCol, u.EnLine, u.EnCol)
+ if u.Parent != 0 {
+ fmt.Printf("Parent:%d = %d\n", u.Parent, count)
+ } else {
+ fmt.Printf("NS=%d = %d\n", u.NxStmts, count)
+ }
+ }
+ d.totalStmts += int(u.NxStmts)
+ if count != 0 {
+ d.coveredStmts += int(u.NxStmts)
+ }
+ }
+}
+
+func (d *dstate) Finish() {
+ // d.format may be nil here if the specified input dir was empty.
+ if d.format != nil {
+ if d.cmd == percentMode {
+ d.format.EmitPercent(os.Stdout, "", false)
+ }
+ if d.cmd == funcMode {
+ d.format.EmitFuncs(os.Stdout)
+ }
+ if d.textfmtoutf != nil {
+ if err := d.format.EmitTextual(d.textfmtoutf); err != nil {
+ fatal("writing to %s: %v", *textfmtoutflag, err)
+ }
+ }
+ }
+ if d.textfmtoutf != nil {
+ if err := d.textfmtoutf.Close(); err != nil {
+ fatal("closing textfmt output file %s: %v", *textfmtoutflag, err)
+ }
+ }
+ if d.cmd == debugDumpMode {
+ fmt.Printf("totalStmts: %d coveredStmts: %d\n", d.totalStmts, d.coveredStmts)
+ }
+ if d.cmd == pkglistMode {
+ pkgs := make([]string, 0, len(d.pkgpaths))
+ for p := range d.pkgpaths {
+ pkgs = append(pkgs, p)
+ }
+ sort.Strings(pkgs)
+ for _, p := range pkgs {
+ fmt.Printf("%s\n", p)
+ }
+ }
+}
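VisitFuncCounterData above accumulates per-function counters keyed by pkfunc and delegates the actual combining to cmerge.Merger. As a rough picture of what that combine does for count-based modes, here is a simplified standalone sketch (this is not the internal/coverage/cmerge implementation; saturating on overflow is an assumption made for illustration):

package main

// mergeCountersSketch adds two counter arrays slot by slot and reports
// whether any slot overflowed uint32. The caller guarantees
// len(dst) >= len(src), as VisitFuncCounterData does by reallocating
// the destination when it is too short.
func mergeCountersSketch(dst, src []uint32) (overflow bool) {
	for i := range src {
		sum := uint64(dst[i]) + uint64(src[i])
		if sum > uint64(^uint32(0)) {
			overflow = true
			sum = uint64(^uint32(0))
		}
		dst[i] = uint32(sum)
	}
	return overflow
}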
diff --git a/src/cmd/covdata/export_test.go b/src/cmd/covdata/export_test.go
new file mode 100644
index 0000000..e4592ee
--- /dev/null
+++ b/src/cmd/covdata/export_test.go
@@ -0,0 +1,7 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func Main() { main() }
diff --git a/src/cmd/covdata/merge.go b/src/cmd/covdata/merge.go
new file mode 100644
index 0000000..225861d
--- /dev/null
+++ b/src/cmd/covdata/merge.go
@@ -0,0 +1,109 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This file contains functions and APIs to support the "merge"
+// subcommand of "go tool covdata".
+
+import (
+ "flag"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/decodecounter"
+ "internal/coverage/decodemeta"
+ "internal/coverage/pods"
+ "os"
+)
+
+var outdirflag *string
+var pcombineflag *bool
+
+func makeMergeOp() covOperation {
+ outdirflag = flag.String("o", "", "Output directory to write")
+ pcombineflag = flag.Bool("pcombine", false, "Combine profiles derived from distinct program executables")
+ m := &mstate{
+ mm: newMetaMerge(),
+ }
+ return m
+}
+
+// mstate encapsulates state and provides methods for implementing the
+// merge operation. This type implements the CovDataVisitor interface,
+// and is designed to be used in concert with the CovDataReader
+// utility, which abstracts away most of the grubby details of reading
+// coverage data files. Most of the heavy lifting for merging is done
+// using apis from 'metaMerge' (this is mainly a wrapper around that
+// functionality).
+type mstate struct {
+ mm *metaMerge
+}
+
+func (m *mstate) Usage(msg string) {
+ if len(msg) > 0 {
+ fmt.Fprintf(os.Stderr, "error: %s\n", msg)
+ }
+ fmt.Fprintf(os.Stderr, "usage: go tool covdata merge -i=<directories> -o=<dir>\n\n")
+ flag.PrintDefaults()
+ fmt.Fprintf(os.Stderr, "\nExamples:\n\n")
+ fmt.Fprintf(os.Stderr, " go tool covdata merge -i=dir1,dir2,dir3 -o=outdir\n\n")
+ fmt.Fprintf(os.Stderr, " \tmerges all files in dir1/dir2/dir3\n")
+ fmt.Fprintf(os.Stderr, " \tinto output dir outdir\n")
+ Exit(2)
+}
+
+func (m *mstate) Setup() {
+ if *indirsflag == "" {
+ m.Usage("select input directories with '-i' option")
+ }
+ if *outdirflag == "" {
+ m.Usage("select output directory with '-o' option")
+ }
+}
+
+func (m *mstate) BeginPod(p pods.Pod) {
+ m.mm.beginPod()
+}
+
+func (m *mstate) EndPod(p pods.Pod) {
+ m.mm.endPod(*pcombineflag)
+}
+
+func (m *mstate) BeginCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
+ dbgtrace(2, "visit counter data file %s dirIdx %d", cdf, dirIdx)
+ m.mm.beginCounterDataFile(cdr)
+}
+
+func (m *mstate) EndCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
+}
+
+func (m *mstate) VisitFuncCounterData(data decodecounter.FuncPayload) {
+ m.mm.visitFuncCounterData(data)
+}
+
+func (m *mstate) EndCounters() {
+}
+
+func (m *mstate) VisitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) {
+ m.mm.visitMetaDataFile(mdf, mfr)
+}
+
+func (m *mstate) BeginPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
+ dbgtrace(3, "VisitPackage(pk=%d path=%s)", pkgIdx, pd.PackagePath())
+ m.mm.visitPackage(pd, pkgIdx, *pcombineflag)
+}
+
+func (m *mstate) EndPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
+}
+
+func (m *mstate) VisitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc) {
+ m.mm.visitFunc(pkgIdx, fnIdx, fd, mergeMode, *pcombineflag)
+}
+
+func (m *mstate) Finish() {
+ if *pcombineflag {
+ finalHash := m.mm.emitMeta(*outdirflag, true)
+ m.mm.emitCounters(*outdirflag, finalHash)
+ }
+}
diff --git a/src/cmd/covdata/metamerge.go b/src/cmd/covdata/metamerge.go
new file mode 100644
index 0000000..7f15742
--- /dev/null
+++ b/src/cmd/covdata/metamerge.go
@@ -0,0 +1,433 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This file contains functions and APIs that support merging of
+// meta-data information. It helps implement the "merge", "subtract",
+// and "intersect" subcommands.
+
+import (
+ "crypto/md5"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/calloc"
+ "internal/coverage/cmerge"
+ "internal/coverage/decodecounter"
+ "internal/coverage/decodemeta"
+ "internal/coverage/encodecounter"
+ "internal/coverage/encodemeta"
+ "internal/coverage/slicewriter"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "time"
+ "unsafe"
+)
+
+// metaMerge provides state and methods to help manage the process
+// of selecting or merging meta data files. There are three cases
+// of interest here: the "-pcombine" flag provided by merge, the
+// "-pkg" option provided by all merge/subtract/intersect, and
+// a regular vanilla merge with no package selection
+//
+// In the -pcombine case, we're essentially glomming together all the
+// meta-data for all packages and all functions, meaning that
+// everything we see in a given package needs to be added into the
+// meta-data file builder; we emit a single meta-data file at the end
+// of the run.
+//
+// In the -pkg case, we will typically emit a single meta-data file
+// per input pod, where that new meta-data file contains entries for
+// just the selected packages.
+//
+// In the third case (vanilla merge with no combining or package
+// selection) we can carry over meta-data files without touching them
+// at all (only counter data files will be merged).
+type metaMerge struct {
+ calloc.BatchCounterAlloc
+ cmerge.Merger
+ // maps package import path to package state
+ pkm map[string]*pkstate
+ // list of packages
+ pkgs []*pkstate
+ // current package state
+ p *pkstate
+ // current pod state
+ pod *podstate
+ // counter data file osargs/goos/goarch state
+ astate *argstate
+}
+
+// pkstate
+type pkstate struct {
+ // index of package within meta-data file.
+ pkgIdx uint32
+ // this maps function index within the package to counter data payload
+ ctab map[uint32]decodecounter.FuncPayload
+ // pointer to meta-data blob for package
+ mdblob []byte
+ // filled in only for -pcombine merges
+ *pcombinestate
+}
+
+type podstate struct {
+ pmm map[pkfunc]decodecounter.FuncPayload
+ mdf string
+ mfr *decodemeta.CoverageMetaFileReader
+ fileHash [16]byte
+}
+
+type pkfunc struct {
+ pk, fcn uint32
+}
+
+// pcombinestate
+type pcombinestate struct {
+ // Meta-data builder for the package.
+ cmdb *encodemeta.CoverageMetaDataBuilder
+ // Maps function meta-data hash to new function index in the
+ // new version of the package we're building.
+ ftab map[[16]byte]uint32
+}
+
+func newMetaMerge() *metaMerge {
+ return &metaMerge{
+ pkm: make(map[string]*pkstate),
+ astate: &argstate{},
+ }
+}
+
+func (mm *metaMerge) visitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) {
+ dbgtrace(2, "visitMetaDataFile(mdf=%s)", mdf)
+
+ // Record meta-data file name.
+ mm.pod.mdf = mdf
+ // Keep a pointer to the file-level reader.
+ mm.pod.mfr = mfr
+ // Record file hash.
+ mm.pod.fileHash = mfr.FileHash()
+ // Counter mode and granularity -- detect and record clashes here.
+ newgran := mfr.CounterGranularity()
+ newmode := mfr.CounterMode()
+ if err := mm.SetModeAndGranularity(mdf, newmode, newgran); err != nil {
+ fatal("%v", err)
+ }
+}
+
+func (mm *metaMerge) beginCounterDataFile(cdr *decodecounter.CounterDataReader) {
+ state := argvalues{
+ osargs: cdr.OsArgs(),
+ goos: cdr.Goos(),
+ goarch: cdr.Goarch(),
+ }
+ mm.astate.Merge(state)
+}
+
+func copyMetaDataFile(inpath, outpath string) {
+ inf, err := os.Open(inpath)
+ if err != nil {
+ fatal("opening input meta-data file %s: %v", inpath, err)
+ }
+ defer inf.Close()
+
+ fi, err := inf.Stat()
+ if err != nil {
+ fatal("accessing input meta-data file %s: %v", inpath, err)
+ }
+
+ outf, err := os.OpenFile(outpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
+ if err != nil {
+ fatal("opening output meta-data file %s: %v", outpath, err)
+ }
+
+ _, err = io.Copy(outf, inf)
+ outf.Close()
+ if err != nil {
+ fatal("writing output meta-data file %s: %v", outpath, err)
+ }
+}
+
+func (mm *metaMerge) beginPod() {
+ mm.pod = &podstate{
+ pmm: make(map[pkfunc]decodecounter.FuncPayload),
+ }
+}
+
+// endPod handles actions needed when we're done visiting all of
+// the things in a pod -- counter files and meta-data file. There are
+// three cases of interest here:
+//
+// Case 1: in an unconditional merge (we're not selecting a specific set of
+// packages using "-pkg", and the "-pcombine" option is not in use),
+// we can simply copy over the meta-data file from input to output.
+//
+// Case 2: if this is a select merge (-pkg is in effect), then at
+// this point we write out a new smaller meta-data file that includes
+// only the packages of interest. At this point we also emit a merged
+// counter data file as well.
+//
+// Case 3: if "-pcombine" is in effect, we don't write anything at
+// this point (all writes will happen at the end of the run).
+func (mm *metaMerge) endPod(pcombine bool) {
+ if pcombine {
+ // Just clear out the pod data, we'll do all the
+ // heavy lifting at the end.
+ mm.pod = nil
+ return
+ }
+
+ finalHash := mm.pod.fileHash
+ if matchpkg != nil {
+ // Emit modified meta-data file for this pod.
+ finalHash = mm.emitMeta(*outdirflag, pcombine)
+ } else {
+ // Copy meta-data file for this pod to the output directory.
+ inpath := mm.pod.mdf
+ mdfbase := filepath.Base(mm.pod.mdf)
+ outpath := filepath.Join(*outdirflag, mdfbase)
+ copyMetaDataFile(inpath, outpath)
+ }
+
+ // Emit accumulated counter data for this pod.
+ mm.emitCounters(*outdirflag, finalHash)
+
+ // Reset package state.
+ mm.pkm = make(map[string]*pkstate)
+ mm.pkgs = nil
+ mm.pod = nil
+
+ // Reset counter mode and granularity
+ mm.ResetModeAndGranularity()
+}
+
+// emitMeta encodes and writes out a new coverage meta-data file as
+// part of a merge operation, specifically a merge with the
+// "-pcombine" flag.
+func (mm *metaMerge) emitMeta(outdir string, pcombine bool) [16]byte {
+ fh := md5.New()
+ blobs := [][]byte{}
+ tlen := uint64(unsafe.Sizeof(coverage.MetaFileHeader{}))
+ for _, p := range mm.pkgs {
+ var blob []byte
+ if pcombine {
+ mdw := &slicewriter.WriteSeeker{}
+ p.cmdb.Emit(mdw)
+ blob = mdw.BytesWritten()
+ } else {
+ blob = p.mdblob
+ }
+ ph := md5.Sum(blob)
+ blobs = append(blobs, blob)
+ if _, err := fh.Write(ph[:]); err != nil {
+ panic(fmt.Sprintf("internal error: md5 sum failed: %v", err))
+ }
+ tlen += uint64(len(blob))
+ }
+ var finalHash [16]byte
+ fhh := fh.Sum(nil)
+ copy(finalHash[:], fhh)
+
+ // Open meta-file for writing.
+ fn := fmt.Sprintf("%s.%x", coverage.MetaFilePref, finalHash)
+ fpath := filepath.Join(outdir, fn)
+ mf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ fatal("unable to open output meta-data file %s: %v", fpath, err)
+ }
+
+ // Encode and write.
+ mfw := encodemeta.NewCoverageMetaFileWriter(fpath, mf)
+ err = mfw.Write(finalHash, blobs, mm.Mode(), mm.Granularity())
+ if err != nil {
+ fatal("error writing %s: %v\n", fpath, err)
+ }
+ return finalHash
+}
+
+func (mm *metaMerge) emitCounters(outdir string, metaHash [16]byte) {
+ // Open output file. The file naming scheme is intended to mimic
+ // that used when running a coverage-instrumented binary, for
+ // consistency (however the process ID is not meaningful here, so
+ // use a value of zero).
+ var dummyPID int
+ fn := fmt.Sprintf(coverage.CounterFileTempl, coverage.CounterFilePref, metaHash, dummyPID, time.Now().UnixNano())
+ fpath := filepath.Join(outdir, fn)
+ cf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ fatal("opening counter data file %s: %v", fpath, err)
+ }
+ defer func() {
+ if err := cf.Close(); err != nil {
+ fatal("error closing output meta-data file %s: %v", fpath, err)
+ }
+ }()
+
+ args := mm.astate.ArgsSummary()
+ cfw := encodecounter.NewCoverageDataWriter(cf, coverage.CtrULeb128)
+ if err := cfw.Write(metaHash, args, mm); err != nil {
+ fatal("counter file write failed: %v", err)
+ }
+ mm.astate = &argstate{}
+}
+
+// NumFuncs is used while writing the counter data files; it
+// implements the 'NumFuncs' method required by the interface
+// internal/coverage/encodecounter/CounterVisitor.
+func (mm *metaMerge) NumFuncs() (int, error) {
+ rval := 0
+ for _, p := range mm.pkgs {
+ rval += len(p.ctab)
+ }
+ return rval, nil
+}
+
+// VisitFuncs is used while writing the counter data files; it
+// implements the 'VisitFuncs' method required by the interface
+// internal/coverage/encodecounter/CounterVisitor.
+func (mm *metaMerge) VisitFuncs(f encodecounter.CounterVisitorFn) error {
+ if *verbflag >= 4 {
+ fmt.Printf("counterVisitor invoked\n")
+ }
+ // For each package, for each function, construct counter
+ // array and then call "f" on it.
+ for pidx, p := range mm.pkgs {
+ fids := make([]int, 0, len(p.ctab))
+ for fid := range p.ctab {
+ fids = append(fids, int(fid))
+ }
+ sort.Ints(fids)
+ if *verbflag >= 4 {
+ fmt.Printf("fids for pk=%d: %+v\n", pidx, fids)
+ }
+ for _, fid := range fids {
+ fp := p.ctab[uint32(fid)]
+ if *verbflag >= 4 {
+ fmt.Printf("counter write for pk=%d fid=%d len(ctrs)=%d\n", pidx, fid, len(fp.Counters))
+ }
+ if err := f(uint32(pidx), uint32(fid), fp.Counters); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (mm *metaMerge) visitPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32, pcombine bool) {
+ p, ok := mm.pkm[pd.PackagePath()]
+ if !ok {
+ p = &pkstate{
+ pkgIdx: uint32(len(mm.pkgs)),
+ }
+ mm.pkgs = append(mm.pkgs, p)
+ mm.pkm[pd.PackagePath()] = p
+ if pcombine {
+ p.pcombinestate = new(pcombinestate)
+ cmdb, err := encodemeta.NewCoverageMetaDataBuilder(pd.PackagePath(), pd.PackageName(), pd.ModulePath())
+ if err != nil {
+ fatal("fatal error creating meta-data builder: %v", err)
+ }
+ dbgtrace(2, "install new pkm entry for package %s pk=%d", pd.PackagePath(), pkgIdx)
+ p.cmdb = cmdb
+ p.ftab = make(map[[16]byte]uint32)
+ } else {
+ var err error
+ p.mdblob, err = mm.pod.mfr.GetPackagePayload(pkgIdx, nil)
+ if err != nil {
+ fatal("error extracting package %d payload from %s: %v",
+ pkgIdx, mm.pod.mdf, err)
+ }
+ }
+ p.ctab = make(map[uint32]decodecounter.FuncPayload)
+ }
+ mm.p = p
+}
+
+func (mm *metaMerge) visitFuncCounterData(data decodecounter.FuncPayload) {
+ key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx}
+ val := mm.pod.pmm[key]
+ // FIXME: in theory either A) len(val.Counters) is zero, or B)
+ // the two lengths are equal. Assert if not? Of course, we could
+ // see odd stuff if there is source file skew.
+ if *verbflag > 4 {
+ fmt.Printf("visit pk=%d fid=%d len(counters)=%d\n", data.PkgIdx, data.FuncIdx, len(data.Counters))
+ }
+ if len(val.Counters) < len(data.Counters) {
+ t := val.Counters
+ val.Counters = mm.AllocateCounters(len(data.Counters))
+ copy(val.Counters, t)
+ }
+ err, overflow := mm.MergeCounters(val.Counters, data.Counters)
+ if err != nil {
+ fatal("%v", err)
+ }
+ if overflow {
+ warn("uint32 overflow during counter merge")
+ }
+ mm.pod.pmm[key] = val
+}
+
+func (mm *metaMerge) visitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc, verb string, pcombine bool) {
+ if *verbflag >= 3 {
+ fmt.Printf("visit pk=%d fid=%d func %s\n", pkgIdx, fnIdx, fd.Funcname)
+ }
+
+ var counters []uint32
+ key := pkfunc{pk: pkgIdx, fcn: fnIdx}
+ v, haveCounters := mm.pod.pmm[key]
+ if haveCounters {
+ counters = v.Counters
+ }
+
+ if pcombine {
+ // If the merge is running in "combine programs" mode, then hash
+ // the function and look it up in the package ftab to see if we've
+ // encountered it before. If we haven't, then register it with the
+ // meta-data builder.
+ fnhash := encodemeta.HashFuncDesc(fd)
+ gfidx, ok := mm.p.ftab[fnhash]
+ if !ok {
+ // We haven't seen this function before, need to add it to
+ // the meta data.
+ gfidx = uint32(mm.p.cmdb.AddFunc(*fd))
+ mm.p.ftab[fnhash] = gfidx
+ if *verbflag >= 3 {
+ fmt.Printf("new meta entry for fn %s fid=%d\n", fd.Funcname, gfidx)
+ }
+ }
+ fnIdx = gfidx
+ }
+ if !haveCounters {
+ return
+ }
+
+ // Install counters in package ctab.
+ gfp, ok := mm.p.ctab[fnIdx]
+ if ok {
+ if verb == "subtract" || verb == "intersect" {
+ panic("should never see this for intersect/subtract")
+ }
+ if *verbflag >= 3 {
+ fmt.Printf("counter merge for %s fidx=%d\n", fd.Funcname, fnIdx)
+ }
+ // Merge.
+ err, overflow := mm.MergeCounters(gfp.Counters, counters)
+ if err != nil {
+ fatal("%v", err)
+ }
+ if overflow {
+ warn("uint32 overflow during counter merge")
+ }
+ mm.p.ctab[fnIdx] = gfp
+ } else {
+ if *verbflag >= 3 {
+ fmt.Printf("null merge for %s fidx %d\n", fd.Funcname, fnIdx)
+ }
+ gfp := v
+ gfp.PkgIdx = mm.p.pkgIdx
+ gfp.FuncIdx = fnIdx
+ mm.p.ctab[fnIdx] = gfp
+ }
+}
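The emitMeta method above derives the output meta-data file's hash by hashing each package blob with MD5 and then hashing the per-package digests; that hash also names the file. A standalone sketch of just that computation (illustration only; the real writer also encodes headers, counter mode, and granularity):

package main

import (
	"crypto/md5"
	"fmt"
)

// hashOfBlobHashes mirrors the hash-of-hashes scheme used by emitMeta:
// MD5 each package meta-data blob, feed the digests into a second MD5,
// and use the result as the file hash.
func hashOfBlobHashes(blobs [][]byte) [16]byte {
	fh := md5.New()
	for _, blob := range blobs {
		ph := md5.Sum(blob)
		fh.Write(ph[:]) // hash.Hash.Write never returns an error
	}
	var finalHash [16]byte
	copy(finalHash[:], fh.Sum(nil))
	return finalHash
}

// metaFileNameSketch shows how the hash feeds into the output name,
// matching the covmeta.<hash> files shown in doc.go.
func metaFileNameSketch(blobs [][]byte) string {
	return fmt.Sprintf("covmeta.%x", hashOfBlobHashes(blobs))
}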
diff --git a/src/cmd/covdata/subtractintersect.go b/src/cmd/covdata/subtractintersect.go
new file mode 100644
index 0000000..5d71e3d
--- /dev/null
+++ b/src/cmd/covdata/subtractintersect.go
@@ -0,0 +1,196 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This file contains functions and APIs to support the "subtract" and
+// "intersect" subcommands of "go tool covdata".
+
+import (
+ "flag"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/decodecounter"
+ "internal/coverage/decodemeta"
+ "internal/coverage/pods"
+ "os"
+ "strings"
+)
+
+// makeSubtractIntersectOp creates a subtract or intersect operation.
+// 'mode' here must be either "subtract" or "intersect".
+func makeSubtractIntersectOp(mode string) covOperation {
+ outdirflag = flag.String("o", "", "Output directory to write")
+ s := &sstate{
+ mode: mode,
+ mm: newMetaMerge(),
+ inidx: -1,
+ }
+ return s
+}
+
+// sstate holds state needed to implement subtraction and intersection
+// operations on code coverage data files. This type provides methods
+// to implement the CovDataVisitor interface, and is designed to be
+// used in concert with the CovDataReader utility, which abstracts
+// away most of the grubby details of reading coverage data files.
+type sstate struct {
+ mm *metaMerge
+ inidx int
+ mode string
+ // Used only for intersection; keyed by pkg/fn ID, it keeps track of
+ // just the set of functions for which we have data in the current
+ // input directory.
+ imm map[pkfunc]struct{}
+}
+
+func (s *sstate) Usage(msg string) {
+ if len(msg) > 0 {
+ fmt.Fprintf(os.Stderr, "error: %s\n", msg)
+ }
+ fmt.Fprintf(os.Stderr, "usage: go tool covdata %s -i=dir1,dir2 -o=<dir>\n\n", s.mode)
+ flag.PrintDefaults()
+ fmt.Fprintf(os.Stderr, "\nExamples:\n\n")
+ op := "from"
+ if s.mode == intersectMode {
+ op = "with"
+ }
+ fmt.Fprintf(os.Stderr, " go tool covdata %s -i=dir1,dir2 -o=outdir\n\n", s.mode)
+ fmt.Fprintf(os.Stderr, " \t%ss dir2 %s dir1, writing result\n", s.mode, op)
+ fmt.Fprintf(os.Stderr, " \tinto output dir outdir.\n")
+ os.Exit(2)
+}
+
+func (s *sstate) Setup() {
+ if *indirsflag == "" {
+ usage("select input directories with '-i' option")
+ }
+ indirs := strings.Split(*indirsflag, ",")
+ if s.mode == subtractMode && len(indirs) != 2 {
+ usage("supply exactly two input dirs for subtract operation")
+ }
+ if *outdirflag == "" {
+ usage("select output directory with '-o' option")
+ }
+}
+
+func (s *sstate) BeginPod(p pods.Pod) {
+ s.mm.beginPod()
+}
+
+func (s *sstate) EndPod(p pods.Pod) {
+ const pcombine = false
+ s.mm.endPod(pcombine)
+}
+
+func (s *sstate) EndCounters() {
+ if s.imm != nil {
+ s.pruneCounters()
+ }
+}
+
+// pruneCounters performs a function-level partial intersection using the
+// current POD counter data (s.mm.pod.pmm) and the intersected data from
+// PODs in previous dirs (s.imm).
+func (s *sstate) pruneCounters() {
+ pkeys := make([]pkfunc, 0, len(s.mm.pod.pmm))
+ for k := range s.mm.pod.pmm {
+ pkeys = append(pkeys, k)
+ }
+ // Remove anything from pmm not found in imm. We don't need to
+ // go the other way (removing things from imm not found in pmm)
+ // since we don't add anything to imm if there is no pmm entry.
+ for _, k := range pkeys {
+ if _, found := s.imm[k]; !found {
+ delete(s.mm.pod.pmm, k)
+ }
+ }
+ s.imm = nil
+}
+
+func (s *sstate) BeginCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
+ dbgtrace(2, "visiting counter data file %s diridx %d", cdf, dirIdx)
+ if s.inidx != dirIdx {
+ if s.inidx > dirIdx {
+ // We're relying on having data files presented in
+ // the order they appear in the inputs (e.g. first all
+ // data files from input dir 0, then dir 1, etc).
+ panic("decreasing dir index, internal error")
+ }
+ if dirIdx == 0 {
+ // No need to keep track of the functions in the first
+ // directory, since that info will be replicated in
+ // s.mm.pod.pmm.
+ s.imm = nil
+ } else {
+ // We're now starting to visit the Nth directory, N != 0.
+ if s.mode == intersectMode {
+ if s.imm != nil {
+ s.pruneCounters()
+ }
+ s.imm = make(map[pkfunc]struct{})
+ }
+ }
+ s.inidx = dirIdx
+ }
+}
+
+func (s *sstate) EndCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
+}
+
+func (s *sstate) VisitFuncCounterData(data decodecounter.FuncPayload) {
+ key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx}
+
+ if *verbflag >= 5 {
+ fmt.Printf("ctr visit fid=%d pk=%d inidx=%d data.Counters=%+v\n", data.FuncIdx, data.PkgIdx, s.inidx, data.Counters)
+ }
+
+ // If we're processing counter data from the initial (first) input
+ // directory, then just install it into the counter data map
+ // as usual.
+ if s.inidx == 0 {
+ s.mm.visitFuncCounterData(data)
+ return
+ }
+
+ // If we're looking at counter data from a dir other than
+ // the first, then perform the intersect/subtract.
+ if val, ok := s.mm.pod.pmm[key]; ok {
+ if s.mode == subtractMode {
+ for i := 0; i < len(data.Counters); i++ {
+ if data.Counters[i] != 0 {
+ val.Counters[i] = 0
+ }
+ }
+ } else if s.mode == intersectMode {
+ s.imm[key] = struct{}{}
+ for i := 0; i < len(data.Counters); i++ {
+ if data.Counters[i] == 0 {
+ val.Counters[i] = 0
+ }
+ }
+ }
+ }
+}
+
+func (s *sstate) VisitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) {
+ if s.mode == intersectMode {
+ s.imm = make(map[pkfunc]struct{})
+ }
+ s.mm.visitMetaDataFile(mdf, mfr)
+}
+
+func (s *sstate) BeginPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
+ s.mm.visitPackage(pd, pkgIdx, false)
+}
+
+func (s *sstate) EndPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
+}
+
+func (s *sstate) VisitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc) {
+ s.mm.visitFunc(pkgIdx, fnIdx, fd, s.mode, false)
+}
+
+func (s *sstate) Finish() {
+}
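The subtract and intersect operations differ only in the per-unit rule applied when counter data from the second input directory arrives, as implemented in VisitFuncCounterData above. A standalone restatement of that rule, assuming equal-length counter slices for the same function (applyCounterOpSketch is hypothetical):

package main

// applyCounterOpSketch zeroes entries of base according to the
// covdata rules: subtract keeps a count only if the unit was NOT
// executed in the second input; intersect keeps a count only if the
// unit WAS executed in the second input.
func applyCounterOpSketch(mode string, base, other []uint32) {
	for i := range other {
		switch mode {
		case "subtract":
			if other[i] != 0 {
				base[i] = 0
			}
		case "intersect":
			if other[i] == 0 {
				base[i] = 0
			}
		}
	}
}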
diff --git a/src/cmd/covdata/testdata/dep.go b/src/cmd/covdata/testdata/dep.go
new file mode 100644
index 0000000..2127ab2
--- /dev/null
+++ b/src/cmd/covdata/testdata/dep.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dep
+
+func Dep1() int {
+ return 42
+}
+
+func PDep(x int) {
+ if x != 1010101 {
+ println(x)
+ } else {
+ panic("bad")
+ }
+}
diff --git a/src/cmd/covdata/testdata/prog1.go b/src/cmd/covdata/testdata/prog1.go
new file mode 100644
index 0000000..76e9e91
--- /dev/null
+++ b/src/cmd/covdata/testdata/prog1.go
@@ -0,0 +1,48 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "prog/dep"
+)
+
+//go:noinline
+func first() {
+ println("whee")
+}
+
+//go:noinline
+func second() {
+ println("oy")
+}
+
+//go:noinline
+func third(x int) int {
+ if x != 0 {
+ return 42
+ }
+ println("blarg")
+ return 0
+}
+
+//go:noinline
+func fourth() int {
+ return 99
+}
+
+func main() {
+ println(dep.Dep1())
+ dep.PDep(2)
+ if len(os.Args) > 1 {
+ second()
+ third(1)
+ } else if len(os.Args) > 2 {
+ fourth()
+ } else {
+ first()
+ third(0)
+ }
+}
diff --git a/src/cmd/covdata/testdata/prog2.go b/src/cmd/covdata/testdata/prog2.go
new file mode 100644
index 0000000..e51e786
--- /dev/null
+++ b/src/cmd/covdata/testdata/prog2.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "prog/dep"
+)
+
+//go:noinline
+func fifth() {
+ println("hubba")
+}
+
+//go:noinline
+func sixth() {
+ println("wha?")
+}
+
+func main() {
+ println(dep.Dep1())
+ if len(os.Args) > 1 {
+ fifth()
+ } else {
+ sixth()
+ }
+}
diff --git a/src/cmd/covdata/tool_test.go b/src/cmd/covdata/tool_test.go
new file mode 100644
index 0000000..42334ea
--- /dev/null
+++ b/src/cmd/covdata/tool_test.go
@@ -0,0 +1,944 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main_test
+
+import (
+ cmdcovdata "cmd/covdata"
+ "flag"
+ "fmt"
+ "internal/coverage/pods"
+ "internal/goexperiment"
+ "internal/testenv"
+ "log"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+)
+
+// testcovdata returns the path to the unit test executable to be used as
+// standin for 'go tool covdata'.
+func testcovdata(t testing.TB) string {
+ exe, err := os.Executable()
+ if err != nil {
+ t.Helper()
+ t.Fatal(err)
+ }
+ return exe
+}
+
+// Top level tempdir for test.
+var testTempDir string
+
+// If set, this will preserve all the tmpdir files from the test run.
+var preserveTmp = flag.Bool("preservetmp", false, "keep tmpdir files for debugging")
+
+// TestMain used here so that we can leverage the test executable
+// itself as a cmd/covdata executable; compare to similar usage in
+// the cmd/go tests.
+func TestMain(m *testing.M) {
+ // When CMDCOVDATA_TEST_RUN_MAIN is set, we're reusing the test
+ // binary as cmd/covdata. In this case we run the main func exported
+ // via export_test.go, and exit; CMDCOVDATA_TEST_RUN_MAIN is set below
+ // for actual test invocations.
+ if os.Getenv("CMDCOVDATA_TEST_RUN_MAIN") != "" {
+ cmdcovdata.Main()
+ os.Exit(0)
+ }
+ flag.Parse()
+ topTmpdir, err := os.MkdirTemp("", "cmd-covdata-test-")
+ if err != nil {
+ log.Fatal(err)
+ }
+ testTempDir = topTmpdir
+ if !*preserveTmp {
+ defer os.RemoveAll(topTmpdir)
+ } else {
+ fmt.Fprintf(os.Stderr, "debug: preserving tmpdir %s\n", topTmpdir)
+ }
+ os.Setenv("CMDCOVDATA_TEST_RUN_MAIN", "true")
+ os.Exit(m.Run())
+}
+
+var tdmu sync.Mutex
+var tdcount int
+
+func tempDir(t *testing.T) string {
+ tdmu.Lock()
+ dir := filepath.Join(testTempDir, fmt.Sprintf("%03d", tdcount))
+ tdcount++
+ if err := os.Mkdir(dir, 0777); err != nil {
+ t.Fatal(err)
+ }
+ defer tdmu.Unlock()
+ return dir
+}
+
+const debugtrace = false
+
+func gobuild(t *testing.T, indir string, bargs []string) {
+ t.Helper()
+
+ if debugtrace {
+ if indir != "" {
+ t.Logf("in dir %s: ", indir)
+ }
+ t.Logf("cmd: %s %+v\n", testenv.GoToolPath(t), bargs)
+ }
+ cmd := testenv.Command(t, testenv.GoToolPath(t), bargs...)
+ cmd.Dir = indir
+ b, err := cmd.CombinedOutput()
+ if len(b) != 0 {
+ t.Logf("## build output:\n%s", b)
+ }
+ if err != nil {
+ t.Fatalf("build error: %v", err)
+ }
+}
+
+func emitFile(t *testing.T, dst, src string) {
+ payload, err := os.ReadFile(src)
+ if err != nil {
+ t.Fatalf("error reading %q: %v", src, err)
+ }
+ if err := os.WriteFile(dst, payload, 0666); err != nil {
+ t.Fatalf("writing %q: %v", dst, err)
+ }
+}
+
+const mainPkgPath = "prog"
+
+func buildProg(t *testing.T, prog string, dir string, tag string, flags []string) (string, string) {
+ // Create subdirs.
+ subdir := filepath.Join(dir, prog+"dir"+tag)
+ if err := os.Mkdir(subdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", subdir, err)
+ }
+ depdir := filepath.Join(subdir, "dep")
+ if err := os.Mkdir(depdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", depdir, err)
+ }
+
+ // Emit program.
+ insrc := filepath.Join("testdata", prog+".go")
+ src := filepath.Join(subdir, prog+".go")
+ emitFile(t, src, insrc)
+ indep := filepath.Join("testdata", "dep.go")
+ dep := filepath.Join(depdir, "dep.go")
+ emitFile(t, dep, indep)
+
+ // Emit go.mod.
+ mod := filepath.Join(subdir, "go.mod")
+ modsrc := "\nmodule " + mainPkgPath + "\n\ngo 1.19\n"
+ if err := os.WriteFile(mod, []byte(modsrc), 0666); err != nil {
+ t.Fatal(err)
+ }
+ exepath := filepath.Join(subdir, prog+".exe")
+ bargs := []string{"build", "-cover", "-o", exepath}
+ bargs = append(bargs, flags...)
+ gobuild(t, subdir, bargs)
+ return exepath, subdir
+}
+
+type state struct {
+ dir string
+ exedir1 string
+ exedir2 string
+ exedir3 string
+ exepath1 string
+ exepath2 string
+ exepath3 string
+ tool string
+ outdirs [4]string
+}
+
+const debugWorkDir = false
+
+func TestCovTool(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ if !goexperiment.CoverageRedesign {
+ t.Skipf("stubbed out due to goexperiment.CoverageRedesign=false")
+ }
+ dir := tempDir(t)
+ if testing.Short() {
+ t.Skip()
+ }
+ if debugWorkDir {
+ // debugging
+ dir = "/tmp/qqq"
+ os.RemoveAll(dir)
+ os.Mkdir(dir, 0777)
+ }
+
+ s := state{
+ dir: dir,
+ }
+ s.exepath1, s.exedir1 = buildProg(t, "prog1", dir, "", nil)
+ s.exepath2, s.exedir2 = buildProg(t, "prog2", dir, "", nil)
+ flags := []string{"-covermode=atomic"}
+ s.exepath3, s.exedir3 = buildProg(t, "prog1", dir, "atomic", flags)
+
+ // Reuse unit test executable as tool to be tested.
+ s.tool = testcovdata(t)
+
+ // Create a few coverage output dirs.
+ for i := 0; i < 4; i++ {
+ d := filepath.Join(dir, fmt.Sprintf("covdata%d", i))
+ s.outdirs[i] = d
+ if err := os.Mkdir(d, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", d, err)
+ }
+ }
+
+ // Run instrumented program to generate some coverage data output files,
+ // as follows:
+ //
+ // <tmp>/covdata0 -- prog1.go compiled -cover
+ // <tmp>/covdata1 -- prog1.go compiled -cover
+ // <tmp>/covdata2 -- prog1.go compiled -covermode=atomic
+ // <tmp>/covdata3 -- prog1.go compiled -covermode=atomic
+ //
+ for m := 0; m < 2; m++ {
+ for k := 0; k < 2; k++ {
+ args := []string{}
+ if k != 0 {
+ args = append(args, "foo", "bar")
+ }
+ for i := 0; i <= k; i++ {
+ exepath := s.exepath1
+ if m != 0 {
+ exepath = s.exepath3
+ }
+ cmd := testenv.Command(t, exepath, args...)
+ cmd.Env = append(cmd.Env, "GOCOVERDIR="+s.outdirs[m*2+k])
+ b, err := cmd.CombinedOutput()
+ if len(b) != 0 {
+ t.Logf("## instrumented run output:\n%s", b)
+ }
+ if err != nil {
+ t.Fatalf("instrumented run error: %v", err)
+ }
+ }
+ }
+ }
+
+ // At this point we can fork off a bunch of child tests
+ // to check different tool modes.
+ t.Run("MergeSimple", func(t *testing.T) {
+ t.Parallel()
+ testMergeSimple(t, s, s.outdirs[0], s.outdirs[1], "set")
+ testMergeSimple(t, s, s.outdirs[2], s.outdirs[3], "atomic")
+ })
+ t.Run("MergeSelect", func(t *testing.T) {
+ t.Parallel()
+ testMergeSelect(t, s, s.outdirs[0], s.outdirs[1], "set")
+ testMergeSelect(t, s, s.outdirs[2], s.outdirs[3], "atomic")
+ })
+ t.Run("MergePcombine", func(t *testing.T) {
+ t.Parallel()
+ testMergeCombinePrograms(t, s)
+ })
+ t.Run("Dump", func(t *testing.T) {
+ t.Parallel()
+ testDump(t, s)
+ })
+ t.Run("Percent", func(t *testing.T) {
+ t.Parallel()
+ testPercent(t, s)
+ })
+ t.Run("PkgList", func(t *testing.T) {
+ t.Parallel()
+ testPkgList(t, s)
+ })
+ t.Run("Textfmt", func(t *testing.T) {
+ t.Parallel()
+ testTextfmt(t, s)
+ })
+ t.Run("Subtract", func(t *testing.T) {
+ t.Parallel()
+ testSubtract(t, s)
+ })
+ t.Run("Intersect", func(t *testing.T) {
+ t.Parallel()
+ testIntersect(t, s, s.outdirs[0], s.outdirs[1], "set")
+ testIntersect(t, s, s.outdirs[2], s.outdirs[3], "atomic")
+ })
+ t.Run("CounterClash", func(t *testing.T) {
+ t.Parallel()
+ testCounterClash(t, s)
+ })
+ t.Run("TestEmpty", func(t *testing.T) {
+ t.Parallel()
+ testEmpty(t, s)
+ })
+ t.Run("TestCommandLineErrors", func(t *testing.T) {
+ t.Parallel()
+ testCommandLineErrors(t, s, s.outdirs[0])
+ })
+}
+
+const showToolInvocations = true
+
+func runToolOp(t *testing.T, s state, op string, args []string) []string {
+ // Perform tool run.
+ t.Helper()
+ args = append([]string{op}, args...)
+ if showToolInvocations {
+ t.Logf("%s cmd is: %s %+v", op, s.tool, args)
+ }
+ cmd := testenv.Command(t, s.tool, args...)
+ b, err := cmd.CombinedOutput()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "## %s output: %s\n", op, string(b))
+ t.Fatalf("%q run error: %v", op, err)
+ }
+ output := strings.TrimSpace(string(b))
+ lines := strings.Split(output, "\n")
+ if len(lines) == 1 && lines[0] == "" {
+ lines = nil
+ }
+ return lines
+}
+
+func testDump(t *testing.T, s state) {
+ // Run the dumper on the two dirs we generated.
+ dargs := []string{"-pkg=" + mainPkgPath, "-live", "-i=" + s.outdirs[0] + "," + s.outdirs[1]}
+ lines := runToolOp(t, s, "debugdump", dargs)
+
+ // Sift through the output to make sure it has some key elements.
+ testpoints := []struct {
+ tag string
+ re *regexp.Regexp
+ }{
+ {
+ "args",
+ regexp.MustCompile(`^data file .+ GOOS=.+ GOARCH=.+ program args: .+$`),
+ },
+ {
+ "main package",
+ regexp.MustCompile(`^Package path: ` + mainPkgPath + `\s*$`),
+ },
+ {
+ "main function",
+ regexp.MustCompile(`^Func: main\s*$`),
+ },
+ }
+
+ bad := false
+ for _, testpoint := range testpoints {
+ found := false
+ for _, line := range lines {
+ if m := testpoint.re.FindStringSubmatch(line); m != nil {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("dump output regexp match failed for %q", testpoint.tag)
+ bad = true
+ }
+ }
+ if bad {
+ dumplines(lines)
+ }
+}
+
+func testPercent(t *testing.T, s state) {
+ // Run the dumper on the two dirs we generated.
+ dargs := []string{"-pkg=" + mainPkgPath, "-i=" + s.outdirs[0] + "," + s.outdirs[1]}
+ lines := runToolOp(t, s, "percent", dargs)
+
+ // Sift through the output to make sure it has the needful.
+ testpoints := []struct {
+ tag string
+ re *regexp.Regexp
+ }{
+ {
+ "statement coverage percent",
+ regexp.MustCompile(`coverage: \d+\.\d% of statements\s*$`),
+ },
+ }
+
+ bad := false
+ for _, testpoint := range testpoints {
+ found := false
+ for _, line := range lines {
+ if m := testpoint.re.FindStringSubmatch(line); m != nil {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("percent output regexp match failed for %s", testpoint.tag)
+ bad = true
+ }
+ }
+ if bad {
+ dumplines(lines)
+ }
+}
+
+func testPkgList(t *testing.T, s state) {
+ dargs := []string{"-i=" + s.outdirs[0] + "," + s.outdirs[1]}
+ lines := runToolOp(t, s, "pkglist", dargs)
+
+ want := []string{mainPkgPath, mainPkgPath + "/dep"}
+ bad := false
+ if len(lines) != 2 {
+ t.Errorf("expect pkglist to return two lines")
+ bad = true
+ } else {
+ for i := 0; i < 2; i++ {
+ lines[i] = strings.TrimSpace(lines[i])
+ if want[i] != lines[i] {
+ t.Errorf("line %d want %s got %s", i, want[i], lines[i])
+ bad = true
+ }
+ }
+ }
+ if bad {
+ dumplines(lines)
+ }
+}
+
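+// testTextfmt converts the coverage data to textual format with the
+// "textfmt" subcommand and spot-checks the first lines of the output
+// file (the mode line plus the first coverable unit).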
+func testTextfmt(t *testing.T, s state) {
+ outf := s.dir + "/" + "t.txt"
+ dargs := []string{"-pkg=" + mainPkgPath, "-i=" + s.outdirs[0] + "," + s.outdirs[1],
+ "-o", outf}
+ lines := runToolOp(t, s, "textfmt", dargs)
+
+ // No output expected.
+ if len(lines) != 0 {
+ dumplines(lines)
+ t.Errorf("unexpected output from go tool covdata textfmt")
+ }
+
+ // Open and read the first few bits of the file.
+ payload, err := os.ReadFile(outf)
+ if err != nil {
+ t.Fatalf("opening %s: %v", outf, err)
+ }
+ lines = strings.Split(string(payload), "\n")
+ want0 := "mode: set"
+ if lines[0] != want0 {
+ dumplines(lines[0:10])
+ t.Errorf("textfmt: want %s got %s", want0, lines[0])
+ }
+ want1 := mainPkgPath + "/prog1.go:13.14,15.2 1 1"
+ if lines[1] != want1 {
+ dumplines(lines[0:10])
+ t.Errorf("textfmt: want %s got %s", want1, lines[1])
+ }
+}
+
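+// dumplines echoes the given output lines to stderr to aid in
+// debugging test failures.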
+func dumplines(lines []string) {
+ for i := range lines {
+ fmt.Fprintf(os.Stderr, "%s\n", lines[i])
+ }
+}
+
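+// dumpCheck describes a single expectation applied by runDumpChecks to
+// debugdump output: "re" is matched against each line; if "negate" is
+// set the pattern must not match any line, and if "nonzero" or "zero"
+// is set the first submatch is parsed as an integer and required to be
+// nonzero or zero, respectively.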
+type dumpCheck struct {
+ tag string
+ re *regexp.Regexp
+ negate bool
+ nonzero bool
+ zero bool
+}
+
+// runDumpChecks examines the output of "go tool covdata debugdump"
+// for a given output directory, looking for the presence or absence
+// of specific markers.
+func runDumpChecks(t *testing.T, s state, dir string, flags []string, checks []dumpCheck) {
+ dargs := []string{"-i", dir}
+ dargs = append(dargs, flags...)
+ lines := runToolOp(t, s, "debugdump", dargs)
+ if len(lines) == 0 {
+ t.Fatalf("dump run produced no output")
+ }
+
+ bad := false
+ for _, check := range checks {
+ found := false
+ for _, line := range lines {
+ if m := check.re.FindStringSubmatch(line); m != nil {
+ found = true
+ if check.negate {
+ t.Errorf("tag %q: unexpected match", check.tag)
+ bad = true
+ }
+ if check.nonzero || check.zero {
+ if len(m) < 2 {
+ t.Errorf("tag %s: submatch failed (short m)", check.tag)
+ bad = true
+ continue
+ }
+ if m[1] == "" {
+ t.Errorf("tag %s: submatch failed", check.tag)
+ bad = true
+ continue
+ }
+ i, err := strconv.Atoi(m[1])
+ if err != nil {
+ t.Errorf("tag %s: match Atoi failed on %s",
+ check.tag, m[1])
+ continue
+ }
+ if check.zero && i != 0 {
+ t.Errorf("tag %s: match zero failed on %s",
+ check.tag, m[1])
+ } else if check.nonzero && i == 0 {
+ t.Errorf("tag %s: match nonzero failed on %s",
+ check.tag, m[1])
+ }
+ }
+ break
+ }
+ }
+ if !found && !check.negate {
+ t.Errorf("dump output regexp match failed for %s", check.tag)
+ bad = true
+ }
+ }
+ if bad {
+ fmt.Printf("output from 'dump' run:\n")
+ dumplines(lines)
+ }
+}
+
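+// testMergeSimple merges the counter data from indir1 and indir2 into a
+// fresh output directory and then vets the merged result with
+// debugdump. The merge step is roughly equivalent to the command
+// "go tool covdata merge -i=<indir1>,<indir2> -o=<outdir>" (directory
+// names here are illustrative).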
+func testMergeSimple(t *testing.T, s state, indir1, indir2, tag string) {
+ outdir := filepath.Join(s.dir, "simpleMergeOut"+tag)
+ if err := os.Mkdir(outdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", outdir, err)
+ }
+
+ // Merge the two dirs into a final result.
+ ins := fmt.Sprintf("-i=%s,%s", indir1, indir2)
+ out := fmt.Sprintf("-o=%s", outdir)
+ margs := []string{ins, out}
+ lines := runToolOp(t, s, "merge", margs)
+ if len(lines) != 0 {
+ t.Errorf("merge run produced %d lines of unexpected output", len(lines))
+ dumplines(lines)
+ }
+
+ // We expect the merge tool to produce exactly two files: a meta
+ // data file and a counter file. If we get more than just this one
+ // pair, something went wrong.
+ podlist, err := pods.CollectPods([]string{outdir}, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(podlist) != 1 {
+ t.Fatalf("expected 1 pod, got %d pods", len(podlist))
+ }
+ ncdfs := len(podlist[0].CounterDataFiles)
+ if ncdfs != 1 {
+ t.Fatalf("expected 1 counter data file, got %d", ncdfs)
+ }
+
+ // Sift through the output to make sure it has some key elements.
+ // In particular, we want to see entries for all three functions
+ // ("first", "second", and "third").
+ testpoints := []dumpCheck{
+ {
+ tag: "first function",
+ re: regexp.MustCompile(`^Func: first\s*$`),
+ },
+ {
+ tag: "second function",
+ re: regexp.MustCompile(`^Func: second\s*$`),
+ },
+ {
+ tag: "third function",
+ re: regexp.MustCompile(`^Func: third\s*$`),
+ },
+ {
+ tag: "third function unit 0",
+ re: regexp.MustCompile(`^0: L23:C23 -- L24:C12 NS=1 = (\d+)$`),
+ nonzero: true,
+ },
+ {
+ tag: "third function unit 1",
+ re: regexp.MustCompile(`^1: L27:C2 -- L28:C10 NS=2 = (\d+)$`),
+ nonzero: true,
+ },
+ {
+ tag: "third function unit 2",
+ re: regexp.MustCompile(`^2: L24:C12 -- L26:C3 NS=1 = (\d+)$`),
+ nonzero: true,
+ },
+ }
+ flags := []string{"-live", "-pkg=" + mainPkgPath}
+ runDumpChecks(t, s, outdir, flags, testpoints)
+}
+
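+// testMergeSelect merges indir1 and indir2 while restricting the merge
+// (via -pkg) to the "dep" package, then checks that only that package's
+// functions appear in the merged output.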
+func testMergeSelect(t *testing.T, s state, indir1, indir2 string, tag string) {
+ outdir := filepath.Join(s.dir, "selectMergeOut"+tag)
+ if err := os.Mkdir(outdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", outdir, err)
+ }
+
+ // Merge two input dirs into a final result, but filter
+ // based on package.
+ ins := fmt.Sprintf("-i=%s,%s", indir1, indir2)
+ out := fmt.Sprintf("-o=%s", outdir)
+ margs := []string{"-pkg=" + mainPkgPath + "/dep", ins, out}
+ lines := runToolOp(t, s, "merge", margs)
+ if len(lines) != 0 {
+ t.Errorf("merge run produced %d lines of unexpected output", len(lines))
+ dumplines(lines)
+ }
+
+ // Dump the files in the merged output dir and examine the result.
+ // We expect to see only the functions in package "dep".
+ dargs := []string{"-i=" + outdir}
+ lines = runToolOp(t, s, "debugdump", dargs)
+ if len(lines) == 0 {
+ t.Fatalf("dump run produced no output")
+ }
+ want := map[string]int{
+ "Package path: " + mainPkgPath + "/dep": 0,
+ "Func: Dep1": 0,
+ "Func: PDep": 0,
+ }
+ bad := false
+ for _, line := range lines {
+ if v, ok := want[line]; ok {
+ if v != 0 {
+ t.Errorf("duplicate line %s", line)
+ bad = true
+ break
+ }
+ want[line] = 1
+ continue
+ }
+ // no other functions or packages expected.
+ if strings.HasPrefix(line, "Func:") || strings.HasPrefix(line, "Package path:") {
+ t.Errorf("unexpected line: %s", line)
+ bad = true
+ break
+ }
+ }
+ if bad {
+ dumplines(lines)
+ }
+}
+
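+// testMergeCombinePrograms runs the second instrumented program to
+// produce a fresh pair of coverage output dirs, then performs a
+// -pcombine merge of those with the original outdirs and vets the
+// combined result.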
+func testMergeCombinePrograms(t *testing.T, s state) {
+
+ // Run the second instrumented program (s.exepath2), emitting
+ // coverage output into a new set of outdirs.
+ runout := [2]string{}
+ for k := 0; k < 2; k++ {
+ runout[k] = filepath.Join(s.dir, fmt.Sprintf("newcovdata%d", k))
+ if err := os.Mkdir(runout[k], 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", runout[k], err)
+ }
+ args := []string{}
+ if k != 0 {
+ args = append(args, "foo", "bar")
+ }
+ cmd := testenv.Command(t, s.exepath2, args...)
+ cmd.Env = append(cmd.Env, "GOCOVERDIR="+runout[k])
+ b, err := cmd.CombinedOutput()
+ if len(b) != 0 {
+ t.Logf("## instrumented run output:\n%s", b)
+ }
+ if err != nil {
+ t.Fatalf("instrumented run error: %v", err)
+ }
+ }
+
+ // Create out dir for -pcombine merge.
+ moutdir := filepath.Join(s.dir, "mergeCombineOut")
+ if err := os.Mkdir(moutdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", moutdir, err)
+ }
+
+ // Run a merge over both programs, using the -pcombine
+ // flag to do maximal combining.
+ ins := fmt.Sprintf("-i=%s,%s,%s,%s", s.outdirs[0], s.outdirs[1],
+ runout[0], runout[1])
+ out := fmt.Sprintf("-o=%s", moutdir)
+ margs := []string{"-pcombine", ins, out}
+ lines := runToolOp(t, s, "merge", margs)
+ if len(lines) != 0 {
+ t.Errorf("merge run produced unexpected output: %v", lines)
+ }
+
+ // We expect the merge tool to produce exactly two files: a meta
+ // data file and a counter file. If we get more than just this one
+ // pair, something went wrong.
+ podlist, err := pods.CollectPods([]string{moutdir}, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(podlist) != 1 {
+ t.Fatalf("expected 1 pod, got %d pods", len(podlist))
+ }
+ ncdfs := len(podlist[0].CounterDataFiles)
+ if ncdfs != 1 {
+ t.Fatalf("expected 1 counter data file, got %d", ncdfs)
+ }
+
+ // Sift through the output to make sure it has some key elements.
+ testpoints := []dumpCheck{
+ {
+ tag: "first function",
+ re: regexp.MustCompile(`^Func: first\s*$`),
+ },
+ {
+ tag: "sixth function",
+ re: regexp.MustCompile(`^Func: sixth\s*$`),
+ },
+ }
+
+ flags := []string{"-live", "-pkg=" + mainPkgPath}
+ runDumpChecks(t, s, moutdir, flags, testpoints)
+}
+
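+// testSubtract runs the "subtract" subcommand over the two generated
+// output dirs and then uses debugdump to vet selected counter values
+// in the result (some expected to be zero, some nonzero).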
+func testSubtract(t *testing.T, s state) {
+ // Create out dir for subtract merge.
+ soutdir := filepath.Join(s.dir, "subtractOut")
+ if err := os.Mkdir(soutdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", soutdir, err)
+ }
+
+ // Subtract the two dirs into a final result.
+ ins := fmt.Sprintf("-i=%s,%s", s.outdirs[0], s.outdirs[1])
+ out := fmt.Sprintf("-o=%s", soutdir)
+ sargs := []string{ins, out}
+ lines := runToolOp(t, s, "subtract", sargs)
+ if len(lines) != 0 {
+ t.Errorf("subtract run produced unexpected output: %+v", lines)
+ }
+
+ // Dump the files in the subtract output dir and examine the result.
+ dargs := []string{"-pkg=" + mainPkgPath, "-live", "-i=" + soutdir}
+ lines = runToolOp(t, s, "debugdump", dargs)
+ if len(lines) == 0 {
+ t.Errorf("dump run produced no output")
+ }
+
+ // Vet the output.
+ testpoints := []dumpCheck{
+ {
+ tag: "first function",
+ re: regexp.MustCompile(`^Func: first\s*$`),
+ },
+ {
+ tag: "dep function",
+ re: regexp.MustCompile(`^Func: Dep1\s*$`),
+ },
+ {
+ tag: "third function",
+ re: regexp.MustCompile(`^Func: third\s*$`),
+ },
+ {
+ tag: "third function unit 0",
+ re: regexp.MustCompile(`^0: L23:C23 -- L24:C12 NS=1 = (\d+)$`),
+ zero: true,
+ },
+ {
+ tag: "third function unit 1",
+ re: regexp.MustCompile(`^1: L27:C2 -- L28:C10 NS=2 = (\d+)$`),
+ nonzero: true,
+ },
+ {
+ tag: "third function unit 2",
+ re: regexp.MustCompile(`^2: L24:C12 -- L26:C3 NS=1 = (\d+)$`),
+ zero: true,
+ },
+ }
+ flags := []string{}
+ runDumpChecks(t, s, soutdir, flags, testpoints)
+}
+
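+// testIntersect runs the "intersect" subcommand over indir1 and indir2
+// and checks via debugdump (-live) that the function "first" does not
+// appear in the result while "third" does.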
+func testIntersect(t *testing.T, s state, indir1, indir2, tag string) {
+ // Create out dir for intersection.
+ ioutdir := filepath.Join(s.dir, "intersectOut"+tag)
+ if err := os.Mkdir(ioutdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", ioutdir, err)
+ }
+
+ // Intersect the two dirs into a final result.
+ ins := fmt.Sprintf("-i=%s,%s", indir1, indir2)
+ out := fmt.Sprintf("-o=%s", ioutdir)
+ sargs := []string{ins, out}
+ lines := runToolOp(t, s, "intersect", sargs)
+ if len(lines) != 0 {
+ t.Errorf("intersect run produced unexpected output: %+v", lines)
+ }
+
+ // Dump the files in the intersect output dir and examine the result.
+ dargs := []string{"-pkg=" + mainPkgPath, "-live", "-i=" + ioutdir}
+ lines = runToolOp(t, s, "debugdump", dargs)
+ if len(lines) == 0 {
+ t.Errorf("dump run produced no output")
+ }
+
+ // Vet the output.
+ testpoints := []dumpCheck{
+ {
+ tag: "first function",
+ re: regexp.MustCompile(`^Func: first\s*$`),
+ negate: true,
+ },
+ {
+ tag: "third function",
+ re: regexp.MustCompile(`^Func: third\s*$`),
+ },
+ }
+ flags := []string{"-live"}
+ runDumpChecks(t, s, ioutdir, flags, testpoints)
+}
+
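+// testCounterClash attempts a -pcombine merge of a set-mode output dir
+// with an atomic-mode output dir, expecting the merge to fail with a
+// "counter mode clash" error.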
+func testCounterClash(t *testing.T, s state) {
+ // Create out dir.
+ ccoutdir := filepath.Join(s.dir, "ccOut")
+ if err := os.Mkdir(ccoutdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", ccoutdir, err)
+ }
+
+ // Try to merge covdata0 (from prog1.go -countermode=set) with
+ // covdata3 (from prog1.go -countermode=atomic). This should
+ // produce a counter mode clash error.
+ ins := fmt.Sprintf("-i=%s,%s", s.outdirs[0], s.outdirs[3])
+ out := fmt.Sprintf("-o=%s", ccoutdir)
+ args := append([]string{}, "merge", ins, out, "-pcombine")
+ if debugtrace {
+ t.Logf("cc merge command is %s %v\n", s.tool, args)
+ }
+ cmd := testenv.Command(t, s.tool, args...)
+ b, err := cmd.CombinedOutput()
+ t.Logf("%% output: %s\n", string(b))
+ if err == nil {
+ t.Fatalf("clash merge passed unexpectedly")
+ }
+ got := string(b)
+ want := "counter mode clash while reading meta-data"
+ if !strings.Contains(got, want) {
+ t.Errorf("counter clash merge: wanted %s got %s", want, got)
+ }
+}
+
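+// testEmpty runs a selection of covdata subcommands over an empty input
+// directory, verifying that each run completes without error or crash.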
+func testEmpty(t *testing.T, s state) {
+
+ // Create a new empty directory.
+ empty := filepath.Join(s.dir, "empty")
+ if err := os.Mkdir(empty, 0777); err != nil {
+ t.Fatalf("can't create dir %s: %v", empty, err)
+ }
+
+ // Create out dir.
+ eoutdir := filepath.Join(s.dir, "emptyOut")
+ if err := os.Mkdir(eoutdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", eoutdir, err)
+ }
+
+ // Run various operations (merge, dump, textfmt, and so on)
+ // using the empty directory. We're not interested in the output
+ // here, just making sure that you can do these runs without
+ // any error or crash.
+
+ scenarios := []struct {
+ tag string
+ args []string
+ }{
+ {
+ tag: "merge",
+ args: []string{"merge", "-o", eoutdir},
+ },
+ {
+ tag: "textfmt",
+ args: []string{"textfmt", "-o", filepath.Join(eoutdir, "foo.txt")},
+ },
+ {
+ tag: "func",
+ args: []string{"func"},
+ },
+ {
+ tag: "pkglist",
+ args: []string{"pkglist"},
+ },
+ {
+ tag: "debugdump",
+ args: []string{"debugdump"},
+ },
+ {
+ tag: "percent",
+ args: []string{"percent"},
+ },
+ }
+
+ for _, x := range scenarios {
+ ins := fmt.Sprintf("-i=%s", empty)
+ args := append([]string{}, x.args...)
+ args = append(args, ins)
+ if false {
+ t.Logf("cmd is %s %v\n", s.tool, args)
+ }
+ cmd := testenv.Command(t, s.tool, args...)
+ b, err := cmd.CombinedOutput()
+ t.Logf("%% output: %s\n", string(b))
+ if err != nil {
+ t.Fatalf("command %s %+v failed with %v",
+ s.tool, x.args, err)
+ }
+ }
+}
+
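+// testCommandLineErrors feeds the tool intentionally bad command lines
+// and checks that each invocation fails, emitting the expected error
+// text where one is specified.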
+func testCommandLineErrors(t *testing.T, s state, outdir string) {
+
+ // Create out dir.
+ eoutdir := filepath.Join(s.dir, "errorsOut")
+ if err := os.Mkdir(eoutdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", eoutdir, err)
+ }
+
+ // Run the tool with various improper command lines (missing inputs,
+ // bad flag values), checking to make sure that each run fails and,
+ // where an expected error fragment is supplied, that it appears in
+ // the output.
+
+ scenarios := []struct {
+ tag string
+ args []string
+ exp string
+ }{
+ {
+ tag: "input missing",
+ args: []string{"merge", "-o", eoutdir, "-i", "not there"},
+ exp: "error: reading inputs: ",
+ },
+ {
+ tag: "badv",
+ args: []string{"textfmt", "-i", outdir, "-v=abc"},
+ },
+ }
+
+ for _, x := range scenarios {
+ args := append([]string{}, x.args...)
+ if false {
+ t.Logf("cmd is %s %v\n", s.tool, args)
+ }
+ cmd := testenv.Command(t, s.tool, args...)
+ b, err := cmd.CombinedOutput()
+ if err == nil {
+ t.Logf("%% output: %s\n", string(b))
+ t.Fatalf("command %s %+v unexpectedly succeeded",
+ s.tool, x.args)
+ } else {
+ if !strings.Contains(string(b), x.exp) {
+ t.Fatalf("command %s %+v:\ngot:\n%s\nwanted to see: %v\n",
+ s.tool, x.args, string(b), x.exp)
+ }
+ }
+ }
+}