author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-16 19:25:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-16 19:25:22 +0000
commit     f6ad4dcef54c5ce997a4bad5a6d86de229015700 (patch)
tree       7cfa4e31ace5c2bd95c72b154d15af494b2bcbef /src/testing
parent     Initial commit. (diff)
download   golang-1.22-f6ad4dcef54c5ce997a4bad5a6d86de229015700.tar.xz
           golang-1.22-f6ad4dcef54c5ce997a4bad5a6d86de229015700.zip

Adding upstream version 1.22.1. (tag: upstream/1.22.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/testing')
-rw-r--r--  src/testing/allocs.go | 45
-rw-r--r--  src/testing/allocs_test.go | 29
-rw-r--r--  src/testing/benchmark.go | 840
-rw-r--r--  src/testing/benchmark_test.go | 213
-rw-r--r--  src/testing/cover.go | 124
-rw-r--r--  src/testing/example.go | 97
-rw-r--r--  src/testing/export_test.go | 7
-rw-r--r--  src/testing/flag_test.go | 89
-rw-r--r--  src/testing/fstest/mapfs.go | 244
-rw-r--r--  src/testing/fstest/mapfs_test.go | 47
-rw-r--r--  src/testing/fstest/testfs.go | 624
-rw-r--r--  src/testing/fstest/testfs_test.go | 78
-rw-r--r--  src/testing/fuzz.go | 731
-rw-r--r--  src/testing/helper_test.go | 116
-rw-r--r--  src/testing/helperfuncs_test.go | 124
-rw-r--r--  src/testing/internal/testdeps/deps.go | 199
-rw-r--r--  src/testing/iotest/example_test.go | 22
-rw-r--r--  src/testing/iotest/logger.go | 54
-rw-r--r--  src/testing/iotest/logger_test.go | 153
-rw-r--r--  src/testing/iotest/reader.go | 268
-rw-r--r--  src/testing/iotest/reader_test.go | 261
-rw-r--r--  src/testing/iotest/writer.go | 35
-rw-r--r--  src/testing/iotest/writer_test.go | 39
-rw-r--r--  src/testing/match.go | 317
-rw-r--r--  src/testing/match_test.go | 263
-rw-r--r--  src/testing/newcover.go | 59
-rw-r--r--  src/testing/panic_test.go | 267
-rw-r--r--  src/testing/quick/quick.go | 385
-rw-r--r--  src/testing/quick/quick_test.go | 327
-rw-r--r--  src/testing/run_example.go | 66
-rw-r--r--  src/testing/run_example_wasm.go | 76
-rw-r--r--  src/testing/slogtest/example_test.go | 44
-rw-r--r--  src/testing/slogtest/run_test.go | 31
-rw-r--r--  src/testing/slogtest/slogtest.go | 375
-rw-r--r--  src/testing/sub_test.go | 992
-rw-r--r--  src/testing/testing.go | 2409
-rw-r--r--  src/testing/testing_other.go | 13
-rw-r--r--  src/testing/testing_test.go | 814
-rw-r--r--  src/testing/testing_windows.go | 32
39 files changed, 10909 insertions, 0 deletions
diff --git a/src/testing/allocs.go b/src/testing/allocs.go
new file mode 100644
index 0000000..1eeb2d4
--- /dev/null
+++ b/src/testing/allocs.go
@@ -0,0 +1,45 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "runtime"
+)
+
+// AllocsPerRun returns the average number of allocations during calls to f.
+// Although the return value has type float64, it will always be an integral value.
+//
+// To compute the number of allocations, the function will first be run once as
+// a warm-up. The average number of allocations over the specified number of
+// runs will then be measured and returned.
+//
+// AllocsPerRun sets GOMAXPROCS to 1 during its measurement and will restore
+// it before returning.
+func AllocsPerRun(runs int, f func()) (avg float64) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+
+ // Warm up the function
+ f()
+
+ // Measure the starting statistics
+ var memstats runtime.MemStats
+ runtime.ReadMemStats(&memstats)
+ mallocs := 0 - memstats.Mallocs
+
+ // Run the function the specified number of times
+ for i := 0; i < runs; i++ {
+ f()
+ }
+
+ // Read the final statistics
+ runtime.ReadMemStats(&memstats)
+ mallocs += memstats.Mallocs
+
+ // Average the mallocs over the runs (not counting the warm-up).
+ // We are forced to return a float64 because the API is silly, but do
+ // the division as integers so we can ask if AllocsPerRun()==1
+ // instead of AllocsPerRun()<2.
+ return float64(mallocs / uint64(runs))
+}
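
As an illustration of the AllocsPerRun API documented above, a minimal sketch of a test that bounds the number of allocations a function performs per call; the package name, test name, and allocation bound are hypothetical:

    package mypkg_test // hypothetical package

    import (
        "bytes"
        "testing"
    )

    var sink string // package-level sink so the compiler cannot discard the result

    // TestConcatAllocs asserts an upper bound on allocations per call.
    // AllocsPerRun returns an integral float64, so exact comparisons are also safe.
    func TestConcatAllocs(t *testing.T) {
        allocs := testing.AllocsPerRun(1000, func() {
            var b bytes.Buffer
            b.WriteString("hello, ")
            b.WriteString("world")
            sink = b.String()
        })
        if allocs > 3 { // bound chosen for illustration only
            t.Errorf("got %v allocs per run, want at most 3", allocs)
        }
    }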
diff --git a/src/testing/allocs_test.go b/src/testing/allocs_test.go
new file mode 100644
index 0000000..bbd3ae7
--- /dev/null
+++ b/src/testing/allocs_test.go
@@ -0,0 +1,29 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing_test
+
+import "testing"
+
+var global any
+
+var allocsPerRunTests = []struct {
+ name string
+ fn func()
+ allocs float64
+}{
+ {"alloc *byte", func() { global = new(*byte) }, 1},
+ {"alloc complex128", func() { global = new(complex128) }, 1},
+ {"alloc float64", func() { global = new(float64) }, 1},
+ {"alloc int32", func() { global = new(int32) }, 1},
+ {"alloc byte", func() { global = new(byte) }, 1},
+}
+
+func TestAllocsPerRun(t *testing.T) {
+ for _, tt := range allocsPerRunTests {
+ if allocs := testing.AllocsPerRun(100, tt.fn); allocs != tt.allocs {
+ t.Errorf("AllocsPerRun(100, %s) = %v, want %v", tt.name, allocs, tt.allocs)
+ }
+ }
+}
diff --git a/src/testing/benchmark.go b/src/testing/benchmark.go
new file mode 100644
index 0000000..9491213
--- /dev/null
+++ b/src/testing/benchmark.go
@@ -0,0 +1,840 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "flag"
+ "fmt"
+ "internal/sysinfo"
+ "io"
+ "math"
+ "os"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode"
+)
+
+func initBenchmarkFlags() {
+ matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
+ benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
+ flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d` or N times if `d` is of the form Nx")
+}
+
+var (
+ matchBenchmarks *string
+ benchmarkMemory *bool
+
+ benchTime = durationOrCountFlag{d: 1 * time.Second} // changed during test of testing package
+)
+
+type durationOrCountFlag struct {
+ d time.Duration
+ n int
+ allowZero bool
+}
+
+func (f *durationOrCountFlag) String() string {
+ if f.n > 0 {
+ return fmt.Sprintf("%dx", f.n)
+ }
+ return f.d.String()
+}
+
+func (f *durationOrCountFlag) Set(s string) error {
+ if strings.HasSuffix(s, "x") {
+ n, err := strconv.ParseInt(s[:len(s)-1], 10, 0)
+ if err != nil || n < 0 || (!f.allowZero && n == 0) {
+ return fmt.Errorf("invalid count")
+ }
+ *f = durationOrCountFlag{n: int(n)}
+ return nil
+ }
+ d, err := time.ParseDuration(s)
+ if err != nil || d < 0 || (!f.allowZero && d == 0) {
+ return fmt.Errorf("invalid duration")
+ }
+ *f = durationOrCountFlag{d: d}
+ return nil
+}
+
+// Global lock to ensure only one benchmark runs at a time.
+var benchmarkLock sync.Mutex
+
+// Used for every benchmark for measuring memory.
+var memStats runtime.MemStats
+
+// InternalBenchmark is an internal type but exported because it is cross-package;
+// it is part of the implementation of the "go test" command.
+type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+}
+
+// B is a type passed to [Benchmark] functions to manage benchmark
+// timing and to specify the number of iterations to run.
+//
+// A benchmark ends when its Benchmark function returns or calls any of the methods
+// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods must be called
+// only from the goroutine running the Benchmark function.
+// The other reporting methods, such as the variations of Log and Error,
+// may be called simultaneously from multiple goroutines.
+//
+// Like in tests, benchmark logs are accumulated during execution
+// and dumped to standard output when done. Unlike in tests, benchmark logs
+// are always printed, so as not to hide output whose existence may be
+// affecting benchmark results.
+type B struct {
+ common
+ importPath string // import path of the package containing the benchmark
+ context *benchContext
+ N int
+ previousN int // number of iterations in the previous run
+ previousDuration time.Duration // total duration of the previous run
+ benchFunc func(b *B)
+ benchTime durationOrCountFlag
+ bytes int64
+ missingBytes bool // one of the subbenchmarks does not have bytes set.
+ timerOn bool
+ showAllocResult bool
+ result BenchmarkResult
+ parallelism int // RunParallel creates parallelism*GOMAXPROCS goroutines
+ // The initial states of memStats.Mallocs and memStats.TotalAlloc.
+ startAllocs uint64
+ startBytes uint64
+ // The net total of this test after being run.
+ netAllocs uint64
+ netBytes uint64
+ // Extra metrics collected by ReportMetric.
+ extra map[string]float64
+}
+
+// StartTimer starts timing a test. This function is called automatically
+// before a benchmark starts, but it can also be used to resume timing after
+// a call to [B.StopTimer].
+func (b *B) StartTimer() {
+ if !b.timerOn {
+ runtime.ReadMemStats(&memStats)
+ b.startAllocs = memStats.Mallocs
+ b.startBytes = memStats.TotalAlloc
+ b.start = time.Now()
+ b.timerOn = true
+ }
+}
+
+// StopTimer stops timing a test. This can be used to pause the timer
+// while performing complex initialization that you don't
+// want to measure.
+func (b *B) StopTimer() {
+ if b.timerOn {
+ b.duration += time.Since(b.start)
+ runtime.ReadMemStats(&memStats)
+ b.netAllocs += memStats.Mallocs - b.startAllocs
+ b.netBytes += memStats.TotalAlloc - b.startBytes
+ b.timerOn = false
+ }
+}
+
+// ResetTimer zeroes the elapsed benchmark time and memory allocation counters
+// and deletes user-reported metrics.
+// It does not affect whether the timer is running.
+func (b *B) ResetTimer() {
+ if b.extra == nil {
+ // Allocate the extra map before reading memory stats.
+ // Pre-size it to make more allocation unlikely.
+ b.extra = make(map[string]float64, 16)
+ } else {
+ clear(b.extra)
+ }
+ if b.timerOn {
+ runtime.ReadMemStats(&memStats)
+ b.startAllocs = memStats.Mallocs
+ b.startBytes = memStats.TotalAlloc
+ b.start = time.Now()
+ }
+ b.duration = 0
+ b.netAllocs = 0
+ b.netBytes = 0
+}
+
+// SetBytes records the number of bytes processed in a single operation.
+// If this is called, the benchmark will report ns/op and MB/s.
+func (b *B) SetBytes(n int64) { b.bytes = n }
+
+// ReportAllocs enables malloc statistics for this benchmark.
+// It is equivalent to setting -test.benchmem, but it only affects the
+// benchmark function that calls ReportAllocs.
+func (b *B) ReportAllocs() {
+ b.showAllocResult = true
+}
+
+// runN runs a single benchmark for the specified number of iterations.
+func (b *B) runN(n int) {
+ benchmarkLock.Lock()
+ defer benchmarkLock.Unlock()
+ defer func() {
+ b.runCleanup(normalPanic)
+ b.checkRaces()
+ }()
+ // Try to get a comparable environment for each run
+ // by clearing garbage from previous runs.
+ runtime.GC()
+ b.resetRaces()
+ b.N = n
+ b.parallelism = 1
+ b.ResetTimer()
+ b.StartTimer()
+ b.benchFunc(b)
+ b.StopTimer()
+ b.previousN = n
+ b.previousDuration = b.duration
+}
+
+// run1 runs the first iteration of benchFunc. It reports whether more
+// iterations of this benchmark should be run.
+func (b *B) run1() bool {
+ if ctx := b.context; ctx != nil {
+ // Extend maxLen, if needed.
+ if n := len(b.name) + ctx.extLen + 1; n > ctx.maxLen {
+ ctx.maxLen = n + 8 // Add additional slack to avoid too many jumps in size.
+ }
+ }
+ go func() {
+ // Signal that we're done whether we return normally
+ // or by FailNow's runtime.Goexit.
+ defer func() {
+ b.signal <- true
+ }()
+
+ b.runN(1)
+ }()
+ <-b.signal
+ if b.failed {
+ fmt.Fprintf(b.w, "%s--- FAIL: %s\n%s", b.chatty.prefix(), b.name, b.output)
+ return false
+ }
+ // Only print the output if we know we are not going to proceed.
+ // Otherwise it is printed in processBench.
+ b.mu.RLock()
+ finished := b.finished
+ b.mu.RUnlock()
+ if b.hasSub.Load() || finished {
+ tag := "BENCH"
+ if b.skipped {
+ tag = "SKIP"
+ }
+ if b.chatty != nil && (len(b.output) > 0 || finished) {
+ b.trimOutput()
+ fmt.Fprintf(b.w, "%s--- %s: %s\n%s", b.chatty.prefix(), tag, b.name, b.output)
+ }
+ return false
+ }
+ return true
+}
+
+var labelsOnce sync.Once
+
+// run executes the benchmark in a separate goroutine, including all of its
+// subbenchmarks. b must not have subbenchmarks.
+func (b *B) run() {
+ labelsOnce.Do(func() {
+ fmt.Fprintf(b.w, "goos: %s\n", runtime.GOOS)
+ fmt.Fprintf(b.w, "goarch: %s\n", runtime.GOARCH)
+ if b.importPath != "" {
+ fmt.Fprintf(b.w, "pkg: %s\n", b.importPath)
+ }
+ if cpu := sysinfo.CPUName(); cpu != "" {
+ fmt.Fprintf(b.w, "cpu: %s\n", cpu)
+ }
+ })
+ if b.context != nil {
+ // Running go test --test.bench
+ b.context.processBench(b) // Must call doBench.
+ } else {
+ // Running func Benchmark.
+ b.doBench()
+ }
+}
+
+func (b *B) doBench() BenchmarkResult {
+ go b.launch()
+ <-b.signal
+ return b.result
+}
+
+// launch launches the benchmark function. It gradually increases the number
+// of benchmark iterations until the benchmark runs for the requested benchtime.
+// launch is run by the doBench function as a separate goroutine.
+// run1 must have been called on b.
+func (b *B) launch() {
+ // Signal that we're done whether we return normally
+ // or by FailNow's runtime.Goexit.
+ defer func() {
+ b.signal <- true
+ }()
+
+ // Run the benchmark for at least the specified amount of time.
+ if b.benchTime.n > 0 {
+ // We already ran a single iteration in run1.
+ // If -benchtime=1x was requested, use that result.
+ // See https://golang.org/issue/32051.
+ if b.benchTime.n > 1 {
+ b.runN(b.benchTime.n)
+ }
+ } else {
+ d := b.benchTime.d
+ for n := int64(1); !b.failed && b.duration < d && n < 1e9; {
+ last := n
+ // Predict required iterations.
+ goalns := d.Nanoseconds()
+ prevIters := int64(b.N)
+ prevns := b.duration.Nanoseconds()
+ if prevns <= 0 {
+ // Round up, to avoid div by zero.
+ prevns = 1
+ }
+ // Order of operations matters.
+ // For very fast benchmarks, prevIters ~= prevns.
+ // If you divide first, you get 0 or 1,
+ // which can hide an order of magnitude in execution time.
+ // So multiply first, then divide.
+ n = goalns * prevIters / prevns
+ // Run more iterations than we think we'll need (1.2x).
+ n += n / 5
+ // Don't grow too fast in case we had timing errors previously.
+ n = min(n, 100*last)
+ // Be sure to run at least one more than last time.
+ n = max(n, last+1)
+ // Don't run more than 1e9 times. (This also keeps n in int range on 32 bit platforms.)
+ n = min(n, 1e9)
+ b.runN(int(n))
+ }
+ }
+ b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.extra}
+}
+
+// Elapsed returns the measured elapsed time of the benchmark.
+// The duration reported by Elapsed matches the one measured by
+// [B.StartTimer], [B.StopTimer], and [B.ResetTimer].
+func (b *B) Elapsed() time.Duration {
+ d := b.duration
+ if b.timerOn {
+ d += time.Since(b.start)
+ }
+ return d
+}
+
+// ReportMetric adds "n unit" to the reported benchmark results.
+// If the metric is per-iteration, the caller should divide by b.N,
+// and by convention units should end in "/op".
+// ReportMetric overrides any previously reported value for the same unit.
+// ReportMetric panics if unit is the empty string or if unit contains
+// any whitespace.
+// If unit is a unit normally reported by the benchmark framework itself
+// (such as "allocs/op"), ReportMetric will override that metric.
+// Setting "ns/op" to 0 will suppress that built-in metric.
+func (b *B) ReportMetric(n float64, unit string) {
+ if unit == "" {
+ panic("metric unit must not be empty")
+ }
+ if strings.IndexFunc(unit, unicode.IsSpace) >= 0 {
+ panic("metric unit must not contain whitespace")
+ }
+ b.extra[unit] = n
+}
+
+// BenchmarkResult contains the results of a benchmark run.
+type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+ MemAllocs uint64 // The total number of memory allocations.
+ MemBytes uint64 // The total number of bytes allocated.
+
+ // Extra records additional metrics reported by ReportMetric.
+ Extra map[string]float64
+}
+
+// NsPerOp returns the "ns/op" metric.
+func (r BenchmarkResult) NsPerOp() int64 {
+ if v, ok := r.Extra["ns/op"]; ok {
+ return int64(v)
+ }
+ if r.N <= 0 {
+ return 0
+ }
+ return r.T.Nanoseconds() / int64(r.N)
+}
+
+// mbPerSec returns the "MB/s" metric.
+func (r BenchmarkResult) mbPerSec() float64 {
+ if v, ok := r.Extra["MB/s"]; ok {
+ return v
+ }
+ if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
+ return 0
+ }
+ return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
+}
+
+// AllocsPerOp returns the "allocs/op" metric,
+// which is calculated as r.MemAllocs / r.N.
+func (r BenchmarkResult) AllocsPerOp() int64 {
+ if v, ok := r.Extra["allocs/op"]; ok {
+ return int64(v)
+ }
+ if r.N <= 0 {
+ return 0
+ }
+ return int64(r.MemAllocs) / int64(r.N)
+}
+
+// AllocedBytesPerOp returns the "B/op" metric,
+// which is calculated as r.MemBytes / r.N.
+func (r BenchmarkResult) AllocedBytesPerOp() int64 {
+ if v, ok := r.Extra["B/op"]; ok {
+ return int64(v)
+ }
+ if r.N <= 0 {
+ return 0
+ }
+ return int64(r.MemBytes) / int64(r.N)
+}
+
+// String returns a summary of the benchmark results.
+// It follows the benchmark result line format from
+// https://golang.org/design/14313-benchmark-format, not including the
+// benchmark name.
+// Extra metrics override built-in metrics of the same name.
+// String does not include allocs/op or B/op, since those are reported
+// by [BenchmarkResult.MemString].
+func (r BenchmarkResult) String() string {
+ buf := new(strings.Builder)
+ fmt.Fprintf(buf, "%8d", r.N)
+
+ // Get ns/op as a float.
+ ns, ok := r.Extra["ns/op"]
+ if !ok {
+ ns = float64(r.T.Nanoseconds()) / float64(r.N)
+ }
+ if ns != 0 {
+ buf.WriteByte('\t')
+ prettyPrint(buf, ns, "ns/op")
+ }
+
+ if mbs := r.mbPerSec(); mbs != 0 {
+ fmt.Fprintf(buf, "\t%7.2f MB/s", mbs)
+ }
+
+ // Print extra metrics that aren't represented in the standard
+ // metrics.
+ var extraKeys []string
+ for k := range r.Extra {
+ switch k {
+ case "ns/op", "MB/s", "B/op", "allocs/op":
+ // Built-in metrics reported elsewhere.
+ continue
+ }
+ extraKeys = append(extraKeys, k)
+ }
+ sort.Strings(extraKeys)
+ for _, k := range extraKeys {
+ buf.WriteByte('\t')
+ prettyPrint(buf, r.Extra[k], k)
+ }
+ return buf.String()
+}
+
+func prettyPrint(w io.Writer, x float64, unit string) {
+ // Print all numbers with 10 places before the decimal point
+ // and small numbers with four sig figs. Field widths are
+ // chosen to fit the whole part in 10 places while aligning
+ // the decimal point of all fractional formats.
+ var format string
+ switch y := math.Abs(x); {
+ case y == 0 || y >= 999.95:
+ format = "%10.0f %s"
+ case y >= 99.995:
+ format = "%12.1f %s"
+ case y >= 9.9995:
+ format = "%13.2f %s"
+ case y >= 0.99995:
+ format = "%14.3f %s"
+ case y >= 0.099995:
+ format = "%15.4f %s"
+ case y >= 0.0099995:
+ format = "%16.5f %s"
+ case y >= 0.00099995:
+ format = "%17.6f %s"
+ default:
+ format = "%18.7f %s"
+ }
+ fmt.Fprintf(w, format, x, unit)
+}
+
+// MemString returns r.AllocedBytesPerOp and r.AllocsPerOp in the same format as 'go test'.
+func (r BenchmarkResult) MemString() string {
+ return fmt.Sprintf("%8d B/op\t%8d allocs/op",
+ r.AllocedBytesPerOp(), r.AllocsPerOp())
+}
+
+// benchmarkName returns full name of benchmark including procs suffix.
+func benchmarkName(name string, n int) string {
+ if n != 1 {
+ return fmt.Sprintf("%s-%d", name, n)
+ }
+ return name
+}
+
+type benchContext struct {
+ match *matcher
+
+ maxLen int // The largest recorded benchmark name.
+ extLen int // Maximum extension length.
+}
+
+// RunBenchmarks is an internal function but exported because it is cross-package;
+// it is part of the implementation of the "go test" command.
+func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
+ runBenchmarks("", matchString, benchmarks)
+}
+
+func runBenchmarks(importPath string, matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) bool {
+ // If no flag was specified, don't run benchmarks.
+ if len(*matchBenchmarks) == 0 {
+ return true
+ }
+ // Collect matching benchmarks and determine longest name.
+ maxprocs := 1
+ for _, procs := range cpuList {
+ if procs > maxprocs {
+ maxprocs = procs
+ }
+ }
+ ctx := &benchContext{
+ match: newMatcher(matchString, *matchBenchmarks, "-test.bench", *skip),
+ extLen: len(benchmarkName("", maxprocs)),
+ }
+ var bs []InternalBenchmark
+ for _, Benchmark := range benchmarks {
+ if _, matched, _ := ctx.match.fullName(nil, Benchmark.Name); matched {
+ bs = append(bs, Benchmark)
+ benchName := benchmarkName(Benchmark.Name, maxprocs)
+ if l := len(benchName) + ctx.extLen + 1; l > ctx.maxLen {
+ ctx.maxLen = l
+ }
+ }
+ }
+ main := &B{
+ common: common{
+ name: "Main",
+ w: os.Stdout,
+ bench: true,
+ },
+ importPath: importPath,
+ benchFunc: func(b *B) {
+ for _, Benchmark := range bs {
+ b.Run(Benchmark.Name, Benchmark.F)
+ }
+ },
+ benchTime: benchTime,
+ context: ctx,
+ }
+ if Verbose() {
+ main.chatty = newChattyPrinter(main.w)
+ }
+ main.runN(1)
+ return !main.failed
+}
+
+// processBench runs bench b for the configured CPU counts and prints the results.
+func (ctx *benchContext) processBench(b *B) {
+ for i, procs := range cpuList {
+ for j := uint(0); j < *count; j++ {
+ runtime.GOMAXPROCS(procs)
+ benchName := benchmarkName(b.name, procs)
+
+ // If it's chatty, we've already printed this information.
+ if b.chatty == nil {
+ fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
+ }
+ // Recompute the running time for all but the first iteration.
+ if i > 0 || j > 0 {
+ b = &B{
+ common: common{
+ signal: make(chan bool),
+ name: b.name,
+ w: b.w,
+ chatty: b.chatty,
+ bench: true,
+ },
+ benchFunc: b.benchFunc,
+ benchTime: b.benchTime,
+ }
+ b.run1()
+ }
+ r := b.doBench()
+ if b.failed {
+ // The output could be very long here, but probably isn't.
+ // We print it all, regardless, because we don't want to trim the reason
+ // the benchmark failed.
+ fmt.Fprintf(b.w, "%s--- FAIL: %s\n%s", b.chatty.prefix(), benchName, b.output)
+ continue
+ }
+ results := r.String()
+ if b.chatty != nil {
+ fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
+ }
+ if *benchmarkMemory || b.showAllocResult {
+ results += "\t" + r.MemString()
+ }
+ fmt.Fprintln(b.w, results)
+ // Unlike with tests, we ignore the -chatty flag and always print output for
+ // benchmarks since the output generation time will skew the results.
+ if len(b.output) > 0 {
+ b.trimOutput()
+ fmt.Fprintf(b.w, "%s--- BENCH: %s\n%s", b.chatty.prefix(), benchName, b.output)
+ }
+ if p := runtime.GOMAXPROCS(-1); p != procs {
+ fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
+ }
+ if b.chatty != nil && b.chatty.json {
+ b.chatty.Updatef("", "=== NAME %s\n", "")
+ }
+ }
+ }
+}
+
+// If hideStdoutForTesting is true, Run does not print the benchName.
+// This avoids a spurious print during 'go test' on package testing itself,
+// which invokes b.Run in its own tests (see sub_test.go).
+var hideStdoutForTesting = false
+
+// Run benchmarks f as a subbenchmark with the given name. It reports
+// whether there were any failures.
+//
+// A subbenchmark is like any other benchmark. A benchmark that calls Run at
+// least once will not be measured itself and will be called once with N=1.
+func (b *B) Run(name string, f func(b *B)) bool {
+ // Since b has subbenchmarks, we will no longer run it as a benchmark itself.
+ // Release the lock and acquire it on exit to ensure locks stay paired.
+ b.hasSub.Store(true)
+ benchmarkLock.Unlock()
+ defer benchmarkLock.Lock()
+
+ benchName, ok, partial := b.name, true, false
+ if b.context != nil {
+ benchName, ok, partial = b.context.match.fullName(&b.common, name)
+ }
+ if !ok {
+ return true
+ }
+ var pc [maxStackLen]uintptr
+ n := runtime.Callers(2, pc[:])
+ sub := &B{
+ common: common{
+ signal: make(chan bool),
+ name: benchName,
+ parent: &b.common,
+ level: b.level + 1,
+ creator: pc[:n],
+ w: b.w,
+ chatty: b.chatty,
+ bench: true,
+ },
+ importPath: b.importPath,
+ benchFunc: f,
+ benchTime: b.benchTime,
+ context: b.context,
+ }
+ if partial {
+ // Partial name match, like -bench=X/Y matching BenchmarkX.
+ // Only process sub-benchmarks, if any.
+ sub.hasSub.Store(true)
+ }
+
+ if b.chatty != nil {
+ labelsOnce.Do(func() {
+ fmt.Printf("goos: %s\n", runtime.GOOS)
+ fmt.Printf("goarch: %s\n", runtime.GOARCH)
+ if b.importPath != "" {
+ fmt.Printf("pkg: %s\n", b.importPath)
+ }
+ if cpu := sysinfo.CPUName(); cpu != "" {
+ fmt.Printf("cpu: %s\n", cpu)
+ }
+ })
+
+ if !hideStdoutForTesting {
+ if b.chatty.json {
+ b.chatty.Updatef(benchName, "=== RUN %s\n", benchName)
+ }
+ fmt.Println(benchName)
+ }
+ }
+
+ if sub.run1() {
+ sub.run()
+ }
+ b.add(sub.result)
+ return !sub.failed
+}
+
+// add simulates running benchmarks in sequence in a single iteration. It is
+// used to give some meaningful results in case func Benchmark is used in
+// combination with Run.
+func (b *B) add(other BenchmarkResult) {
+ r := &b.result
+// The aggregated BenchmarkResults resemble running all subbenchmarks
+// in sequence in a single benchmark.
+ r.N = 1
+ r.T += time.Duration(other.NsPerOp())
+ if other.Bytes == 0 {
+ // Summing Bytes is meaningless in aggregate if not all subbenchmarks
+ // set it.
+ b.missingBytes = true
+ r.Bytes = 0
+ }
+ if !b.missingBytes {
+ r.Bytes += other.Bytes
+ }
+ r.MemAllocs += uint64(other.AllocsPerOp())
+ r.MemBytes += uint64(other.AllocedBytesPerOp())
+}
+
+// trimOutput shortens the output from a benchmark, which can be very long.
+func (b *B) trimOutput() {
+ // The output is likely to appear multiple times because the benchmark
+ // is run multiple times, but at least it will be seen. This is not a big deal
+ // because benchmarks rarely print, but just in case, we trim it if it's too long.
+ const maxNewlines = 10
+ for nlCount, j := 0, 0; j < len(b.output); j++ {
+ if b.output[j] == '\n' {
+ nlCount++
+ if nlCount >= maxNewlines {
+ b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
+ break
+ }
+ }
+ }
+}
+
+// A PB is used by RunParallel for running parallel benchmarks.
+type PB struct {
+ globalN *atomic.Uint64 // iteration counter shared between all worker goroutines
+ grain uint64 // acquire that many iterations from globalN at once
+ cache uint64 // local cache of acquired iterations
+ bN uint64 // total number of iterations to execute (b.N)
+}
+
+// Next reports whether there are more iterations to execute.
+func (pb *PB) Next() bool {
+ if pb.cache == 0 {
+ n := pb.globalN.Add(pb.grain)
+ if n <= pb.bN {
+ pb.cache = pb.grain
+ } else if n < pb.bN+pb.grain {
+ pb.cache = pb.bN + pb.grain - n
+ } else {
+ return false
+ }
+ }
+ pb.cache--
+ return true
+}
+
+// RunParallel runs a benchmark in parallel.
+// It creates multiple goroutines and distributes b.N iterations among them.
+// The number of goroutines defaults to GOMAXPROCS. To increase parallelism for
+// non-CPU-bound benchmarks, call [B.SetParallelism] before RunParallel.
+// RunParallel is usually used with the go test -cpu flag.
+//
+// The body function will be run in each goroutine. It should set up any
+// goroutine-local state and then iterate until pb.Next returns false.
+// It should not use the [B.StartTimer], [B.StopTimer], or [B.ResetTimer] functions,
+// because they have global effect. It should also not call [B.Run].
+//
+// RunParallel reports ns/op values as wall time for the benchmark as a whole,
+// not the sum of wall time or CPU time over each parallel goroutine.
+func (b *B) RunParallel(body func(*PB)) {
+ if b.N == 0 {
+ return // Nothing to do when probing.
+ }
+ // Calculate grain size as number of iterations that take ~100µs.
+ // 100µs is enough to amortize the overhead and provide sufficient
+ // dynamic load balancing.
+ grain := uint64(0)
+ if b.previousN > 0 && b.previousDuration > 0 {
+ grain = 1e5 * uint64(b.previousN) / uint64(b.previousDuration)
+ }
+ if grain < 1 {
+ grain = 1
+ }
+ // We expect the inner loop and function call to take at least 10ns,
+ // so do not do more than 100µs/10ns=1e4 iterations.
+ if grain > 1e4 {
+ grain = 1e4
+ }
+
+ var n atomic.Uint64
+ numProcs := b.parallelism * runtime.GOMAXPROCS(0)
+ var wg sync.WaitGroup
+ wg.Add(numProcs)
+ for p := 0; p < numProcs; p++ {
+ go func() {
+ defer wg.Done()
+ pb := &PB{
+ globalN: &n,
+ grain: grain,
+ bN: uint64(b.N),
+ }
+ body(pb)
+ }()
+ }
+ wg.Wait()
+ if n.Load() <= uint64(b.N) && !b.Failed() {
+ b.Fatal("RunParallel: body exited without pb.Next() == false")
+ }
+}
+
+// SetParallelism sets the number of goroutines used by [B.RunParallel] to p*GOMAXPROCS.
+// There is usually no need to call SetParallelism for CPU-bound benchmarks.
+// If p is less than 1, this call will have no effect.
+func (b *B) SetParallelism(p int) {
+ if p >= 1 {
+ b.parallelism = p
+ }
+}
+
+// Benchmark benchmarks a single function. It is useful for creating
+// custom benchmarks that do not use the "go test" command.
+//
+// If f depends on testing flags, then [Init] must be used to register
+// those flags before calling Benchmark and before calling [flag.Parse].
+//
+// If f calls Run, the result will be an estimate of running all its
+// subbenchmarks that don't call Run in sequence in a single benchmark.
+func Benchmark(f func(b *B)) BenchmarkResult {
+ b := &B{
+ common: common{
+ signal: make(chan bool),
+ w: discard{},
+ },
+ benchFunc: f,
+ benchTime: benchTime,
+ }
+ if b.run1() {
+ b.run()
+ }
+ return b.result
+}
+
+type discard struct{}
+
+func (discard) Write(b []byte) (n int, err error) { return len(b), nil }
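
Tying the timer and allocation APIs above together, a minimal benchmark sketch (names are illustrative, not part of this package) that discards setup cost with ResetTimer, reports throughput via SetBytes, and enables allocation stats with ReportAllocs:

    package mypkg_test // hypothetical

    import (
        "bytes"
        "crypto/sha256"
        "testing"
    )

    var digest [32]byte // sink to keep the result live

    func BenchmarkHashMB(b *testing.B) {
        b.ReportAllocs() // same effect as -test.benchmem, limited to this benchmark
        data := bytes.Repeat([]byte("x"), 1<<20)
        b.SetBytes(int64(len(data))) // adds an MB/s column to the result
        b.ResetTimer()               // exclude the setup above from the measurement
        for i := 0; i < b.N; i++ {
            digest = sha256.Sum256(data)
        }
    }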
diff --git a/src/testing/benchmark_test.go b/src/testing/benchmark_test.go
new file mode 100644
index 0000000..2987170
--- /dev/null
+++ b/src/testing/benchmark_test.go
@@ -0,0 +1,213 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing_test
+
+import (
+ "bytes"
+ "runtime"
+ "sort"
+ "strings"
+ "sync/atomic"
+ "testing"
+ "text/template"
+ "time"
+)
+
+var prettyPrintTests = []struct {
+ v float64
+ expected string
+}{
+ {0, " 0 x"},
+ {1234.1, " 1234 x"},
+ {-1234.1, " -1234 x"},
+ {999.950001, " 1000 x"},
+ {999.949999, " 999.9 x"},
+ {99.9950001, " 100.0 x"},
+ {99.9949999, " 99.99 x"},
+ {-99.9949999, " -99.99 x"},
+ {0.000999950001, " 0.001000 x"},
+ {0.000999949999, " 0.0009999 x"}, // smallest case
+ {0.0000999949999, " 0.0001000 x"},
+}
+
+func TestPrettyPrint(t *testing.T) {
+ for _, tt := range prettyPrintTests {
+ buf := new(strings.Builder)
+ testing.PrettyPrint(buf, tt.v, "x")
+ if tt.expected != buf.String() {
+ t.Errorf("prettyPrint(%v): expected %q, actual %q", tt.v, tt.expected, buf.String())
+ }
+ }
+}
+
+func TestResultString(t *testing.T) {
+ // Test fractional ns/op handling
+ r := testing.BenchmarkResult{
+ N: 100,
+ T: 240 * time.Nanosecond,
+ }
+ if r.NsPerOp() != 2 {
+ t.Errorf("NsPerOp: expected 2, actual %v", r.NsPerOp())
+ }
+ if want, got := " 100\t 2.400 ns/op", r.String(); want != got {
+ t.Errorf("String: expected %q, actual %q", want, got)
+ }
+
+ // Test sub-1 ns/op (issue #31005)
+ r.T = 40 * time.Nanosecond
+ if want, got := " 100\t 0.4000 ns/op", r.String(); want != got {
+ t.Errorf("String: expected %q, actual %q", want, got)
+ }
+
+ // Test 0 ns/op
+ r.T = 0
+ if want, got := " 100", r.String(); want != got {
+ t.Errorf("String: expected %q, actual %q", want, got)
+ }
+}
+
+func TestRunParallel(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ testing.Benchmark(func(b *testing.B) {
+ procs := uint32(0)
+ iters := uint64(0)
+ b.SetParallelism(3)
+ b.RunParallel(func(pb *testing.PB) {
+ atomic.AddUint32(&procs, 1)
+ for pb.Next() {
+ atomic.AddUint64(&iters, 1)
+ }
+ })
+ if want := uint32(3 * runtime.GOMAXPROCS(0)); procs != want {
+ t.Errorf("got %v procs, want %v", procs, want)
+ }
+ if iters != uint64(b.N) {
+ t.Errorf("got %v iters, want %v", iters, b.N)
+ }
+ })
+}
+
+func TestRunParallelFail(t *testing.T) {
+ testing.Benchmark(func(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ // The function must be able to log/abort
+ // w/o crashing/deadlocking the whole benchmark.
+ b.Log("log")
+ b.Error("error")
+ })
+ })
+}
+
+func TestRunParallelFatal(t *testing.T) {
+ testing.Benchmark(func(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if b.N > 1 {
+ b.Fatal("error")
+ }
+ }
+ })
+ })
+}
+
+func TestRunParallelSkipNow(t *testing.T) {
+ testing.Benchmark(func(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if b.N > 1 {
+ b.SkipNow()
+ }
+ }
+ })
+ })
+}
+
+func ExampleB_RunParallel() {
+ // Parallel benchmark for text/template.Template.Execute on a single object.
+ testing.Benchmark(func(b *testing.B) {
+ templ := template.Must(template.New("test").Parse("Hello, {{.}}!"))
+ // RunParallel will create GOMAXPROCS goroutines
+ // and distribute work among them.
+ b.RunParallel(func(pb *testing.PB) {
+ // Each goroutine has its own bytes.Buffer.
+ var buf bytes.Buffer
+ for pb.Next() {
+ // The loop body is executed b.N times total across all goroutines.
+ buf.Reset()
+ templ.Execute(&buf, "World")
+ }
+ })
+ })
+}
+
+func TestReportMetric(t *testing.T) {
+ res := testing.Benchmark(func(b *testing.B) {
+ b.ReportMetric(12345, "ns/op")
+ b.ReportMetric(0.2, "frobs/op")
+ })
+ // Test built-in overriding.
+ if res.NsPerOp() != 12345 {
+ t.Errorf("NsPerOp: expected %v, actual %v", 12345, res.NsPerOp())
+ }
+ // Test stringing.
+ res.N = 1 // Make the output stable
+ want := " 1\t 12345 ns/op\t 0.2000 frobs/op"
+ if want != res.String() {
+ t.Errorf("expected %q, actual %q", want, res.String())
+ }
+}
+
+func ExampleB_ReportMetric() {
+ // This reports a custom benchmark metric relevant to a
+ // specific algorithm (in this case, sorting).
+ testing.Benchmark(func(b *testing.B) {
+ var compares int64
+ for i := 0; i < b.N; i++ {
+ s := []int{5, 4, 3, 2, 1}
+ sort.Slice(s, func(i, j int) bool {
+ compares++
+ return s[i] < s[j]
+ })
+ }
+ // This metric is per-operation, so divide by b.N and
+ // report it as a "/op" unit.
+ b.ReportMetric(float64(compares)/float64(b.N), "compares/op")
+ // This metric is per-time, so divide by b.Elapsed and
+ // report it as a "/ns" unit.
+ b.ReportMetric(float64(compares)/float64(b.Elapsed().Nanoseconds()), "compares/ns")
+ })
+}
+
+func ExampleB_ReportMetric_parallel() {
+ // This reports a custom benchmark metric relevant to a
+ // specific algorithm (in this case, sorting) in parallel.
+ testing.Benchmark(func(b *testing.B) {
+ var compares atomic.Int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ s := []int{5, 4, 3, 2, 1}
+ sort.Slice(s, func(i, j int) bool {
+ // Because RunParallel runs the function many
+ // times in parallel, we must increment the
+ // counter atomically to avoid racing writes.
+ compares.Add(1)
+ return s[i] < s[j]
+ })
+ }
+ })
+
+ // NOTE: Report each metric once, after all of the parallel
+ // calls have completed.
+
+ // This metric is per-operation, so divide by b.N and
+ // report it as a "/op" unit.
+ b.ReportMetric(float64(compares.Load())/float64(b.N), "compares/op")
+ // This metric is per-time, so divide by b.Elapsed and
+ // report it as a "/ns" unit.
+ b.ReportMetric(float64(compares.Load())/float64(b.Elapsed().Nanoseconds()), "compares/ns")
+ })
+}
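
The Benchmark function documented in benchmark.go above also works outside "go test"; a hedged sketch of a standalone program that runs one benchmark and prints its result (Init would only be needed if the benchmark body read testing flags):

    package main // hypothetical standalone program

    import (
        "fmt"
        "strings"
        "testing"
    )

    var sink string // sink to keep the benchmarked work observable

    func main() {
        res := testing.Benchmark(func(b *testing.B) {
            b.ReportAllocs()
            for i := 0; i < b.N; i++ {
                sink = strings.Repeat("a", 64)
            }
        })
        // String follows the benchmark result line format; MemString adds B/op and allocs/op.
        fmt.Println(res.String(), res.MemString())
    }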
diff --git a/src/testing/cover.go b/src/testing/cover.go
new file mode 100644
index 0000000..6ad43ab
--- /dev/null
+++ b/src/testing/cover.go
@@ -0,0 +1,124 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Support for test coverage.
+
+package testing
+
+import (
+ "fmt"
+ "internal/goexperiment"
+ "os"
+ "sync/atomic"
+)
+
+// CoverBlock records the coverage data for a single basic block.
+// The fields are 1-indexed, as in an editor: The opening line of
+// the file is number 1, for example. Columns are measured
+// in bytes.
+// NOTE: This struct is internal to the testing infrastructure and may change.
+// It is not covered (yet) by the Go 1 compatibility guidelines.
+type CoverBlock struct {
+ Line0 uint32 // Line number for block start.
+ Col0 uint16 // Column number for block start.
+ Line1 uint32 // Line number for block end.
+ Col1 uint16 // Column number for block end.
+ Stmts uint16 // Number of statements included in this block.
+}
+
+var cover Cover
+
+// Cover records information about test coverage checking.
+// NOTE: This struct is internal to the testing infrastructure and may change.
+// It is not covered (yet) by the Go 1 compatibility guidelines.
+type Cover struct {
+ Mode string
+ Counters map[string][]uint32
+ Blocks map[string][]CoverBlock
+ CoveredPackages string
+}
+
+// Coverage reports the current code coverage as a fraction in the range [0, 1].
+// If coverage is not enabled, Coverage returns 0.
+//
+// When running a large set of sequential test cases, checking Coverage after each one
+// can be useful for identifying which test cases exercise new code paths.
+// It is not a replacement for the reports generated by 'go test -cover' and
+// 'go tool cover'.
+func Coverage() float64 {
+ if goexperiment.CoverageRedesign {
+ return coverage2()
+ }
+ var n, d int64
+ for _, counters := range cover.Counters {
+ for i := range counters {
+ if atomic.LoadUint32(&counters[i]) > 0 {
+ n++
+ }
+ d++
+ }
+ }
+ if d == 0 {
+ return 0
+ }
+ return float64(n) / float64(d)
+}
+
+// RegisterCover records the coverage data accumulators for the tests.
+// NOTE: This function is internal to the testing infrastructure and may change.
+// It is not covered (yet) by the Go 1 compatibility guidelines.
+func RegisterCover(c Cover) {
+ cover = c
+}
+
+// mustBeNil checks the error and, if present, reports it and exits.
+func mustBeNil(err error) {
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s\n", err)
+ os.Exit(2)
+ }
+}
+
+// coverReport reports the coverage percentage and writes a coverage profile if requested.
+func coverReport() {
+ if goexperiment.CoverageRedesign {
+ coverReport2()
+ return
+ }
+ var f *os.File
+ var err error
+ if *coverProfile != "" {
+ f, err = os.Create(toOutputDir(*coverProfile))
+ mustBeNil(err)
+ fmt.Fprintf(f, "mode: %s\n", cover.Mode)
+ defer func() { mustBeNil(f.Close()) }()
+ }
+
+ var active, total int64
+ var count uint32
+ for name, counts := range cover.Counters {
+ blocks := cover.Blocks[name]
+ for i := range counts {
+ stmts := int64(blocks[i].Stmts)
+ total += stmts
+ count = atomic.LoadUint32(&counts[i]) // For -mode=atomic.
+ if count > 0 {
+ active += stmts
+ }
+ if f != nil {
+ _, err := fmt.Fprintf(f, "%s:%d.%d,%d.%d %d %d\n", name,
+ blocks[i].Line0, blocks[i].Col0,
+ blocks[i].Line1, blocks[i].Col1,
+ stmts,
+ count)
+ mustBeNil(err)
+ }
+ }
+ }
+ if total == 0 {
+ fmt.Println("coverage: [no statements]")
+ return
+ }
+ fmt.Printf("coverage: %.1f%% of statements%s\n", 100*float64(active)/float64(total), cover.CoveredPackages)
+}
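
As the Coverage doc comment above suggests, one use is watching coverage grow across a sequence of cases when the test binary is built with -cover; a minimal sketch (test and case names are hypothetical):

    package mypkg_test // hypothetical

    import "testing"

    func TestCasesAddCoverage(t *testing.T) {
        prev := testing.Coverage() // 0 unless the binary was built with -cover
        for _, name := range []string{"empty", "ascii", "utf8"} { // hypothetical cases
            t.Run(name, func(t *testing.T) {
                // ... exercise the code under test for this case ...
            })
            if c := testing.Coverage(); c > prev {
                t.Logf("%s raised coverage from %.1f%% to %.1f%%", name, 100*prev, 100*c)
                prev = c
            }
        }
    }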
diff --git a/src/testing/example.go b/src/testing/example.go
new file mode 100644
index 0000000..07aa5cb
--- /dev/null
+++ b/src/testing/example.go
@@ -0,0 +1,97 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+)
+
+type InternalExample struct {
+ Name string
+ F func()
+ Output string
+ Unordered bool
+}
+
+// RunExamples is an internal function but exported because it is cross-package;
+// it is part of the implementation of the "go test" command.
+func RunExamples(matchString func(pat, str string) (bool, error), examples []InternalExample) (ok bool) {
+ _, ok = runExamples(matchString, examples)
+ return ok
+}
+
+func runExamples(matchString func(pat, str string) (bool, error), examples []InternalExample) (ran, ok bool) {
+ ok = true
+
+ m := newMatcher(matchString, *match, "-test.run", *skip)
+
+ var eg InternalExample
+ for _, eg = range examples {
+ _, matched, _ := m.fullName(nil, eg.Name)
+ if !matched {
+ continue
+ }
+ ran = true
+ if !runExample(eg) {
+ ok = false
+ }
+ }
+
+ return ran, ok
+}
+
+func sortLines(output string) string {
+ lines := strings.Split(output, "\n")
+ sort.Strings(lines)
+ return strings.Join(lines, "\n")
+}
+
+// processRunResult computes a summary and status of the result of running an example test.
+// stdout is the captured output from stdout of the test.
+// recovered is the result of invoking recover after running the test, in case it panicked.
+//
+// If stdout doesn't match the expected output or if recovered is non-nil, it'll print the cause of failure to stdout.
+// If the test is chatty/verbose, it'll print a success message to stdout.
+// If recovered is non-nil, it'll panic with that value.
+// If the test panicked with nil, or invoked runtime.Goexit, it'll be
+// made to fail and panic with errNilPanicOrGoexit
+func (eg *InternalExample) processRunResult(stdout string, timeSpent time.Duration, finished bool, recovered any) (passed bool) {
+ passed = true
+ dstr := fmtDuration(timeSpent)
+ var fail string
+ got := strings.TrimSpace(stdout)
+ want := strings.TrimSpace(eg.Output)
+ if eg.Unordered {
+ if sortLines(got) != sortLines(want) && recovered == nil {
+ fail = fmt.Sprintf("got:\n%s\nwant (unordered):\n%s\n", stdout, eg.Output)
+ }
+ } else {
+ if got != want && recovered == nil {
+ fail = fmt.Sprintf("got:\n%s\nwant:\n%s\n", got, want)
+ }
+ }
+ if fail != "" || !finished || recovered != nil {
+ fmt.Printf("%s--- FAIL: %s (%s)\n%s", chatty.prefix(), eg.Name, dstr, fail)
+ passed = false
+ } else if chatty.on {
+ fmt.Printf("%s--- PASS: %s (%s)\n", chatty.prefix(), eg.Name, dstr)
+ }
+
+ if chatty.on && chatty.json {
+ fmt.Printf("%s=== NAME %s\n", chatty.prefix(), "")
+ }
+
+ if recovered != nil {
+ // Propagate the previously recovered result, by panicking.
+ panic(recovered)
+ } else if !finished {
+ panic(errNilPanicOrGoexit)
+ }
+
+ return
+}
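
The machinery above compares an example's captured stdout with its Output comment, sorting lines on both sides when the comment reads "Unordered output:" (see sortLines). A sketch of the two forms as a test author writes them; package and function names are made up:

    package mypkg_test // hypothetical

    import "fmt"

    // Compared verbatim, after trimming surrounding whitespace.
    func ExampleHello() {
        fmt.Println("hello")
        // Output: hello
    }

    // Compared after sorting lines, so map iteration order does not matter.
    func ExampleKeys() {
        for k := range map[string]int{"a": 1, "b": 2} {
            fmt.Println(k)
        }
        // Unordered output:
        // a
        // b
    }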
diff --git a/src/testing/export_test.go b/src/testing/export_test.go
new file mode 100644
index 0000000..0022491
--- /dev/null
+++ b/src/testing/export_test.go
@@ -0,0 +1,7 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+var PrettyPrint = prettyPrint
diff --git a/src/testing/flag_test.go b/src/testing/flag_test.go
new file mode 100644
index 0000000..6f76c23
--- /dev/null
+++ b/src/testing/flag_test.go
@@ -0,0 +1,89 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing_test
+
+import (
+ "flag"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "testing"
+)
+
+var testFlagArg = flag.String("test_flag_arg", "", "TestFlag: passing -v option")
+
+const flagTestEnv = "GO_WANT_FLAG_HELPER_PROCESS"
+
+func TestFlag(t *testing.T) {
+ if os.Getenv(flagTestEnv) == "1" {
+ testFlagHelper(t)
+ return
+ }
+
+ testenv.MustHaveExec(t)
+
+ for _, flag := range []string{"", "-test.v", "-test.v=test2json"} {
+ flag := flag
+ t.Run(flag, func(t *testing.T) {
+ t.Parallel()
+ exe, err := os.Executable()
+ if err != nil {
+ exe = os.Args[0]
+ }
+ cmd := exec.Command(exe, "-test.run=^TestFlag$", "-test_flag_arg="+flag)
+ if flag != "" {
+ cmd.Args = append(cmd.Args, flag)
+ }
+ cmd.Env = append(cmd.Environ(), flagTestEnv+"=1")
+ b, err := cmd.CombinedOutput()
+ if len(b) > 0 {
+ // When we set -test.v=test2json, we need to escape the ^V control
+ // character used for JSON framing so that the JSON parser doesn't
+ // misinterpret the subprocess output as output from the parent test.
+ t.Logf("%q", b)
+ }
+ if err != nil {
+ t.Error(err)
+ }
+ })
+ }
+}
+
+// testFlagHelper is called by the TestFlagHelper subprocess.
+func testFlagHelper(t *testing.T) {
+ f := flag.Lookup("test.v")
+ if f == nil {
+ t.Fatal(`flag.Lookup("test.v") failed`)
+ }
+
+ bf, ok := f.Value.(interface{ IsBoolFlag() bool })
+ if !ok {
+ t.Errorf("test.v flag (type %T) does not have IsBoolFlag method", f)
+ } else if !bf.IsBoolFlag() {
+ t.Error("test.v IsBoolFlag() returned false")
+ }
+
+ gf, ok := f.Value.(flag.Getter)
+ if !ok {
+ t.Fatalf("test.v flag (type %T) does not have Get method", f)
+ }
+ v := gf.Get()
+
+ var want any
+ switch *testFlagArg {
+ case "":
+ want = false
+ case "-test.v":
+ want = true
+ case "-test.v=test2json":
+ want = "test2json"
+ default:
+ t.Fatalf("unexpected test_flag_arg %q", *testFlagArg)
+ }
+
+ if v != want {
+ t.Errorf("test.v is %v want %v", v, want)
+ }
+}
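
The re-exec technique in TestFlag above generalizes to any test that must observe flags or environment in a child process: run the test binary again with an environment marker and a -test.run filter so only the helper executes in the child. A stripped-down sketch (names are illustrative):

    package mypkg_test // hypothetical

    import (
        "os"
        "os/exec"
        "testing"
    )

    func TestInSubprocess(t *testing.T) {
        if os.Getenv("WANT_HELPER") == "1" {
            // Assertions that must run inside the child process go here.
            return
        }
        exe, err := os.Executable()
        if err != nil {
            t.Fatal(err)
        }
        cmd := exec.Command(exe, "-test.run=^TestInSubprocess$")
        cmd.Env = append(os.Environ(), "WANT_HELPER=1")
        if out, err := cmd.CombinedOutput(); err != nil {
            t.Fatalf("child process failed: %v\n%s", err, out)
        }
    }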
diff --git a/src/testing/fstest/mapfs.go b/src/testing/fstest/mapfs.go
new file mode 100644
index 0000000..1409d62
--- /dev/null
+++ b/src/testing/fstest/mapfs.go
@@ -0,0 +1,244 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fstest
+
+import (
+ "io"
+ "io/fs"
+ "path"
+ "sort"
+ "strings"
+ "time"
+)
+
+// A MapFS is a simple in-memory file system for use in tests,
+// represented as a map from path names (arguments to Open)
+// to information about the files or directories they represent.
+//
+// The map need not include parent directories for files contained
+// in the map; those will be synthesized if needed.
+// But a directory can still be included by setting the [MapFile.Mode]'s [fs.ModeDir] bit;
+// this may be necessary for detailed control over the directory's [fs.FileInfo]
+// or to create an empty directory.
+//
+// File system operations read directly from the map,
+// so that the file system can be changed by editing the map as needed.
+// An implication is that file system operations must not run concurrently
+// with changes to the map, which would be a race.
+// Another implication is that opening or reading a directory requires
+// iterating over the entire map, so a MapFS should typically be used with not more
+// than a few hundred entries or directory reads.
+type MapFS map[string]*MapFile
+
+// A MapFile describes a single file in a [MapFS].
+type MapFile struct {
+ Data []byte // file content
+ Mode fs.FileMode // fs.FileInfo.Mode
+ ModTime time.Time // fs.FileInfo.ModTime
+ Sys any // fs.FileInfo.Sys
+}
+
+var _ fs.FS = MapFS(nil)
+var _ fs.File = (*openMapFile)(nil)
+
+// Open opens the named file.
+func (fsys MapFS) Open(name string) (fs.File, error) {
+ if !fs.ValidPath(name) {
+ return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
+ }
+ file := fsys[name]
+ if file != nil && file.Mode&fs.ModeDir == 0 {
+ // Ordinary file
+ return &openMapFile{name, mapFileInfo{path.Base(name), file}, 0}, nil
+ }
+
+ // Directory, possibly synthesized.
+ // Note that file can be nil here: the map need not contain explicit parent directories for all its files.
+ // But file can also be non-nil, in case the user wants to set metadata for the directory explicitly.
+ // Either way, we need to construct the list of children of this directory.
+ var list []mapFileInfo
+ var elem string
+ var need = make(map[string]bool)
+ if name == "." {
+ elem = "."
+ for fname, f := range fsys {
+ i := strings.Index(fname, "/")
+ if i < 0 {
+ if fname != "." {
+ list = append(list, mapFileInfo{fname, f})
+ }
+ } else {
+ need[fname[:i]] = true
+ }
+ }
+ } else {
+ elem = name[strings.LastIndex(name, "/")+1:]
+ prefix := name + "/"
+ for fname, f := range fsys {
+ if strings.HasPrefix(fname, prefix) {
+ felem := fname[len(prefix):]
+ i := strings.Index(felem, "/")
+ if i < 0 {
+ list = append(list, mapFileInfo{felem, f})
+ } else {
+ need[fname[len(prefix):len(prefix)+i]] = true
+ }
+ }
+ }
+ // If the directory name is not in the map,
+ // and there are no children of the name in the map,
+ // then the directory is treated as not existing.
+ if file == nil && list == nil && len(need) == 0 {
+ return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
+ }
+ }
+ for _, fi := range list {
+ delete(need, fi.name)
+ }
+ for name := range need {
+ list = append(list, mapFileInfo{name, &MapFile{Mode: fs.ModeDir | 0555}})
+ }
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].name < list[j].name
+ })
+
+ if file == nil {
+ file = &MapFile{Mode: fs.ModeDir | 0555}
+ }
+ return &mapDir{name, mapFileInfo{elem, file}, list, 0}, nil
+}
+
+// fsOnly is a wrapper that hides all but the fs.FS methods,
+// to avoid an infinite recursion when implementing special
+// methods in terms of helpers that would use them.
+// (In general, implementing these methods using the package fs helpers
+// is redundant and unnecessary, but having the methods may make
+// MapFS exercise more code paths when used in tests.)
+type fsOnly struct{ fs.FS }
+
+func (fsys MapFS) ReadFile(name string) ([]byte, error) {
+ return fs.ReadFile(fsOnly{fsys}, name)
+}
+
+func (fsys MapFS) Stat(name string) (fs.FileInfo, error) {
+ return fs.Stat(fsOnly{fsys}, name)
+}
+
+func (fsys MapFS) ReadDir(name string) ([]fs.DirEntry, error) {
+ return fs.ReadDir(fsOnly{fsys}, name)
+}
+
+func (fsys MapFS) Glob(pattern string) ([]string, error) {
+ return fs.Glob(fsOnly{fsys}, pattern)
+}
+
+type noSub struct {
+ MapFS
+}
+
+func (noSub) Sub() {} // not the fs.SubFS signature
+
+func (fsys MapFS) Sub(dir string) (fs.FS, error) {
+ return fs.Sub(noSub{fsys}, dir)
+}
+
+// A mapFileInfo implements fs.FileInfo and fs.DirEntry for a given map file.
+type mapFileInfo struct {
+ name string
+ f *MapFile
+}
+
+func (i *mapFileInfo) Name() string { return i.name }
+func (i *mapFileInfo) Size() int64 { return int64(len(i.f.Data)) }
+func (i *mapFileInfo) Mode() fs.FileMode { return i.f.Mode }
+func (i *mapFileInfo) Type() fs.FileMode { return i.f.Mode.Type() }
+func (i *mapFileInfo) ModTime() time.Time { return i.f.ModTime }
+func (i *mapFileInfo) IsDir() bool { return i.f.Mode&fs.ModeDir != 0 }
+func (i *mapFileInfo) Sys() any { return i.f.Sys }
+func (i *mapFileInfo) Info() (fs.FileInfo, error) { return i, nil }
+
+func (i *mapFileInfo) String() string {
+ return fs.FormatFileInfo(i)
+}
+
+// An openMapFile is a regular (non-directory) fs.File open for reading.
+type openMapFile struct {
+ path string
+ mapFileInfo
+ offset int64
+}
+
+func (f *openMapFile) Stat() (fs.FileInfo, error) { return &f.mapFileInfo, nil }
+
+func (f *openMapFile) Close() error { return nil }
+
+func (f *openMapFile) Read(b []byte) (int, error) {
+ if f.offset >= int64(len(f.f.Data)) {
+ return 0, io.EOF
+ }
+ if f.offset < 0 {
+ return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid}
+ }
+ n := copy(b, f.f.Data[f.offset:])
+ f.offset += int64(n)
+ return n, nil
+}
+
+func (f *openMapFile) Seek(offset int64, whence int) (int64, error) {
+ switch whence {
+ case 0:
+ // offset += 0
+ case 1:
+ offset += f.offset
+ case 2:
+ offset += int64(len(f.f.Data))
+ }
+ if offset < 0 || offset > int64(len(f.f.Data)) {
+ return 0, &fs.PathError{Op: "seek", Path: f.path, Err: fs.ErrInvalid}
+ }
+ f.offset = offset
+ return offset, nil
+}
+
+func (f *openMapFile) ReadAt(b []byte, offset int64) (int, error) {
+ if offset < 0 || offset > int64(len(f.f.Data)) {
+ return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid}
+ }
+ n := copy(b, f.f.Data[offset:])
+ if n < len(b) {
+ return n, io.EOF
+ }
+ return n, nil
+}
+
+// A mapDir is a directory fs.File (so also an fs.ReadDirFile) open for reading.
+type mapDir struct {
+ path string
+ mapFileInfo
+ entry []mapFileInfo
+ offset int
+}
+
+func (d *mapDir) Stat() (fs.FileInfo, error) { return &d.mapFileInfo, nil }
+func (d *mapDir) Close() error { return nil }
+func (d *mapDir) Read(b []byte) (int, error) {
+ return 0, &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid}
+}
+
+func (d *mapDir) ReadDir(count int) ([]fs.DirEntry, error) {
+ n := len(d.entry) - d.offset
+ if n == 0 && count > 0 {
+ return nil, io.EOF
+ }
+ if count > 0 && n > count {
+ n = count
+ }
+ list := make([]fs.DirEntry, n)
+ for i := range list {
+ list[i] = &d.entry[d.offset+i]
+ }
+ d.offset += n
+ return list, nil
+}
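
A small sketch of using MapFS, as defined above, as an in-memory fs.FS in a test; file names and contents are made up:

    package mypkg_test // hypothetical

    import (
        "io/fs"
        "testing"
        "testing/fstest"
    )

    func TestWithMapFS(t *testing.T) {
        fsys := fstest.MapFS{
            "config/app.txt": &fstest.MapFile{Data: []byte("debug=true\n")},
            "logs":           &fstest.MapFile{Mode: fs.ModeDir}, // explicit empty directory
        }
        b, err := fs.ReadFile(fsys, "config/app.txt")
        if err != nil {
            t.Fatal(err)
        }
        if string(b) != "debug=true\n" {
            t.Fatalf("unexpected content %q", b)
        }
        // Parent directories such as "config" are synthesized on demand.
        if _, err := fs.Stat(fsys, "config"); err != nil {
            t.Fatal(err)
        }
    }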
diff --git a/src/testing/fstest/mapfs_test.go b/src/testing/fstest/mapfs_test.go
new file mode 100644
index 0000000..c64dc8d
--- /dev/null
+++ b/src/testing/fstest/mapfs_test.go
@@ -0,0 +1,47 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fstest
+
+import (
+ "fmt"
+ "io/fs"
+ "strings"
+ "testing"
+)
+
+func TestMapFS(t *testing.T) {
+ m := MapFS{
+ "hello": {Data: []byte("hello, world\n")},
+ "fortune/k/ken.txt": {Data: []byte("If a program is too slow, it must have a loop.\n")},
+ }
+ if err := TestFS(m, "hello", "fortune", "fortune/k", "fortune/k/ken.txt"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestMapFSChmodDot(t *testing.T) {
+ m := MapFS{
+ "a/b.txt": &MapFile{Mode: 0666},
+ ".": &MapFile{Mode: 0777 | fs.ModeDir},
+ }
+ buf := new(strings.Builder)
+ fs.WalkDir(m, ".", func(path string, d fs.DirEntry, err error) error {
+ fi, err := d.Info()
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(buf, "%s: %v\n", path, fi.Mode())
+ return nil
+ })
+ want := `
+.: drwxrwxrwx
+a: dr-xr-xr-x
+a/b.txt: -rw-rw-rw-
+`[1:]
+ got := buf.String()
+ if want != got {
+ t.Errorf("MapFS modes want:\n%s\ngot:\n%s\n", want, got)
+ }
+}
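
TestFS (defined in testfs.go below) is not limited to MapFS; the same call can vet any fs.FS implementation, for example an embedded tree. A sketch, assuming the package has a testdata directory containing golden.txt:

    package mypkg_test // hypothetical

    import (
        "embed"
        "testing"
        "testing/fstest"
    )

    //go:embed testdata
    var content embed.FS // assumes a testdata directory exists in this package

    func TestEmbeddedFS(t *testing.T) {
        if err := fstest.TestFS(content, "testdata/golden.txt"); err != nil { // file name is hypothetical
            t.Fatal(err)
        }
    }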
diff --git a/src/testing/fstest/testfs.go b/src/testing/fstest/testfs.go
new file mode 100644
index 0000000..78b0b82
--- /dev/null
+++ b/src/testing/fstest/testfs.go
@@ -0,0 +1,624 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fstest implements support for testing implementations and users of file systems.
+package fstest
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "path"
+ "reflect"
+ "sort"
+ "strings"
+ "testing/iotest"
+)
+
+// TestFS tests a file system implementation.
+// It walks the entire tree of files in fsys,
+// opening and checking that each file behaves correctly.
+// It also checks that the file system contains at least the expected files.
+// As a special case, if no expected files are listed, fsys must be empty.
+// Otherwise, fsys must contain at least the listed files; it can also contain others.
+// The contents of fsys must not change concurrently with TestFS.
+//
+// If TestFS finds any misbehaviors, it returns an error reporting all of them.
+// The error text spans multiple lines, one per detected misbehavior.
+//
+// Typical usage inside a test is:
+//
+// if err := fstest.TestFS(myFS, "file/that/should/be/present"); err != nil {
+// t.Fatal(err)
+// }
+func TestFS(fsys fs.FS, expected ...string) error {
+ if err := testFS(fsys, expected...); err != nil {
+ return err
+ }
+ for _, name := range expected {
+ if i := strings.Index(name, "/"); i >= 0 {
+ dir, dirSlash := name[:i], name[:i+1]
+ var subExpected []string
+ for _, name := range expected {
+ if strings.HasPrefix(name, dirSlash) {
+ subExpected = append(subExpected, name[len(dirSlash):])
+ }
+ }
+ sub, err := fs.Sub(fsys, dir)
+ if err != nil {
+ return err
+ }
+ if err := testFS(sub, subExpected...); err != nil {
+ return fmt.Errorf("testing fs.Sub(fsys, %s): %v", dir, err)
+ }
+ break // one sub-test is enough
+ }
+ }
+ return nil
+}
+
+func testFS(fsys fs.FS, expected ...string) error {
+ t := fsTester{fsys: fsys}
+ t.checkDir(".")
+ t.checkOpen(".")
+ found := make(map[string]bool)
+ for _, dir := range t.dirs {
+ found[dir] = true
+ }
+ for _, file := range t.files {
+ found[file] = true
+ }
+ delete(found, ".")
+ if len(expected) == 0 && len(found) > 0 {
+ var list []string
+ for k := range found {
+ if k != "." {
+ list = append(list, k)
+ }
+ }
+ sort.Strings(list)
+ if len(list) > 15 {
+ list = append(list[:10], "...")
+ }
+ t.errorf("expected empty file system but found files:\n%s", strings.Join(list, "\n"))
+ }
+ for _, name := range expected {
+ if !found[name] {
+ t.errorf("expected but not found: %s", name)
+ }
+ }
+ if len(t.errText) == 0 {
+ return nil
+ }
+ return errors.New("TestFS found errors:\n" + string(t.errText))
+}
+
+// An fsTester holds state for running the test.
+type fsTester struct {
+ fsys fs.FS
+ errText []byte
+ dirs []string
+ files []string
+}
+
+// errorf adds an error line to errText.
+func (t *fsTester) errorf(format string, args ...any) {
+ if len(t.errText) > 0 {
+ t.errText = append(t.errText, '\n')
+ }
+ t.errText = append(t.errText, fmt.Sprintf(format, args...)...)
+}
+
+func (t *fsTester) openDir(dir string) fs.ReadDirFile {
+ f, err := t.fsys.Open(dir)
+ if err != nil {
+ t.errorf("%s: Open: %v", dir, err)
+ return nil
+ }
+ d, ok := f.(fs.ReadDirFile)
+ if !ok {
+ f.Close()
+ t.errorf("%s: Open returned File type %T, not a fs.ReadDirFile", dir, f)
+ return nil
+ }
+ return d
+}
+
+// checkDir checks the directory dir, which is expected to exist
+// (it is either the root or was found in a directory listing with IsDir true).
+func (t *fsTester) checkDir(dir string) {
+ // Read entire directory.
+ t.dirs = append(t.dirs, dir)
+ d := t.openDir(dir)
+ if d == nil {
+ return
+ }
+ list, err := d.ReadDir(-1)
+ if err != nil {
+ d.Close()
+ t.errorf("%s: ReadDir(-1): %v", dir, err)
+ return
+ }
+
+ // Check all children.
+ var prefix string
+ if dir == "." {
+ prefix = ""
+ } else {
+ prefix = dir + "/"
+ }
+ for _, info := range list {
+ name := info.Name()
+ switch {
+ case name == ".", name == "..", name == "":
+ t.errorf("%s: ReadDir: child has invalid name: %#q", dir, name)
+ continue
+ case strings.Contains(name, "/"):
+ t.errorf("%s: ReadDir: child name contains slash: %#q", dir, name)
+ continue
+ case strings.Contains(name, `\`):
+ t.errorf("%s: ReadDir: child name contains backslash: %#q", dir, name)
+ continue
+ }
+ path := prefix + name
+ t.checkStat(path, info)
+ t.checkOpen(path)
+ if info.IsDir() {
+ t.checkDir(path)
+ } else {
+ t.checkFile(path)
+ }
+ }
+
+ // Check ReadDir(-1) at EOF.
+ list2, err := d.ReadDir(-1)
+ if len(list2) > 0 || err != nil {
+ d.Close()
+ t.errorf("%s: ReadDir(-1) at EOF = %d entries, %v, wanted 0 entries, nil", dir, len(list2), err)
+ return
+ }
+
+ // Check ReadDir(1) at EOF (different results).
+ list2, err = d.ReadDir(1)
+ if len(list2) > 0 || err != io.EOF {
+ d.Close()
+ t.errorf("%s: ReadDir(1) at EOF = %d entries, %v, wanted 0 entries, EOF", dir, len(list2), err)
+ return
+ }
+
+ // Check that close does not report an error.
+ if err := d.Close(); err != nil {
+ t.errorf("%s: Close: %v", dir, err)
+ }
+
+ // Check that closing twice doesn't crash.
+ // The return value doesn't matter.
+ d.Close()
+
+ // Reopen directory, read a second time, make sure contents match.
+ if d = t.openDir(dir); d == nil {
+ return
+ }
+ defer d.Close()
+ list2, err = d.ReadDir(-1)
+ if err != nil {
+ t.errorf("%s: second Open+ReadDir(-1): %v", dir, err)
+ return
+ }
+ t.checkDirList(dir, "first Open+ReadDir(-1) vs second Open+ReadDir(-1)", list, list2)
+
+ // Reopen directory, read a third time in pieces, make sure contents match.
+ if d = t.openDir(dir); d == nil {
+ return
+ }
+ defer d.Close()
+ list2 = nil
+ for {
+ n := 1
+ if len(list2) > 0 {
+ n = 2
+ }
+ frag, err := d.ReadDir(n)
+ if len(frag) > n {
+ t.errorf("%s: third Open: ReadDir(%d) after %d: %d entries (too many)", dir, n, len(list2), len(frag))
+ return
+ }
+ list2 = append(list2, frag...)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.errorf("%s: third Open: ReadDir(%d) after %d: %v", dir, n, len(list2), err)
+ return
+ }
+ if len(frag) == 0 {
+ t.errorf("%s: third Open: ReadDir(%d) after %d: 0 entries but nil error", dir, n, len(list2))
+ return
+ }
+ }
+ t.checkDirList(dir, "first Open+ReadDir(-1) vs third Open+ReadDir(1,2) loop", list, list2)
+
+ // If fsys has ReadDir, check that it matches and is sorted.
+ if fsys, ok := t.fsys.(fs.ReadDirFS); ok {
+ list2, err := fsys.ReadDir(dir)
+ if err != nil {
+ t.errorf("%s: fsys.ReadDir: %v", dir, err)
+ return
+ }
+ t.checkDirList(dir, "first Open+ReadDir(-1) vs fsys.ReadDir", list, list2)
+
+ for i := 0; i+1 < len(list2); i++ {
+ if list2[i].Name() >= list2[i+1].Name() {
+ t.errorf("%s: fsys.ReadDir: list not sorted: %s before %s", dir, list2[i].Name(), list2[i+1].Name())
+ }
+ }
+ }
+
+ // Check fs.ReadDir as well.
+ list2, err = fs.ReadDir(t.fsys, dir)
+ if err != nil {
+ t.errorf("%s: fs.ReadDir: %v", dir, err)
+ return
+ }
+ t.checkDirList(dir, "first Open+ReadDir(-1) vs fs.ReadDir", list, list2)
+
+ for i := 0; i+1 < len(list2); i++ {
+ if list2[i].Name() >= list2[i+1].Name() {
+ t.errorf("%s: fs.ReadDir: list not sorted: %s before %s", dir, list2[i].Name(), list2[i+1].Name())
+ }
+ }
+
+ t.checkGlob(dir, list2)
+}
+
+// formatEntry formats an fs.DirEntry into a string for error messages and comparison.
+func formatEntry(entry fs.DirEntry) string {
+ return fmt.Sprintf("%s IsDir=%v Type=%v", entry.Name(), entry.IsDir(), entry.Type())
+}
+
+// formatInfoEntry formats an fs.FileInfo into a string like the result of formatEntry, for error messages and comparison.
+func formatInfoEntry(info fs.FileInfo) string {
+ return fmt.Sprintf("%s IsDir=%v Type=%v", info.Name(), info.IsDir(), info.Mode().Type())
+}
+
+// formatInfo formats an fs.FileInfo into a string for error messages and comparison.
+func formatInfo(info fs.FileInfo) string {
+ return fmt.Sprintf("%s IsDir=%v Mode=%v Size=%d ModTime=%v", info.Name(), info.IsDir(), info.Mode(), info.Size(), info.ModTime())
+}
+
+// checkGlob checks that various glob patterns work if the file system implements GlobFS.
+func (t *fsTester) checkGlob(dir string, list []fs.DirEntry) {
+ if _, ok := t.fsys.(fs.GlobFS); !ok {
+ return
+ }
+
+ // Make a complex glob pattern prefix that only matches dir.
+ var glob string
+ if dir != "." {
+ elem := strings.Split(dir, "/")
+ for i, e := range elem {
+ var pattern []rune
+ for j, r := range e {
+ if r == '*' || r == '?' || r == '\\' || r == '[' || r == '-' {
+ pattern = append(pattern, '\\', r)
+ continue
+ }
+ switch (i + j) % 5 {
+ case 0:
+ pattern = append(pattern, r)
+ case 1:
+ pattern = append(pattern, '[', r, ']')
+ case 2:
+ pattern = append(pattern, '[', r, '-', r, ']')
+ case 3:
+ pattern = append(pattern, '[', '\\', r, ']')
+ case 4:
+ pattern = append(pattern, '[', '\\', r, '-', '\\', r, ']')
+ }
+ }
+ elem[i] = string(pattern)
+ }
+ glob = strings.Join(elem, "/") + "/"
+ }
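+ // For example (illustrative, not part of the upstream file): for
+ // dir "ab/cd" the loop above builds the glob prefix "a[b]/[c][d-d]/",
+ // which matches only that directory while exercising several different
+ // bracket-expression forms.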
+
+ // Test that malformed patterns are detected.
+ // The error is likely path.ErrBadPattern but need not be.
+ if _, err := t.fsys.(fs.GlobFS).Glob(glob + "nonexist/[]"); err == nil {
+ t.errorf("%s: Glob(%#q): bad pattern not detected", dir, glob+"nonexist/[]")
+ }
+
+ // Try to find a letter that appears in only some of the final names.
+ c := rune('a')
+ for ; c <= 'z'; c++ {
+ have, haveNot := false, false
+ for _, d := range list {
+ if strings.ContainsRune(d.Name(), c) {
+ have = true
+ } else {
+ haveNot = true
+ }
+ }
+ if have && haveNot {
+ break
+ }
+ }
+ if c > 'z' {
+ c = 'a'
+ }
+ glob += "*" + string(c) + "*"
+
+ var want []string
+ for _, d := range list {
+ if strings.ContainsRune(d.Name(), c) {
+ want = append(want, path.Join(dir, d.Name()))
+ }
+ }
+
+ names, err := t.fsys.(fs.GlobFS).Glob(glob)
+ if err != nil {
+ t.errorf("%s: Glob(%#q): %v", dir, glob, err)
+ return
+ }
+ if reflect.DeepEqual(want, names) {
+ return
+ }
+
+ if !sort.StringsAreSorted(names) {
+ t.errorf("%s: Glob(%#q): unsorted output:\n%s", dir, glob, strings.Join(names, "\n"))
+ sort.Strings(names)
+ }
+
+ var problems []string
+ for len(want) > 0 || len(names) > 0 {
+ switch {
+ case len(want) > 0 && len(names) > 0 && want[0] == names[0]:
+ want, names = want[1:], names[1:]
+ case len(want) > 0 && (len(names) == 0 || want[0] < names[0]):
+ problems = append(problems, "missing: "+want[0])
+ want = want[1:]
+ default:
+ problems = append(problems, "extra: "+names[0])
+ names = names[1:]
+ }
+ }
+ t.errorf("%s: Glob(%#q): wrong output:\n%s", dir, glob, strings.Join(problems, "\n"))
+}
+
+// checkStat checks that a direct stat of path matches entry,
+// which was found in the parent's directory listing.
+func (t *fsTester) checkStat(path string, entry fs.DirEntry) {
+ file, err := t.fsys.Open(path)
+ if err != nil {
+ t.errorf("%s: Open: %v", path, err)
+ return
+ }
+ info, err := file.Stat()
+ file.Close()
+ if err != nil {
+ t.errorf("%s: Stat: %v", path, err)
+ return
+ }
+ fentry := formatEntry(entry)
+ fientry := formatInfoEntry(info)
+ // Note: mismatch here is OK for symlink, because Open dereferences symlink.
+ if fentry != fientry && entry.Type()&fs.ModeSymlink == 0 {
+ t.errorf("%s: mismatch:\n\tentry = %s\n\tfile.Stat() = %s", path, fentry, fientry)
+ }
+
+ einfo, err := entry.Info()
+ if err != nil {
+ t.errorf("%s: entry.Info: %v", path, err)
+ return
+ }
+ finfo := formatInfo(info)
+ if entry.Type()&fs.ModeSymlink != 0 {
+ // For symlink, just check that entry.Info matches entry on common fields.
+ // Open dereferences the symlink, so info itself may differ.
+ feentry := formatInfoEntry(einfo)
+ if fentry != feentry {
+ t.errorf("%s: mismatch\n\tentry = %s\n\tentry.Info() = %s\n", path, fentry, feentry)
+ }
+ } else {
+ feinfo := formatInfo(einfo)
+ if feinfo != finfo {
+ t.errorf("%s: mismatch:\n\tentry.Info() = %s\n\tfile.Stat() = %s\n", path, feinfo, finfo)
+ }
+ }
+
+ // Stat should be the same as Open+Stat, even for symlinks.
+ info2, err := fs.Stat(t.fsys, path)
+ if err != nil {
+ t.errorf("%s: fs.Stat: %v", path, err)
+ return
+ }
+ finfo2 := formatInfo(info2)
+ if finfo2 != finfo {
+ t.errorf("%s: fs.Stat(...) = %s\n\twant %s", path, finfo2, finfo)
+ }
+
+ if fsys, ok := t.fsys.(fs.StatFS); ok {
+ info2, err := fsys.Stat(path)
+ if err != nil {
+ t.errorf("%s: fsys.Stat: %v", path, err)
+ return
+ }
+ finfo2 := formatInfo(info2)
+ if finfo2 != finfo {
+ t.errorf("%s: fsys.Stat(...) = %s\n\twant %s", path, finfo2, finfo)
+ }
+ }
+}
+
+// checkDirList checks that two directory lists contain the same files and file info.
+// The order of the lists need not match.
+func (t *fsTester) checkDirList(dir, desc string, list1, list2 []fs.DirEntry) {
+ old := make(map[string]fs.DirEntry)
+ checkMode := func(entry fs.DirEntry) {
+ if entry.IsDir() != (entry.Type()&fs.ModeDir != 0) {
+ if entry.IsDir() {
+ t.errorf("%s: ReadDir returned %s with IsDir() = true, Type() & ModeDir = 0", dir, entry.Name())
+ } else {
+ t.errorf("%s: ReadDir returned %s with IsDir() = false, Type() & ModeDir = ModeDir", dir, entry.Name())
+ }
+ }
+ }
+
+ for _, entry1 := range list1 {
+ old[entry1.Name()] = entry1
+ checkMode(entry1)
+ }
+
+ var diffs []string
+ for _, entry2 := range list2 {
+ entry1 := old[entry2.Name()]
+ if entry1 == nil {
+ checkMode(entry2)
+ diffs = append(diffs, "+ "+formatEntry(entry2))
+ continue
+ }
+ if formatEntry(entry1) != formatEntry(entry2) {
+ diffs = append(diffs, "- "+formatEntry(entry1), "+ "+formatEntry(entry2))
+ }
+ delete(old, entry2.Name())
+ }
+ for _, entry1 := range old {
+ diffs = append(diffs, "- "+formatEntry(entry1))
+ }
+
+ if len(diffs) == 0 {
+ return
+ }
+
+ sort.Slice(diffs, func(i, j int) bool {
+ fi := strings.Fields(diffs[i])
+ fj := strings.Fields(diffs[j])
+ // sort by name (i < j) and then +/- (j < i, because + < -)
+ return fi[1]+" "+fj[0] < fj[1]+" "+fi[0]
+ })
+
+ t.errorf("%s: diff %s:\n\t%s", dir, desc, strings.Join(diffs, "\n\t"))
+}
+
+// checkFile checks that basic file reading works correctly.
+func (t *fsTester) checkFile(file string) {
+ t.files = append(t.files, file)
+
+ // Read entire file.
+ f, err := t.fsys.Open(file)
+ if err != nil {
+ t.errorf("%s: Open: %v", file, err)
+ return
+ }
+
+ data, err := io.ReadAll(f)
+ if err != nil {
+ f.Close()
+ t.errorf("%s: Open+ReadAll: %v", file, err)
+ return
+ }
+
+ if err := f.Close(); err != nil {
+ t.errorf("%s: Close: %v", file, err)
+ }
+
+ // Check that closing twice doesn't crash.
+ // The return value doesn't matter.
+ f.Close()
+
+ // Check that ReadFile works if present.
+ if fsys, ok := t.fsys.(fs.ReadFileFS); ok {
+ data2, err := fsys.ReadFile(file)
+ if err != nil {
+ t.errorf("%s: fsys.ReadFile: %v", file, err)
+ return
+ }
+ t.checkFileRead(file, "ReadAll vs fsys.ReadFile", data, data2)
+
+ // Modify the data and check it again. Modifying the
+ // returned byte slice should not affect the next call.
+ for i := range data2 {
+ data2[i]++
+ }
+ data2, err = fsys.ReadFile(file)
+ if err != nil {
+ t.errorf("%s: second call to fsys.ReadFile: %v", file, err)
+ return
+ }
+ t.checkFileRead(file, "Readall vs second fsys.ReadFile", data, data2)
+
+ t.checkBadPath(file, "ReadFile",
+ func(name string) error { _, err := fsys.ReadFile(name); return err })
+ }
+
+ // Check that fs.ReadFile works with t.fsys.
+ data2, err := fs.ReadFile(t.fsys, file)
+ if err != nil {
+ t.errorf("%s: fs.ReadFile: %v", file, err)
+ return
+ }
+ t.checkFileRead(file, "ReadAll vs fs.ReadFile", data, data2)
+
+ // Use iotest.TestReader to check small reads, Seek, ReadAt.
+ f, err = t.fsys.Open(file)
+ if err != nil {
+ t.errorf("%s: second Open: %v", file, err)
+ return
+ }
+ defer f.Close()
+ if err := iotest.TestReader(f, data); err != nil {
+ t.errorf("%s: failed TestReader:\n\t%s", file, strings.ReplaceAll(err.Error(), "\n", "\n\t"))
+ }
+}
+
+func (t *fsTester) checkFileRead(file, desc string, data1, data2 []byte) {
+ if string(data1) != string(data2) {
+ t.errorf("%s: %s: different data returned\n\t%q\n\t%q", file, desc, data1, data2)
+ return
+ }
+}
+
+// checkOpen checks that various invalid forms of the file's name cannot be opened using t.fsys.Open.
+func (t *fsTester) checkOpen(file string) {
+ t.checkBadPath(file, "Open", func(file string) error {
+ f, err := t.fsys.Open(file)
+ if err == nil {
+ f.Close()
+ }
+ return err
+ })
+}
+
+// checkBadPath checks that various invalid forms of the file's name cannot be opened using open.
+func (t *fsTester) checkBadPath(file string, desc string, open func(string) error) {
+ bad := []string{
+ "/" + file,
+ file + "/.",
+ }
+ if file == "." {
+ bad = append(bad, "/")
+ }
+ if i := strings.Index(file, "/"); i >= 0 {
+ bad = append(bad,
+ file[:i]+"//"+file[i+1:],
+ file[:i]+"/./"+file[i+1:],
+ file[:i]+`\`+file[i+1:],
+ file[:i]+"/../"+file,
+ )
+ }
+ if i := strings.LastIndex(file, "/"); i >= 0 {
+ bad = append(bad,
+ file[:i]+"//"+file[i+1:],
+ file[:i]+"/./"+file[i+1:],
+ file[:i]+`\`+file[i+1:],
+ file+"/../"+file[i+1:],
+ )
+ }
+
+ for _, b := range bad {
+ if err := open(b); err == nil {
+ t.errorf("%s: %s(%s) succeeded, want error", file, desc, b)
+ }
+ }
+}
diff --git a/src/testing/fstest/testfs_test.go b/src/testing/fstest/testfs_test.go
new file mode 100644
index 0000000..a48c597
--- /dev/null
+++ b/src/testing/fstest/testfs_test.go
@@ -0,0 +1,78 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fstest
+
+import (
+ "internal/testenv"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "sort"
+ "testing"
+)
+
+func TestSymlink(t *testing.T) {
+ testenv.MustHaveSymlink(t)
+
+ tmp := t.TempDir()
+ tmpfs := os.DirFS(tmp)
+
+ if err := os.WriteFile(filepath.Join(tmp, "hello"), []byte("hello, world\n"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.Symlink(filepath.Join(tmp, "hello"), filepath.Join(tmp, "hello.link")); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := TestFS(tmpfs, "hello", "hello.link"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestDash(t *testing.T) {
+ m := MapFS{
+ "a-b/a": {Data: []byte("a-b/a")},
+ }
+ if err := TestFS(m, "a-b/a"); err != nil {
+ t.Error(err)
+ }
+}
+
+type shuffledFS MapFS
+
+func (fsys shuffledFS) Open(name string) (fs.File, error) {
+ f, err := MapFS(fsys).Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &shuffledFile{File: f}, nil
+}
+
+type shuffledFile struct{ fs.File }
+
+func (f *shuffledFile) ReadDir(n int) ([]fs.DirEntry, error) {
+ dirents, err := f.File.(fs.ReadDirFile).ReadDir(n)
+ // Shuffle in a deterministic way; all we care about is making sure that the
+ // list of directory entries is not in lexicographic order.
+ //
+ // We do this to make sure that the TestFS test suite is not affected by the
+ // order of directory entries.
+ sort.Slice(dirents, func(i, j int) bool {
+ return dirents[i].Name() > dirents[j].Name()
+ })
+ return dirents, err
+}
+
+func TestShuffledFS(t *testing.T) {
+ fsys := shuffledFS{
+ "tmp/one": {Data: []byte("1")},
+ "tmp/two": {Data: []byte("2")},
+ "tmp/three": {Data: []byte("3")},
+ }
+ if err := TestFS(fsys, "tmp/one", "tmp/two", "tmp/three"); err != nil {
+ t.Error(err)
+ }
+}
diff --git a/src/testing/fuzz.go b/src/testing/fuzz.go
new file mode 100644
index 0000000..d50ea79
--- /dev/null
+++ b/src/testing/fuzz.go
@@ -0,0 +1,731 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+)
+
+func initFuzzFlags() {
+ matchFuzz = flag.String("test.fuzz", "", "run the fuzz test matching `regexp`")
+ flag.Var(&fuzzDuration, "test.fuzztime", "time to spend fuzzing; default is to run indefinitely")
+ flag.Var(&minimizeDuration, "test.fuzzminimizetime", "time to spend minimizing a value after finding a failing input")
+
+ fuzzCacheDir = flag.String("test.fuzzcachedir", "", "directory where interesting fuzzing inputs are stored (for use only by cmd/go)")
+ isFuzzWorker = flag.Bool("test.fuzzworker", false, "coordinate with the parent process to fuzz random values (for use only by cmd/go)")
+}
+
+var (
+ matchFuzz *string
+ fuzzDuration durationOrCountFlag
+ minimizeDuration = durationOrCountFlag{d: 60 * time.Second, allowZero: true}
+ fuzzCacheDir *string
+ isFuzzWorker *bool
+
+ // corpusDir is the parent directory of the fuzz test's seed corpus within
+ // the package.
+ corpusDir = "testdata/fuzz"
+)
+
+// fuzzWorkerExitCode is used as an exit code by fuzz worker processes after an
+// internal error. This distinguishes internal errors from uncontrolled panics
+// and other failures. Keep in sync with internal/fuzz.workerExitCode.
+const fuzzWorkerExitCode = 70
+
+// InternalFuzzTarget is an internal type but exported because it is
+// cross-package; it is part of the implementation of the "go test" command.
+type InternalFuzzTarget struct {
+ Name string
+ Fn func(f *F)
+}
+
+// F is a type passed to fuzz tests.
+//
+// Fuzz tests run generated inputs against a provided fuzz target, which can
+// find and report potential bugs in the code being tested.
+//
+// A fuzz test runs the seed corpus by default, which includes entries provided
+// by (*F).Add and entries in the testdata/fuzz/<FuzzTestName> directory. After
+// any necessary setup and calls to (*F).Add, the fuzz test must then call
+// (*F).Fuzz to provide the fuzz target. See the testing package documentation
+// for an example, and see the [F.Fuzz] and [F.Add] method documentation for
+// details.
+//
+// *F methods can only be called before (*F).Fuzz. Once the test is
+// executing the fuzz target, only (*T) methods can be used. The only *F methods
+// that are allowed in the (*F).Fuzz function are (*F).Failed and (*F).Name.
+type F struct {
+ common
+ fuzzContext *fuzzContext
+ testContext *testContext
+
+ // inFuzzFn is true when the fuzz function is running. Most F methods cannot
+ // be called when inFuzzFn is true.
+ inFuzzFn bool
+
+ // corpus is a set of seed corpus entries, added with F.Add and loaded
+ // from testdata.
+ corpus []corpusEntry
+
+ result fuzzResult
+ fuzzCalled bool
+}
+
+var _ TB = (*F)(nil)
+
+// corpusEntry is an alias to the same type as internal/fuzz.CorpusEntry.
+// We use a type alias because we don't want to export this type, and we can't
+// import internal/fuzz from testing.
+type corpusEntry = struct {
+ Parent string
+ Path string
+ Data []byte
+ Values []any
+ Generation int
+ IsSeed bool
+}
+
+// Helper marks the calling function as a test helper function.
+// When printing file and line information, that function will be skipped.
+// Helper may be called simultaneously from multiple goroutines.
+func (f *F) Helper() {
+ if f.inFuzzFn {
+ panic("testing: f.Helper was called inside the fuzz target, use t.Helper instead")
+ }
+
+ // common.Helper is inlined here.
+ // If we called it, it would mark F.Helper as the helper
+ // instead of the caller.
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ if f.helperPCs == nil {
+ f.helperPCs = make(map[uintptr]struct{})
+ }
+ // repeating code from callerName here to save walking a stack frame
+ var pc [1]uintptr
+ n := runtime.Callers(2, pc[:]) // skip runtime.Callers + Helper
+ if n == 0 {
+ panic("testing: zero callers found")
+ }
+ if _, found := f.helperPCs[pc[0]]; !found {
+ f.helperPCs[pc[0]] = struct{}{}
+ f.helperNames = nil // map will be recreated next time it is needed
+ }
+}
+
+// Fail marks the function as having failed but continues execution.
+func (f *F) Fail() {
+ // (*F).Fail may be called by (*T).Fail, which we should allow. However, we
+ // shouldn't allow direct (*F).Fail calls from inside the (*F).Fuzz function.
+ if f.inFuzzFn {
+ panic("testing: f.Fail was called inside the fuzz target, use t.Fail instead")
+ }
+ f.common.Helper()
+ f.common.Fail()
+}
+
+// Skipped reports whether the test was skipped.
+func (f *F) Skipped() bool {
+ // (*F).Skipped may be called by tRunner, which we should allow. However, we
+ // shouldn't allow direct (*F).Skipped calls from inside the (*F).Fuzz function.
+ if f.inFuzzFn {
+ panic("testing: f.Skipped was called inside the fuzz target, use t.Skipped instead")
+ }
+ f.common.Helper()
+ return f.common.Skipped()
+}
+
+// Add will add the arguments to the seed corpus for the fuzz test. This will be
+// a no-op if called after or within the fuzz target, and args must match the
+// arguments for the fuzz target.
+func (f *F) Add(args ...any) {
+ var values []any
+ for i := range args {
+ if t := reflect.TypeOf(args[i]); !supportedTypes[t] {
+ panic(fmt.Sprintf("testing: unsupported type to Add %v", t))
+ }
+ values = append(values, args[i])
+ }
+ f.corpus = append(f.corpus, corpusEntry{Values: values, IsSeed: true, Path: fmt.Sprintf("seed#%d", len(f.corpus))})
+}
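+
+// For example (illustrative, not part of the upstream file), seed values
+// passed to Add must match the fuzz target's arguments in order and type:
+//
+//	f.Add([]byte("seed"), int64(42))
+//	f.Fuzz(func(t *testing.T, b []byte, n int64) { ... })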
+
+// supportedTypes represents all of the supported types which can be fuzzed.
+var supportedTypes = map[reflect.Type]bool{
+ reflect.TypeOf(([]byte)("")): true,
+ reflect.TypeOf((string)("")): true,
+ reflect.TypeOf((bool)(false)): true,
+ reflect.TypeOf((byte)(0)): true,
+ reflect.TypeOf((rune)(0)): true,
+ reflect.TypeOf((float32)(0)): true,
+ reflect.TypeOf((float64)(0)): true,
+ reflect.TypeOf((int)(0)): true,
+ reflect.TypeOf((int8)(0)): true,
+ reflect.TypeOf((int16)(0)): true,
+ reflect.TypeOf((int32)(0)): true,
+ reflect.TypeOf((int64)(0)): true,
+ reflect.TypeOf((uint)(0)): true,
+ reflect.TypeOf((uint8)(0)): true,
+ reflect.TypeOf((uint16)(0)): true,
+ reflect.TypeOf((uint32)(0)): true,
+ reflect.TypeOf((uint64)(0)): true,
+}
+
+// Fuzz runs the fuzz function, ff, for fuzz testing. If ff fails for a set of
+// arguments, those arguments will be added to the seed corpus.
+//
+// ff must be a function with no return value whose first argument is *T and
+// whose remaining arguments are the types to be fuzzed.
+// For example:
+//
+// f.Fuzz(func(t *testing.T, b []byte, i int) { ... })
+//
+// The following types are allowed: []byte, string, bool, byte, rune, float32,
+// float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64.
+// More types may be supported in the future.
+//
+// ff must not call any *F methods, e.g. (*F).Log, (*F).Error, (*F).Skip. Use
+// the corresponding *T method instead. The only *F methods that are allowed in
+// the (*F).Fuzz function are (*F).Failed and (*F).Name.
+//
+// This function should be fast and deterministic, and its behavior should not
+// depend on shared state. No mutable input arguments, or pointers to them,
+// should be retained between executions of the fuzz function, as the memory
+// backing them may be mutated during a subsequent invocation. ff must not
+// modify the underlying data of the arguments provided by the fuzzing engine.
+//
+// When fuzzing, F.Fuzz does not return until a problem is found, time runs out
+// (set with -fuzztime), or the test process is interrupted by a signal. F.Fuzz
+// should be called exactly once, unless F.Skip or [F.Fail] is called beforehand.
+func (f *F) Fuzz(ff any) {
+ if f.fuzzCalled {
+ panic("testing: F.Fuzz called more than once")
+ }
+ f.fuzzCalled = true
+ if f.failed {
+ return
+ }
+ f.Helper()
+
+ // ff should be in the form func(*testing.T, ...interface{})
+ fn := reflect.ValueOf(ff)
+ fnType := fn.Type()
+ if fnType.Kind() != reflect.Func {
+ panic("testing: F.Fuzz must receive a function")
+ }
+ if fnType.NumIn() < 2 || fnType.In(0) != reflect.TypeOf((*T)(nil)) {
+ panic("testing: fuzz target must receive at least two arguments, where the first argument is a *T")
+ }
+ if fnType.NumOut() != 0 {
+ panic("testing: fuzz target must not return a value")
+ }
+
+ // Save the types of the function to compare against the corpus.
+ var types []reflect.Type
+ for i := 1; i < fnType.NumIn(); i++ {
+ t := fnType.In(i)
+ if !supportedTypes[t] {
+ panic(fmt.Sprintf("testing: unsupported type for fuzzing %v", t))
+ }
+ types = append(types, t)
+ }
+
+ // Load the testdata seed corpus. Check types of entries in the testdata
+ // corpus and entries declared with F.Add.
+ //
+ // Don't load the seed corpus if this is a worker process; we won't use it.
+ if f.fuzzContext.mode != fuzzWorker {
+ for _, c := range f.corpus {
+ if err := f.fuzzContext.deps.CheckCorpus(c.Values, types); err != nil {
+ // TODO(#48302): Report the source location of the F.Add call.
+ f.Fatal(err)
+ }
+ }
+
+ // Load seed corpus
+ c, err := f.fuzzContext.deps.ReadCorpus(filepath.Join(corpusDir, f.name), types)
+ if err != nil {
+ f.Fatal(err)
+ }
+ for i := range c {
+ c[i].IsSeed = true // these are all seed corpus values
+ if f.fuzzContext.mode == fuzzCoordinator {
+ // If this is the coordinator process, zero the values, since we don't need
+ // to hold onto them.
+ c[i].Values = nil
+ }
+ }
+
+ f.corpus = append(f.corpus, c...)
+ }
+
+ // run calls fn on a given input, as a subtest with its own T.
+ // run is analogous to T.Run. The test filtering and cleanup work similarly.
+ // fn is called in its own goroutine.
+ run := func(captureOut io.Writer, e corpusEntry) (ok bool) {
+ if e.Values == nil {
+ // The corpusEntry must have non-nil Values in order to run the
+ // test. If Values is nil, it is a bug in our code.
+ panic(fmt.Sprintf("corpus file %q was not unmarshaled", e.Path))
+ }
+ if shouldFailFast() {
+ return true
+ }
+ testName := f.name
+ if e.Path != "" {
+ testName = fmt.Sprintf("%s/%s", testName, filepath.Base(e.Path))
+ }
+ if f.testContext.isFuzzing {
+ // Don't preserve subtest names while fuzzing. If fn calls T.Run,
+ // there will be a very large number of subtests with duplicate names,
+ // which will use a large amount of memory. The subtest names aren't
+ // useful since there's no way to re-run them deterministically.
+ f.testContext.match.clearSubNames()
+ }
+
+ // Record the stack trace at the point of this call so that if the subtest
+ // function - which runs in a separate stack - is marked as a helper, we can
+ // continue walking the stack into the parent test.
+ var pc [maxStackLen]uintptr
+ n := runtime.Callers(2, pc[:])
+ t := &T{
+ common: common{
+ barrier: make(chan bool),
+ signal: make(chan bool),
+ name: testName,
+ parent: &f.common,
+ level: f.level + 1,
+ creator: pc[:n],
+ chatty: f.chatty,
+ },
+ context: f.testContext,
+ }
+ if captureOut != nil {
+ // t.parent aliases f.common.
+ t.parent.w = captureOut
+ }
+ t.w = indenter{&t.common}
+ if t.chatty != nil {
+ t.chatty.Updatef(t.name, "=== RUN %s\n", t.name)
+ }
+ f.common.inFuzzFn, f.inFuzzFn = true, true
+ go tRunner(t, func(t *T) {
+ args := []reflect.Value{reflect.ValueOf(t)}
+ for _, v := range e.Values {
+ args = append(args, reflect.ValueOf(v))
+ }
+ // Before resetting the current coverage, defer the snapshot so that
+ // we make sure it is called right before the tRunner function
+ // exits, regardless of whether it was executed cleanly, panicked,
+ // or if the fuzzFn called t.Fatal.
+ if f.testContext.isFuzzing {
+ defer f.fuzzContext.deps.SnapshotCoverage()
+ f.fuzzContext.deps.ResetCoverage()
+ }
+ fn.Call(args)
+ })
+ <-t.signal
+ if t.chatty != nil && t.chatty.json {
+ t.chatty.Updatef(t.parent.name, "=== NAME %s\n", t.parent.name)
+ }
+ f.common.inFuzzFn, f.inFuzzFn = false, false
+ return !t.Failed()
+ }
+
+ switch f.fuzzContext.mode {
+ case fuzzCoordinator:
+ // Fuzzing is enabled, and this is the test process started by 'go test'.
+ // Act as the coordinator process, and coordinate workers to perform the
+ // actual fuzzing.
+ corpusTargetDir := filepath.Join(corpusDir, f.name)
+ cacheTargetDir := filepath.Join(*fuzzCacheDir, f.name)
+ err := f.fuzzContext.deps.CoordinateFuzzing(
+ fuzzDuration.d,
+ int64(fuzzDuration.n),
+ minimizeDuration.d,
+ int64(minimizeDuration.n),
+ *parallel,
+ f.corpus,
+ types,
+ corpusTargetDir,
+ cacheTargetDir)
+ if err != nil {
+ f.result = fuzzResult{Error: err}
+ f.Fail()
+ fmt.Fprintf(f.w, "%v\n", err)
+ if crashErr, ok := err.(fuzzCrashError); ok {
+ crashPath := crashErr.CrashPath()
+ fmt.Fprintf(f.w, "Failing input written to %s\n", crashPath)
+ testName := filepath.Base(crashPath)
+ fmt.Fprintf(f.w, "To re-run:\ngo test -run=%s/%s\n", f.name, testName)
+ }
+ }
+ // TODO(jayconrod,katiehockman): Aggregate statistics across workers
+ // and add to FuzzResult (i.e., time taken, number of iterations)
+
+ case fuzzWorker:
+ // Fuzzing is enabled, and this is a worker process. Follow instructions
+ // from the coordinator.
+ if err := f.fuzzContext.deps.RunFuzzWorker(func(e corpusEntry) error {
+ // Don't write to f.w (which points to Stdout) if running from a
+ // fuzz worker. This would become very verbose, particularly during
+ // minimization. Return the error instead, and let the caller deal
+ // with the output.
+ var buf strings.Builder
+ if ok := run(&buf, e); !ok {
+ return errors.New(buf.String())
+ }
+ return nil
+ }); err != nil {
+ // Internal errors are marked with f.Fail; user code may call this too, before F.Fuzz.
+ // The worker will exit with fuzzWorkerExitCode, indicating this is a failure
+ // (and 'go test' should exit non-zero) but a failing input should not be recorded.
+ f.Errorf("communicating with fuzzing coordinator: %v", err)
+ }
+
+ default:
+ // Fuzzing is not enabled, or will be done later. Only run the seed
+ // corpus now.
+ for _, e := range f.corpus {
+ name := fmt.Sprintf("%s/%s", f.name, filepath.Base(e.Path))
+ if _, ok, _ := f.testContext.match.fullName(nil, name); ok {
+ run(f.w, e)
+ }
+ }
+ }
+}
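+
+// Illustrative sketch (not part of the upstream file): a complete fuzz test
+// that combines F.Add and F.Fuzz. The package-level function under test,
+// Reverse, is hypothetical.
+//
+//	func FuzzReverse(f *testing.F) {
+//		f.Add("hello")
+//		f.Fuzz(func(t *testing.T, s string) {
+//			r := Reverse(s)
+//			if got := Reverse(r); got != s {
+//				t.Errorf("Reverse(Reverse(%q)) = %q, want %q", s, got, s)
+//			}
+//		})
+//	}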
+
+func (f *F) report() {
+ if *isFuzzWorker || f.parent == nil {
+ return
+ }
+ dstr := fmtDuration(f.duration)
+ format := "--- %s: %s (%s)\n"
+ if f.Failed() {
+ f.flushToParent(f.name, format, "FAIL", f.name, dstr)
+ } else if f.chatty != nil {
+ if f.Skipped() {
+ f.flushToParent(f.name, format, "SKIP", f.name, dstr)
+ } else {
+ f.flushToParent(f.name, format, "PASS", f.name, dstr)
+ }
+ }
+}
+
+// fuzzResult contains the results of a fuzz run.
+type fuzzResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Error error // Error is the error from the failing input
+}
+
+func (r fuzzResult) String() string {
+ if r.Error == nil {
+ return ""
+ }
+ return r.Error.Error()
+}
+
+// fuzzCrashError is satisfied by a failing input detected while fuzzing.
+// These errors are written to the seed corpus and can be re-run with 'go test'.
+// Errors within the fuzzing framework (like I/O errors between coordinator
+// and worker processes) don't satisfy this interface.
+type fuzzCrashError interface {
+ error
+ Unwrap() error
+
+ // CrashPath returns the path of the subtest that corresponds to the saved
+ // crash input file in the seed corpus. The test can be re-run with go test
+ // -run=$test/$name $test is the fuzz test name, and $name is the
+ // filepath.Base of the string returned here.
+ CrashPath() string
+}
+
+// fuzzContext holds fields common to all fuzz tests.
+type fuzzContext struct {
+ deps testDeps
+ mode fuzzMode
+}
+
+type fuzzMode uint8
+
+const (
+ seedCorpusOnly fuzzMode = iota
+ fuzzCoordinator
+ fuzzWorker
+)
+
+// runFuzzTests runs the fuzz tests matching the pattern for -run. This will
+// only run the (*F).Fuzz function for each seed corpus without using the
+// fuzzing engine to generate or mutate inputs.
+func runFuzzTests(deps testDeps, fuzzTests []InternalFuzzTarget, deadline time.Time) (ran, ok bool) {
+ ok = true
+ if len(fuzzTests) == 0 || *isFuzzWorker {
+ return ran, ok
+ }
+ m := newMatcher(deps.MatchString, *match, "-test.run", *skip)
+ var mFuzz *matcher
+ if *matchFuzz != "" {
+ mFuzz = newMatcher(deps.MatchString, *matchFuzz, "-test.fuzz", *skip)
+ }
+
+ for _, procs := range cpuList {
+ runtime.GOMAXPROCS(procs)
+ for i := uint(0); i < *count; i++ {
+ if shouldFailFast() {
+ break
+ }
+
+ tctx := newTestContext(*parallel, m)
+ tctx.deadline = deadline
+ fctx := &fuzzContext{deps: deps, mode: seedCorpusOnly}
+ root := common{w: os.Stdout} // gather output in one place
+ if Verbose() {
+ root.chatty = newChattyPrinter(root.w)
+ }
+ for _, ft := range fuzzTests {
+ if shouldFailFast() {
+ break
+ }
+ testName, matched, _ := tctx.match.fullName(nil, ft.Name)
+ if !matched {
+ continue
+ }
+ if mFuzz != nil {
+ if _, fuzzMatched, _ := mFuzz.fullName(nil, ft.Name); fuzzMatched {
+ // If this will be fuzzed, then don't run the seed corpus
+ // right now. That will happen later.
+ continue
+ }
+ }
+ f := &F{
+ common: common{
+ signal: make(chan bool),
+ barrier: make(chan bool),
+ name: testName,
+ parent: &root,
+ level: root.level + 1,
+ chatty: root.chatty,
+ },
+ testContext: tctx,
+ fuzzContext: fctx,
+ }
+ f.w = indenter{&f.common}
+ if f.chatty != nil {
+ f.chatty.Updatef(f.name, "=== RUN %s\n", f.name)
+ }
+ go fRunner(f, ft.Fn)
+ <-f.signal
+ if f.chatty != nil && f.chatty.json {
+ f.chatty.Updatef(f.parent.name, "=== NAME %s\n", f.parent.name)
+ }
+ ok = ok && !f.Failed()
+ ran = ran || f.ran
+ }
+ if !ran {
+ // There were no tests to run on this iteration.
+ // This won't change, so no reason to keep trying.
+ break
+ }
+ }
+ }
+
+ return ran, ok
+}
+
+// runFuzzing runs the fuzz test matching the pattern for -fuzz. Only one such
+// fuzz test must match. This will run the fuzzing engine to generate and
+// mutate new inputs against the fuzz target.
+//
+// If fuzzing is disabled (-test.fuzz is not set), runFuzzing
+// returns immediately.
+func runFuzzing(deps testDeps, fuzzTests []InternalFuzzTarget) (ok bool) {
+ if len(fuzzTests) == 0 || *matchFuzz == "" {
+ return true
+ }
+ m := newMatcher(deps.MatchString, *matchFuzz, "-test.fuzz", *skip)
+ tctx := newTestContext(1, m)
+ tctx.isFuzzing = true
+ fctx := &fuzzContext{
+ deps: deps,
+ }
+ root := common{w: os.Stdout}
+ if *isFuzzWorker {
+ root.w = io.Discard
+ fctx.mode = fuzzWorker
+ } else {
+ fctx.mode = fuzzCoordinator
+ }
+ if Verbose() && !*isFuzzWorker {
+ root.chatty = newChattyPrinter(root.w)
+ }
+ var fuzzTest *InternalFuzzTarget
+ var testName string
+ var matched []string
+ for i := range fuzzTests {
+ name, ok, _ := tctx.match.fullName(nil, fuzzTests[i].Name)
+ if !ok {
+ continue
+ }
+ matched = append(matched, name)
+ fuzzTest = &fuzzTests[i]
+ testName = name
+ }
+ if len(matched) == 0 {
+ fmt.Fprintln(os.Stderr, "testing: warning: no fuzz tests to fuzz")
+ return true
+ }
+ if len(matched) > 1 {
+ fmt.Fprintf(os.Stderr, "testing: will not fuzz, -fuzz matches more than one fuzz test: %v\n", matched)
+ return false
+ }
+
+ f := &F{
+ common: common{
+ signal: make(chan bool),
+ barrier: nil, // T.Parallel has no effect when fuzzing.
+ name: testName,
+ parent: &root,
+ level: root.level + 1,
+ chatty: root.chatty,
+ },
+ fuzzContext: fctx,
+ testContext: tctx,
+ }
+ f.w = indenter{&f.common}
+ if f.chatty != nil {
+ f.chatty.Updatef(f.name, "=== RUN %s\n", f.name)
+ }
+ go fRunner(f, fuzzTest.Fn)
+ <-f.signal
+ if f.chatty != nil {
+ f.chatty.Updatef(f.parent.name, "=== NAME %s\n", f.parent.name)
+ }
+ return !f.failed
+}
+
+// fRunner wraps a call to a fuzz test and ensures that cleanup functions are
+// called and status flags are set. fRunner should be called in its own
+// goroutine. To wait for its completion, receive from f.signal.
+//
+// fRunner is analogous to tRunner, which wraps subtests started with T.Run.
+// Unit tests and fuzz tests work a little differently, so for now, these
+// functions aren't consolidated. In particular, because there are no F.Run and
+// F.Parallel methods, i.e., no fuzz sub-tests or parallel fuzz tests, a few
+// simplifications are made. We also require that F.Fuzz, F.Skip, or F.Fail is
+// called.
+func fRunner(f *F, fn func(*F)) {
+ // When this goroutine is done, either because runtime.Goexit was called, a
+ // panic started, or fn returned normally, record the duration and send
+ // t.signal, indicating the fuzz test is done.
+ defer func() {
+ // Detect whether the fuzz test panicked or called runtime.Goexit
+ // without calling F.Fuzz, F.Fail, or F.Skip. If it did, panic (possibly
+ // replacing a nil panic value). Nothing should recover after fRunner
+ // unwinds, so this should crash the process and print stack.
+ // Unfortunately, recovering here adds stack frames, but the location of
+ // the original panic should still be clear.
+ f.checkRaces()
+ if f.Failed() {
+ numFailed.Add(1)
+ }
+ err := recover()
+ if err == nil {
+ f.mu.RLock()
+ fuzzNotCalled := !f.fuzzCalled && !f.skipped && !f.failed
+ if !f.finished && !f.skipped && !f.failed {
+ err = errNilPanicOrGoexit
+ }
+ f.mu.RUnlock()
+ if fuzzNotCalled && err == nil {
+ f.Error("returned without calling F.Fuzz, F.Fail, or F.Skip")
+ }
+ }
+
+ // Use a deferred call to ensure that we report that the test is
+ // complete even if a cleanup function calls F.FailNow. See issue 41355.
+ didPanic := false
+ defer func() {
+ if !didPanic {
+ // Only report that the test is complete if it doesn't panic,
+ // as otherwise the test binary can exit before the panic is
+ // reported to the user. See issue 41479.
+ f.signal <- true
+ }
+ }()
+
+ // If we recovered a panic or inappropriate runtime.Goexit, fail the test,
+ // flush the output log up to the root, then panic.
+ doPanic := func(err any) {
+ f.Fail()
+ if r := f.runCleanup(recoverAndReturnPanic); r != nil {
+ f.Logf("cleanup panicked with %v", r)
+ }
+ for root := &f.common; root.parent != nil; root = root.parent {
+ root.mu.Lock()
+ root.duration += time.Since(root.start)
+ d := root.duration
+ root.mu.Unlock()
+ root.flushToParent(root.name, "--- FAIL: %s (%s)\n", root.name, fmtDuration(d))
+ }
+ didPanic = true
+ panic(err)
+ }
+ if err != nil {
+ doPanic(err)
+ }
+
+ // No panic or inappropriate Goexit.
+ f.duration += time.Since(f.start)
+
+ if len(f.sub) > 0 {
+ // Unblock inputs that called T.Parallel while running the seed corpus.
+ // This only affects fuzz tests run as normal tests.
+ // While fuzzing, T.Parallel has no effect, so f.sub is empty, and this
+ // branch is not taken. f.barrier is nil in that case.
+ f.testContext.release()
+ close(f.barrier)
+ // Wait for the subtests to complete.
+ for _, sub := range f.sub {
+ <-sub.signal
+ }
+ cleanupStart := time.Now()
+ err := f.runCleanup(recoverAndReturnPanic)
+ f.duration += time.Since(cleanupStart)
+ if err != nil {
+ doPanic(err)
+ }
+ }
+
+ // Report after all subtests have finished.
+ f.report()
+ f.done = true
+ f.setRan()
+ }()
+ defer func() {
+ if len(f.sub) == 0 {
+ f.runCleanup(normalPanic)
+ }
+ }()
+
+ f.start = time.Now()
+ f.resetRaces()
+ fn(f)
+
+ // Code beyond this point will not be executed when FailNow or SkipNow
+ // is invoked.
+ f.mu.Lock()
+ f.finished = true
+ f.mu.Unlock()
+}
diff --git a/src/testing/helper_test.go b/src/testing/helper_test.go
new file mode 100644
index 0000000..da5622f
--- /dev/null
+++ b/src/testing/helper_test.go
@@ -0,0 +1,116 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing_test
+
+import (
+ "internal/testenv"
+ "os"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+func TestTBHelper(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ testTestHelper(t)
+
+ // Check that calling Helper from inside a top-level test function
+ // has no effect.
+ t.Helper()
+ t.Error("8")
+ return
+ }
+
+ testenv.MustHaveExec(t)
+ t.Parallel()
+
+ exe, err := os.Executable()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cmd := testenv.Command(t, exe, "-test.run=^TestTBHelper$")
+ cmd = testenv.CleanCmdEnv(cmd)
+ cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
+ out, _ := cmd.CombinedOutput()
+
+ want := `--- FAIL: TestTBHelper \([^)]+\)
+ helperfuncs_test.go:15: 0
+ helperfuncs_test.go:47: 1
+ helperfuncs_test.go:24: 2
+ helperfuncs_test.go:49: 3
+ helperfuncs_test.go:56: 4
+ --- FAIL: TestTBHelper/sub \([^)]+\)
+ helperfuncs_test.go:59: 5
+ helperfuncs_test.go:24: 6
+ helperfuncs_test.go:58: 7
+ --- FAIL: TestTBHelper/sub2 \([^)]+\)
+ helperfuncs_test.go:80: 11
+ helperfuncs_test.go:84: recover 12
+ helperfuncs_test.go:86: GenericFloat64
+ helperfuncs_test.go:87: GenericInt
+ helper_test.go:22: 8
+ helperfuncs_test.go:73: 9
+ helperfuncs_test.go:69: 10
+`
+ if !regexp.MustCompile(want).Match(out) {
+ t.Errorf("got output:\n\n%s\nwant matching:\n\n%s", out, want)
+ }
+}
+
+func TestTBHelperParallel(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ parallelTestHelper(t)
+ return
+ }
+
+ testenv.MustHaveExec(t)
+ t.Parallel()
+
+ exe, err := os.Executable()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cmd := testenv.Command(t, exe, "-test.run=^TestTBHelperParallel$")
+ cmd = testenv.CleanCmdEnv(cmd)
+ cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
+ out, _ := cmd.CombinedOutput()
+
+ t.Logf("output:\n%s", out)
+
+ lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+
+ // We expect to see one "--- FAIL" line at the start
+ // of the log, five lines of "parallel" logging,
+ // and a final "FAIL" line at the end of the test.
+ const wantLines = 7
+
+ if len(lines) != wantLines {
+ t.Fatalf("parallelTestHelper gave %d lines of output; want %d", len(lines), wantLines)
+ }
+ want := "helperfuncs_test.go:24: parallel"
+ if got := strings.TrimSpace(lines[1]); got != want {
+ t.Errorf("got second output line %q; want %q", got, want)
+ }
+}
+
+func BenchmarkTBHelper(b *testing.B) {
+ f1 := func() {
+ b.Helper()
+ }
+ f2 := func() {
+ b.Helper()
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ if i&1 == 0 {
+ f1()
+ } else {
+ f2()
+ }
+ }
+}
diff --git a/src/testing/helperfuncs_test.go b/src/testing/helperfuncs_test.go
new file mode 100644
index 0000000..f0295f3
--- /dev/null
+++ b/src/testing/helperfuncs_test.go
@@ -0,0 +1,124 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing_test
+
+import (
+ "sync"
+ "testing"
+)
+
+// The line numbering of this file is important for TestTBHelper.
+
+func notHelper(t *testing.T, msg string) {
+ t.Error(msg)
+}
+
+func helper(t *testing.T, msg string) {
+ t.Helper()
+ t.Error(msg)
+}
+
+func notHelperCallingHelper(t *testing.T, msg string) {
+ helper(t, msg)
+}
+
+func helperCallingHelper(t *testing.T, msg string) {
+ t.Helper()
+ helper(t, msg)
+}
+
+func genericHelper[G any](t *testing.T, msg string) {
+ t.Helper()
+ t.Error(msg)
+}
+
+var genericIntHelper = genericHelper[int]
+
+func testTestHelper(t *testing.T) {
+ testHelper(t)
+}
+
+func testHelper(t *testing.T) {
+ // Check combinations of directly and indirectly
+ // calling helper functions.
+ notHelper(t, "0")
+ helper(t, "1")
+ notHelperCallingHelper(t, "2")
+ helperCallingHelper(t, "3")
+
+ // Check a function literal closing over t that uses Helper.
+ fn := func(msg string) {
+ t.Helper()
+ t.Error(msg)
+ }
+ fn("4")
+
+ t.Run("sub", func(t *testing.T) {
+ helper(t, "5")
+ notHelperCallingHelper(t, "6")
+ // Check that calling Helper from inside a subtest entry function
+ // works as if it were in an ordinary function call.
+ t.Helper()
+ t.Error("7")
+ })
+
+ // Check that right caller is reported for func passed to Cleanup when
+ // multiple cleanup functions have been registered.
+ t.Cleanup(func() {
+ t.Helper()
+ t.Error("10")
+ })
+ t.Cleanup(func() {
+ t.Helper()
+ t.Error("9")
+ })
+
+ // Check that helper-ness propagates up through subtests
+ // to helpers above. See https://golang.org/issue/44887.
+ helperSubCallingHelper(t, "11")
+
+ // Check that helper-ness propagates up through panic/recover.
+ // See https://golang.org/issue/31154.
+ recoverHelper(t, "12")
+
+ genericHelper[float64](t, "GenericFloat64")
+ genericIntHelper(t, "GenericInt")
+}
+
+func parallelTestHelper(t *testing.T) {
+ var wg sync.WaitGroup
+ for i := 0; i < 5; i++ {
+ wg.Add(1)
+ go func() {
+ notHelperCallingHelper(t, "parallel")
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func helperSubCallingHelper(t *testing.T, msg string) {
+ t.Helper()
+ t.Run("sub2", func(t *testing.T) {
+ t.Helper()
+ t.Fatal(msg)
+ })
+}
+
+func recoverHelper(t *testing.T, msg string) {
+ t.Helper()
+ defer func() {
+ t.Helper()
+ if err := recover(); err != nil {
+ t.Errorf("recover %s", err)
+ }
+ }()
+ doPanic(t, msg)
+}
+
+func doPanic(t *testing.T, msg string) {
+ t.Helper()
+ panic(msg)
+}
diff --git a/src/testing/internal/testdeps/deps.go b/src/testing/internal/testdeps/deps.go
new file mode 100644
index 0000000..8683075
--- /dev/null
+++ b/src/testing/internal/testdeps/deps.go
@@ -0,0 +1,199 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testdeps provides access to dependencies needed by test execution.
+//
+// This package is imported by the generated main package, which passes
+// TestDeps into testing.Main. This allows tests to use packages at run time
+// without making those packages direct dependencies of package testing.
+// Direct dependencies of package testing are harder to write tests for.
+package testdeps
+
+import (
+ "bufio"
+ "context"
+ "internal/fuzz"
+ "internal/testlog"
+ "io"
+ "os"
+ "os/signal"
+ "reflect"
+ "regexp"
+ "runtime/pprof"
+ "strings"
+ "sync"
+ "time"
+)
+
+// TestDeps is an implementation of the testing.testDeps interface,
+// suitable for passing to [testing.MainStart].
+type TestDeps struct{}
+
+var matchPat string
+var matchRe *regexp.Regexp
+
+func (TestDeps) MatchString(pat, str string) (result bool, err error) {
+ if matchRe == nil || matchPat != pat {
+ matchPat = pat
+ matchRe, err = regexp.Compile(matchPat)
+ if err != nil {
+ return
+ }
+ }
+ return matchRe.MatchString(str), nil
+}
+
+func (TestDeps) StartCPUProfile(w io.Writer) error {
+ return pprof.StartCPUProfile(w)
+}
+
+func (TestDeps) StopCPUProfile() {
+ pprof.StopCPUProfile()
+}
+
+func (TestDeps) WriteProfileTo(name string, w io.Writer, debug int) error {
+ return pprof.Lookup(name).WriteTo(w, debug)
+}
+
+// ImportPath is the import path of the testing binary, set by the generated main function.
+var ImportPath string
+
+func (TestDeps) ImportPath() string {
+ return ImportPath
+}
+
+// testLog implements testlog.Interface, logging actions by package os.
+type testLog struct {
+ mu sync.Mutex
+ w *bufio.Writer
+ set bool
+}
+
+func (l *testLog) Getenv(key string) {
+ l.add("getenv", key)
+}
+
+func (l *testLog) Open(name string) {
+ l.add("open", name)
+}
+
+func (l *testLog) Stat(name string) {
+ l.add("stat", name)
+}
+
+func (l *testLog) Chdir(name string) {
+ l.add("chdir", name)
+}
+
+// add adds the (op, name) pair to the test log.
+func (l *testLog) add(op, name string) {
+ if strings.Contains(name, "\n") || name == "" {
+ return
+ }
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.w == nil {
+ return
+ }
+ l.w.WriteString(op)
+ l.w.WriteByte(' ')
+ l.w.WriteString(name)
+ l.w.WriteByte('\n')
+}
+
+var log testLog
+
+func (TestDeps) StartTestLog(w io.Writer) {
+ log.mu.Lock()
+ log.w = bufio.NewWriter(w)
+ if !log.set {
+ // Tests that define TestMain and then run m.Run multiple times
+ // will call StartTestLog/StopTestLog multiple times.
+ // Checking log.set avoids calling testlog.SetLogger multiple times
+ // (which will panic) and also avoids writing the header multiple times.
+ log.set = true
+ testlog.SetLogger(&log)
+ log.w.WriteString("# test log\n") // known to cmd/go/internal/test/test.go
+ }
+ log.mu.Unlock()
+}
+
+func (TestDeps) StopTestLog() error {
+ log.mu.Lock()
+ defer log.mu.Unlock()
+ err := log.w.Flush()
+ log.w = nil
+ return err
+}
+
+// SetPanicOnExit0 tells the os package whether to panic on os.Exit(0).
+func (TestDeps) SetPanicOnExit0(v bool) {
+ testlog.SetPanicOnExit0(v)
+}
+
+func (TestDeps) CoordinateFuzzing(
+ timeout time.Duration,
+ limit int64,
+ minimizeTimeout time.Duration,
+ minimizeLimit int64,
+ parallel int,
+ seed []fuzz.CorpusEntry,
+ types []reflect.Type,
+ corpusDir,
+ cacheDir string) (err error) {
+ // Fuzzing may be interrupted with a timeout or if the user presses ^C.
+ // In either case, we'll stop worker processes gracefully and save
+ // crashers and interesting values.
+ ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
+ defer cancel()
+ err = fuzz.CoordinateFuzzing(ctx, fuzz.CoordinateFuzzingOpts{
+ Log: os.Stderr,
+ Timeout: timeout,
+ Limit: limit,
+ MinimizeTimeout: minimizeTimeout,
+ MinimizeLimit: minimizeLimit,
+ Parallel: parallel,
+ Seed: seed,
+ Types: types,
+ CorpusDir: corpusDir,
+ CacheDir: cacheDir,
+ })
+ if err == ctx.Err() {
+ return nil
+ }
+ return err
+}
+
+func (TestDeps) RunFuzzWorker(fn func(fuzz.CorpusEntry) error) error {
+ // Worker processes may or may not receive a signal when the user presses ^C
+ // On POSIX operating systems, a signal sent to a process group is delivered
+ // to all processes in that group. This is not the case on Windows.
+ // If the worker is interrupted, return quickly and without error.
+ // If only the coordinator process is interrupted, it tells each worker
+ // process to stop by closing its "fuzz_in" pipe.
+ ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
+ defer cancel()
+ err := fuzz.RunFuzzWorker(ctx, fn)
+ if err == ctx.Err() {
+ return nil
+ }
+ return err
+}
+
+func (TestDeps) ReadCorpus(dir string, types []reflect.Type) ([]fuzz.CorpusEntry, error) {
+ return fuzz.ReadCorpus(dir, types)
+}
+
+func (TestDeps) CheckCorpus(vals []any, types []reflect.Type) error {
+ return fuzz.CheckCorpus(vals, types)
+}
+
+func (TestDeps) ResetCoverage() {
+ fuzz.ResetCoverage()
+}
+
+func (TestDeps) SnapshotCoverage() {
+ fuzz.SnapshotCoverage()
+}
diff --git a/src/testing/iotest/example_test.go b/src/testing/iotest/example_test.go
new file mode 100644
index 0000000..10f6bd3
--- /dev/null
+++ b/src/testing/iotest/example_test.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest_test
+
+import (
+ "errors"
+ "fmt"
+ "testing/iotest"
+)
+
+func ExampleErrReader() {
+ // A reader that always returns a custom error.
+ r := iotest.ErrReader(errors.New("custom error"))
+ n, err := r.Read(nil)
+ fmt.Printf("n: %d\nerr: %q\n", n, err)
+
+ // Output:
+ // n: 0
+ // err: "custom error"
+}
diff --git a/src/testing/iotest/logger.go b/src/testing/iotest/logger.go
new file mode 100644
index 0000000..10d0cb5
--- /dev/null
+++ b/src/testing/iotest/logger.go
@@ -0,0 +1,54 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest
+
+import (
+ "io"
+ "log"
+)
+
+type writeLogger struct {
+ prefix string
+ w io.Writer
+}
+
+func (l *writeLogger) Write(p []byte) (n int, err error) {
+ n, err = l.w.Write(p)
+ if err != nil {
+ log.Printf("%s %x: %v", l.prefix, p[0:n], err)
+ } else {
+ log.Printf("%s %x", l.prefix, p[0:n])
+ }
+ return
+}
+
+// NewWriteLogger returns a writer that behaves like w except
+// that it logs (using [log.Printf]) each write to standard error,
+// printing the prefix and the hexadecimal data written.
+func NewWriteLogger(prefix string, w io.Writer) io.Writer {
+ return &writeLogger{prefix, w}
+}
+
+type readLogger struct {
+ prefix string
+ r io.Reader
+}
+
+func (l *readLogger) Read(p []byte) (n int, err error) {
+ n, err = l.r.Read(p)
+ if err != nil {
+ log.Printf("%s %x: %v", l.prefix, p[0:n], err)
+ } else {
+ log.Printf("%s %x", l.prefix, p[0:n])
+ }
+ return
+}
+
+// NewReadLogger returns a reader that behaves like r except
+// that it logs (using [log.Printf]) each read to standard error,
+// printing the prefix and the hexadecimal data read.
+func NewReadLogger(prefix string, r io.Reader) io.Reader {
+ return &readLogger{prefix, r}
+}
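+
+// Illustrative sketch (not part of the upstream file): mirroring writes to
+// the standard logger in hexadecimal. The prefix string is arbitrary, and the
+// exact log output depends on the logger's configured flags and prefix.
+//
+//	var buf bytes.Buffer
+//	w := iotest.NewWriteLogger("out:", &buf)
+//	w.Write([]byte("hi")) // logs a line containing "out: 6869"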
diff --git a/src/testing/iotest/logger_test.go b/src/testing/iotest/logger_test.go
new file mode 100644
index 0000000..7a7d0aa
--- /dev/null
+++ b/src/testing/iotest/logger_test.go
@@ -0,0 +1,153 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "log"
+ "strings"
+ "testing"
+)
+
+type errWriter struct {
+ err error
+}
+
+func (w errWriter) Write([]byte) (int, error) {
+ return 0, w.err
+}
+
+func TestWriteLogger(t *testing.T) {
+ olw := log.Writer()
+ olf := log.Flags()
+ olp := log.Prefix()
+
+ // Revert the original log settings before we exit.
+ defer func() {
+ log.SetFlags(olf)
+ log.SetPrefix(olp)
+ log.SetOutput(olw)
+ }()
+
+ lOut := new(strings.Builder)
+ log.SetPrefix("lw: ")
+ log.SetOutput(lOut)
+ log.SetFlags(0)
+
+ lw := new(strings.Builder)
+ wl := NewWriteLogger("write:", lw)
+ if _, err := wl.Write([]byte("Hello, World!")); err != nil {
+ t.Fatalf("Unexpectedly failed to write: %v", err)
+ }
+
+ if g, w := lw.String(), "Hello, World!"; g != w {
+ t.Errorf("WriteLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
+ }
+ wantLogWithHex := fmt.Sprintf("lw: write: %x\n", "Hello, World!")
+ if g, w := lOut.String(), wantLogWithHex; g != w {
+ t.Errorf("WriteLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
+ }
+}
+
+func TestWriteLogger_errorOnWrite(t *testing.T) {
+ olw := log.Writer()
+ olf := log.Flags()
+ olp := log.Prefix()
+
+ // Revert the original log settings before we exit.
+ defer func() {
+ log.SetFlags(olf)
+ log.SetPrefix(olp)
+ log.SetOutput(olw)
+ }()
+
+ lOut := new(strings.Builder)
+ log.SetPrefix("lw: ")
+ log.SetOutput(lOut)
+ log.SetFlags(0)
+
+ lw := errWriter{err: errors.New("Write Error!")}
+ wl := NewWriteLogger("write:", lw)
+ if _, err := wl.Write([]byte("Hello, World!")); err == nil {
+ t.Fatalf("Unexpectedly succeeded to write: %v", err)
+ }
+
+ wantLogWithHex := fmt.Sprintf("lw: write: %x: %v\n", "", "Write Error!")
+ if g, w := lOut.String(), wantLogWithHex; g != w {
+ t.Errorf("WriteLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
+ }
+}
+
+func TestReadLogger(t *testing.T) {
+ olw := log.Writer()
+ olf := log.Flags()
+ olp := log.Prefix()
+
+ // Revert the original log settings before we exit.
+ defer func() {
+ log.SetFlags(olf)
+ log.SetPrefix(olp)
+ log.SetOutput(olw)
+ }()
+
+ lOut := new(strings.Builder)
+ log.SetPrefix("lr: ")
+ log.SetOutput(lOut)
+ log.SetFlags(0)
+
+ data := []byte("Hello, World!")
+ p := make([]byte, len(data))
+ lr := bytes.NewReader(data)
+ rl := NewReadLogger("read:", lr)
+
+ n, err := rl.Read(p)
+ if err != nil {
+ t.Fatalf("Unexpectedly failed to read: %v", err)
+ }
+
+ if g, w := p[:n], data; !bytes.Equal(g, w) {
+ t.Errorf("ReadLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
+ }
+
+ wantLogWithHex := fmt.Sprintf("lr: read: %x\n", "Hello, World!")
+ if g, w := lOut.String(), wantLogWithHex; g != w {
+ t.Errorf("ReadLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
+ }
+}
+
+func TestReadLogger_errorOnRead(t *testing.T) {
+ olw := log.Writer()
+ olf := log.Flags()
+ olp := log.Prefix()
+
+ // Revert the original log settings before we exit.
+ defer func() {
+ log.SetFlags(olf)
+ log.SetPrefix(olp)
+ log.SetOutput(olw)
+ }()
+
+ lOut := new(strings.Builder)
+ log.SetPrefix("lr: ")
+ log.SetOutput(lOut)
+ log.SetFlags(0)
+
+ data := []byte("Hello, World!")
+ p := make([]byte, len(data))
+
+ lr := ErrReader(errors.New("io failure"))
+ rl := NewReadLogger("read", lr)
+ n, err := rl.Read(p)
+ if err == nil {
+ t.Fatalf("Unexpectedly succeeded to read: %v", err)
+ }
+
+ wantLogWithHex := fmt.Sprintf("lr: read %x: io failure\n", p[:n])
+ if g, w := lOut.String(), wantLogWithHex; g != w {
+ t.Errorf("ReadLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
+ }
+}
diff --git a/src/testing/iotest/reader.go b/src/testing/iotest/reader.go
new file mode 100644
index 0000000..8529e1c
--- /dev/null
+++ b/src/testing/iotest/reader.go
@@ -0,0 +1,268 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package iotest implements Readers and Writers useful mainly for testing.
+package iotest
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// OneByteReader returns a Reader that implements
+// each non-empty Read by reading one byte from r.
+func OneByteReader(r io.Reader) io.Reader { return &oneByteReader{r} }
+
+type oneByteReader struct {
+ r io.Reader
+}
+
+func (r *oneByteReader) Read(p []byte) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ return r.r.Read(p[0:1])
+}
+
+// HalfReader returns a Reader that implements Read
+// by reading half as many requested bytes from r.
+func HalfReader(r io.Reader) io.Reader { return &halfReader{r} }
+
+type halfReader struct {
+ r io.Reader
+}
+
+func (r *halfReader) Read(p []byte) (int, error) {
+ return r.r.Read(p[0 : (len(p)+1)/2])
+}
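
OneByteReader and HalfReader exist to flush out callers that wrongly assume a single Read fills the whole buffer. A minimal sketch of the effect (illustrative only, not part of the upstream change):

	package main

	import (
		"fmt"
		"strings"
		"testing/iotest"
	)

	func main() {
		p := make([]byte, 8)

		// OneByteReader returns at most one byte per Read call.
		one := iotest.OneByteReader(strings.NewReader("gopher"))
		n, _ := one.Read(p)
		fmt.Println(n) // 1

		// HalfReader returns at most half of the requested bytes per call.
		half := iotest.HalfReader(strings.NewReader("gopher"))
		n, _ = half.Read(p)
		fmt.Println(n) // 4, i.e. (len(p)+1)/2
	}
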
+
+// DataErrReader changes the way errors are handled by a Reader. Normally, a
+// Reader returns an error (typically EOF) from the first Read call after the
+// last piece of data is read. DataErrReader wraps a Reader and changes its
+// behavior so the final error is returned along with the final data, instead
+// of in the first call after the final data.
+func DataErrReader(r io.Reader) io.Reader { return &dataErrReader{r, nil, make([]byte, 1024)} }
+
+type dataErrReader struct {
+ r io.Reader
+ unread []byte
+ data []byte
+}
+
+func (r *dataErrReader) Read(p []byte) (n int, err error) {
+ // loop because first call needs two reads:
+ // one to get data and a second to look for an error.
+ for {
+ if len(r.unread) == 0 {
+ n1, err1 := r.r.Read(r.data)
+ r.unread = r.data[0:n1]
+ err = err1
+ }
+ if n > 0 || err != nil {
+ break
+ }
+ n = copy(p, r.unread)
+ r.unread = r.unread[n:]
+ }
+ return
+}
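
DataErrReader is the standard way to check that a consumer honors io.Reader's permission to return n > 0 together with io.EOF. A short sketch of the difference (illustrative only, not part of the upstream change):

	package main

	import (
		"fmt"
		"strings"
		"testing/iotest"
	)

	func main() {
		p := make([]byte, 16)

		// Plain strings.Reader: the data and io.EOF arrive in separate calls.
		r := strings.NewReader("hi")
		fmt.Println(r.Read(p)) // 2 <nil>
		fmt.Println(r.Read(p)) // 0 EOF

		// DataErrReader: the final data and io.EOF arrive together.
		d := iotest.DataErrReader(strings.NewReader("hi"))
		fmt.Println(d.Read(p)) // 2 EOF
	}
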
+
+// ErrTimeout is a fake timeout error.
+var ErrTimeout = errors.New("timeout")
+
+// TimeoutReader returns [ErrTimeout] on the second read
+// with no data. Subsequent calls to read succeed.
+func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} }
+
+type timeoutReader struct {
+ r io.Reader
+ count int
+}
+
+func (r *timeoutReader) Read(p []byte) (int, error) {
+ r.count++
+ if r.count == 2 {
+ return 0, ErrTimeout
+ }
+ return r.r.Read(p)
+}
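
TimeoutReader is convenient for exercising retry logic: only the second Read fails, with ErrTimeout, and later reads behave normally again. Sketch (illustrative only):

	package main

	import (
		"fmt"
		"strings"
		"testing/iotest"
	)

	func main() {
		r := iotest.TimeoutReader(strings.NewReader("abcdef"))
		p := make([]byte, 2)

		fmt.Println(r.Read(p)) // 2 <nil>
		fmt.Println(r.Read(p)) // 0 timeout (iotest.ErrTimeout)
		fmt.Println(r.Read(p)) // 2 <nil>, subsequent reads succeed again
	}
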
+
+// ErrReader returns an [io.Reader] that returns 0, err from all Read calls.
+func ErrReader(err error) io.Reader {
+ return &errReader{err: err}
+}
+
+type errReader struct {
+ err error
+}
+
+func (r *errReader) Read(p []byte) (int, error) {
+ return 0, r.err
+}
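
ErrReader is the simplest of these helpers: every Read reports the supplied error and no data, which makes it handy for driving error paths (TestReadLogger_errorOnRead earlier in this patch uses it for exactly that). Sketch (illustrative only):

	package main

	import (
		"errors"
		"fmt"
		"io"
		"testing/iotest"
	)

	func main() {
		r := iotest.ErrReader(errors.New("disk on fire"))
		_, err := io.ReadAll(r)
		fmt.Println(err) // disk on fire
	}
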
+
+type smallByteReader struct {
+ r io.Reader
+ off int
+ n int
+}
+
+func (r *smallByteReader) Read(p []byte) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ r.n = r.n%3 + 1
+ n := r.n
+ if n > len(p) {
+ n = len(p)
+ }
+ n, err := r.r.Read(p[0:n])
+ if err != nil && err != io.EOF {
+ err = fmt.Errorf("Read(%d bytes at offset %d): %v", n, r.off, err)
+ }
+ r.off += n
+ return n, err
+}
+
+// TestReader tests that reading from r returns the expected file content.
+// It does reads of different sizes, until EOF.
+// If r implements [io.ReaderAt] or [io.Seeker], TestReader also checks
+// that those operations behave as they should.
+//
+// If TestReader finds any misbehaviors, it returns an error reporting them.
+// The error text may span multiple lines.
+func TestReader(r io.Reader, content []byte) error {
+ if len(content) > 0 {
+ n, err := r.Read(nil)
+ if n != 0 || err != nil {
+ return fmt.Errorf("Read(0) = %d, %v, want 0, nil", n, err)
+ }
+ }
+
+ data, err := io.ReadAll(&smallByteReader{r: r})
+ if err != nil {
+ return err
+ }
+ if !bytes.Equal(data, content) {
+ return fmt.Errorf("ReadAll(small amounts) = %q\n\twant %q", data, content)
+ }
+ n, err := r.Read(make([]byte, 10))
+ if n != 0 || err != io.EOF {
+ return fmt.Errorf("Read(10) at EOF = %v, %v, want 0, EOF", n, err)
+ }
+
+ if r, ok := r.(io.ReadSeeker); ok {
+ // Seek(0, 1) should report the current file position (EOF).
+ if off, err := r.Seek(0, 1); off != int64(len(content)) || err != nil {
+ return fmt.Errorf("Seek(0, 1) from EOF = %d, %v, want %d, nil", off, err, len(content))
+ }
+
+ // Seek backward partway through file, in two steps.
+ // If middle == 0, len(content) == 0, can't use the -1 and +1 seeks.
+ middle := len(content) - len(content)/3
+ if middle > 0 {
+ if off, err := r.Seek(-1, 1); off != int64(len(content)-1) || err != nil {
+ return fmt.Errorf("Seek(-1, 1) from EOF = %d, %v, want %d, nil", -off, err, len(content)-1)
+ }
+ if off, err := r.Seek(int64(-len(content)/3), 1); off != int64(middle-1) || err != nil {
+ return fmt.Errorf("Seek(%d, 1) from %d = %d, %v, want %d, nil", -len(content)/3, len(content)-1, off, err, middle-1)
+ }
+ if off, err := r.Seek(+1, 1); off != int64(middle) || err != nil {
+ return fmt.Errorf("Seek(+1, 1) from %d = %d, %v, want %d, nil", middle-1, off, err, middle)
+ }
+ }
+
+ // Seek(0, 1) should report the current file position (middle).
+ if off, err := r.Seek(0, 1); off != int64(middle) || err != nil {
+ return fmt.Errorf("Seek(0, 1) from %d = %d, %v, want %d, nil", middle, off, err, middle)
+ }
+
+ // Reading forward should return the last part of the file.
+ data, err := io.ReadAll(&smallByteReader{r: r})
+ if err != nil {
+ return fmt.Errorf("ReadAll from offset %d: %v", middle, err)
+ }
+ if !bytes.Equal(data, content[middle:]) {
+ return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:])
+ }
+
+ // Seek relative to end of file, but start elsewhere.
+ if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil {
+ return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2)
+ }
+ if off, err := r.Seek(int64(-len(content)/3), 2); off != int64(middle) || err != nil {
+ return fmt.Errorf("Seek(%d, 2) from %d = %d, %v, want %d, nil", -len(content)/3, middle/2, off, err, middle)
+ }
+
+ // Reading forward should return the last part of the file (again).
+ data, err = io.ReadAll(&smallByteReader{r: r})
+ if err != nil {
+ return fmt.Errorf("ReadAll from offset %d: %v", middle, err)
+ }
+ if !bytes.Equal(data, content[middle:]) {
+ return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:])
+ }
+
+ // Absolute seek & read forward.
+ if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil {
+ return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2)
+ }
+ data, err = io.ReadAll(r)
+ if err != nil {
+ return fmt.Errorf("ReadAll from offset %d: %v", middle/2, err)
+ }
+ if !bytes.Equal(data, content[middle/2:]) {
+ return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle/2, data, content[middle/2:])
+ }
+ }
+
+ if r, ok := r.(io.ReaderAt); ok {
+ data := make([]byte, len(content), len(content)+1)
+ for i := range data {
+ data[i] = 0xfe
+ }
+ n, err := r.ReadAt(data, 0)
+ if n != len(data) || err != nil && err != io.EOF {
+ return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, nil or EOF", len(data), n, err, len(data))
+ }
+ if !bytes.Equal(data, content) {
+ return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content)
+ }
+
+ n, err = r.ReadAt(data[:1], int64(len(data)))
+ if n != 0 || err != io.EOF {
+ return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 0, EOF", len(data), n, err)
+ }
+
+ for i := range data {
+ data[i] = 0xfe
+ }
+ n, err = r.ReadAt(data[:cap(data)], 0)
+ if n != len(data) || err != io.EOF {
+ return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, EOF", cap(data), n, err, len(data))
+ }
+ if !bytes.Equal(data, content) {
+ return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content)
+ }
+
+ for i := range data {
+ data[i] = 0xfe
+ }
+ for i := range data {
+ n, err = r.ReadAt(data[i:i+1], int64(i))
+ if n != 1 || err != nil && (i != len(data)-1 || err != io.EOF) {
+ want := "nil"
+ if i == len(data)-1 {
+ want = "nil or EOF"
+ }
+ return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 1, %s", i, n, err, want)
+ }
+ if data[i] != content[i] {
+ return fmt.Errorf("ReadAt(1, %d) = %q want %q", i, data[i:i+1], content[i:i+1])
+ }
+ }
+ }
+ return nil
+}
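
Because bytes.Reader implements io.ReaderAt and io.Seeker as well as io.Reader, it exercises every branch of TestReader above. A hedged sketch of how a reader implementation might be validated in its own test file (the package and test names are invented for illustration):

	package mypkg_test

	import (
		"bytes"
		"testing"
		"testing/iotest"
	)

	func TestMyReader(t *testing.T) {
		content := []byte("Now is the time for all good gophers.")
		// TestReader checks plain reads and, because bytes.Reader also
		// implements io.ReaderAt and io.Seeker, ReadAt and Seek behavior too.
		if err := iotest.TestReader(bytes.NewReader(content), content); err != nil {
			t.Fatal(err)
		}
	}
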
diff --git a/src/testing/iotest/reader_test.go b/src/testing/iotest/reader_test.go
new file mode 100644
index 0000000..1d22237
--- /dev/null
+++ b/src/testing/iotest/reader_test.go
@@ -0,0 +1,261 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strings"
+ "testing"
+)
+
+func TestOneByteReader_nonEmptyReader(t *testing.T) {
+ msg := "Hello, World!"
+ buf := new(bytes.Buffer)
+ buf.WriteString(msg)
+
+ obr := OneByteReader(buf)
+ var b []byte
+ n, err := obr.Read(b)
+ if err != nil || n != 0 {
+ t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+ }
+
+ b = make([]byte, 3)
+ // Read from obr until EOF.
+ got := new(strings.Builder)
+ for i := 0; ; i++ {
+ n, err = obr.Read(b)
+ if err != nil {
+ break
+ }
+ if g, w := n, 1; g != w {
+ t.Errorf("Iteration #%d read %d bytes, want %d", i, g, w)
+ }
+ got.Write(b[:n])
+ }
+ if g, w := err, io.EOF; g != w {
+ t.Errorf("Unexpected error after reading all bytes\n\tGot: %v\n\tWant: %v", g, w)
+ }
+ if g, w := got.String(), "Hello, World!"; g != w {
+ t.Errorf("Read mismatch\n\tGot: %q\n\tWant: %q", g, w)
+ }
+}
+
+func TestOneByteReader_emptyReader(t *testing.T) {
+ r := new(bytes.Buffer)
+
+ obr := OneByteReader(r)
+ var b []byte
+ if n, err := obr.Read(b); err != nil || n != 0 {
+ t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+ }
+
+ b = make([]byte, 5)
+ n, err := obr.Read(b)
+ if g, w := err, io.EOF; g != w {
+ t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
+ }
+ if g, w := n, 0; g != w {
+ t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+ }
+}
+
+func TestHalfReader_nonEmptyReader(t *testing.T) {
+ msg := "Hello, World!"
+ buf := new(bytes.Buffer)
+ buf.WriteString(msg)
+ // empty read buffer
+ hr := HalfReader(buf)
+ var b []byte
+ n, err := hr.Read(b)
+ if err != nil || n != 0 {
+ t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+ }
+ // non empty read buffer
+ b = make([]byte, 2)
+ got := new(strings.Builder)
+ for i := 0; ; i++ {
+ n, err = hr.Read(b)
+ if err != nil {
+ break
+ }
+ if g, w := n, 1; g != w {
+ t.Errorf("Iteration #%d read %d bytes, want %d", i, g, w)
+ }
+ got.Write(b[:n])
+ }
+ if g, w := err, io.EOF; g != w {
+ t.Errorf("Unexpected error after reading all bytes\n\tGot: %v\n\tWant: %v", g, w)
+ }
+ if g, w := got.String(), "Hello, World!"; g != w {
+ t.Errorf("Read mismatch\n\tGot: %q\n\tWant: %q", g, w)
+ }
+}
+
+func TestHalfReader_emptyReader(t *testing.T) {
+ r := new(bytes.Buffer)
+
+ hr := HalfReader(r)
+ var b []byte
+ if n, err := hr.Read(b); err != nil || n != 0 {
+ t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+ }
+
+ b = make([]byte, 5)
+ n, err := hr.Read(b)
+ if g, w := err, io.EOF; g != w {
+ t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
+ }
+ if g, w := n, 0; g != w {
+ t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+ }
+}
+
+func TestTimeOutReader_nonEmptyReader(t *testing.T) {
+ msg := "Hello, World!"
+ buf := new(bytes.Buffer)
+ buf.WriteString(msg)
+ // empty read buffer
+ tor := TimeoutReader(buf)
+ var b []byte
+ n, err := tor.Read(b)
+ if err != nil || n != 0 {
+ t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+ }
+ // Second call should timeout
+ n, err = tor.Read(b)
+ if g, w := err, ErrTimeout; g != w {
+ t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
+ }
+ if g, w := n, 0; g != w {
+ t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+ }
+ // non empty read buffer
+ tor2 := TimeoutReader(buf)
+ b = make([]byte, 3)
+ if n, err := tor2.Read(b); err != nil || n == 0 {
+ t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+ }
+ // Second call should timeout
+ n, err = tor2.Read(b)
+ if g, w := err, ErrTimeout; g != w {
+ t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
+ }
+ if g, w := n, 0; g != w {
+ t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+ }
+}
+
+func TestTimeOutReader_emptyReader(t *testing.T) {
+ r := new(bytes.Buffer)
+ // empty read buffer
+ tor := TimeoutReader(r)
+ var b []byte
+ if n, err := tor.Read(b); err != nil || n != 0 {
+ t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+ }
+ // Second call should timeout
+ n, err := tor.Read(b)
+ if g, w := err, ErrTimeout; g != w {
+ t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
+ }
+ if g, w := n, 0; g != w {
+ t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+ }
+ // non empty read buffer
+ tor2 := TimeoutReader(r)
+ b = make([]byte, 5)
+ if n, err := tor2.Read(b); err != io.EOF || n != 0 {
+ t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+ }
+ // Second call should timeout
+ n, err = tor2.Read(b)
+ if g, w := err, ErrTimeout; g != w {
+ t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
+ }
+ if g, w := n, 0; g != w {
+ t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+ }
+}
+
+func TestDataErrReader_nonEmptyReader(t *testing.T) {
+ msg := "Hello, World!"
+ buf := new(bytes.Buffer)
+ buf.WriteString(msg)
+
+ der := DataErrReader(buf)
+
+ b := make([]byte, 3)
+ got := new(strings.Builder)
+ var n int
+ var err error
+ for {
+ n, err = der.Read(b)
+ got.Write(b[:n])
+ if err != nil {
+ break
+ }
+ }
+ if err != io.EOF || n == 0 {
+ t.Errorf("Last Read returned n=%d err=%v", n, err)
+ }
+ if g, w := got.String(), "Hello, World!"; g != w {
+ t.Errorf("Read mismatch\n\tGot: %q\n\tWant: %q", g, w)
+ }
+}
+
+func TestDataErrReader_emptyReader(t *testing.T) {
+ r := new(bytes.Buffer)
+
+ der := DataErrReader(r)
+ var b []byte
+ if n, err := der.Read(b); err != io.EOF || n != 0 {
+ t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
+ }
+
+ b = make([]byte, 5)
+ n, err := der.Read(b)
+ if g, w := err, io.EOF; g != w {
+ t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
+ }
+ if g, w := n, 0; g != w {
+ t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
+ }
+}
+
+func TestErrReader(t *testing.T) {
+ cases := []struct {
+ name string
+ err error
+ }{
+ {"nil error", nil},
+ {"non-nil error", errors.New("io failure")},
+ {"io.EOF", io.EOF},
+ }
+
+ for _, tt := range cases {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ n, err := ErrReader(tt.err).Read(nil)
+ if err != tt.err {
+ t.Fatalf("Error mismatch\nGot: %v\nWant: %v", err, tt.err)
+ }
+ if n != 0 {
+ t.Fatalf("Byte count mismatch: got %d want 0", n)
+ }
+ })
+ }
+}
+
+func TestStringsReader(t *testing.T) {
+ const msg = "Now is the time for all good gophers."
+
+ r := strings.NewReader(msg)
+ if err := TestReader(r, []byte(msg)); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/src/testing/iotest/writer.go b/src/testing/iotest/writer.go
new file mode 100644
index 0000000..af61ab8
--- /dev/null
+++ b/src/testing/iotest/writer.go
@@ -0,0 +1,35 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest
+
+import "io"
+
+// TruncateWriter returns a Writer that writes to w
+// but stops silently after n bytes.
+func TruncateWriter(w io.Writer, n int64) io.Writer {
+ return &truncateWriter{w, n}
+}
+
+type truncateWriter struct {
+ w io.Writer
+ n int64
+}
+
+func (t *truncateWriter) Write(p []byte) (n int, err error) {
+ if t.n <= 0 {
+ return len(p), nil
+ }
+ // real write
+ n = len(p)
+ if int64(n) > t.n {
+ n = int(t.n)
+ }
+ n, err = t.w.Write(p[0:n])
+ t.n -= int64(n)
+ if err == nil {
+ n = len(p)
+ }
+ return
+}
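
Note that TruncateWriter reports the full length as written even though it silently drops everything past the first n bytes; the table-driven test that follows checks exactly that. Sketch (illustrative only):

	package main

	import (
		"fmt"
		"strings"
		"testing/iotest"
	)

	func main() {
		var sb strings.Builder
		w := iotest.TruncateWriter(&sb, 3)

		n, err := w.Write([]byte("abcde"))
		fmt.Println(n, err)      // 5 <nil>  (reports the full length)
		fmt.Println(sb.String()) // abc      (only 3 bytes reached sb)
	}
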
diff --git a/src/testing/iotest/writer_test.go b/src/testing/iotest/writer_test.go
new file mode 100644
index 0000000..2762513
--- /dev/null
+++ b/src/testing/iotest/writer_test.go
@@ -0,0 +1,39 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package iotest
+
+import (
+ "strings"
+ "testing"
+)
+
+var truncateWriterTests = []struct {
+ in string
+ want string
+ trunc int64
+ n int
+}{
+ {"hello", "", -1, 5},
+ {"world", "", 0, 5},
+ {"abcde", "abc", 3, 5},
+ {"edcba", "edcba", 7, 5},
+}
+
+func TestTruncateWriter(t *testing.T) {
+ for _, tt := range truncateWriterTests {
+ buf := new(strings.Builder)
+ tw := TruncateWriter(buf, tt.trunc)
+ n, err := tw.Write([]byte(tt.in))
+ if err != nil {
+ t.Errorf("Unexpected error %v for\n\t%+v", err, tt)
+ }
+ if g, w := buf.String(), tt.want; g != w {
+ t.Errorf("got %q, expected %q", g, w)
+ }
+ if g, w := n, tt.n; g != w {
+ t.Errorf("read %d bytes, but expected to have read %d bytes for\n\t%+v", g, w, tt)
+ }
+ }
+}
diff --git a/src/testing/match.go b/src/testing/match.go
new file mode 100644
index 0000000..84804dc
--- /dev/null
+++ b/src/testing/match.go
@@ -0,0 +1,317 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// matcher sanitizes, uniques, and filters names of subtests and subbenchmarks.
+type matcher struct {
+ filter filterMatch
+ skip filterMatch
+ matchFunc func(pat, str string) (bool, error)
+
+ mu sync.Mutex
+
+ // subNames is used to deduplicate subtest names.
+ // Each key is the subtest name joined to the deduplicated name of the parent test.
+ // Each value is the number of occurrences of the given subtest name
+ // already seen.
+ subNames map[string]int32
+}
+
+type filterMatch interface {
+ // matches checks the name against the receiver's pattern strings using the
+ // given match function.
+ matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool)
+
+ // verify checks that the receiver's pattern strings are valid filters by
+ // calling the given match function.
+ verify(name string, matchString func(pat, str string) (bool, error)) error
+}
+
+// simpleMatch matches a test name if all of the pattern strings match in
+// sequence.
+type simpleMatch []string
+
+// alternationMatch matches a test name if one of the alternations match.
+type alternationMatch []filterMatch
+
+// TODO: fix test_main to avoid race and improve caching, which would also
+// allow eliminating this Mutex.
+var matchMutex sync.Mutex
+
+func allMatcher() *matcher {
+ return newMatcher(nil, "", "", "")
+}
+
+func newMatcher(matchString func(pat, str string) (bool, error), patterns, name, skips string) *matcher {
+ var filter, skip filterMatch
+ if patterns == "" {
+ filter = simpleMatch{} // always partial true
+ } else {
+ filter = splitRegexp(patterns)
+ if err := filter.verify(name, matchString); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: invalid regexp for %s\n", err)
+ os.Exit(1)
+ }
+ }
+ if skips == "" {
+ skip = alternationMatch{} // always false
+ } else {
+ skip = splitRegexp(skips)
+ if err := skip.verify("-test.skip", matchString); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: invalid regexp for %v\n", err)
+ os.Exit(1)
+ }
+ }
+ return &matcher{
+ filter: filter,
+ skip: skip,
+ matchFunc: matchString,
+ subNames: map[string]int32{},
+ }
+}
+
+func (m *matcher) fullName(c *common, subname string) (name string, ok, partial bool) {
+ name = subname
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if c != nil && c.level > 0 {
+ name = m.unique(c.name, rewrite(subname))
+ }
+
+ matchMutex.Lock()
+ defer matchMutex.Unlock()
+
+ // We check the full array of paths each time to allow for the case that a pattern contains a '/'.
+ elem := strings.Split(name, "/")
+
+ // filter must match.
+ // accept partial match that may produce full match later.
+ ok, partial = m.filter.matches(elem, m.matchFunc)
+ if !ok {
+ return name, false, false
+ }
+
+ // skip must not match.
+ // ignore partial match so we can get to more precise match later.
+ skip, partialSkip := m.skip.matches(elem, m.matchFunc)
+ if skip && !partialSkip {
+ return name, false, false
+ }
+
+ return name, ok, partial
+}
+
+// clearSubNames clears the matcher's internal state, potentially freeing
+// memory. After this is called, T.Name may return the same strings as it did
+// for earlier subtests.
+func (m *matcher) clearSubNames() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ clear(m.subNames)
+}
+
+func (m simpleMatch) matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool) {
+ for i, s := range name {
+ if i >= len(m) {
+ break
+ }
+ if ok, _ := matchString(m[i], s); !ok {
+ return false, false
+ }
+ }
+ return true, len(name) < len(m)
+}
+
+func (m simpleMatch) verify(name string, matchString func(pat, str string) (bool, error)) error {
+ for i, s := range m {
+ m[i] = rewrite(s)
+ }
+ // Verify filters before doing any processing.
+ for i, s := range m {
+ if _, err := matchString(s, "non-empty"); err != nil {
+ return fmt.Errorf("element %d of %s (%q): %s", i, name, s, err)
+ }
+ }
+ return nil
+}
+
+func (m alternationMatch) matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool) {
+ for _, m := range m {
+ if ok, partial = m.matches(name, matchString); ok {
+ return ok, partial
+ }
+ }
+ return false, false
+}
+
+func (m alternationMatch) verify(name string, matchString func(pat, str string) (bool, error)) error {
+ for i, m := range m {
+ if err := m.verify(name, matchString); err != nil {
+ return fmt.Errorf("alternation %d of %s", i, err)
+ }
+ }
+ return nil
+}
+
+func splitRegexp(s string) filterMatch {
+ a := make(simpleMatch, 0, strings.Count(s, "/"))
+ b := make(alternationMatch, 0, strings.Count(s, "|"))
+ cs := 0
+ cp := 0
+ for i := 0; i < len(s); {
+ switch s[i] {
+ case '[':
+ cs++
+ case ']':
+ if cs--; cs < 0 { // An unmatched ']' is legal.
+ cs = 0
+ }
+ case '(':
+ if cs == 0 {
+ cp++
+ }
+ case ')':
+ if cs == 0 {
+ cp--
+ }
+ case '\\':
+ i++
+ case '/':
+ if cs == 0 && cp == 0 {
+ a = append(a, s[:i])
+ s = s[i+1:]
+ i = 0
+ continue
+ }
+ case '|':
+ if cs == 0 && cp == 0 {
+ a = append(a, s[:i])
+ s = s[i+1:]
+ i = 0
+ b = append(b, a)
+ a = make(simpleMatch, 0, len(a))
+ continue
+ }
+ }
+ i++
+ }
+
+ a = append(a, s)
+ if len(b) == 0 {
+ return a
+ }
+ return append(b, a)
+}
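
splitRegexp is what gives -run and -skip their subtest-aware syntax: each slash-separated element is matched against one level of the test name, and a top-level '|' produces alternations, as the table in match_test.go later in this patch verifies. A hedged sketch of how that surfaces to users (the test and subtest names are invented for illustration):

	package mypkg_test

	import "testing"

	// go test -run 'TestEncode/json'                    runs only TestEncode/json.
	// go test -run 'TestEncode/json|TestEncode/xml'     runs the json and xml subtests.
	// go test -run 'TestEncode' -skip 'TestEncode/slow' runs all but the slow one.
	func TestEncode(t *testing.T) {
		for _, name := range []string{"json", "xml", "slow"} {
			t.Run(name, func(t *testing.T) { /* ... */ })
		}
	}
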
+
+// unique creates a unique name for the given parent and subname by appending
+// one or more counts to it, if necessary.
+func (m *matcher) unique(parent, subname string) string {
+ base := parent + "/" + subname
+
+ for {
+ n := m.subNames[base]
+ if n < 0 {
+ panic("subtest count overflow")
+ }
+ m.subNames[base] = n + 1
+
+ if n == 0 && subname != "" {
+ prefix, nn := parseSubtestNumber(base)
+ if len(prefix) < len(base) && nn < m.subNames[prefix] {
+ // This test is explicitly named like "parent/subname#NN",
+ // and #NN was already used for the NNth occurrence of "parent/subname".
+ // Loop to add a disambiguating suffix.
+ continue
+ }
+ return base
+ }
+
+ name := fmt.Sprintf("%s#%02d", base, n)
+ if m.subNames[name] != 0 {
+ // This is the nth occurrence of base, but the name "parent/subname#NN"
+ // collides with the first occurrence of a subtest *explicitly* named
+ // "parent/subname#NN". Try the next number.
+ continue
+ }
+
+ return name
+ }
+}
+
+// parseSubtestNumber splits a subtest name into a "#%02d"-formatted int32
+// suffix (if present), and a prefix preceding that suffix (always).
+func parseSubtestNumber(s string) (prefix string, nn int32) {
+ i := strings.LastIndex(s, "#")
+ if i < 0 {
+ return s, 0
+ }
+
+ prefix, suffix := s[:i], s[i+1:]
+ if len(suffix) < 2 || (len(suffix) > 2 && suffix[0] == '0') {
+ // Even if suffix is numeric, it is not a possible output of a "%02d" format
+ // string: it has either too few digits or too many leading zeroes.
+ return s, 0
+ }
+ if suffix == "00" {
+ if !strings.HasSuffix(prefix, "/") {
+ // We only use "#00" as a suffix for subtests named with the empty
+ // string — it isn't a valid suffix if the subtest name is non-empty.
+ return s, 0
+ }
+ }
+
+ n, err := strconv.ParseInt(suffix, 10, 32)
+ if err != nil || n < 0 {
+ return s, 0
+ }
+ return prefix, int32(n)
+}
+
+// rewrite rewrites a subname so that it contains only printable characters
+// and no white space.
+func rewrite(s string) string {
+ b := []byte{}
+ for _, r := range s {
+ switch {
+ case isSpace(r):
+ b = append(b, '_')
+ case !strconv.IsPrint(r):
+ s := strconv.QuoteRune(r)
+ b = append(b, s[1:len(s)-1]...)
+ default:
+ b = append(b, string(r)...)
+ }
+ }
+ return string(b)
+}
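
rewrite and unique together determine the names reported by T.Name and used for -run matching: spaces become underscores, non-printable runes are escaped, and duplicate names get a #NN suffix. A small sketch of the observable effect (illustrative only; the exact rules are pinned down by namingTestCases later in this patch):

	package mypkg_test

	import "testing"

	func TestNames(t *testing.T) {
		t.Run("a b", func(t *testing.T) { t.Log(t.Name()) }) // TestNames/a_b
		t.Run("dup", func(t *testing.T) { t.Log(t.Name()) }) // TestNames/dup
		t.Run("dup", func(t *testing.T) { t.Log(t.Name()) }) // TestNames/dup#01
	}
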
+
+func isSpace(r rune) bool {
+ if r < 0x2000 {
+ switch r {
+ // Note: not the same as Unicode Z class.
+ case '\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0, 0x1680:
+ return true
+ }
+ } else {
+ if r <= 0x200a {
+ return true
+ }
+ switch r {
+ case 0x2028, 0x2029, 0x202f, 0x205f, 0x3000:
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/testing/match_test.go b/src/testing/match_test.go
new file mode 100644
index 0000000..d31efbc
--- /dev/null
+++ b/src/testing/match_test.go
@@ -0,0 +1,263 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+ "unicode"
+)
+
+func init() {
+ testingTesting = true
+}
+
+// Verify that our IsSpace agrees with unicode.IsSpace.
+func TestIsSpace(t *T) {
+ n := 0
+ for r := rune(0); r <= unicode.MaxRune; r++ {
+ if isSpace(r) != unicode.IsSpace(r) {
+ t.Errorf("IsSpace(%U)=%t incorrect", r, isSpace(r))
+ n++
+ if n > 10 {
+ return
+ }
+ }
+ }
+}
+
+func TestSplitRegexp(t *T) {
+ res := func(s ...string) filterMatch { return simpleMatch(s) }
+ alt := func(m ...filterMatch) filterMatch { return alternationMatch(m) }
+ testCases := []struct {
+ pattern string
+ result filterMatch
+ }{
+ // Correct patterns
+ // If a regexp pattern is correct, all split regexps need to be correct
+ // as well.
+ {"", res("")},
+ {"/", res("", "")},
+ {"//", res("", "", "")},
+ {"A", res("A")},
+ {"A/B", res("A", "B")},
+ {"A/B/", res("A", "B", "")},
+ {"/A/B/", res("", "A", "B", "")},
+ {"[A]/(B)", res("[A]", "(B)")},
+ {"[/]/[/]", res("[/]", "[/]")},
+ {"[/]/[:/]", res("[/]", "[:/]")},
+ {"/]", res("", "]")},
+ {"]/", res("]", "")},
+ {"]/[/]", res("]", "[/]")},
+ {`([)/][(])`, res(`([)/][(])`)},
+ {"[(]/[)]", res("[(]", "[)]")},
+
+ {"A/B|C/D", alt(res("A", "B"), res("C", "D"))},
+
+ // Faulty patterns
+ // Errors in original should produce at least one faulty regexp in results.
+ {")/", res(")/")},
+ {")/(/)", res(")/(", ")")},
+ {"a[/)b", res("a[/)b")},
+ {"(/]", res("(/]")},
+ {"(/", res("(/")},
+ {"[/]/[/", res("[/]", "[/")},
+ {`\p{/}`, res(`\p{`, "}")},
+ {`\p/`, res(`\p`, "")},
+ {`[[:/:]]`, res(`[[:/:]]`)},
+ }
+ for _, tc := range testCases {
+ a := splitRegexp(tc.pattern)
+ if !reflect.DeepEqual(a, tc.result) {
+ t.Errorf("splitRegexp(%q) = %#v; want %#v", tc.pattern, a, tc.result)
+ }
+
+ // If there is any error in the pattern, one of the returned subpatterns
+ // needs to have an error as well.
+ if _, err := regexp.Compile(tc.pattern); err != nil {
+ ok := true
+ if err := a.verify("", regexp.MatchString); err != nil {
+ ok = false
+ }
+ if ok {
+ t.Errorf("%s: expected error in any of %q", tc.pattern, a)
+ }
+ }
+ }
+}
+
+func TestMatcher(t *T) {
+ testCases := []struct {
+ pattern string
+ skip string
+ parent, sub string
+ ok bool
+ partial bool
+ }{
+ // Behavior without subtests.
+ {"", "", "", "TestFoo", true, false},
+ {"TestFoo", "", "", "TestFoo", true, false},
+ {"TestFoo/", "", "", "TestFoo", true, true},
+ {"TestFoo/bar/baz", "", "", "TestFoo", true, true},
+ {"TestFoo", "", "", "TestBar", false, false},
+ {"TestFoo/", "", "", "TestBar", false, false},
+ {"TestFoo/bar/baz", "", "", "TestBar/bar/baz", false, false},
+ {"", "TestBar", "", "TestFoo", true, false},
+ {"", "TestBar", "", "TestBar", false, false},
+
+ // Skipping a non-existent test doesn't change anything.
+ {"", "TestFoo/skipped", "", "TestFoo", true, false},
+ {"TestFoo", "TestFoo/skipped", "", "TestFoo", true, false},
+ {"TestFoo/", "TestFoo/skipped", "", "TestFoo", true, true},
+ {"TestFoo/bar/baz", "TestFoo/skipped", "", "TestFoo", true, true},
+ {"TestFoo", "TestFoo/skipped", "", "TestBar", false, false},
+ {"TestFoo/", "TestFoo/skipped", "", "TestBar", false, false},
+ {"TestFoo/bar/baz", "TestFoo/skipped", "", "TestBar/bar/baz", false, false},
+
+ // with subtests
+ {"", "", "TestFoo", "x", true, false},
+ {"TestFoo", "", "TestFoo", "x", true, false},
+ {"TestFoo/", "", "TestFoo", "x", true, false},
+ {"TestFoo/bar/baz", "", "TestFoo", "bar", true, true},
+
+ {"", "TestFoo/skipped", "TestFoo", "x", true, false},
+ {"TestFoo", "TestFoo/skipped", "TestFoo", "x", true, false},
+ {"TestFoo", "TestFoo/skipped", "TestFoo", "skipped", false, false},
+ {"TestFoo/", "TestFoo/skipped", "TestFoo", "x", true, false},
+ {"TestFoo/bar/baz", "TestFoo/skipped", "TestFoo", "bar", true, true},
+
+ // Subtest with a '/' in its name still allows for copy and pasted names
+ // to match.
+ {"TestFoo/bar/baz", "", "TestFoo", "bar/baz", true, false},
+ {"TestFoo/bar/baz", "TestFoo/bar/baz", "TestFoo", "bar/baz", false, false},
+ {"TestFoo/bar/baz", "TestFoo/bar/baz/skip", "TestFoo", "bar/baz", true, false},
+ {"TestFoo/bar/baz", "", "TestFoo/bar", "baz", true, false},
+ {"TestFoo/bar/baz", "", "TestFoo", "x", false, false},
+ {"TestFoo", "", "TestBar", "x", false, false},
+ {"TestFoo/", "", "TestBar", "x", false, false},
+ {"TestFoo/bar/baz", "", "TestBar", "x/bar/baz", false, false},
+
+ {"A/B|C/D", "", "TestA", "B", true, false},
+ {"A/B|C/D", "", "TestC", "D", true, false},
+ {"A/B|C/D", "", "TestA", "C", false, false},
+
+ // subtests only
+ {"", "", "TestFoo", "x", true, false},
+ {"/", "", "TestFoo", "x", true, false},
+ {"./", "", "TestFoo", "x", true, false},
+ {"./.", "", "TestFoo", "x", true, false},
+ {"/bar/baz", "", "TestFoo", "bar", true, true},
+ {"/bar/baz", "", "TestFoo", "bar/baz", true, false},
+ {"//baz", "", "TestFoo", "bar/baz", true, false},
+ {"//", "", "TestFoo", "bar/baz", true, false},
+ {"/bar/baz", "", "TestFoo/bar", "baz", true, false},
+ {"//foo", "", "TestFoo", "bar/baz", false, false},
+ {"/bar/baz", "", "TestFoo", "x", false, false},
+ {"/bar/baz", "", "TestBar", "x/bar/baz", false, false},
+ }
+
+ for _, tc := range testCases {
+ m := newMatcher(regexp.MatchString, tc.pattern, "-test.run", tc.skip)
+
+ parent := &common{name: tc.parent}
+ if tc.parent != "" {
+ parent.level = 1
+ }
+ if n, ok, partial := m.fullName(parent, tc.sub); ok != tc.ok || partial != tc.partial {
+ t.Errorf("for pattern %q, fullName(parent=%q, sub=%q) = %q, ok %v partial %v; want ok %v partial %v",
+ tc.pattern, tc.parent, tc.sub, n, ok, partial, tc.ok, tc.partial)
+ }
+ }
+}
+
+var namingTestCases = []struct{ name, want string }{
+ // Uniqueness
+ {"", "x/#00"},
+ {"", "x/#01"},
+ {"#0", "x/#0"}, // Doesn't conflict with #00 because the number of digits differs.
+ {"#00", "x/#00#01"}, // Conflicts with implicit #00 (used above), so add a suffix.
+ {"#", "x/#"},
+ {"#", "x/##01"},
+
+ {"t", "x/t"},
+ {"t", "x/t#01"},
+ {"t", "x/t#02"},
+ {"t#00", "x/t#00"}, // Explicit "#00" doesn't conflict with the unsuffixed first subtest.
+
+ {"a#01", "x/a#01"}, // user has subtest with this name.
+ {"a", "x/a"}, // doesn't conflict with this name.
+ {"a", "x/a#02"}, // This string is claimed now, so resume
+ {"a", "x/a#03"}, // with counting.
+ {"a#02", "x/a#02#01"}, // We already used a#02 once, so add a suffix.
+
+ {"b#00", "x/b#00"},
+ {"b", "x/b"}, // Implicit 0 doesn't conflict with explicit "#00".
+ {"b", "x/b#01"},
+ {"b#9223372036854775807", "x/b#9223372036854775807"}, // MaxInt64
+ {"b", "x/b#02"},
+ {"b", "x/b#03"},
+
+ // Sanitizing
+ {"A:1 B:2", "x/A:1_B:2"},
+ {"s\t\r\u00a0", "x/s___"},
+ {"\x01", `x/\x01`},
+ {"\U0010ffff", `x/\U0010ffff`},
+}
+
+func TestNaming(t *T) {
+ m := newMatcher(regexp.MatchString, "", "", "")
+ parent := &common{name: "x", level: 1} // top-level test.
+
+ for i, tc := range namingTestCases {
+ if got, _, _ := m.fullName(parent, tc.name); got != tc.want {
+ t.Errorf("%d:%s: got %q; want %q", i, tc.name, got, tc.want)
+ }
+ }
+}
+
+func FuzzNaming(f *F) {
+ for _, tc := range namingTestCases {
+ f.Add(tc.name)
+ }
+ parent := &common{name: "x", level: 1}
+ var m *matcher
+ var seen map[string]string
+ reset := func() {
+ m = allMatcher()
+ seen = make(map[string]string)
+ }
+ reset()
+
+ f.Fuzz(func(t *T, subname string) {
+ if len(subname) > 10 {
+ // Long names attract the OOM killer.
+ t.Skip()
+ }
+ name := m.unique(parent.name, subname)
+ if !strings.Contains(name, "/"+subname) {
+ t.Errorf("name %q does not contain subname %q", name, subname)
+ }
+ if prev, ok := seen[name]; ok {
+ t.Errorf("name %q generated by both %q and %q", name, prev, subname)
+ }
+ if len(seen) > 1e6 {
+ // Free up memory.
+ reset()
+ }
+ seen[name] = subname
+ })
+}
+
+// GoString returns a string that is more readable than the default, which makes
+// it easier to read test errors.
+func (m alternationMatch) GoString() string {
+ s := make([]string, len(m))
+ for i, m := range m {
+ s[i] = fmt.Sprintf("%#v", m)
+ }
+ return fmt.Sprintf("(%s)", strings.Join(s, " | "))
+}
diff --git a/src/testing/newcover.go b/src/testing/newcover.go
new file mode 100644
index 0000000..6199f3b
--- /dev/null
+++ b/src/testing/newcover.go
@@ -0,0 +1,59 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Support for test coverage with the redesigned coverage implementation.
+
+package testing
+
+import (
+ "fmt"
+ "internal/goexperiment"
+ "os"
+)
+
+// cover2 stores the current coverage mode, a tear-down function to be called
+// at the end of the testing run, and a function that snapshots the current
+// coverage fraction.
+var cover2 struct {
+ mode string
+ tearDown func(coverprofile string, gocoverdir string) (string, error)
+ snapshotcov func() float64
+}
+
+// registerCover2 is invoked during "go test -cover" runs by the test harness
+// code in _testmain.go; it is used to record a 'tear down' function
+// (to be called when the test is complete) and the coverage mode.
+func registerCover2(mode string, tearDown func(coverprofile string, gocoverdir string) (string, error), snapcov func() float64) {
+ cover2.mode = mode
+ cover2.tearDown = tearDown
+ cover2.snapshotcov = snapcov
+}
+
+// coverReport2 invokes a callback in _testmain.go that will
+// emit coverage data at the point where test execution is complete,
+// for "go test -cover" runs.
+func coverReport2() {
+ if !goexperiment.CoverageRedesign {
+ panic("unexpected")
+ }
+ if errmsg, err := cover2.tearDown(*coverProfile, *gocoverdir); err != nil {
+ fmt.Fprintf(os.Stderr, "%s: %v\n", errmsg, err)
+ os.Exit(2)
+ }
+}
+
+// testGoCoverDir returns the value passed to the -test.gocoverdir
+// flag by the Go command, if goexperiment.CoverageRedesign is
+// in effect.
+func testGoCoverDir() string {
+ return *gocoverdir
+}
+
+// coverage2 returns a rough "coverage percentage so far"
+// number to support the testing.Coverage() function.
+func coverage2() float64 {
+ if cover2.mode == "" {
+ return 0.0
+ }
+ return cover2.snapshotcov()
+}
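
coverage2 is what backs testing.Coverage when the redesigned coverage implementation is enabled; from a test's point of view nothing changes. A hedged sketch (only meaningful when the binary is built with go test -cover; otherwise Coverage reports 0):

	package mypkg_test

	import "testing"

	func TestCoverageSoFar(t *testing.T) {
		// testing.Coverage returns a rough fraction of statements covered so far.
		t.Logf("coverage so far: %.1f%%", testing.Coverage()*100)
	}
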
diff --git a/src/testing/panic_test.go b/src/testing/panic_test.go
new file mode 100644
index 0000000..6307b84
--- /dev/null
+++ b/src/testing/panic_test.go
@@ -0,0 +1,267 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing_test
+
+import (
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "regexp"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+var testPanicTest = flag.String("test_panic_test", "", "TestPanic: indicates which test should panic")
+var testPanicParallel = flag.Bool("test_panic_parallel", false, "TestPanic: run subtests in parallel")
+var testPanicCleanup = flag.Bool("test_panic_cleanup", false, "TestPanic: indicates whether test should call Cleanup")
+var testPanicCleanupPanic = flag.String("test_panic_cleanup_panic", "", "TestPanic: indicates which Cleanup function (outer, middle, or inner), if any, should panic")
+
+func TestPanic(t *testing.T) {
+ testenv.MustHaveExec(t)
+
+ testCases := []struct {
+ desc string
+ flags []string
+ want string
+ }{{
+ desc: "root test panics",
+ flags: []string{"-test_panic_test=TestPanicHelper"},
+ want: `
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+`,
+ }, {
+ desc: "subtest panics",
+ flags: []string{"-test_panic_test=TestPanicHelper/1"},
+ want: `
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }, {
+ desc: "subtest panics with cleanup",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }, {
+ desc: "subtest panics with outer cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=outer"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+`,
+ }, {
+ desc: "subtest panics with middle cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=middle"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }, {
+ desc: "subtest panics with inner cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=inner"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }, {
+ desc: "parallel subtest panics with cleanup",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_parallel"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }, {
+ desc: "parallel subtest panics with outer cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=outer", "-test_panic_parallel"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+`,
+ }, {
+ desc: "parallel subtest panics with middle cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=middle", "-test_panic_parallel"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }, {
+ desc: "parallel subtest panics with inner cleanup panic",
+ flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=inner", "-test_panic_parallel"},
+ want: `
+ran inner cleanup 1
+ran middle cleanup 1
+ran outer cleanup
+--- FAIL: TestPanicHelper (N.NNs)
+ panic_test.go:NNN: TestPanicHelper
+ --- FAIL: TestPanicHelper/1 (N.NNs)
+ panic_test.go:NNN: TestPanicHelper/1
+`,
+ }}
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ cmd := exec.Command(os.Args[0], "-test.run=^TestPanicHelper$")
+ cmd.Args = append(cmd.Args, tc.flags...)
+ cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
+ b, _ := cmd.CombinedOutput()
+ got := string(b)
+ want := strings.TrimSpace(tc.want)
+ re := makeRegexp(want)
+ if ok, err := regexp.MatchString(re, got); !ok || err != nil {
+ t.Errorf("output:\ngot:\n%s\nwant:\n%s", got, want)
+ }
+ })
+ }
+}
+
+func makeRegexp(s string) string {
+ s = regexp.QuoteMeta(s)
+ s = strings.ReplaceAll(s, ":NNN:", `:\d+:`)
+ s = strings.ReplaceAll(s, "N\\.NNs", `\d*\.\d*s`)
+ return s
+}
+
+func TestPanicHelper(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+ t.Log(t.Name())
+ if t.Name() == *testPanicTest {
+ panic("panic")
+ }
+ switch *testPanicCleanupPanic {
+ case "", "outer", "middle", "inner":
+ default:
+ t.Fatalf("bad -test_panic_cleanup_panic: %s", *testPanicCleanupPanic)
+ }
+ t.Cleanup(func() {
+ fmt.Println("ran outer cleanup")
+ if *testPanicCleanupPanic == "outer" {
+ panic("outer cleanup")
+ }
+ })
+ for i := 0; i < 3; i++ {
+ i := i
+ t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
+ chosen := t.Name() == *testPanicTest
+ if chosen && *testPanicCleanup {
+ t.Cleanup(func() {
+ fmt.Printf("ran middle cleanup %d\n", i)
+ if *testPanicCleanupPanic == "middle" {
+ panic("middle cleanup")
+ }
+ })
+ }
+ if chosen && *testPanicParallel {
+ t.Parallel()
+ }
+ t.Log(t.Name())
+ if chosen {
+ if *testPanicCleanup {
+ t.Cleanup(func() {
+ fmt.Printf("ran inner cleanup %d\n", i)
+ if *testPanicCleanupPanic == "inner" {
+ panic("inner cleanup")
+ }
+ })
+ }
+ panic("panic")
+ }
+ })
+ }
+}
+
+func TestMorePanic(t *testing.T) {
+ testenv.MustHaveExec(t)
+
+ testCases := []struct {
+ desc string
+ flags []string
+ want string
+ }{
+ {
+ desc: "Issue 48502: call runtime.Goexit in t.Cleanup after panic",
+ flags: []string{"-test.run=^TestGoexitInCleanupAfterPanicHelper$"},
+ want: `panic: die
+ panic: test executed panic(nil) or runtime.Goexit`,
+ },
+ {
+ desc: "Issue 48515: call t.Run in t.Cleanup should trigger panic",
+ flags: []string{"-test.run=^TestCallRunInCleanupHelper$"},
+ want: `panic: testing: t.Run called during t.Cleanup`,
+ },
+ }
+
+ for _, tc := range testCases {
+ cmd := exec.Command(os.Args[0], tc.flags...)
+ cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
+ b, _ := cmd.CombinedOutput()
+ got := string(b)
+ want := tc.want
+ re := makeRegexp(want)
+ if ok, err := regexp.MatchString(re, got); !ok || err != nil {
+ t.Errorf("output:\ngot:\n%s\nwant:\n%s", got, want)
+ }
+ }
+}
+
+func TestCallRunInCleanupHelper(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+
+ t.Cleanup(func() {
+ t.Run("in-cleanup", func(t *testing.T) {
+ t.Log("must not be executed")
+ })
+ })
+}
+
+func TestGoexitInCleanupAfterPanicHelper(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+
+ t.Cleanup(func() { runtime.Goexit() })
+ t.Parallel()
+ panic("die")
+}
diff --git a/src/testing/quick/quick.go b/src/testing/quick/quick.go
new file mode 100644
index 0000000..8ef9cf7
--- /dev/null
+++ b/src/testing/quick/quick.go
@@ -0,0 +1,385 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quick implements utility functions to help with black box testing.
+//
+// The testing/quick package is frozen and is not accepting new features.
+package quick
+
+import (
+ "flag"
+ "fmt"
+ "math"
+ "math/rand"
+ "reflect"
+ "strings"
+ "time"
+)
+
+var defaultMaxCount *int = flag.Int("quickchecks", 100, "The default number of iterations for each check")
+
+// A Generator can generate random values of its own type.
+type Generator interface {
+ // Generate returns a random instance of the type on which it is a
+// method, using the size as a size hint.
+ Generate(rand *rand.Rand, size int) reflect.Value
+}
+
+// randFloat32 generates a random float taking the full range of a float32.
+func randFloat32(rand *rand.Rand) float32 {
+ f := rand.Float64() * math.MaxFloat32
+ if rand.Int()&1 == 1 {
+ f = -f
+ }
+ return float32(f)
+}
+
+// randFloat64 generates a random float taking the full range of a float64.
+func randFloat64(rand *rand.Rand) float64 {
+ f := rand.Float64() * math.MaxFloat64
+ if rand.Int()&1 == 1 {
+ f = -f
+ }
+ return f
+}
+
+// randInt64 returns a random int64.
+func randInt64(rand *rand.Rand) int64 {
+ return int64(rand.Uint64())
+}
+
+// complexSize is the maximum length of arbitrary values that contain other
+// values.
+const complexSize = 50
+
+// Value returns an arbitrary value of the given type.
+// If the type implements the [Generator] interface, that will be used.
+// Note: To create arbitrary values for structs, all the fields must be exported.
+func Value(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool) {
+ return sizedValue(t, rand, complexSize)
+}
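
Value consults the Generator interface first, so a type can take full control of the random values produced for it; otherwise sizedValue's reflection-based fallback below is used. A hedged sketch (the Temperature and Point types are invented for illustration):

	package main

	import (
		"fmt"
		"math/rand"
		"reflect"
		"testing/quick"
	)

	// Temperature constrains its arbitrary values to a plausible range
	// by implementing quick.Generator.
	type Temperature float64

	func (Temperature) Generate(r *rand.Rand, size int) reflect.Value {
		return reflect.ValueOf(Temperature(-50 + r.Float64()*150))
	}

	func main() {
		r := rand.New(rand.NewSource(1))

		// Uses Temperature.Generate.
		v, _ := quick.Value(reflect.TypeOf(Temperature(0)), r)
		fmt.Println(v.Interface())

		// Uses the reflection fallback: exported struct fields are filled in.
		type Point struct{ X, Y int }
		p, ok := quick.Value(reflect.TypeOf(Point{}), r)
		fmt.Println(p.Interface(), ok)
	}
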
+
+// sizedValue returns an arbitrary value of the given type. The size
+// hint is used for shrinking as a function of indirection level so
+// that recursive data structures will terminate.
+func sizedValue(t reflect.Type, rand *rand.Rand, size int) (value reflect.Value, ok bool) {
+ if m, ok := reflect.Zero(t).Interface().(Generator); ok {
+ return m.Generate(rand, size), true
+ }
+
+ v := reflect.New(t).Elem()
+ switch concrete := t; concrete.Kind() {
+ case reflect.Bool:
+ v.SetBool(rand.Int()&1 == 0)
+ case reflect.Float32:
+ v.SetFloat(float64(randFloat32(rand)))
+ case reflect.Float64:
+ v.SetFloat(randFloat64(rand))
+ case reflect.Complex64:
+ v.SetComplex(complex(float64(randFloat32(rand)), float64(randFloat32(rand))))
+ case reflect.Complex128:
+ v.SetComplex(complex(randFloat64(rand), randFloat64(rand)))
+ case reflect.Int16:
+ v.SetInt(randInt64(rand))
+ case reflect.Int32:
+ v.SetInt(randInt64(rand))
+ case reflect.Int64:
+ v.SetInt(randInt64(rand))
+ case reflect.Int8:
+ v.SetInt(randInt64(rand))
+ case reflect.Int:
+ v.SetInt(randInt64(rand))
+ case reflect.Uint16:
+ v.SetUint(uint64(randInt64(rand)))
+ case reflect.Uint32:
+ v.SetUint(uint64(randInt64(rand)))
+ case reflect.Uint64:
+ v.SetUint(uint64(randInt64(rand)))
+ case reflect.Uint8:
+ v.SetUint(uint64(randInt64(rand)))
+ case reflect.Uint:
+ v.SetUint(uint64(randInt64(rand)))
+ case reflect.Uintptr:
+ v.SetUint(uint64(randInt64(rand)))
+ case reflect.Map:
+ numElems := rand.Intn(size)
+ v.Set(reflect.MakeMap(concrete))
+ for i := 0; i < numElems; i++ {
+ key, ok1 := sizedValue(concrete.Key(), rand, size)
+ value, ok2 := sizedValue(concrete.Elem(), rand, size)
+ if !ok1 || !ok2 {
+ return reflect.Value{}, false
+ }
+ v.SetMapIndex(key, value)
+ }
+ case reflect.Pointer:
+ if rand.Intn(size) == 0 {
+ v.SetZero() // Generate nil pointer.
+ } else {
+ elem, ok := sizedValue(concrete.Elem(), rand, size)
+ if !ok {
+ return reflect.Value{}, false
+ }
+ v.Set(reflect.New(concrete.Elem()))
+ v.Elem().Set(elem)
+ }
+ case reflect.Slice:
+ numElems := rand.Intn(size)
+ sizeLeft := size - numElems
+ v.Set(reflect.MakeSlice(concrete, numElems, numElems))
+ for i := 0; i < numElems; i++ {
+ elem, ok := sizedValue(concrete.Elem(), rand, sizeLeft)
+ if !ok {
+ return reflect.Value{}, false
+ }
+ v.Index(i).Set(elem)
+ }
+ case reflect.Array:
+ for i := 0; i < v.Len(); i++ {
+ elem, ok := sizedValue(concrete.Elem(), rand, size)
+ if !ok {
+ return reflect.Value{}, false
+ }
+ v.Index(i).Set(elem)
+ }
+ case reflect.String:
+ numChars := rand.Intn(complexSize)
+ codePoints := make([]rune, numChars)
+ for i := 0; i < numChars; i++ {
+ codePoints[i] = rune(rand.Intn(0x10ffff))
+ }
+ v.SetString(string(codePoints))
+ case reflect.Struct:
+ n := v.NumField()
+ // Divide sizeLeft evenly among the struct fields.
+ sizeLeft := size
+ if n > sizeLeft {
+ sizeLeft = 1
+ } else if n > 0 {
+ sizeLeft /= n
+ }
+ for i := 0; i < n; i++ {
+ elem, ok := sizedValue(concrete.Field(i).Type, rand, sizeLeft)
+ if !ok {
+ return reflect.Value{}, false
+ }
+ v.Field(i).Set(elem)
+ }
+ default:
+ return reflect.Value{}, false
+ }
+
+ return v, true
+}
+
+// A Config structure contains options for running a test.
+type Config struct {
+ // MaxCount sets the maximum number of iterations.
+ // If zero, MaxCountScale is used.
+ MaxCount int
+ // MaxCountScale is a non-negative scale factor applied to the
+ // default maximum.
+ // A count of zero implies the default, which is usually 100
+ // but can be set by the -quickchecks flag.
+ MaxCountScale float64
+ // Rand specifies a source of random numbers.
+ // If nil, a default pseudo-random source will be used.
+ Rand *rand.Rand
+ // Values specifies a function to generate a slice of
+ // arbitrary reflect.Values that are congruent with the
+ // arguments to the function being tested.
+ // If nil, the top-level Value function is used to generate them.
+ Values func([]reflect.Value, *rand.Rand)
+}
+
+var defaultConfig Config
+
+// getRand returns the *rand.Rand to use for a given Config.
+func (c *Config) getRand() *rand.Rand {
+ if c.Rand == nil {
+ return rand.New(rand.NewSource(time.Now().UnixNano()))
+ }
+ return c.Rand
+}
+
+// getMaxCount returns the maximum number of iterations to run for a given
+// Config.
+func (c *Config) getMaxCount() (maxCount int) {
+ maxCount = c.MaxCount
+ if maxCount == 0 {
+ if c.MaxCountScale != 0 {
+ maxCount = int(c.MaxCountScale * float64(*defaultMaxCount))
+ } else {
+ maxCount = *defaultMaxCount
+ }
+ }
+
+ return
+}
+
+// A SetupError is the result of an error in the way that check is being
+// used, independent of the functions being tested.
+type SetupError string
+
+func (s SetupError) Error() string { return string(s) }
+
+// A CheckError is the result of Check finding an error.
+type CheckError struct {
+ Count int
+ In []any
+}
+
+func (s *CheckError) Error() string {
+ return fmt.Sprintf("#%d: failed on input %s", s.Count, toString(s.In))
+}
+
+// A CheckEqualError is the result of [CheckEqual] finding an error.
+type CheckEqualError struct {
+ CheckError
+ Out1 []any
+ Out2 []any
+}
+
+func (s *CheckEqualError) Error() string {
+ return fmt.Sprintf("#%d: failed on input %s. Output 1: %s. Output 2: %s", s.Count, toString(s.In), toString(s.Out1), toString(s.Out2))
+}
+
+// Check looks for an input to f, any function that returns bool,
+// such that f returns false. It calls f repeatedly, with arbitrary
+// values for each argument. If f returns false on a given input,
+// Check returns that input as a *[CheckError].
+// For example:
+//
+// func TestOddMultipleOfThree(t *testing.T) {
+// f := func(x int) bool {
+// y := OddMultipleOfThree(x)
+// return y%2 == 1 && y%3 == 0
+// }
+// if err := quick.Check(f, nil); err != nil {
+// t.Error(err)
+// }
+// }
+func Check(f any, config *Config) error {
+ if config == nil {
+ config = &defaultConfig
+ }
+
+ fVal, fType, ok := functionAndType(f)
+ if !ok {
+ return SetupError("argument is not a function")
+ }
+
+ if fType.NumOut() != 1 {
+ return SetupError("function does not return one value")
+ }
+ if fType.Out(0).Kind() != reflect.Bool {
+ return SetupError("function does not return a bool")
+ }
+
+ arguments := make([]reflect.Value, fType.NumIn())
+ rand := config.getRand()
+ maxCount := config.getMaxCount()
+
+ for i := 0; i < maxCount; i++ {
+ err := arbitraryValues(arguments, fType, config, rand)
+ if err != nil {
+ return err
+ }
+
+ if !fVal.Call(arguments)[0].Bool() {
+ return &CheckError{i + 1, toInterfaces(arguments)}
+ }
+ }
+
+ return nil
+}
+
+// CheckEqual looks for an input on which f and g return different results.
+// It calls f and g repeatedly with arbitrary values for each argument.
+// If f and g return different answers, CheckEqual returns a *[CheckEqualError]
+// describing the input and the outputs.
+func CheckEqual(f, g any, config *Config) error {
+ if config == nil {
+ config = &defaultConfig
+ }
+
+ x, xType, ok := functionAndType(f)
+ if !ok {
+ return SetupError("f is not a function")
+ }
+ y, yType, ok := functionAndType(g)
+ if !ok {
+ return SetupError("g is not a function")
+ }
+
+ if xType != yType {
+ return SetupError("functions have different types")
+ }
+
+ arguments := make([]reflect.Value, xType.NumIn())
+ rand := config.getRand()
+ maxCount := config.getMaxCount()
+
+ for i := 0; i < maxCount; i++ {
+ err := arbitraryValues(arguments, xType, config, rand)
+ if err != nil {
+ return err
+ }
+
+ xOut := toInterfaces(x.Call(arguments))
+ yOut := toInterfaces(y.Call(arguments))
+
+ if !reflect.DeepEqual(xOut, yOut) {
+ return &CheckEqualError{CheckError{i + 1, toInterfaces(arguments)}, xOut, yOut}
+ }
+ }
+
+ return nil
+}
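
CheckEqual is most useful for pitting an optimized implementation against an obviously correct one, with Config controlling the iteration count. A hedged sketch (both reverse implementations are invented for illustration):

	package main

	import (
		"fmt"
		"testing/quick"
	)

	// reverseFast and reverseNaive are meant to be interchangeable.
	func reverseFast(s []byte) []byte {
		out := make([]byte, len(s))
		for i, b := range s {
			out[len(s)-1-i] = b
		}
		return out
	}

	func reverseNaive(s []byte) []byte {
		out := make([]byte, 0, len(s))
		for i := len(s) - 1; i >= 0; i-- {
			out = append(out, s[i])
		}
		return out
	}

	func main() {
		cfg := &quick.Config{MaxCount: 1000}
		if err := quick.CheckEqual(reverseFast, reverseNaive, cfg); err != nil {
			// err is a *quick.CheckEqualError describing the failing input.
			fmt.Println(err)
		}
	}
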
+
+// arbitraryValues writes Values to args such that args contains Values
+// suitable for calling f.
+func arbitraryValues(args []reflect.Value, f reflect.Type, config *Config, rand *rand.Rand) (err error) {
+ if config.Values != nil {
+ config.Values(args, rand)
+ return
+ }
+
+ for j := 0; j < len(args); j++ {
+ var ok bool
+ args[j], ok = Value(f.In(j), rand)
+ if !ok {
+ err = SetupError(fmt.Sprintf("cannot create arbitrary value of type %s for argument %d", f.In(j), j))
+ return
+ }
+ }
+
+ return
+}
+
+func functionAndType(f any) (v reflect.Value, t reflect.Type, ok bool) {
+ v = reflect.ValueOf(f)
+ ok = v.Kind() == reflect.Func
+ if !ok {
+ return
+ }
+ t = v.Type()
+ return
+}
+
+func toInterfaces(values []reflect.Value) []any {
+ ret := make([]any, len(values))
+ for i, v := range values {
+ ret[i] = v.Interface()
+ }
+ return ret
+}
+
+func toString(interfaces []any) string {
+ s := make([]string, len(interfaces))
+ for i, v := range interfaces {
+ s[i] = fmt.Sprintf("%#v", v)
+ }
+ return strings.Join(s, ", ")
+}
diff --git a/src/testing/quick/quick_test.go b/src/testing/quick/quick_test.go
new file mode 100644
index 0000000..9df6dd4
--- /dev/null
+++ b/src/testing/quick/quick_test.go
@@ -0,0 +1,327 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quick
+
+import (
+ "math/rand"
+ "reflect"
+ "testing"
+)
+
+func fArray(a [4]byte) [4]byte { return a }
+
+type TestArrayAlias [4]byte
+
+func fArrayAlias(a TestArrayAlias) TestArrayAlias { return a }
+
+func fBool(a bool) bool { return a }
+
+type TestBoolAlias bool
+
+func fBoolAlias(a TestBoolAlias) TestBoolAlias { return a }
+
+func fFloat32(a float32) float32 { return a }
+
+type TestFloat32Alias float32
+
+func fFloat32Alias(a TestFloat32Alias) TestFloat32Alias { return a }
+
+func fFloat64(a float64) float64 { return a }
+
+type TestFloat64Alias float64
+
+func fFloat64Alias(a TestFloat64Alias) TestFloat64Alias { return a }
+
+func fComplex64(a complex64) complex64 { return a }
+
+type TestComplex64Alias complex64
+
+func fComplex64Alias(a TestComplex64Alias) TestComplex64Alias { return a }
+
+func fComplex128(a complex128) complex128 { return a }
+
+type TestComplex128Alias complex128
+
+func fComplex128Alias(a TestComplex128Alias) TestComplex128Alias { return a }
+
+func fInt16(a int16) int16 { return a }
+
+type TestInt16Alias int16
+
+func fInt16Alias(a TestInt16Alias) TestInt16Alias { return a }
+
+func fInt32(a int32) int32 { return a }
+
+type TestInt32Alias int32
+
+func fInt32Alias(a TestInt32Alias) TestInt32Alias { return a }
+
+func fInt64(a int64) int64 { return a }
+
+type TestInt64Alias int64
+
+func fInt64Alias(a TestInt64Alias) TestInt64Alias { return a }
+
+func fInt8(a int8) int8 { return a }
+
+type TestInt8Alias int8
+
+func fInt8Alias(a TestInt8Alias) TestInt8Alias { return a }
+
+func fInt(a int) int { return a }
+
+type TestIntAlias int
+
+func fIntAlias(a TestIntAlias) TestIntAlias { return a }
+
+func fMap(a map[int]int) map[int]int { return a }
+
+type TestMapAlias map[int]int
+
+func fMapAlias(a TestMapAlias) TestMapAlias { return a }
+
+func fPtr(a *int) *int {
+ if a == nil {
+ return nil
+ }
+ b := *a
+ return &b
+}
+
+type TestPtrAlias *int
+
+func fPtrAlias(a TestPtrAlias) TestPtrAlias { return a }
+
+func fSlice(a []byte) []byte { return a }
+
+type TestSliceAlias []byte
+
+func fSliceAlias(a TestSliceAlias) TestSliceAlias { return a }
+
+func fString(a string) string { return a }
+
+type TestStringAlias string
+
+func fStringAlias(a TestStringAlias) TestStringAlias { return a }
+
+type TestStruct struct {
+ A int
+ B string
+}
+
+func fStruct(a TestStruct) TestStruct { return a }
+
+type TestStructAlias TestStruct
+
+func fStructAlias(a TestStructAlias) TestStructAlias { return a }
+
+func fUint16(a uint16) uint16 { return a }
+
+type TestUint16Alias uint16
+
+func fUint16Alias(a TestUint16Alias) TestUint16Alias { return a }
+
+func fUint32(a uint32) uint32 { return a }
+
+type TestUint32Alias uint32
+
+func fUint32Alias(a TestUint32Alias) TestUint32Alias { return a }
+
+func fUint64(a uint64) uint64 { return a }
+
+type TestUint64Alias uint64
+
+func fUint64Alias(a TestUint64Alias) TestUint64Alias { return a }
+
+func fUint8(a uint8) uint8 { return a }
+
+type TestUint8Alias uint8
+
+func fUint8Alias(a TestUint8Alias) TestUint8Alias { return a }
+
+func fUint(a uint) uint { return a }
+
+type TestUintAlias uint
+
+func fUintAlias(a TestUintAlias) TestUintAlias { return a }
+
+func fUintptr(a uintptr) uintptr { return a }
+
+type TestUintptrAlias uintptr
+
+func fUintptrAlias(a TestUintptrAlias) TestUintptrAlias { return a }
+
+func reportError(property string, err error, t *testing.T) {
+ if err != nil {
+ t.Errorf("%s: %s", property, err)
+ }
+}
+
+func TestCheckEqual(t *testing.T) {
+ reportError("fArray", CheckEqual(fArray, fArray, nil), t)
+ reportError("fArrayAlias", CheckEqual(fArrayAlias, fArrayAlias, nil), t)
+ reportError("fBool", CheckEqual(fBool, fBool, nil), t)
+ reportError("fBoolAlias", CheckEqual(fBoolAlias, fBoolAlias, nil), t)
+ reportError("fFloat32", CheckEqual(fFloat32, fFloat32, nil), t)
+ reportError("fFloat32Alias", CheckEqual(fFloat32Alias, fFloat32Alias, nil), t)
+ reportError("fFloat64", CheckEqual(fFloat64, fFloat64, nil), t)
+ reportError("fFloat64Alias", CheckEqual(fFloat64Alias, fFloat64Alias, nil), t)
+ reportError("fComplex64", CheckEqual(fComplex64, fComplex64, nil), t)
+ reportError("fComplex64Alias", CheckEqual(fComplex64Alias, fComplex64Alias, nil), t)
+ reportError("fComplex128", CheckEqual(fComplex128, fComplex128, nil), t)
+ reportError("fComplex128Alias", CheckEqual(fComplex128Alias, fComplex128Alias, nil), t)
+ reportError("fInt16", CheckEqual(fInt16, fInt16, nil), t)
+ reportError("fInt16Alias", CheckEqual(fInt16Alias, fInt16Alias, nil), t)
+ reportError("fInt32", CheckEqual(fInt32, fInt32, nil), t)
+ reportError("fInt32Alias", CheckEqual(fInt32Alias, fInt32Alias, nil), t)
+ reportError("fInt64", CheckEqual(fInt64, fInt64, nil), t)
+ reportError("fInt64Alias", CheckEqual(fInt64Alias, fInt64Alias, nil), t)
+ reportError("fInt8", CheckEqual(fInt8, fInt8, nil), t)
+ reportError("fInt8Alias", CheckEqual(fInt8Alias, fInt8Alias, nil), t)
+ reportError("fInt", CheckEqual(fInt, fInt, nil), t)
+ reportError("fIntAlias", CheckEqual(fIntAlias, fIntAlias, nil), t)
+ reportError("fInt32", CheckEqual(fInt32, fInt32, nil), t)
+ reportError("fInt32Alias", CheckEqual(fInt32Alias, fInt32Alias, nil), t)
+ reportError("fMap", CheckEqual(fMap, fMap, nil), t)
+ reportError("fMapAlias", CheckEqual(fMapAlias, fMapAlias, nil), t)
+ reportError("fPtr", CheckEqual(fPtr, fPtr, nil), t)
+ reportError("fPtrAlias", CheckEqual(fPtrAlias, fPtrAlias, nil), t)
+ reportError("fSlice", CheckEqual(fSlice, fSlice, nil), t)
+ reportError("fSliceAlias", CheckEqual(fSliceAlias, fSliceAlias, nil), t)
+ reportError("fString", CheckEqual(fString, fString, nil), t)
+ reportError("fStringAlias", CheckEqual(fStringAlias, fStringAlias, nil), t)
+ reportError("fStruct", CheckEqual(fStruct, fStruct, nil), t)
+ reportError("fStructAlias", CheckEqual(fStructAlias, fStructAlias, nil), t)
+ reportError("fUint16", CheckEqual(fUint16, fUint16, nil), t)
+ reportError("fUint16Alias", CheckEqual(fUint16Alias, fUint16Alias, nil), t)
+ reportError("fUint32", CheckEqual(fUint32, fUint32, nil), t)
+ reportError("fUint32Alias", CheckEqual(fUint32Alias, fUint32Alias, nil), t)
+ reportError("fUint64", CheckEqual(fUint64, fUint64, nil), t)
+ reportError("fUint64Alias", CheckEqual(fUint64Alias, fUint64Alias, nil), t)
+ reportError("fUint8", CheckEqual(fUint8, fUint8, nil), t)
+ reportError("fUint8Alias", CheckEqual(fUint8Alias, fUint8Alias, nil), t)
+ reportError("fUint", CheckEqual(fUint, fUint, nil), t)
+ reportError("fUintAlias", CheckEqual(fUintAlias, fUintAlias, nil), t)
+ reportError("fUintptr", CheckEqual(fUintptr, fUintptr, nil), t)
+ reportError("fUintptrAlias", CheckEqual(fUintptrAlias, fUintptrAlias, nil), t)
+}
+
+// This tests that custom Generate methods are used to produce values, by
+// checking that all generated values of type myStruct have x = 42.
+type myStruct struct {
+ x int
+}
+
+func (m myStruct) Generate(r *rand.Rand, _ int) reflect.Value {
+ return reflect.ValueOf(myStruct{x: 42})
+}
+
+func myStructProperty(in myStruct) bool { return in.x == 42 }
+
+func TestCheckProperty(t *testing.T) {
+ reportError("myStructProperty", Check(myStructProperty, nil), t)
+}
+
+func TestFailure(t *testing.T) {
+ f := func(x int) bool { return false }
+ err := Check(f, nil)
+ if err == nil {
+ t.Errorf("Check didn't return an error")
+ }
+ if _, ok := err.(*CheckError); !ok {
+ t.Errorf("Error was not a CheckError: %s", err)
+ }
+
+ err = CheckEqual(fUint, fUint32, nil)
+ if err == nil {
+ t.Errorf("#1 CheckEqual didn't return an error")
+ }
+ if _, ok := err.(SetupError); !ok {
+ t.Errorf("#1 Error was not a SetupError: %s", err)
+ }
+
+ err = CheckEqual(func(x, y int) {}, func(x int) {}, nil)
+ if err == nil {
+ t.Errorf("#2 CheckEqual didn't return an error")
+ }
+ if _, ok := err.(SetupError); !ok {
+ t.Errorf("#2 Error was not a SetupError: %s", err)
+ }
+
+ err = CheckEqual(func(x int) int { return 0 }, func(x int) int32 { return 0 }, nil)
+ if err == nil {
+ t.Errorf("#3 CheckEqual didn't return an error")
+ }
+ if _, ok := err.(SetupError); !ok {
+ t.Errorf("#3 Error was not a SetupError: %s", err)
+ }
+}
+
+// Generating values for recursive data structures used to never terminate.
+// Issues 8818 and 11148.
+func TestRecursive(t *testing.T) {
+ type R struct {
+ Ptr *R
+ SliceP []*R
+ Slice []R
+ Map map[int]R
+ MapP map[int]*R
+ MapR map[*R]*R
+ SliceMap []map[int]R
+ }
+
+ f := func(r R) bool { return true }
+ Check(f, nil)
+}
+
+func TestEmptyStruct(t *testing.T) {
+ f := func(struct{}) bool { return true }
+ Check(f, nil)
+}
+
+type (
+ A struct{ B *B }
+ B struct{ A *A }
+)
+
+func TestMutuallyRecursive(t *testing.T) {
+ f := func(a A) bool { return true }
+ Check(f, nil)
+}
+
+// Some serialization formats (e.g. encoding/pem) cannot distinguish
+// between a nil and an empty map or slice, so avoid generating the
+// zero value for these.
+func TestNonZeroSliceAndMap(t *testing.T) {
+ type Q struct {
+ M map[int]int
+ S []int
+ }
+ f := func(q Q) bool {
+ return q.M != nil && q.S != nil
+ }
+ err := Check(f, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestInt64(t *testing.T) {
+ var lo, hi int64
+ f := func(x int64) bool {
+ if x < lo {
+ lo = x
+ }
+ if x > hi {
+ hi = x
+ }
+ return true
+ }
+ cfg := &Config{MaxCount: 10000}
+ Check(f, cfg)
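+ // The check below requires lo to have gone negative and hi to have reached
+ // at least 1<<62; otherwise the generated values do not look like they span
+ // the full int64 range.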
+ if uint64(lo)>>62 == 0 || uint64(hi)>>62 == 0 {
+ t.Errorf("int64 returned range %#016x,%#016x; does not look like full range", lo, hi)
+ }
+}
diff --git a/src/testing/run_example.go b/src/testing/run_example.go
new file mode 100644
index 0000000..b2c5c3d
--- /dev/null
+++ b/src/testing/run_example.go
@@ -0,0 +1,66 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !js && !wasip1
+
+// TODO(@musiol, @odeke-em): re-unify this entire file back into
+// example.go when js/wasm gets an os.Pipe implementation
+// and no longer needs this separation.
+
+package testing
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+)
+
+func runExample(eg InternalExample) (ok bool) {
+ if chatty.on {
+ fmt.Printf("%s=== RUN %s\n", chatty.prefix(), eg.Name)
+ }
+
+ // Capture stdout.
+ stdout := os.Stdout
+ r, w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ os.Stdout = w
+ outC := make(chan string)
+ go func() {
+ var buf strings.Builder
+ _, err := io.Copy(&buf, r)
+ r.Close()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: copying pipe: %v\n", err)
+ os.Exit(1)
+ }
+ outC <- buf.String()
+ }()
+
+ finished := false
+ start := time.Now()
+
+ // Clean up in a deferred call so we can recover if the example panics.
+ defer func() {
+ timeSpent := time.Since(start)
+
+ // Close pipe, restore stdout, get output.
+ w.Close()
+ os.Stdout = stdout
+ out := <-outC
+
+ err := recover()
+ ok = eg.processRunResult(out, timeSpent, finished, err)
+ }()
+
+ // Run example.
+ eg.F()
+ finished = true
+ return
+}
diff --git a/src/testing/run_example_wasm.go b/src/testing/run_example_wasm.go
new file mode 100644
index 0000000..b815fcd
--- /dev/null
+++ b/src/testing/run_example_wasm.go
@@ -0,0 +1,76 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js || wasip1
+
+package testing
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+)
+
+// TODO(@musiol, @odeke-em): unify this code back into
+// example.go when js/wasm gets an os.Pipe implementation.
+func runExample(eg InternalExample) (ok bool) {
+ if chatty.on {
+ fmt.Printf("%s=== RUN %s\n", chatty.prefix(), eg.Name)
+ }
+
+ // Capture stdout to a temporary file. We're not using
+ // os.Pipe because it is not supported on js/wasm.
+ stdout := os.Stdout
+ f := createTempFile(eg.Name)
+ os.Stdout = f
+ finished := false
+ start := time.Now()
+
+ // Clean up in a deferred call so we can recover if the example panics.
+ defer func() {
+ timeSpent := time.Since(start)
+
+ // Restore stdout, get output and remove temporary file.
+ os.Stdout = stdout
+ var buf strings.Builder
+ _, seekErr := f.Seek(0, io.SeekStart)
+ _, readErr := io.Copy(&buf, f)
+ out := buf.String()
+ f.Close()
+ os.Remove(f.Name())
+ if seekErr != nil {
+ fmt.Fprintf(os.Stderr, "testing: seek temp file: %v\n", seekErr)
+ os.Exit(1)
+ }
+ if readErr != nil {
+ fmt.Fprintf(os.Stderr, "testing: read temp file: %v\n", readErr)
+ os.Exit(1)
+ }
+
+ err := recover()
+ ok = eg.processRunResult(out, timeSpent, finished, err)
+ }()
+
+ // Run example.
+ eg.F()
+ finished = true
+ return
+}
+
+func createTempFile(exampleName string) *os.File {
+ for i := 0; ; i++ {
+ name := fmt.Sprintf("%s/go-example-stdout-%s-%d.txt", os.TempDir(), exampleName, i)
+ f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if err != nil {
+ if os.IsExist(err) {
+ continue
+ }
+ fmt.Fprintf(os.Stderr, "testing: open temp file: %v\n", err)
+ os.Exit(1)
+ }
+ return f
+ }
+}
diff --git a/src/testing/slogtest/example_test.go b/src/testing/slogtest/example_test.go
new file mode 100644
index 0000000..0517a4b
--- /dev/null
+++ b/src/testing/slogtest/example_test.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slogtest_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "log"
+ "log/slog"
+ "testing/slogtest"
+)
+
+// This example demonstrates one technique for testing a handler with this
+// package. The handler is given a [bytes.Buffer] to write to, and each line
+// of the resulting output is parsed.
+// For JSON output, [encoding/json.Unmarshal] produces a result in the desired
+// format when given a pointer to a map[string]any.
+func Example_parsing() {
+ var buf bytes.Buffer
+ h := slog.NewJSONHandler(&buf, nil)
+
+ results := func() []map[string]any {
+ var ms []map[string]any
+ for _, line := range bytes.Split(buf.Bytes(), []byte{'\n'}) {
+ if len(line) == 0 {
+ continue
+ }
+ var m map[string]any
+ if err := json.Unmarshal(line, &m); err != nil {
+ panic(err) // In a real test, use t.Fatal.
+ }
+ ms = append(ms, m)
+ }
+ return ms
+ }
+ err := slogtest.TestHandler(h, results)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Output:
+}
diff --git a/src/testing/slogtest/run_test.go b/src/testing/slogtest/run_test.go
new file mode 100644
index 0000000..c82da10
--- /dev/null
+++ b/src/testing/slogtest/run_test.go
@@ -0,0 +1,31 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slogtest_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "log/slog"
+ "testing"
+ "testing/slogtest"
+)
+
+func TestRun(t *testing.T) {
+ var buf bytes.Buffer
+
+ newHandler := func(*testing.T) slog.Handler {
+ buf.Reset()
+ return slog.NewJSONHandler(&buf, nil)
+ }
+ result := func(t *testing.T) map[string]any {
+ m := map[string]any{}
+ if err := json.Unmarshal(buf.Bytes(), &m); err != nil {
+ t.Fatal(err)
+ }
+ return m
+ }
+
+ slogtest.Run(t, newHandler, result)
+}
diff --git a/src/testing/slogtest/slogtest.go b/src/testing/slogtest/slogtest.go
new file mode 100644
index 0000000..5c3aced
--- /dev/null
+++ b/src/testing/slogtest/slogtest.go
@@ -0,0 +1,375 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slogtest implements support for testing implementations of log/slog.Handler.
+package slogtest
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "reflect"
+ "runtime"
+ "testing"
+ "time"
+)
+
+type testCase struct {
+ // Subtest name.
+ name string
+ // If non-empty, explanation describes the violated constraint.
+ explanation string
+ // f executes a single log event using its argument logger.
+ // So that mkdescs.sh can generate the right description,
+ // the body of f must appear on a single line whose first
+ // non-whitespace characters are "l.".
+ f func(*slog.Logger)
+ // If mod is not nil, it is called to modify the Record
+ // generated by the Logger before it is passed to the Handler.
+ mod func(*slog.Record)
+ // checks is a list of checks to run on the result.
+ checks []check
+}
+
+var cases = []testCase{
+ {
+ name: "built-ins",
+ explanation: withSource("this test expects slog.TimeKey, slog.LevelKey and slog.MessageKey"),
+ f: func(l *slog.Logger) {
+ l.Info("message")
+ },
+ checks: []check{
+ hasKey(slog.TimeKey),
+ hasKey(slog.LevelKey),
+ hasAttr(slog.MessageKey, "message"),
+ },
+ },
+ {
+ name: "attrs",
+ explanation: withSource("a Handler should output attributes passed to the logging function"),
+ f: func(l *slog.Logger) {
+ l.Info("message", "k", "v")
+ },
+ checks: []check{
+ hasAttr("k", "v"),
+ },
+ },
+ {
+ name: "empty-attr",
+ explanation: withSource("a Handler should ignore an empty Attr"),
+ f: func(l *slog.Logger) {
+ l.Info("msg", "a", "b", "", nil, "c", "d")
+ },
+ checks: []check{
+ hasAttr("a", "b"),
+ missingKey(""),
+ hasAttr("c", "d"),
+ },
+ },
+ {
+ name: "zero-time",
+ explanation: withSource("a Handler should ignore a zero Record.Time"),
+ f: func(l *slog.Logger) {
+ l.Info("msg", "k", "v")
+ },
+ mod: func(r *slog.Record) { r.Time = time.Time{} },
+ checks: []check{
+ missingKey(slog.TimeKey),
+ },
+ },
+ {
+ name: "WithAttrs",
+ explanation: withSource("a Handler should include the attributes from the WithAttrs method"),
+ f: func(l *slog.Logger) {
+ l.With("a", "b").Info("msg", "k", "v")
+ },
+ checks: []check{
+ hasAttr("a", "b"),
+ hasAttr("k", "v"),
+ },
+ },
+ {
+ name: "groups",
+ explanation: withSource("a Handler should handle Group attributes"),
+ f: func(l *slog.Logger) {
+ l.Info("msg", "a", "b", slog.Group("G", slog.String("c", "d")), "e", "f")
+ },
+ checks: []check{
+ hasAttr("a", "b"),
+ inGroup("G", hasAttr("c", "d")),
+ hasAttr("e", "f"),
+ },
+ },
+ {
+ name: "empty-group",
+ explanation: withSource("a Handler should ignore an empty group"),
+ f: func(l *slog.Logger) {
+ l.Info("msg", "a", "b", slog.Group("G"), "e", "f")
+ },
+ checks: []check{
+ hasAttr("a", "b"),
+ missingKey("G"),
+ hasAttr("e", "f"),
+ },
+ },
+ {
+ name: "inline-group",
+ explanation: withSource("a Handler should inline the Attrs of a group with an empty key"),
+ f: func(l *slog.Logger) {
+ l.Info("msg", "a", "b", slog.Group("", slog.String("c", "d")), "e", "f")
+
+ },
+ checks: []check{
+ hasAttr("a", "b"),
+ hasAttr("c", "d"),
+ hasAttr("e", "f"),
+ },
+ },
+ {
+ name: "WithGroup",
+ explanation: withSource("a Handler should handle the WithGroup method"),
+ f: func(l *slog.Logger) {
+ l.WithGroup("G").Info("msg", "a", "b")
+ },
+ checks: []check{
+ hasKey(slog.TimeKey),
+ hasKey(slog.LevelKey),
+ hasAttr(slog.MessageKey, "msg"),
+ missingKey("a"),
+ inGroup("G", hasAttr("a", "b")),
+ },
+ },
+ {
+ name: "multi-With",
+ explanation: withSource("a Handler should handle multiple WithGroup and WithAttr calls"),
+ f: func(l *slog.Logger) {
+ l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg", "e", "f")
+ },
+ checks: []check{
+ hasKey(slog.TimeKey),
+ hasKey(slog.LevelKey),
+ hasAttr(slog.MessageKey, "msg"),
+ hasAttr("a", "b"),
+ inGroup("G", hasAttr("c", "d")),
+ inGroup("G", inGroup("H", hasAttr("e", "f"))),
+ },
+ },
+ {
+ name: "empty-group-record",
+ explanation: withSource("a Handler should not output groups if there are no attributes"),
+ f: func(l *slog.Logger) {
+ l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg")
+ },
+ checks: []check{
+ hasKey(slog.TimeKey),
+ hasKey(slog.LevelKey),
+ hasAttr(slog.MessageKey, "msg"),
+ hasAttr("a", "b"),
+ inGroup("G", hasAttr("c", "d")),
+ inGroup("G", missingKey("H")),
+ },
+ },
+ {
+ name: "resolve",
+ explanation: withSource("a Handler should call Resolve on attribute values"),
+ f: func(l *slog.Logger) {
+ l.Info("msg", "k", &replace{"replaced"})
+ },
+ checks: []check{hasAttr("k", "replaced")},
+ },
+ {
+ name: "resolve-groups",
+ explanation: withSource("a Handler should call Resolve on attribute values in groups"),
+ f: func(l *slog.Logger) {
+ l.Info("msg",
+ slog.Group("G",
+ slog.String("a", "v1"),
+ slog.Any("b", &replace{"v2"})))
+ },
+ checks: []check{
+ inGroup("G", hasAttr("a", "v1")),
+ inGroup("G", hasAttr("b", "v2")),
+ },
+ },
+ {
+ name: "resolve-WithAttrs",
+ explanation: withSource("a Handler should call Resolve on attribute values from WithAttrs"),
+ f: func(l *slog.Logger) {
+ l = l.With("k", &replace{"replaced"})
+ l.Info("msg")
+ },
+ checks: []check{hasAttr("k", "replaced")},
+ },
+ {
+ name: "resolve-WithAttrs-groups",
+ explanation: withSource("a Handler should call Resolve on attribute values in groups from WithAttrs"),
+ f: func(l *slog.Logger) {
+ l = l.With(slog.Group("G",
+ slog.String("a", "v1"),
+ slog.Any("b", &replace{"v2"})))
+ l.Info("msg")
+ },
+ checks: []check{
+ inGroup("G", hasAttr("a", "v1")),
+ inGroup("G", hasAttr("b", "v2")),
+ },
+ },
+ {
+ name: "empty-PC",
+ explanation: withSource("a Handler should not output SourceKey if the PC is zero"),
+ f: func(l *slog.Logger) {
+ l.Info("message")
+ },
+ mod: func(r *slog.Record) { r.PC = 0 },
+ checks: []check{
+ missingKey(slog.SourceKey),
+ },
+ },
+}
+
+// TestHandler tests a [slog.Handler].
+// If TestHandler finds any misbehaviors, it returns an error for each,
+// combined into a single error with [errors.Join].
+//
+// TestHandler installs the given Handler in a [slog.Logger] and
+// makes several calls to the Logger's output methods.
+// The Handler should be enabled for levels Info and above.
+//
+// The results function is invoked after all such calls.
+// It should return a slice of map[string]any, one for each call to a Logger output method.
+// The keys and values of the map should correspond to the keys and values of the Handler's
+// output. Each group in the output should be represented as its own nested map[string]any.
+// The standard keys [slog.TimeKey], [slog.LevelKey] and [slog.MessageKey] should be used.
+//
+// If the Handler outputs JSON, then calling [encoding/json.Unmarshal] with a `map[string]any`
+// will create the right data structure.
+//
+// If a Handler intentionally drops an attribute that is checked by a test,
+// then the results function should check for its absence and add it to the map it returns.
+func TestHandler(h slog.Handler, results func() []map[string]any) error {
+ // Run the handler on the test cases.
+ for _, c := range cases {
+ ht := h
+ if c.mod != nil {
+ ht = &wrapper{h, c.mod}
+ }
+ l := slog.New(ht)
+ c.f(l)
+ }
+
+ // Collect and check the results.
+ var errs []error
+ res := results()
+ if g, w := len(res), len(cases); g != w {
+ return fmt.Errorf("got %d results, want %d", g, w)
+ }
+ for i, got := range results() {
+ c := cases[i]
+ for _, check := range c.checks {
+ if problem := check(got); problem != "" {
+ errs = append(errs, fmt.Errorf("%s: %s", problem, c.explanation))
+ }
+ }
+ }
+ return errors.Join(errs...)
+}
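+
+// As an illustration of the shape described above (one plausible handler's
+// output, assumed for the example rather than required): for the "WithGroup"
+// test case, a JSON handler's record might decode to
+//
+//	map[string]any{
+//		"time":  "2023-01-01T00:00:00Z",
+//		"level": "INFO",
+//		"msg":   "msg",
+//		"G":     map[string]any{"a": "b"},
+//	}
+//
+// with the group represented as its own nested map[string]any.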
+
+// Run exercises a [slog.Handler] on the same test cases as [TestHandler], but
+// runs each case in a subtest. For each test case, it first calls newHandler to
+// get an instance of the handler under test, then runs the test case, then
+// calls result to get the result. If the test case fails, it calls t.Error.
+func Run(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any) {
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ h := newHandler(t)
+ if c.mod != nil {
+ h = &wrapper{h, c.mod}
+ }
+ l := slog.New(h)
+ c.f(l)
+ got := result(t)
+ for _, check := range c.checks {
+ if p := check(got); p != "" {
+ t.Errorf("%s: %s", p, c.explanation)
+ }
+ }
+ })
+ }
+}
+
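+// A check inspects one decoded log record and returns a non-empty description
+// of the problem if the record violates the constraint under test, or "" if
+// the record is acceptable.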
+type check func(map[string]any) string
+
+func hasKey(key string) check {
+ return func(m map[string]any) string {
+ if _, ok := m[key]; !ok {
+ return fmt.Sprintf("missing key %q", key)
+ }
+ return ""
+ }
+}
+
+func missingKey(key string) check {
+ return func(m map[string]any) string {
+ if _, ok := m[key]; ok {
+ return fmt.Sprintf("unexpected key %q", key)
+ }
+ return ""
+ }
+}
+
+func hasAttr(key string, wantVal any) check {
+ return func(m map[string]any) string {
+ if s := hasKey(key)(m); s != "" {
+ return s
+ }
+ gotVal := m[key]
+ if !reflect.DeepEqual(gotVal, wantVal) {
+ return fmt.Sprintf("%q: got %#v, want %#v", key, gotVal, wantVal)
+ }
+ return ""
+ }
+}
+
+func inGroup(name string, c check) check {
+ return func(m map[string]any) string {
+ v, ok := m[name]
+ if !ok {
+ return fmt.Sprintf("missing group %q", name)
+ }
+ g, ok := v.(map[string]any)
+ if !ok {
+ return fmt.Sprintf("value for group %q is not map[string]any", name)
+ }
+ return c(g)
+ }
+}
+
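+// wrapper runs mod on each Record before passing it to the embedded Handler.
+// It implements the optional per-test-case mod hook used by TestHandler and Run.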
+type wrapper struct {
+ slog.Handler
+ mod func(*slog.Record)
+}
+
+func (h *wrapper) Handle(ctx context.Context, r slog.Record) error {
+ h.mod(&r)
+ return h.Handler.Handle(ctx, r)
+}
+
+func withSource(s string) string {
+ _, file, line, ok := runtime.Caller(1)
+ if !ok {
+ panic("runtime.Caller failed")
+ }
+ return fmt.Sprintf("%s (%s:%d)", s, file, line)
+}
+
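+// replace is a slog.LogValuer whose LogValue resolves to the wrapped value.
+// The "resolve" test cases above use it to verify that handlers call Resolve
+// on attribute values.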
+type replace struct {
+ v any
+}
+
+func (r *replace) LogValue() slog.Value { return slog.AnyValue(r.v) }
+
+func (r *replace) String() string {
+ return fmt.Sprintf("<replace(%v)>", r.v)
+}
diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go
new file mode 100644
index 0000000..1c23d05
--- /dev/null
+++ b/src/testing/sub_test.go
@@ -0,0 +1,992 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+func init() {
+ // Make benchmark tests run 10x faster.
+ benchTime.d = 100 * time.Millisecond
+}
+
+func TestTestContext(t *T) {
+ const (
+ add1 = 0
+ done = 1
+ )
+ // call describes a single call applied to the context and the state the
+ // context is expected to be in after that call has been applied.
+ type call struct {
+ typ int // run or done
+ // result from applying the call
+ running int
+ waiting int
+ started bool
+ }
+ testCases := []struct {
+ max int
+ run []call
+ }{{
+ max: 1,
+ run: []call{
+ {typ: add1, running: 1, waiting: 0, started: true},
+ {typ: done, running: 0, waiting: 0, started: false},
+ },
+ }, {
+ max: 1,
+ run: []call{
+ {typ: add1, running: 1, waiting: 0, started: true},
+ {typ: add1, running: 1, waiting: 1, started: false},
+ {typ: done, running: 1, waiting: 0, started: true},
+ {typ: done, running: 0, waiting: 0, started: false},
+ {typ: add1, running: 1, waiting: 0, started: true},
+ },
+ }, {
+ max: 3,
+ run: []call{
+ {typ: add1, running: 1, waiting: 0, started: true},
+ {typ: add1, running: 2, waiting: 0, started: true},
+ {typ: add1, running: 3, waiting: 0, started: true},
+ {typ: add1, running: 3, waiting: 1, started: false},
+ {typ: add1, running: 3, waiting: 2, started: false},
+ {typ: add1, running: 3, waiting: 3, started: false},
+ {typ: done, running: 3, waiting: 2, started: true},
+ {typ: add1, running: 3, waiting: 3, started: false},
+ {typ: done, running: 3, waiting: 2, started: true},
+ {typ: done, running: 3, waiting: 1, started: true},
+ {typ: done, running: 3, waiting: 0, started: true},
+ {typ: done, running: 2, waiting: 0, started: false},
+ {typ: done, running: 1, waiting: 0, started: false},
+ {typ: done, running: 0, waiting: 0, started: false},
+ },
+ }}
+ for i, tc := range testCases {
+ ctx := &testContext{
+ startParallel: make(chan bool),
+ maxParallel: tc.max,
+ }
+ for j, call := range tc.run {
+ doCall := func(f func()) chan bool {
+ done := make(chan bool)
+ go func() {
+ f()
+ done <- true
+ }()
+ return done
+ }
+ started := false
+ switch call.typ {
+ case add1:
+ signal := doCall(ctx.waitParallel)
+ select {
+ case <-signal:
+ started = true
+ case ctx.startParallel <- true:
+ <-signal
+ }
+ case done:
+ signal := doCall(ctx.release)
+ select {
+ case <-signal:
+ case <-ctx.startParallel:
+ started = true
+ <-signal
+ }
+ }
+ if started != call.started {
+ t.Errorf("%d:%d:started: got %v; want %v", i, j, started, call.started)
+ }
+ if ctx.running != call.running {
+ t.Errorf("%d:%d:running: got %v; want %v", i, j, ctx.running, call.running)
+ }
+ if ctx.numWaiting != call.waiting {
+ t.Errorf("%d:%d:waiting: got %v; want %v", i, j, ctx.numWaiting, call.waiting)
+ }
+ }
+ }
+}
+
+func TestTRun(t *T) {
+ realTest := t
+ testCases := []struct {
+ desc string
+ ok bool
+ maxPar int
+ chatty bool
+ json bool
+ output string
+ f func(*T)
+ }{{
+ desc: "failnow skips future sequential and parallel tests at same level",
+ ok: false,
+ maxPar: 1,
+ output: `
+--- FAIL: failnow skips future sequential and parallel tests at same level (N.NNs)
+ --- FAIL: failnow skips future sequential and parallel tests at same level/#00 (N.NNs)
+ `,
+ f: func(t *T) {
+ ranSeq := false
+ ranPar := false
+ t.Run("", func(t *T) {
+ t.Run("par", func(t *T) {
+ t.Parallel()
+ ranPar = true
+ })
+ t.Run("seq", func(t *T) {
+ ranSeq = true
+ })
+ t.FailNow()
+ t.Run("seq", func(t *T) {
+ realTest.Error("test must be skipped")
+ })
+ t.Run("par", func(t *T) {
+ t.Parallel()
+ realTest.Error("test must be skipped.")
+ })
+ })
+ if !ranPar {
+ realTest.Error("parallel test was not run")
+ }
+ if !ranSeq {
+ realTest.Error("sequential test was not run")
+ }
+ },
+ }, {
+ desc: "failure in parallel test propagates upwards",
+ ok: false,
+ maxPar: 1,
+ output: `
+--- FAIL: failure in parallel test propagates upwards (N.NNs)
+ --- FAIL: failure in parallel test propagates upwards/#00 (N.NNs)
+ --- FAIL: failure in parallel test propagates upwards/#00/par (N.NNs)
+ `,
+ f: func(t *T) {
+ t.Run("", func(t *T) {
+ t.Parallel()
+ t.Run("par", func(t *T) {
+ t.Parallel()
+ t.Fail()
+ })
+ })
+ },
+ }, {
+ desc: "skipping without message, chatty",
+ ok: true,
+ chatty: true,
+ output: `
+=== RUN skipping without message, chatty
+--- SKIP: skipping without message, chatty (N.NNs)`,
+ f: func(t *T) { t.SkipNow() },
+ }, {
+ desc: "chatty with recursion",
+ ok: true,
+ chatty: true,
+ output: `
+=== RUN chatty with recursion
+=== RUN chatty with recursion/#00
+=== RUN chatty with recursion/#00/#00
+--- PASS: chatty with recursion (N.NNs)
+ --- PASS: chatty with recursion/#00 (N.NNs)
+ --- PASS: chatty with recursion/#00/#00 (N.NNs)`,
+ f: func(t *T) {
+ t.Run("", func(t *T) {
+ t.Run("", func(t *T) {})
+ })
+ },
+ }, {
+ desc: "chatty with recursion and json",
+ ok: false,
+ chatty: true,
+ json: true,
+ output: `
+^V=== RUN chatty with recursion and json
+^V=== RUN chatty with recursion and json/#00
+^V=== RUN chatty with recursion and json/#00/#00
+^V--- PASS: chatty with recursion and json/#00/#00 (N.NNs)
+^V=== NAME chatty with recursion and json/#00
+^V=== RUN chatty with recursion and json/#00/#01
+ sub_test.go:NNN: skip
+^V--- SKIP: chatty with recursion and json/#00/#01 (N.NNs)
+^V=== NAME chatty with recursion and json/#00
+^V=== RUN chatty with recursion and json/#00/#02
+ sub_test.go:NNN: fail
+^V--- FAIL: chatty with recursion and json/#00/#02 (N.NNs)
+^V=== NAME chatty with recursion and json/#00
+^V--- FAIL: chatty with recursion and json/#00 (N.NNs)
+^V=== NAME chatty with recursion and json
+^V--- FAIL: chatty with recursion and json (N.NNs)
+^V=== NAME `,
+ f: func(t *T) {
+ t.Run("", func(t *T) {
+ t.Run("", func(t *T) {})
+ t.Run("", func(t *T) { t.Skip("skip") })
+ t.Run("", func(t *T) { t.Fatal("fail") })
+ })
+ },
+ }, {
+ desc: "skipping without message, not chatty",
+ ok: true,
+ f: func(t *T) { t.SkipNow() },
+ }, {
+ desc: "skipping after error",
+ output: `
+--- FAIL: skipping after error (N.NNs)
+ sub_test.go:NNN: an error
+ sub_test.go:NNN: skipped`,
+ f: func(t *T) {
+ t.Error("an error")
+ t.Skip("skipped")
+ },
+ }, {
+ desc: "use Run to locally synchronize parallelism",
+ ok: true,
+ maxPar: 1,
+ f: func(t *T) {
+ var count uint32
+ t.Run("waitGroup", func(t *T) {
+ for i := 0; i < 4; i++ {
+ t.Run("par", func(t *T) {
+ t.Parallel()
+ atomic.AddUint32(&count, 1)
+ })
+ }
+ })
+ if count != 4 {
+ t.Errorf("count was %d; want 4", count)
+ }
+ },
+ }, {
+ desc: "alternate sequential and parallel",
+ // Sequential tests should partake in the counting of running threads.
+ // Otherwise, if one runs parallel subtests inside sequential tests that are
+ // themselves subtests of parallel tests, the counts can become skewed.
+ ok: true,
+ maxPar: 1,
+ f: func(t *T) {
+ t.Run("a", func(t *T) {
+ t.Parallel()
+ t.Run("b", func(t *T) {
+ // Sequential: ensure running count is decremented.
+ t.Run("c", func(t *T) {
+ t.Parallel()
+ })
+
+ })
+ })
+ },
+ }, {
+ desc: "alternate sequential and parallel 2",
+ // Sequential tests should partake in the counting of running threads.
+ // Otherwise, if one runs parallel subtests inside sequential tests that are
+ // themselves subtests of parallel tests, the counts can become skewed.
+ ok: true,
+ maxPar: 2,
+ f: func(t *T) {
+ for i := 0; i < 2; i++ {
+ t.Run("a", func(t *T) {
+ t.Parallel()
+ time.Sleep(time.Nanosecond)
+ for i := 0; i < 2; i++ {
+ t.Run("b", func(t *T) {
+ time.Sleep(time.Nanosecond)
+ for i := 0; i < 2; i++ {
+ t.Run("c", func(t *T) {
+ t.Parallel()
+ time.Sleep(time.Nanosecond)
+ })
+ }
+
+ })
+ }
+ })
+ }
+ },
+ }, {
+ desc: "stress test",
+ ok: true,
+ maxPar: 4,
+ f: func(t *T) {
+ t.Parallel()
+ for i := 0; i < 12; i++ {
+ t.Run("a", func(t *T) {
+ t.Parallel()
+ time.Sleep(time.Nanosecond)
+ for i := 0; i < 12; i++ {
+ t.Run("b", func(t *T) {
+ time.Sleep(time.Nanosecond)
+ for i := 0; i < 12; i++ {
+ t.Run("c", func(t *T) {
+ t.Parallel()
+ time.Sleep(time.Nanosecond)
+ t.Run("d1", func(t *T) {})
+ t.Run("d2", func(t *T) {})
+ t.Run("d3", func(t *T) {})
+ t.Run("d4", func(t *T) {})
+ })
+ }
+ })
+ }
+ })
+ }
+ },
+ }, {
+ desc: "skip output",
+ ok: true,
+ maxPar: 4,
+ f: func(t *T) {
+ t.Skip()
+ },
+ }, {
+ desc: "subtest calls error on parent",
+ ok: false,
+ output: `
+--- FAIL: subtest calls error on parent (N.NNs)
+ sub_test.go:NNN: first this
+ sub_test.go:NNN: and now this!
+ sub_test.go:NNN: oh, and this too`,
+ maxPar: 1,
+ f: func(t *T) {
+ t.Errorf("first this")
+ outer := t
+ t.Run("", func(t *T) {
+ outer.Errorf("and now this!")
+ })
+ t.Errorf("oh, and this too")
+ },
+ }, {
+ desc: "subtest calls fatal on parent",
+ ok: false,
+ output: `
+--- FAIL: subtest calls fatal on parent (N.NNs)
+ sub_test.go:NNN: first this
+ sub_test.go:NNN: and now this!
+ --- FAIL: subtest calls fatal on parent/#00 (N.NNs)
+ testing.go:NNN: test executed panic(nil) or runtime.Goexit: subtest may have called FailNow on a parent test`,
+ maxPar: 1,
+ f: func(t *T) {
+ outer := t
+ t.Errorf("first this")
+ t.Run("", func(t *T) {
+ outer.Fatalf("and now this!")
+ })
+ t.Errorf("Should not reach here.")
+ },
+ }, {
+ desc: "subtest calls error on ancestor",
+ ok: false,
+ output: `
+--- FAIL: subtest calls error on ancestor (N.NNs)
+ sub_test.go:NNN: Report to ancestor
+ --- FAIL: subtest calls error on ancestor/#00 (N.NNs)
+ sub_test.go:NNN: Still do this
+ sub_test.go:NNN: Also do this`,
+ maxPar: 1,
+ f: func(t *T) {
+ outer := t
+ t.Run("", func(t *T) {
+ t.Run("", func(t *T) {
+ outer.Errorf("Report to ancestor")
+ })
+ t.Errorf("Still do this")
+ })
+ t.Errorf("Also do this")
+ },
+ }, {
+ desc: "subtest calls fatal on ancestor",
+ ok: false,
+ output: `
+--- FAIL: subtest calls fatal on ancestor (N.NNs)
+ sub_test.go:NNN: Nope`,
+ maxPar: 1,
+ f: func(t *T) {
+ outer := t
+ t.Run("", func(t *T) {
+ for i := 0; i < 4; i++ {
+ t.Run("", func(t *T) {
+ outer.Fatalf("Nope")
+ })
+ t.Errorf("Don't do this")
+ }
+ t.Errorf("And neither do this")
+ })
+ t.Errorf("Nor this")
+ },
+ }, {
+ desc: "panic on goroutine fail after test exit",
+ ok: false,
+ maxPar: 4,
+ f: func(t *T) {
+ ch := make(chan bool)
+ t.Run("", func(t *T) {
+ go func() {
+ <-ch
+ defer func() {
+ if r := recover(); r == nil {
+ realTest.Errorf("expected panic")
+ }
+ ch <- true
+ }()
+ t.Errorf("failed after success")
+ }()
+ })
+ ch <- true
+ <-ch
+ },
+ }, {
+ desc: "log in finished sub test logs to parent",
+ ok: false,
+ output: `
+ --- FAIL: log in finished sub test logs to parent (N.NNs)
+ sub_test.go:NNN: message2
+ sub_test.go:NNN: message1
+ sub_test.go:NNN: error`,
+ maxPar: 1,
+ f: func(t *T) {
+ ch := make(chan bool)
+ t.Run("sub", func(t2 *T) {
+ go func() {
+ <-ch
+ t2.Log("message1")
+ ch <- true
+ }()
+ })
+ t.Log("message2")
+ ch <- true
+ <-ch
+ t.Errorf("error")
+ },
+ }, {
+ // A chatty test should always log with fmt.Print, even if the
+ // parent test has completed.
+ desc: "log in finished sub test with chatty",
+ ok: false,
+ chatty: true,
+ output: `
+ --- FAIL: log in finished sub test with chatty (N.NNs)`,
+ maxPar: 1,
+ f: func(t *T) {
+ ch := make(chan bool)
+ t.Run("sub", func(t2 *T) {
+ go func() {
+ <-ch
+ t2.Log("message1")
+ ch <- true
+ }()
+ })
+ t.Log("message2")
+ ch <- true
+ <-ch
+ t.Errorf("error")
+ },
+ }, {
+ // If a subtest panics we should run cleanups.
+ desc: "cleanup when subtest panics",
+ ok: false,
+ chatty: false,
+ output: `
+--- FAIL: cleanup when subtest panics (N.NNs)
+ --- FAIL: cleanup when subtest panics/sub (N.NNs)
+ sub_test.go:NNN: running cleanup`,
+ f: func(t *T) {
+ t.Cleanup(func() { t.Log("running cleanup") })
+ t.Run("sub", func(t2 *T) {
+ t2.FailNow()
+ })
+ },
+ }}
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *T) {
+ ctx := newTestContext(tc.maxPar, allMatcher())
+ buf := &strings.Builder{}
+ root := &T{
+ common: common{
+ signal: make(chan bool),
+ barrier: make(chan bool),
+ name: "",
+ w: buf,
+ },
+ context: ctx,
+ }
+ if tc.chatty {
+ root.chatty = newChattyPrinter(root.w)
+ root.chatty.json = tc.json
+ }
+ ok := root.Run(tc.desc, tc.f)
+ ctx.release()
+
+ if ok != tc.ok {
+ t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, tc.ok)
+ }
+ if ok != !root.Failed() {
+ t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed())
+ }
+ if ctx.running != 0 || ctx.numWaiting != 0 {
+ t.Errorf("%s:running and waiting non-zero: got %d and %d", tc.desc, ctx.running, ctx.numWaiting)
+ }
+ got := strings.TrimSpace(buf.String())
+ want := strings.TrimSpace(tc.output)
+ re := makeRegexp(want)
+ if ok, err := regexp.MatchString(re, got); !ok || err != nil {
+ t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want)
+ }
+ })
+ }
+}
+
+func TestBRun(t *T) {
+ work := func(b *B) {
+ for i := 0; i < b.N; i++ {
+ time.Sleep(time.Nanosecond)
+ }
+ }
+ testCases := []struct {
+ desc string
+ failed bool
+ chatty bool
+ output string
+ f func(*B)
+ }{{
+ desc: "simulate sequential run of subbenchmarks.",
+ f: func(b *B) {
+ b.Run("", func(b *B) { work(b) })
+ time1 := b.result.NsPerOp()
+ b.Run("", func(b *B) { work(b) })
+ time2 := b.result.NsPerOp()
+ if time1 >= time2 {
+ t.Errorf("no time spent in benchmark t1 >= t2 (%d >= %d)", time1, time2)
+ }
+ },
+ }, {
+ desc: "bytes set by all benchmarks",
+ f: func(b *B) {
+ b.Run("", func(b *B) { b.SetBytes(10); work(b) })
+ b.Run("", func(b *B) { b.SetBytes(10); work(b) })
+ if b.result.Bytes != 20 {
+ t.Errorf("bytes: got: %d; want 20", b.result.Bytes)
+ }
+ },
+ }, {
+ desc: "bytes set by some benchmarks",
+ // In this case the bytes result is meaningless, so it must be 0.
+ f: func(b *B) {
+ b.Run("", func(b *B) { b.SetBytes(10); work(b) })
+ b.Run("", func(b *B) { work(b) })
+ b.Run("", func(b *B) { b.SetBytes(10); work(b) })
+ if b.result.Bytes != 0 {
+ t.Errorf("bytes: got: %d; want 0", b.result.Bytes)
+ }
+ },
+ }, {
+ desc: "failure carried over to root",
+ failed: true,
+ output: "--- FAIL: root",
+ f: func(b *B) { b.Fail() },
+ }, {
+ desc: "skipping without message, chatty",
+ chatty: true,
+ output: "--- SKIP: root",
+ f: func(b *B) { b.SkipNow() },
+ }, {
+ desc: "chatty with recursion",
+ chatty: true,
+ f: func(b *B) {
+ b.Run("", func(b *B) {
+ b.Run("", func(b *B) {})
+ })
+ },
+ }, {
+ desc: "skipping without message, not chatty",
+ f: func(b *B) { b.SkipNow() },
+ }, {
+ desc: "skipping after error",
+ failed: true,
+ output: `
+--- FAIL: root
+ sub_test.go:NNN: an error
+ sub_test.go:NNN: skipped`,
+ f: func(b *B) {
+ b.Error("an error")
+ b.Skip("skipped")
+ },
+ }, {
+ desc: "memory allocation",
+ f: func(b *B) {
+ const bufSize = 256
+ alloc := func(b *B) {
+ var buf [bufSize]byte
+ for i := 0; i < b.N; i++ {
+ _ = append([]byte(nil), buf[:]...)
+ }
+ }
+ b.Run("", func(b *B) {
+ alloc(b)
+ b.ReportAllocs()
+ })
+ b.Run("", func(b *B) {
+ alloc(b)
+ b.ReportAllocs()
+ })
+ // runtime.MemStats sometimes reports more allocations than the
+ // benchmark is responsible for. Luckily the point of this test is
+ // to ensure that the results are not underreported, so we can
+ // simply verify the lower bound.
+ if got := b.result.MemAllocs; got < 2 {
+ t.Errorf("MemAllocs was %v; want 2", got)
+ }
+ if got := b.result.MemBytes; got < 2*bufSize {
+ t.Errorf("MemBytes was %v; want %v", got, 2*bufSize)
+ }
+ },
+ }, {
+ desc: "cleanup is called",
+ f: func(b *B) {
+ var calls, cleanups, innerCalls, innerCleanups int
+ b.Run("", func(b *B) {
+ calls++
+ b.Cleanup(func() {
+ cleanups++
+ })
+ b.Run("", func(b *B) {
+ b.Cleanup(func() {
+ innerCleanups++
+ })
+ innerCalls++
+ })
+ work(b)
+ })
+ if calls == 0 || calls != cleanups {
+ t.Errorf("mismatched cleanups; got %d want %d", cleanups, calls)
+ }
+ if innerCalls == 0 || innerCalls != innerCleanups {
+ t.Errorf("mismatched cleanups; got %d want %d", cleanups, calls)
+ }
+ },
+ }, {
+ desc: "cleanup is called on failure",
+ failed: true,
+ f: func(b *B) {
+ var calls, cleanups int
+ b.Run("", func(b *B) {
+ calls++
+ b.Cleanup(func() {
+ cleanups++
+ })
+ b.Fatalf("failure")
+ })
+ if calls == 0 || calls != cleanups {
+ t.Errorf("mismatched cleanups; got %d want %d", cleanups, calls)
+ }
+ },
+ }}
+ hideStdoutForTesting = true
+ defer func() {
+ hideStdoutForTesting = false
+ }()
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *T) {
+ var ok bool
+ buf := &strings.Builder{}
+ // This is almost like the Benchmark function, except that we override
+ // the benchtime and catch the failure result of the subbenchmark.
+ root := &B{
+ common: common{
+ signal: make(chan bool),
+ name: "root",
+ w: buf,
+ },
+ benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure.
+ benchTime: durationOrCountFlag{d: 1 * time.Microsecond},
+ }
+ if tc.chatty {
+ root.chatty = newChattyPrinter(root.w)
+ }
+ root.runN(1)
+ if ok != !tc.failed {
+ t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, !tc.failed)
+ }
+ if !ok != root.Failed() {
+ t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed())
+ }
+ // All tests are run as subtests
+ if root.result.N != 1 {
+ t.Errorf("%s: N for parent benchmark was %d; want 1", tc.desc, root.result.N)
+ }
+ got := strings.TrimSpace(buf.String())
+ want := strings.TrimSpace(tc.output)
+ re := makeRegexp(want)
+ if ok, err := regexp.MatchString(re, got); !ok || err != nil {
+ t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want)
+ }
+ })
+ }
+}
+
+func makeRegexp(s string) string {
+ s = regexp.QuoteMeta(s)
+ s = strings.ReplaceAll(s, "^V", "\x16")
+ s = strings.ReplaceAll(s, ":NNN:", `:\d\d\d\d?:`)
+ s = strings.ReplaceAll(s, "N\\.NNs", `\d*\.\d*s`)
+ return s
+}
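+
+// For example (an illustrative note): the template line
+//
+//	--- FAIL: root (N.NNs)
+//
+// becomes the pattern `--- FAIL: root \(\d*\.\d*s\)`, and ":NNN:" becomes
+// `:\d\d\d\d?:`, so durations and line numbers in the captured output may vary.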
+
+func TestBenchmarkOutput(t *T) {
+ // Ensure Benchmark initializes common.w by invoking it with both an error
+ // case and a normal case.
+ Benchmark(func(b *B) { b.Error("do not print this output") })
+ Benchmark(func(b *B) {})
+}
+
+func TestBenchmarkStartsFrom1(t *T) {
+ var first = true
+ Benchmark(func(b *B) {
+ if first && b.N != 1 {
+ panic(fmt.Sprintf("Benchmark() first N=%v; want 1", b.N))
+ }
+ first = false
+ })
+}
+
+func TestBenchmarkReadMemStatsBeforeFirstRun(t *T) {
+ var first = true
+ Benchmark(func(b *B) {
+ if first && (b.startAllocs == 0 || b.startBytes == 0) {
+ panic("ReadMemStats not called before first run")
+ }
+ first = false
+ })
+}
+
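+// funcWriter adapts a function to the io.Writer interface so that
+// TestRacyOutput below can observe every Write made by the testing machinery.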
+type funcWriter struct {
+ write func([]byte) (int, error)
+}
+
+func (fw *funcWriter) Write(b []byte) (int, error) {
+ return fw.write(b)
+}
+
+func TestRacyOutput(t *T) {
+ var runs int32 // The number of running Writes
+ var races int32 // Incremented for each race detected
+ raceDetector := func(b []byte) (int, error) {
+ // Check if some other goroutine is concurrently calling Write.
+ if atomic.LoadInt32(&runs) > 0 {
+ atomic.AddInt32(&races, 1) // Race detected!
+ }
+ atomic.AddInt32(&runs, 1)
+ defer atomic.AddInt32(&runs, -1)
+ runtime.Gosched() // Increase probability of a race
+ return len(b), nil
+ }
+
+ root := &T{
+ common: common{w: &funcWriter{raceDetector}},
+ context: newTestContext(1, allMatcher()),
+ }
+ root.chatty = newChattyPrinter(root.w)
+ root.Run("", func(t *T) {
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ t.Run(fmt.Sprint(i), func(t *T) {
+ t.Logf("testing run %d", i)
+ })
+ }(i)
+ }
+ wg.Wait()
+ })
+
+ if races > 0 {
+ t.Errorf("detected %d racy Writes", races)
+ }
+}
+
+// The late log message did not include the test name. Issue 29388.
+func TestLogAfterComplete(t *T) {
+ ctx := newTestContext(1, allMatcher())
+ var buf bytes.Buffer
+ t1 := &T{
+ common: common{
+ // Use a buffered channel so that tRunner can write
+ // to it although nothing is reading from it.
+ signal: make(chan bool, 1),
+ w: &buf,
+ },
+ context: ctx,
+ }
+
+ c1 := make(chan bool)
+ c2 := make(chan string)
+ tRunner(t1, func(t *T) {
+ t.Run("TestLateLog", func(t *T) {
+ go func() {
+ defer close(c2)
+ defer func() {
+ p := recover()
+ if p == nil {
+ c2 <- "subtest did not panic"
+ return
+ }
+ s, ok := p.(string)
+ if !ok {
+ c2 <- fmt.Sprintf("subtest panic with unexpected value %v", p)
+ return
+ }
+ const want = "Log in goroutine after TestLateLog has completed: log after test"
+ if !strings.Contains(s, want) {
+ c2 <- fmt.Sprintf("subtest panic %q does not contain %q", s, want)
+ }
+ }()
+
+ <-c1
+ t.Log("log after test")
+ }()
+ })
+ })
+ close(c1)
+
+ if s := <-c2; s != "" {
+ t.Error(s)
+ }
+}
+
+func TestBenchmark(t *T) {
+ if Short() {
+ t.Skip("skipping in short mode")
+ }
+ res := Benchmark(func(b *B) {
+ for i := 0; i < 5; i++ {
+ b.Run("", func(b *B) {
+ for i := 0; i < b.N; i++ {
+ time.Sleep(time.Millisecond)
+ }
+ })
+ }
+ })
+ if res.NsPerOp() < 4000000 {
+ t.Errorf("want >5ms; got %v", time.Duration(res.NsPerOp()))
+ }
+}
+
+func TestCleanup(t *T) {
+ var cleanups []int
+ t.Run("test", func(t *T) {
+ t.Cleanup(func() { cleanups = append(cleanups, 1) })
+ t.Cleanup(func() { cleanups = append(cleanups, 2) })
+ })
+ if got, want := cleanups, []int{2, 1}; !reflect.DeepEqual(got, want) {
+ t.Errorf("unexpected cleanup record; got %v want %v", got, want)
+ }
+}
+
+func TestConcurrentCleanup(t *T) {
+ cleanups := 0
+ t.Run("test", func(t *T) {
+ var wg sync.WaitGroup
+ wg.Add(2)
+ for i := 0; i < 2; i++ {
+ i := i
+ go func() {
+ t.Cleanup(func() {
+ // Although the calls to Cleanup are concurrent, the functions passed
+ // to Cleanup should be called sequentially, in some nondeterministic
+ // order based on when the Cleanup calls happened to be scheduled.
+ // So these assignments to the cleanups variable should not race.
+ cleanups |= 1 << i
+ })
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ })
+ if cleanups != 1|2 {
+ t.Errorf("unexpected cleanup; got %d want 3", cleanups)
+ }
+}
+
+func TestCleanupCalledEvenAfterGoexit(t *T) {
+ cleanups := 0
+ t.Run("test", func(t *T) {
+ t.Cleanup(func() {
+ cleanups++
+ })
+ t.Cleanup(func() {
+ runtime.Goexit()
+ })
+ })
+ if cleanups != 1 {
+ t.Errorf("unexpected cleanup count; got %d want 1", cleanups)
+ }
+}
+
+func TestRunCleanup(t *T) {
+ outerCleanup := 0
+ innerCleanup := 0
+ t.Run("test", func(t *T) {
+ t.Cleanup(func() { outerCleanup++ })
+ t.Run("x", func(t *T) {
+ t.Cleanup(func() { innerCleanup++ })
+ })
+ })
+ if innerCleanup != 1 {
+ t.Errorf("unexpected inner cleanup count; got %d want 1", innerCleanup)
+ }
+ if outerCleanup != 1 {
+ t.Errorf("unexpected outer cleanup count; got %d want 0", outerCleanup)
+ }
+}
+
+func TestCleanupParallelSubtests(t *T) {
+ ranCleanup := 0
+ t.Run("test", func(t *T) {
+ t.Cleanup(func() { ranCleanup++ })
+ t.Run("x", func(t *T) {
+ t.Parallel()
+ if ranCleanup > 0 {
+ t.Error("outer cleanup ran before parallel subtest")
+ }
+ })
+ })
+ if ranCleanup != 1 {
+ t.Errorf("unexpected cleanup count; got %d want 1", ranCleanup)
+ }
+}
+
+func TestNestedCleanup(t *T) {
+ ranCleanup := 0
+ t.Run("test", func(t *T) {
+ t.Cleanup(func() {
+ if ranCleanup != 2 {
+ t.Errorf("unexpected cleanup count in first cleanup: got %d want 2", ranCleanup)
+ }
+ ranCleanup++
+ })
+ t.Cleanup(func() {
+ if ranCleanup != 0 {
+ t.Errorf("unexpected cleanup count in second cleanup: got %d want 0", ranCleanup)
+ }
+ ranCleanup++
+ t.Cleanup(func() {
+ if ranCleanup != 1 {
+ t.Errorf("unexpected cleanup count in nested cleanup: got %d want 1", ranCleanup)
+ }
+ ranCleanup++
+ })
+ })
+ })
+ if ranCleanup != 3 {
+ t.Errorf("unexpected cleanup count: got %d want 3", ranCleanup)
+ }
+}
diff --git a/src/testing/testing.go b/src/testing/testing.go
new file mode 100644
index 0000000..5c06aea
--- /dev/null
+++ b/src/testing/testing.go
@@ -0,0 +1,2409 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testing provides support for automated testing of Go packages.
+// It is intended to be used in concert with the "go test" command, which automates
+// execution of any function of the form
+//
+// func TestXxx(*testing.T)
+//
+// where Xxx does not start with a lowercase letter. The function name
+// serves to identify the test routine.
+//
+// Within these functions, use the Error, Fail or related methods to signal failure.
+//
+// To write a new test suite, create a file that
+// contains the TestXxx functions as described here,
+// and give that file a name ending in "_test.go".
+// The file will be excluded from regular
+// package builds but will be included when the "go test" command is run.
+//
+// The test file can be in the same package as the one being tested,
+// or in a corresponding package with the suffix "_test".
+//
+// If the test file is in the same package, it may refer to unexported
+// identifiers within the package, as in this example:
+//
+// package abs
+//
+// import "testing"
+//
+// func TestAbs(t *testing.T) {
+// got := Abs(-1)
+// if got != 1 {
+// t.Errorf("Abs(-1) = %d; want 1", got)
+// }
+// }
+//
+// If the file is in a separate "_test" package, the package being tested
+// must be imported explicitly and only its exported identifiers may be used.
+// This is known as "black box" testing.
+//
+// package abs_test
+//
+// import (
+// "testing"
+//
+// "path_to_pkg/abs"
+// )
+//
+// func TestAbs(t *testing.T) {
+// got := abs.Abs(-1)
+// if got != 1 {
+// t.Errorf("Abs(-1) = %d; want 1", got)
+// }
+// }
+//
+// For more detail, run "go help test" and "go help testflag".
+//
+// # Benchmarks
+//
+// Functions of the form
+//
+// func BenchmarkXxx(*testing.B)
+//
+// are considered benchmarks, and are executed by the "go test" command when
+// its -bench flag is provided. Benchmarks are run sequentially.
+//
+// For a description of the testing flags, see
+// https://golang.org/cmd/go/#hdr-Testing_flags.
+//
+// A sample benchmark function looks like this:
+//
+// func BenchmarkRandInt(b *testing.B) {
+// for i := 0; i < b.N; i++ {
+// rand.Int()
+// }
+// }
+//
+// The benchmark function must run the target code b.N times.
+// During benchmark execution, b.N is adjusted until the benchmark function lasts
+// long enough to be timed reliably. The output
+//
+// BenchmarkRandInt-8 68453040 17.8 ns/op
+//
+// means that the loop ran 68453040 times at a speed of 17.8 ns per loop.
+//
+// If a benchmark needs some expensive setup before running, the timer
+// may be reset:
+//
+// func BenchmarkBigLen(b *testing.B) {
+// big := NewBig()
+// b.ResetTimer()
+// for i := 0; i < b.N; i++ {
+// big.Len()
+// }
+// }
+//
+// If a benchmark needs to test performance in a parallel setting, it may use
+// the RunParallel helper function; such benchmarks are intended to be used with
+// the go test -cpu flag:
+//
+// func BenchmarkTemplateParallel(b *testing.B) {
+// templ := template.Must(template.New("test").Parse("Hello, {{.}}!"))
+// b.RunParallel(func(pb *testing.PB) {
+// var buf bytes.Buffer
+// for pb.Next() {
+// buf.Reset()
+// templ.Execute(&buf, "World")
+// }
+// })
+// }
+//
+// A detailed specification of the benchmark results format is given
+// in https://golang.org/design/14313-benchmark-format.
+//
+// There are standard tools for working with benchmark results at
+// https://golang.org/x/perf/cmd.
+// In particular, https://golang.org/x/perf/cmd/benchstat performs
+// statistically robust A/B comparisons.
+//
+// # Examples
+//
+// The package also runs and verifies example code. Example functions may
+// include a concluding line comment that begins with "Output:" and is compared with
+// the standard output of the function when the tests are run. (The comparison
+// ignores leading and trailing space.) These are examples of an example:
+//
+// func ExampleHello() {
+// fmt.Println("hello")
+// // Output: hello
+// }
+//
+// func ExampleSalutations() {
+// fmt.Println("hello, and")
+// fmt.Println("goodbye")
+// // Output:
+// // hello, and
+// // goodbye
+// }
+//
+// The comment prefix "Unordered output:" is like "Output:", but matches any
+// line order:
+//
+// func ExamplePerm() {
+// for _, value := range Perm(5) {
+// fmt.Println(value)
+// }
+// // Unordered output: 4
+// // 2
+// // 1
+// // 3
+// // 0
+// }
+//
+// Example functions without output comments are compiled but not executed.
+//
+// The naming conventions to declare examples for the package, a function F, a type T and
+// method M on type T are:
+//
+// func Example() { ... }
+// func ExampleF() { ... }
+// func ExampleT() { ... }
+// func ExampleT_M() { ... }
+//
+// Multiple example functions for a package/type/function/method may be provided by
+// appending a distinct suffix to the name. The suffix must start with a
+// lower-case letter.
+//
+// func Example_suffix() { ... }
+// func ExampleF_suffix() { ... }
+// func ExampleT_suffix() { ... }
+// func ExampleT_M_suffix() { ... }
+//
+// The entire test file is presented as the example when it contains a single
+// example function, at least one other function, type, variable, or constant
+// declaration, and no test or benchmark functions.
+//
+// # Fuzzing
+//
+// 'go test' and the testing package support fuzzing, a testing technique where
+// a function is called with randomly generated inputs to find bugs not
+// anticipated by unit tests.
+//
+// Functions of the form
+//
+// func FuzzXxx(*testing.F)
+//
+// are considered fuzz tests.
+//
+// For example:
+//
+// func FuzzHex(f *testing.F) {
+// for _, seed := range [][]byte{{}, {0}, {9}, {0xa}, {0xf}, {1, 2, 3, 4}} {
+// f.Add(seed)
+// }
+// f.Fuzz(func(t *testing.T, in []byte) {
+// enc := hex.EncodeToString(in)
+// out, err := hex.DecodeString(enc)
+// if err != nil {
+// t.Fatalf("%v: decode: %v", in, err)
+// }
+// if !bytes.Equal(in, out) {
+// t.Fatalf("%v: not equal after round trip: %v", in, out)
+// }
+// })
+// }
+//
+// A fuzz test maintains a seed corpus, or a set of inputs which are run by
+// default, and can seed input generation. Seed inputs may be registered by
+// calling (*F).Add or by storing files in the directory testdata/fuzz/<Name>
+// (where <Name> is the name of the fuzz test) within the package containing
+// the fuzz test. Seed inputs are optional, but the fuzzing engine may find
+// bugs more efficiently when provided with a set of small seed inputs with good
+// code coverage. These seed inputs can also serve as regression tests for bugs
+// identified through fuzzing.
+//
+// The function passed to (*F).Fuzz within the fuzz test is considered the fuzz
+// target. A fuzz target must accept a *T parameter, followed by one or more
+// parameters for random inputs. The types of arguments passed to (*F).Add must
+// be identical to the types of these parameters. The fuzz target may signal
+// that it's found a problem the same way tests do: by calling T.Fail (or any
+// method that calls it like T.Error or T.Fatal) or by panicking.
+//
+// When fuzzing is enabled (by setting the -fuzz flag to a regular expression
+// that matches a specific fuzz test), the fuzz target is called with arguments
+// generated by repeatedly making random changes to the seed inputs. On
+// supported platforms, 'go test' compiles the test executable with fuzzing
+// coverage instrumentation. The fuzzing engine uses that instrumentation to
+// find and cache inputs that expand coverage, increasing the likelihood of
+// finding bugs. If the fuzz target fails for a given input, the fuzzing engine
+// writes the inputs that caused the failure to a file in the directory
+// testdata/fuzz/<Name> within the package directory. This file later serves as
+// a seed input. If the file can't be written at that location (for example,
+// because the directory is read-only), the fuzzing engine writes the file to
+// the fuzz cache directory within the build cache instead.
+//
+// When fuzzing is disabled, the fuzz target is called with the seed inputs
+// registered with F.Add and seed inputs from testdata/fuzz/<Name>. In this
+// mode, the fuzz test acts much like a regular test, with subtests started
+// with F.Fuzz instead of T.Run.
+//
+// See https://go.dev/doc/fuzz for documentation about fuzzing.
+//
+// # Skipping
+//
+// Tests or benchmarks may be skipped at run time with a call to
+// the Skip method of *T or *B:
+//
+// func TestTimeConsuming(t *testing.T) {
+// if testing.Short() {
+// t.Skip("skipping test in short mode.")
+// }
+// ...
+// }
+//
+// The Skip method of *T can be used in a fuzz target if the input is invalid,
+// but should not be considered a failing input. For example:
+//
+// func FuzzJSONMarshaling(f *testing.F) {
+// f.Fuzz(func(t *testing.T, b []byte) {
+// var v interface{}
+// if err := json.Unmarshal(b, &v); err != nil {
+// t.Skip()
+// }
+// if _, err := json.Marshal(v); err != nil {
+// t.Errorf("Marshal: %v", err)
+// }
+// })
+// }
+//
+// # Subtests and Sub-benchmarks
+//
+// The Run methods of T and B allow defining subtests and sub-benchmarks,
+// without having to define separate functions for each. This enables uses
+// like table-driven benchmarks and creating hierarchical tests.
+// It also provides a way to share common setup and tear-down code:
+//
+// func TestFoo(t *testing.T) {
+// // <setup code>
+// t.Run("A=1", func(t *testing.T) { ... })
+// t.Run("A=2", func(t *testing.T) { ... })
+// t.Run("B=1", func(t *testing.T) { ... })
+// // <tear-down code>
+// }
+//
+// Each subtest and sub-benchmark has a unique name: the combination of the name
+// of the top-level test and the sequence of names passed to Run, separated by
+// slashes, with an optional trailing sequence number for disambiguation.
+//
+// The argument to the -run, -bench, and -fuzz command-line flags is an unanchored regular
+// expression that matches the test's name. For tests with multiple slash-separated
+// elements, such as subtests, the argument is itself slash-separated, with
+// expressions matching each name element in turn. Because it is unanchored, an
+// empty expression matches any string.
+// For example, using "matching" to mean "whose name contains":
+//
+// go test -run '' # Run all tests.
+// go test -run Foo # Run top-level tests matching "Foo", such as "TestFooBar".
+// go test -run Foo/A= # For top-level tests matching "Foo", run subtests matching "A=".
+// go test -run /A=1 # For all top-level tests, run subtests matching "A=1".
+// go test -fuzz FuzzFoo # Fuzz the target matching "FuzzFoo"
+//
+// The -run argument can also be used to run a specific value in the seed
+// corpus, for debugging. For example:
+//
+// go test -run=FuzzFoo/9ddb952d9814
+//
+// The -fuzz and -run flags can both be set, in order to fuzz a target but
+// skip the execution of all other tests.
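+// For example:
+//
+//	go test -fuzz FuzzFoo -run FuzzFoo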
+//
+// Subtests can also be used to control parallelism. A parent test will only
+// complete once all of its subtests complete. In this example, all tests are
+// run in parallel with each other, and only with each other, regardless of
+// other top-level tests that may be defined:
+//
+// func TestGroupedParallel(t *testing.T) {
+// for _, tc := range tests {
+// tc := tc // capture range variable
+// t.Run(tc.Name, func(t *testing.T) {
+// t.Parallel()
+// ...
+// })
+// }
+// }
+//
+// Run does not return until parallel subtests have completed, providing a way
+// to clean up after a group of parallel tests:
+//
+// func TestTeardownParallel(t *testing.T) {
+// // This Run will not return until the parallel tests finish.
+// t.Run("group", func(t *testing.T) {
+// t.Run("Test1", parallelTest1)
+// t.Run("Test2", parallelTest2)
+// t.Run("Test3", parallelTest3)
+// })
+// // <tear-down code>
+// }
+//
+// # Main
+//
+// It is sometimes necessary for a test or benchmark program to do extra setup or teardown
+// before or after it executes. It is also sometimes necessary to control
+// which code runs on the main thread. To support these and other cases,
+// if a test file contains a function:
+//
+// func TestMain(m *testing.M)
+//
+// then the generated test will call TestMain(m) instead of running the tests or benchmarks
+// directly. TestMain runs in the main goroutine and can do whatever setup
+// and teardown is necessary around a call to m.Run. m.Run will return an exit
+// code that may be passed to os.Exit. If TestMain returns, the test wrapper
+// will pass the result of m.Run to os.Exit itself.
+//
+// When TestMain is called, flag.Parse has not been run. If TestMain depends on
+// command-line flags, including those of the testing package, it should call
+// flag.Parse explicitly. Command line flags are always parsed by the time test
+// or benchmark functions run.
+//
+// A simple implementation of TestMain is:
+//
+// func TestMain(m *testing.M) {
+// // call flag.Parse() here if TestMain uses flags
+// os.Exit(m.Run())
+// }
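+//
+// A slightly larger sketch, with hypothetical setup and teardown helpers
+// around m.Run, might look like:
+//
+//	func TestMain(m *testing.M) {
+//		flag.Parse()
+//		setup() // hypothetical process-wide setup
+//		code := m.Run()
+//		teardown() // hypothetical process-wide teardown
+//		os.Exit(code)
+//	}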
+//
+// TestMain is a low-level primitive and should not be necessary for casual
+// testing needs, where ordinary test functions suffice.
+package testing
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "internal/goexperiment"
+ "internal/race"
+ "io"
+ "math/rand"
+ "os"
+ "reflect"
+ "runtime"
+ "runtime/debug"
+ "runtime/trace"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+var initRan bool
+
+// Init registers testing flags. These flags are automatically registered by
+// the "go test" command before running test functions, so Init is only needed
+// when calling functions such as Benchmark without using "go test".
+//
+// Init is not safe to call concurrently. It has no effect if it was already called.
+func Init() {
+ if initRan {
+ return
+ }
+ initRan = true
+ // The short flag requests that tests run more quickly, but its functionality
+ // is provided by test writers themselves. The testing package is just its
+ // home. The all.bash installation script sets it to make installation more
+ // efficient, but by default the flag is off so a plain "go test" will do a
+ // full test of the package.
+ short = flag.Bool("test.short", false, "run smaller test suite to save time")
+
+ // The failfast flag requests that test execution stop after the first test failure.
+ failFast = flag.Bool("test.failfast", false, "do not start new tests after the first test failure")
+
+ // The directory in which to create profile files and the like. When run from
+ // "go test", the binary always runs in the source directory for the package;
+ // this flag lets "go test" tell the binary to write the files in the directory where
+ // the "go test" command is run.
+ outputDir = flag.String("test.outputdir", "", "write profiles to `dir`")
+ // Report as tests are run; default is silent for success.
+ flag.Var(&chatty, "test.v", "verbose: print additional output")
+ count = flag.Uint("test.count", 1, "run tests and benchmarks `n` times")
+ coverProfile = flag.String("test.coverprofile", "", "write a coverage profile to `file`")
+ gocoverdir = flag.String("test.gocoverdir", "", "write coverage intermediate files to this directory")
+ matchList = flag.String("test.list", "", "list tests, examples, and benchmarks matching `regexp` then exit")
+ match = flag.String("test.run", "", "run only tests and examples matching `regexp`")
+ skip = flag.String("test.skip", "", "do not list or run tests matching `regexp`")
+ memProfile = flag.String("test.memprofile", "", "write an allocation profile to `file`")
+ memProfileRate = flag.Int("test.memprofilerate", 0, "set memory allocation profiling `rate` (see runtime.MemProfileRate)")
+ cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to `file`")
+ blockProfile = flag.String("test.blockprofile", "", "write a goroutine blocking profile to `file`")
+ blockProfileRate = flag.Int("test.blockprofilerate", 1, "set blocking profile `rate` (see runtime.SetBlockProfileRate)")
+ mutexProfile = flag.String("test.mutexprofile", "", "write a mutex contention profile to the named file after execution")
+ mutexProfileFraction = flag.Int("test.mutexprofilefraction", 1, "if >= 0, calls runtime.SetMutexProfileFraction()")
+ panicOnExit0 = flag.Bool("test.paniconexit0", false, "panic on call to os.Exit(0)")
+ traceFile = flag.String("test.trace", "", "write an execution trace to `file`")
+ timeout = flag.Duration("test.timeout", 0, "panic test binary after duration `d` (default 0, timeout disabled)")
+ cpuListStr = flag.String("test.cpu", "", "comma-separated `list` of cpu counts to run each test with")
+ parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "run at most `n` tests in parallel")
+ testlog = flag.String("test.testlogfile", "", "write test action log to `file` (for use only by cmd/go)")
+ shuffle = flag.String("test.shuffle", "off", "randomize the execution order of tests and benchmarks")
+ fullPath = flag.Bool("test.fullpath", false, "show full file names in error messages")
+
+ initBenchmarkFlags()
+ initFuzzFlags()
+}
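+
+// A minimal sketch of using Init together with Benchmark outside "go test";
+// the benchmarked body here is arbitrary:
+//
+//	func main() {
+//		testing.Init()
+//		flag.Parse()
+//		result := testing.Benchmark(func(b *testing.B) {
+//			for i := 0; i < b.N; i++ {
+//				strings.Repeat("x", 100)
+//			}
+//		})
+//		fmt.Println(result)
+//	}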
+
+var (
+ // Flags, registered during Init.
+ short *bool
+ failFast *bool
+ outputDir *string
+ chatty chattyFlag
+ count *uint
+ coverProfile *string
+ gocoverdir *string
+ matchList *string
+ match *string
+ skip *string
+ memProfile *string
+ memProfileRate *int
+ cpuProfile *string
+ blockProfile *string
+ blockProfileRate *int
+ mutexProfile *string
+ mutexProfileFraction *int
+ panicOnExit0 *bool
+ traceFile *string
+ timeout *time.Duration
+ cpuListStr *string
+ parallel *int
+ shuffle *string
+ testlog *string
+ fullPath *bool
+
+ haveExamples bool // are there examples?
+
+ cpuList []int
+ testlogFile *os.File
+
+ numFailed atomic.Uint32 // number of test failures
+
+ running sync.Map // map[string]time.Time of running, unpaused tests
+)
+
+type chattyFlag struct {
+ on bool // -v is set in some form
+ json bool // -v=test2json is set, to make output better for test2json
+}
+
+func (*chattyFlag) IsBoolFlag() bool { return true }
+
+func (f *chattyFlag) Set(arg string) error {
+ switch arg {
+ default:
+ return fmt.Errorf("invalid flag -test.v=%s", arg)
+ case "true", "test2json":
+ f.on = true
+ f.json = arg == "test2json"
+ case "false":
+ f.on = false
+ f.json = false
+ }
+ return nil
+}
+
+func (f *chattyFlag) String() string {
+ if f.json {
+ return "test2json"
+ }
+ if f.on {
+ return "true"
+ }
+ return "false"
+}
+
+func (f *chattyFlag) Get() any {
+ if f.json {
+ return "test2json"
+ }
+ return f.on
+}
+
+const marker = byte(0x16) // ^V for framing
+
+func (f *chattyFlag) prefix() string {
+ if f.json {
+ return string(marker)
+ }
+ return ""
+}
+
+type chattyPrinter struct {
+ w io.Writer
+ lastNameMu sync.Mutex // guards lastName
+ lastName string // last printed test name in chatty mode
+ json bool // -v=test2json output mode
+}
+
+func newChattyPrinter(w io.Writer) *chattyPrinter {
+ return &chattyPrinter{w: w, json: chatty.json}
+}
+
+// prefix is like chatty.prefix but using p.json instead of chatty.json.
+// Using p.json allows tests to check the json behavior without modifying
+// the global variable. For convenience, we allow p == nil and treat
+// that as not in json mode (because it's not chatty at all).
+func (p *chattyPrinter) prefix() string {
+ if p != nil && p.json {
+ return string(marker)
+ }
+ return ""
+}
+
+// Updatef prints a message about the status of the named test to w.
+//
+// The formatted message must include the test name itself.
+func (p *chattyPrinter) Updatef(testName, format string, args ...any) {
+ p.lastNameMu.Lock()
+ defer p.lastNameMu.Unlock()
+
+ // Since the message already implies an association with a specific new test,
+ // we don't need to check what the old test name was or log an extra NAME line
+ // for it. (We're updating it anyway, and the current message already includes
+ // the test name.)
+ p.lastName = testName
+ fmt.Fprintf(p.w, p.prefix()+format, args...)
+}
+
+// Printf prints a message, generated by the named test, that does not
+// necessarily mention the test's name itself.
+func (p *chattyPrinter) Printf(testName, format string, args ...any) {
+ p.lastNameMu.Lock()
+ defer p.lastNameMu.Unlock()
+
+ if p.lastName == "" {
+ p.lastName = testName
+ } else if p.lastName != testName {
+ fmt.Fprintf(p.w, "%s=== NAME %s\n", p.prefix(), testName)
+ p.lastName = testName
+ }
+
+ fmt.Fprintf(p.w, format, args...)
+}
+
+// The maximum number of stack frames to go through when skipping helper functions for
+// the purpose of decorating log messages.
+const maxStackLen = 50
+
+// common holds the elements common between T and B and
+// captures common methods such as Errorf.
+type common struct {
+ mu sync.RWMutex // guards this group of fields
+ output []byte // Output generated by test or benchmark.
+ w io.Writer // For flushToParent.
+ ran bool // Test or benchmark (or one of its subtests) was executed.
+ failed bool // Test or benchmark has failed.
+ skipped bool // Test or benchmark has been skipped.
+ done bool // Test is finished and all subtests have completed.
+ helperPCs map[uintptr]struct{} // functions to be skipped when writing file/line info
+ helperNames map[string]struct{} // helperPCs converted to function names
+ cleanups []func() // optional functions to be called at the end of the test
+ cleanupName string // Name of the cleanup function.
+ cleanupPc []uintptr // The stack trace at the point where Cleanup was called.
+ finished bool // Test function has completed.
+ inFuzzFn bool // Whether the fuzz target, if this is one, is running.
+
+ chatty *chattyPrinter // A copy of chattyPrinter, if the chatty flag is set.
+ bench bool // Whether the current test is a benchmark.
+ hasSub atomic.Bool // whether there are subtests or sub-benchmarks.
+ cleanupStarted atomic.Bool // Registered cleanup callbacks have started to execute
+ runner string // Function name of tRunner running the test.
+ isParallel bool // Whether the test is parallel.
+
+ parent *common
+ level int // Nesting depth of test or benchmark.
+ creator []uintptr // If level > 0, the stack trace at the point where the parent called t.Run.
+ name string // Name of test or benchmark.
+ start time.Time // Time test or benchmark started
+ duration time.Duration
+ barrier chan bool // To signal parallel subtests they may start. Nil when T.Parallel is not present (B) or not usable (when fuzzing).
+ signal chan bool // To signal a test is done.
+ sub []*T // Queue of subtests to be run in parallel.
+
+ lastRaceErrors atomic.Int64 // Max value of race.Errors seen during the test or its subtests.
+ raceErrorLogged atomic.Bool
+
+ tempDirMu sync.Mutex
+ tempDir string
+ tempDirErr error
+ tempDirSeq int32
+}
+
+// Short reports whether the -test.short flag is set.
+func Short() bool {
+ if short == nil {
+ panic("testing: Short called before Init")
+ }
+ // Catch code that calls this from TestMain without first calling flag.Parse.
+ if !flag.Parsed() {
+ panic("testing: Short called before Parse")
+ }
+
+ return *short
+}
+
+// testBinary is set by cmd/go to "1" if this is a binary built by "go test".
+// The value is set to "1" by a -X option to cmd/link. We assume that
+// because this is possible, the compiler will not optimize testBinary
+// into a constant on the basis that it is an unexported package-scope
+// variable that is never changed. If the compiler ever starts implementing
+// such an optimization, we will need some technique to mark this variable
+// as "changed by a cmd/link -X option".
+var testBinary = "0"
+
+// Testing reports whether the current code is being run in a test.
+// This will report true in programs created by "go test",
+// false in programs created by "go build".
+func Testing() bool {
+ return testBinary == "1"
+}
+
+// CoverMode reports what the test coverage mode is set to. The
+// values are "set", "count", or "atomic". The return value will be
+// empty if test coverage is not enabled.
+func CoverMode() string {
+ if goexperiment.CoverageRedesign {
+ return cover2.mode
+ }
+ return cover.Mode
+}
+
+// Verbose reports whether the -test.v flag is set.
+func Verbose() bool {
+ // Same as in Short.
+ if !flag.Parsed() {
+ panic("testing: Verbose called before Parse")
+ }
+ return chatty.on
+}
+
+func (c *common) checkFuzzFn(name string) {
+ if c.inFuzzFn {
+ panic(fmt.Sprintf("testing: f.%s was called inside the fuzz target, use t.%s instead", name, name))
+ }
+}
+
+// frameSkip searches, starting after skip frames, for the first caller frame
+// in a function not marked as a helper and returns that frame.
+// The search stops if it finds a tRunner function that
+// was the entry point into the test and the test is not a subtest.
+// This function must be called with c.mu held.
+func (c *common) frameSkip(skip int) runtime.Frame {
+ // If the search continues into the parent test, we'll have to hold
+ // its mu temporarily. If we then return, we need to unlock it.
+ shouldUnlock := false
+ defer func() {
+ if shouldUnlock {
+ c.mu.Unlock()
+ }
+ }()
+ var pc [maxStackLen]uintptr
+ // Skip two extra frames to account for this function
+ // and runtime.Callers itself.
+ n := runtime.Callers(skip+2, pc[:])
+ if n == 0 {
+ panic("testing: zero callers found")
+ }
+ frames := runtime.CallersFrames(pc[:n])
+ var firstFrame, prevFrame, frame runtime.Frame
+ for more := true; more; prevFrame = frame {
+ frame, more = frames.Next()
+ if frame.Function == "runtime.gopanic" {
+ continue
+ }
+ if frame.Function == c.cleanupName {
+ frames = runtime.CallersFrames(c.cleanupPc)
+ continue
+ }
+ if firstFrame.PC == 0 {
+ firstFrame = frame
+ }
+ if frame.Function == c.runner {
+ // We've gone up all the way to the tRunner calling
+ // the test function (so the user must have
+ // called tb.Helper from inside that test function).
+ // If this is a top-level test, only skip up to the test function itself.
+ // If we're in a subtest, continue searching in the parent test,
+ // starting from the point of the call to Run which created this subtest.
+ if c.level > 1 {
+ frames = runtime.CallersFrames(c.creator)
+ parent := c.parent
+ // We're no longer looking at the current c after this point,
+ // so we should unlock its mu, unless it's the original receiver,
+ // in which case our caller doesn't expect us to do that.
+ if shouldUnlock {
+ c.mu.Unlock()
+ }
+ c = parent
+ // Remember to unlock c.mu when we no longer need it, either
+ // because we went up another nesting level, or because we
+ // returned.
+ shouldUnlock = true
+ c.mu.Lock()
+ continue
+ }
+ return prevFrame
+ }
+ // If more helper PCs have been added since we last did the conversion, rebuild the set of helper names.
+ if c.helperNames == nil {
+ c.helperNames = make(map[string]struct{})
+ for pc := range c.helperPCs {
+ c.helperNames[pcToName(pc)] = struct{}{}
+ }
+ }
+ if _, ok := c.helperNames[frame.Function]; !ok {
+ // Found a frame that wasn't inside a helper function.
+ return frame
+ }
+ }
+ return firstFrame
+}
+
+// decorate prefixes the string with the file and line of the call site
+// and inserts the final newline if needed and indentation spaces for formatting.
+// This function must be called with c.mu held.
+func (c *common) decorate(s string, skip int) string {
+ frame := c.frameSkip(skip)
+ file := frame.File
+ line := frame.Line
+ if file != "" {
+ if *fullPath {
+ // -test.fullpath is set; keep the full file path.
+ } else if index := strings.LastIndexAny(file, `/\`); index >= 0 {
+ // Otherwise, truncate the file name at the last path separator.
+ file = file[index+1:]
+ }
+ } else {
+ file = "???"
+ }
+ if line == 0 {
+ line = 1
+ }
+ buf := new(strings.Builder)
+ // Every line is indented at least 4 spaces.
+ buf.WriteString(" ")
+ fmt.Fprintf(buf, "%s:%d: ", file, line)
+ lines := strings.Split(s, "\n")
+ if l := len(lines); l > 1 && lines[l-1] == "" {
+ lines = lines[:l-1]
+ }
+ for i, line := range lines {
+ if i > 0 {
+ // Second and subsequent lines are indented an additional 4 spaces.
+ buf.WriteString("\n ")
+ }
+ buf.WriteString(line)
+ }
+ buf.WriteByte('\n')
+ return buf.String()
+}
+
+// flushToParent writes c.output to the parent after first writing the header
+// with the given format and arguments.
+func (c *common) flushToParent(testName, format string, args ...any) {
+ p := c.parent
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if len(c.output) > 0 {
+ // Add the current c.output to the print,
+ // and then arrange for the print to replace c.output.
+ // (This displays the logged output after the --- FAIL line.)
+ format += "%s"
+ args = append(args[:len(args):len(args)], c.output)
+ c.output = c.output[:0]
+ }
+
+ if c.chatty != nil && (p.w == c.chatty.w || c.chatty.json) {
+ // We're flushing to the actual output, so track that this output is
+ // associated with a specific test (and, specifically, that the next output
+ // is *not* associated with that test).
+ //
+ // Moreover, if c.output is non-empty it is important that this write be
+ // atomic with respect to the output of other tests, so that we don't end up
+ // with confusing '=== NAME' lines in the middle of our '--- PASS' block.
+ // Neither humans nor cmd/test2json can parse those easily.
+ // (See https://go.dev/issue/40771.)
+ //
+ // If test2json is used, we never flush to parent tests,
+ // so that the json stream shows subtests as they finish.
+ // (See https://go.dev/issue/29811.)
+ c.chatty.Updatef(testName, format, args...)
+ } else {
+ // We're flushing to the output buffer of the parent test, which will
+ // itself follow a test-name header when it is finally flushed to stdout.
+ fmt.Fprintf(p.w, c.chatty.prefix()+format, args...)
+ }
+}
+
+type indenter struct {
+ c *common
+}
+
+func (w indenter) Write(b []byte) (n int, err error) {
+ n = len(b)
+ for len(b) > 0 {
+ end := bytes.IndexByte(b, '\n')
+ if end == -1 {
+ end = len(b)
+ } else {
+ end++
+ }
+ // An indent of 4 spaces will neatly align the dashes with the status
+ // indicator of the parent.
+ line := b[:end]
+ if line[0] == marker {
+ w.c.output = append(w.c.output, marker)
+ line = line[1:]
+ }
+ const indent = " "
+ w.c.output = append(w.c.output, indent...)
+ w.c.output = append(w.c.output, line...)
+ b = b[end:]
+ }
+ return
+}
+
+// fmtDuration returns a string representing d in the form "87.00s".
+func fmtDuration(d time.Duration) string {
+ return fmt.Sprintf("%.2fs", d.Seconds())
+}
+
+// TB is the interface common to T, B, and F.
+type TB interface {
+ Cleanup(func())
+ Error(args ...any)
+ Errorf(format string, args ...any)
+ Fail()
+ FailNow()
+ Failed() bool
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Helper()
+ Log(args ...any)
+ Logf(format string, args ...any)
+ Name() string
+ Setenv(key, value string)
+ Skip(args ...any)
+ SkipNow()
+ Skipf(format string, args ...any)
+ Skipped() bool
+ TempDir() string
+
+ // A private method to prevent users from implementing the
+ // interface, so that future additions to it will not
+ // violate Go 1 compatibility.
+ private()
+}
+
+var _ TB = (*T)(nil)
+var _ TB = (*B)(nil)
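+
+// A minimal sketch of a helper written against TB so that it can be shared by
+// tests, benchmarks, and fuzz targets (mustAtoi is hypothetical):
+//
+//	func mustAtoi(tb testing.TB, s string) int {
+//		tb.Helper()
+//		n, err := strconv.Atoi(s)
+//		if err != nil {
+//			tb.Fatalf("Atoi(%q): %v", s, err)
+//		}
+//		return n
+//	}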
+
+// T is a type passed to Test functions to manage test state and support formatted test logs.
+//
+// A test ends when its Test function returns or calls any of the methods
+// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods, as well as
+// the Parallel method, must be called only from the goroutine running the
+// Test function.
+//
+// The other reporting methods, such as the variations of Log and Error,
+// may be called simultaneously from multiple goroutines.
+type T struct {
+ common
+ isEnvSet bool
+ context *testContext // For running tests and subtests.
+}
+
+func (c *common) private() {}
+
+// Name returns the name of the running (sub-) test or benchmark.
+//
+// The name will include the name of the test along with the names of
+// any nested sub-tests. If two sibling sub-tests have the same name,
+// Name will append a suffix to guarantee the returned name is unique.
+func (c *common) Name() string {
+ return c.name
+}
+
+func (c *common) setRan() {
+ if c.parent != nil {
+ c.parent.setRan()
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.ran = true
+}
+
+// Fail marks the function as having failed but continues execution.
+func (c *common) Fail() {
+ if c.parent != nil {
+ c.parent.Fail()
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ // c.done needs to be locked to synchronize checks to c.done in parent tests.
+ if c.done {
+ panic("Fail in goroutine after " + c.name + " has completed")
+ }
+ c.failed = true
+}
+
+// Failed reports whether the function has failed.
+func (c *common) Failed() bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if !c.done && int64(race.Errors()) > c.lastRaceErrors.Load() {
+ c.mu.RUnlock()
+ c.checkRaces()
+ c.mu.RLock()
+ }
+
+ return c.failed
+}
+
+// FailNow marks the function as having failed and stops its execution
+// by calling runtime.Goexit (which then runs all deferred calls in the
+// current goroutine).
+// Execution will continue at the next test or benchmark.
+// FailNow must be called from the goroutine running the
+// test or benchmark function, not from other goroutines
+// created during the test. Calling FailNow does not stop
+// those other goroutines.
+func (c *common) FailNow() {
+ c.checkFuzzFn("FailNow")
+ c.Fail()
+
+ // Calling runtime.Goexit will exit the goroutine, which
+ // will run the deferred functions in this goroutine,
+ // which will eventually run the deferred lines in tRunner,
+ // which will signal to the test loop that this test is done.
+ //
+ // A previous version of this code said:
+ //
+ // c.duration = ...
+ // c.signal <- c.self
+ // runtime.Goexit()
+ //
+ // This previous version duplicated code (those lines are in
+ // tRunner no matter what), but worse the goroutine teardown
+ // implicit in runtime.Goexit was not guaranteed to complete
+ // before the test exited. If a test deferred an important cleanup
+ // function (like removing temporary files), there was no guarantee
+ // it would run on a test failure. Because we send on c.signal during
+ // a top-of-stack deferred function now, we know that the send
+ // only happens after any other stacked defers have completed.
+ c.mu.Lock()
+ c.finished = true
+ c.mu.Unlock()
+ runtime.Goexit()
+}
+
+// log generates the output. It's always at the same stack depth.
+func (c *common) log(s string) {
+ c.logDepth(s, 3) // logDepth + log + public function
+}
+
+// logDepth generates the output at an arbitrary stack depth.
+func (c *common) logDepth(s string, depth int) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.done {
+ // This test has already finished. Try and log this message
+ // with our parent. If we don't have a parent, panic.
+ for parent := c.parent; parent != nil; parent = parent.parent {
+ parent.mu.Lock()
+ defer parent.mu.Unlock()
+ if !parent.done {
+ parent.output = append(parent.output, parent.decorate(s, depth+1)...)
+ return
+ }
+ }
+ panic("Log in goroutine after " + c.name + " has completed: " + s)
+ } else {
+ if c.chatty != nil {
+ if c.bench {
+ // Benchmarks don't print === CONT, so we should skip the test
+ // printer and just print straight to stdout.
+ fmt.Print(c.decorate(s, depth+1))
+ } else {
+ c.chatty.Printf(c.name, "%s", c.decorate(s, depth+1))
+ }
+
+ return
+ }
+ c.output = append(c.output, c.decorate(s, depth+1)...)
+ }
+}
+
+// Log formats its arguments using default formatting, analogous to Println,
+// and records the text in the error log. For tests, the text will be printed only if
+// the test fails or the -test.v flag is set. For benchmarks, the text is always
+// printed to avoid having performance depend on the value of the -test.v flag.
+func (c *common) Log(args ...any) {
+ c.checkFuzzFn("Log")
+ c.log(fmt.Sprintln(args...))
+}
+
+// Logf formats its arguments according to the format, analogous to Printf, and
+// records the text in the error log. A final newline is added if not provided. For
+// tests, the text will be printed only if the test fails or the -test.v flag is
+// set. For benchmarks, the text is always printed to avoid having performance
+// depend on the value of the -test.v flag.
+func (c *common) Logf(format string, args ...any) {
+ c.checkFuzzFn("Logf")
+ c.log(fmt.Sprintf(format, args...))
+}
+
+// Error is equivalent to Log followed by Fail.
+func (c *common) Error(args ...any) {
+ c.checkFuzzFn("Error")
+ c.log(fmt.Sprintln(args...))
+ c.Fail()
+}
+
+// Errorf is equivalent to Logf followed by Fail.
+func (c *common) Errorf(format string, args ...any) {
+ c.checkFuzzFn("Errorf")
+ c.log(fmt.Sprintf(format, args...))
+ c.Fail()
+}
+
+// Fatal is equivalent to Log followed by FailNow.
+func (c *common) Fatal(args ...any) {
+ c.checkFuzzFn("Fatal")
+ c.log(fmt.Sprintln(args...))
+ c.FailNow()
+}
+
+// Fatalf is equivalent to Logf followed by FailNow.
+func (c *common) Fatalf(format string, args ...any) {
+ c.checkFuzzFn("Fatalf")
+ c.log(fmt.Sprintf(format, args...))
+ c.FailNow()
+}
+
+// Skip is equivalent to Log followed by SkipNow.
+func (c *common) Skip(args ...any) {
+ c.checkFuzzFn("Skip")
+ c.log(fmt.Sprintln(args...))
+ c.SkipNow()
+}
+
+// Skipf is equivalent to Logf followed by SkipNow.
+func (c *common) Skipf(format string, args ...any) {
+ c.checkFuzzFn("Skipf")
+ c.log(fmt.Sprintf(format, args...))
+ c.SkipNow()
+}
+
+// SkipNow marks the test as having been skipped and stops its execution
+// by calling [runtime.Goexit].
+// If a test fails (see Error, Errorf, Fail) and is then skipped,
+// it is still considered to have failed.
+// Execution will continue at the next test or benchmark. See also FailNow.
+// SkipNow must be called from the goroutine running the test, not from
+// other goroutines created during the test. Calling SkipNow does not stop
+// those other goroutines.
+func (c *common) SkipNow() {
+ c.checkFuzzFn("SkipNow")
+ c.mu.Lock()
+ c.skipped = true
+ c.finished = true
+ c.mu.Unlock()
+ runtime.Goexit()
+}
+
+// Skipped reports whether the test was skipped.
+func (c *common) Skipped() bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.skipped
+}
+
+// Helper marks the calling function as a test helper function.
+// When printing file and line information, that function will be skipped.
+// Helper may be called simultaneously from multiple goroutines.
+func (c *common) Helper() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.helperPCs == nil {
+ c.helperPCs = make(map[uintptr]struct{})
+ }
+ // repeating code from callerName here to save walking a stack frame
+ var pc [1]uintptr
+ n := runtime.Callers(2, pc[:]) // skip runtime.Callers + Helper
+ if n == 0 {
+ panic("testing: zero callers found")
+ }
+ if _, found := c.helperPCs[pc[0]]; !found {
+ c.helperPCs[pc[0]] = struct{}{}
+ c.helperNames = nil // map will be recreated next time it is needed
+ }
+}
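+
+// A minimal sketch of a test helper that calls Helper so that failures are
+// reported at the caller's line (assertEqual is hypothetical):
+//
+//	func assertEqual(t *testing.T, got, want int) {
+//		t.Helper()
+//		if got != want {
+//			t.Errorf("got %d, want %d", got, want)
+//		}
+//	}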
+
+// Cleanup registers a function to be called when the test (or subtest) and all its
+// subtests complete. Cleanup functions will be called in last added,
+// first called order.
+func (c *common) Cleanup(f func()) {
+ c.checkFuzzFn("Cleanup")
+ var pc [maxStackLen]uintptr
+ // Skip two extra frames to account for this function and runtime.Callers itself.
+ n := runtime.Callers(2, pc[:])
+ cleanupPc := pc[:n]
+
+ fn := func() {
+ defer func() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.cleanupName = ""
+ c.cleanupPc = nil
+ }()
+
+ name := callerName(0)
+ c.mu.Lock()
+ c.cleanupName = name
+ c.cleanupPc = cleanupPc
+ c.mu.Unlock()
+
+ f()
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.cleanups = append(c.cleanups, fn)
+}
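+
+// A minimal sketch of Cleanup, illustrating the last-added, first-called
+// order (the log messages are arbitrary):
+//
+//	func TestWithCleanup(t *testing.T) {
+//		t.Cleanup(func() { t.Log("runs second") })
+//		t.Cleanup(func() { t.Log("runs first") })
+//		// test body
+//	}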
+
+// TempDir returns a temporary directory for the test to use.
+// The directory is automatically removed when the test and
+// all its subtests complete.
+// Each subsequent call to t.TempDir returns a unique directory;
+// if the directory creation fails, TempDir terminates the test by calling Fatal.
+func (c *common) TempDir() string {
+ c.checkFuzzFn("TempDir")
+ // Use a single parent directory for all the temporary directories
+ // created by a test, each numbered sequentially.
+ c.tempDirMu.Lock()
+ var nonExistent bool
+ if c.tempDir == "" { // Usually the case with js/wasm
+ nonExistent = true
+ } else {
+ _, err := os.Stat(c.tempDir)
+ nonExistent = os.IsNotExist(err)
+ if err != nil && !nonExistent {
+ c.Fatalf("TempDir: %v", err)
+ }
+ }
+
+ if nonExistent {
+ c.Helper()
+
+ // Drop unusual characters (such as path separators or
+ // characters interacting with globs) from the directory name to
+ // avoid surprising os.MkdirTemp behavior.
+ mapper := func(r rune) rune {
+ if r < utf8.RuneSelf {
+ const allowed = "!#$%&()+,-.=@^_{}~ "
+ if '0' <= r && r <= '9' ||
+ 'a' <= r && r <= 'z' ||
+ 'A' <= r && r <= 'Z' {
+ return r
+ }
+ if strings.ContainsRune(allowed, r) {
+ return r
+ }
+ } else if unicode.IsLetter(r) || unicode.IsNumber(r) {
+ return r
+ }
+ return -1
+ }
+ pattern := strings.Map(mapper, c.Name())
+ c.tempDir, c.tempDirErr = os.MkdirTemp("", pattern)
+ if c.tempDirErr == nil {
+ c.Cleanup(func() {
+ if err := removeAll(c.tempDir); err != nil {
+ c.Errorf("TempDir RemoveAll cleanup: %v", err)
+ }
+ })
+ }
+ }
+
+ if c.tempDirErr == nil {
+ c.tempDirSeq++
+ }
+ seq := c.tempDirSeq
+ c.tempDirMu.Unlock()
+
+ if c.tempDirErr != nil {
+ c.Fatalf("TempDir: %v", c.tempDirErr)
+ }
+
+ dir := fmt.Sprintf("%s%c%03d", c.tempDir, os.PathSeparator, seq)
+ if err := os.Mkdir(dir, 0777); err != nil {
+ c.Fatalf("TempDir: %v", err)
+ }
+ return dir
+}
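+
+// A minimal sketch of TempDir (the file name and contents are arbitrary):
+//
+//	func TestWriteFile(t *testing.T) {
+//		dir := t.TempDir() // removed automatically when the test completes
+//		path := filepath.Join(dir, "data.txt")
+//		if err := os.WriteFile(path, []byte("hello"), 0o600); err != nil {
+//			t.Fatal(err)
+//		}
+//	}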
+
+// removeAll is like os.RemoveAll, but retries Windows "Access is denied."
+// errors up to an arbitrary timeout.
+//
+// Those errors have been known to occur spuriously on at least the
+// windows-amd64-2012 builder (https://go.dev/issue/50051), and can only occur
+// legitimately if the test leaves behind a temp file that is still open or
+// that the test otherwise lacks permission to delete. In the case of legitimate
+// failures, a failing test may take a bit longer to fail, but once the test is
+// fixed the extra latency will go away.
+func removeAll(path string) error {
+ const arbitraryTimeout = 2 * time.Second
+ var (
+ start time.Time
+ nextSleep = 1 * time.Millisecond
+ )
+ for {
+ err := os.RemoveAll(path)
+ if !isWindowsRetryable(err) {
+ return err
+ }
+ if start.IsZero() {
+ start = time.Now()
+ } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout {
+ return err
+ }
+ time.Sleep(nextSleep)
+ nextSleep += time.Duration(rand.Int63n(int64(nextSleep)))
+ }
+}
+
+// Setenv calls os.Setenv(key, value) and uses Cleanup to
+// restore the environment variable to its original value
+// after the test.
+//
+// Because Setenv affects the whole process, it cannot be used
+// in parallel tests or tests with parallel ancestors.
+func (c *common) Setenv(key, value string) {
+ c.checkFuzzFn("Setenv")
+ prevValue, ok := os.LookupEnv(key)
+
+ if err := os.Setenv(key, value); err != nil {
+ c.Fatalf("cannot set environment variable: %v", err)
+ }
+
+ if ok {
+ c.Cleanup(func() {
+ os.Setenv(key, prevValue)
+ })
+ } else {
+ c.Cleanup(func() {
+ os.Unsetenv(key)
+ })
+ }
+}
+
+// panicHandling controls the panic handling used by runCleanup.
+type panicHandling int
+
+const (
+ normalPanic panicHandling = iota
+ recoverAndReturnPanic
+)
+
+// runCleanup is called at the end of the test.
+// If ph is recoverAndReturnPanic, it will catch panics, and return the
+// recovered value if any.
+func (c *common) runCleanup(ph panicHandling) (panicVal any) {
+ c.cleanupStarted.Store(true)
+ defer c.cleanupStarted.Store(false)
+
+ if ph == recoverAndReturnPanic {
+ defer func() {
+ panicVal = recover()
+ }()
+ }
+
+ // Make sure that if a cleanup function panics,
+ // we still run the remaining cleanup functions.
+ defer func() {
+ c.mu.Lock()
+ recur := len(c.cleanups) > 0
+ c.mu.Unlock()
+ if recur {
+ c.runCleanup(normalPanic)
+ }
+ }()
+
+ for {
+ var cleanup func()
+ c.mu.Lock()
+ if len(c.cleanups) > 0 {
+ last := len(c.cleanups) - 1
+ cleanup = c.cleanups[last]
+ c.cleanups = c.cleanups[:last]
+ }
+ c.mu.Unlock()
+ if cleanup == nil {
+ return nil
+ }
+ cleanup()
+ }
+}
+
+// resetRaces updates c.parent's count of data race errors (or the global count,
+// if c has no parent), and updates c.lastRaceErrors to match.
+//
+// Any races that occurred prior to this call to resetRaces will
+// not be attributed to c.
+func (c *common) resetRaces() {
+ if c.parent == nil {
+ c.lastRaceErrors.Store(int64(race.Errors()))
+ } else {
+ c.lastRaceErrors.Store(c.parent.checkRaces())
+ }
+}
+
+// checkRaces checks whether the global count of data race errors has increased
+// since c's count was last reset.
+//
+// If so, it marks c as having failed due to those races (logging an error for
+// the first such race), and updates the race counts for the parents of c so
+// that if they are currently suspended (such as in a call to T.Run) they will
+// not log separate errors for the race(s).
+//
+// Note that multiple tests may be marked as failed due to the same race if they
+// are executing in parallel.
+func (c *common) checkRaces() (raceErrors int64) {
+ raceErrors = int64(race.Errors())
+ for {
+ last := c.lastRaceErrors.Load()
+ if raceErrors <= last {
+ // All races have already been reported.
+ return raceErrors
+ }
+ if c.lastRaceErrors.CompareAndSwap(last, raceErrors) {
+ break
+ }
+ }
+
+ if c.raceErrorLogged.CompareAndSwap(false, true) {
+ // This is the first race we've encountered for this test.
+ // Mark the test as failed, and log the reason why only once.
+ // (Note that the race detector itself will still write a goroutine
+ // dump for any further races it detects.)
+ c.Errorf("race detected during execution of test")
+ }
+
+ // Update the parent(s) of this test so that they don't re-report the race.
+ parent := c.parent
+ for parent != nil {
+ for {
+ last := parent.lastRaceErrors.Load()
+ if raceErrors <= last {
+ // This race was already reported by another (likely parallel) subtest.
+ return raceErrors
+ }
+ if parent.lastRaceErrors.CompareAndSwap(last, raceErrors) {
+ break
+ }
+ }
+ parent = parent.parent
+ }
+
+ return raceErrors
+}
+
+// callerName gives the function name (qualified with a package path)
+// for the caller after skip frames (where 0 means the current function).
+func callerName(skip int) string {
+ var pc [1]uintptr
+ n := runtime.Callers(skip+2, pc[:]) // skip + runtime.Callers + callerName
+ if n == 0 {
+ panic("testing: zero callers found")
+ }
+ return pcToName(pc[0])
+}
+
+func pcToName(pc uintptr) string {
+ pcs := []uintptr{pc}
+ frames := runtime.CallersFrames(pcs)
+ frame, _ := frames.Next()
+ return frame.Function
+}
+
+// Parallel signals that this test is to be run in parallel with (and only with)
+// other parallel tests. When a test is run multiple times due to use of
+// -test.count or -test.cpu, multiple instances of a single test never run in
+// parallel with each other.
+func (t *T) Parallel() {
+ if t.isParallel {
+ panic("testing: t.Parallel called multiple times")
+ }
+ if t.isEnvSet {
+ panic("testing: t.Parallel called after t.Setenv; cannot set environment variables in parallel tests")
+ }
+ t.isParallel = true
+ if t.parent.barrier == nil {
+ // T.Parallel has no effect when fuzzing.
+ // Multiple processes may run in parallel, but only one input can run at a
+ // time per process so we can attribute crashes to specific inputs.
+ return
+ }
+
+ // We don't want to include the time we spend waiting for serial tests
+ // in the test duration. Record the elapsed time thus far and reset the
+ // timer afterwards.
+ t.duration += time.Since(t.start)
+
+ // Add to the list of tests to be released by the parent.
+ t.parent.sub = append(t.parent.sub, t)
+
+ // Report any races during execution of this test up to this point.
+ //
+ // We will assume that any races that occur between here and the point where
+ // we unblock are not caused by this subtest. That assumption usually holds,
+ // although it can be wrong if the test spawns a goroutine that races in the
+ // background while the rest of the test is blocked on the call to Parallel.
+ // If that happens, we will misattribute the background race to some other
+ // test, or to no test at all — but that false-negative is so unlikely that it
+ // is not worth adding race-report noise for the common case where the test is
+ // completely suspended during the call to Parallel.
+ t.checkRaces()
+
+ if t.chatty != nil {
+ t.chatty.Updatef(t.name, "=== PAUSE %s\n", t.name)
+ }
+ running.Delete(t.name)
+
+ t.signal <- true // Release calling test.
+ <-t.parent.barrier // Wait for the parent test to complete.
+ t.context.waitParallel()
+
+ if t.chatty != nil {
+ t.chatty.Updatef(t.name, "=== CONT %s\n", t.name)
+ }
+ running.Store(t.name, time.Now())
+ t.start = time.Now()
+
+ // Reset the local race counter to ignore any races that happened while this
+ // goroutine was blocked, such as in the parent test or in other parallel
+ // subtests.
+ //
+ // (Note that we don't call parent.checkRaces here:
+ // if other parallel subtests have already introduced races, we want to
+ // let them report those races instead of attributing them to the parent.)
+ t.lastRaceErrors.Store(int64(race.Errors()))
+}
+
+// Setenv calls os.Setenv(key, value) and uses Cleanup to
+// restore the environment variable to its original value
+// after the test.
+//
+// Because Setenv affects the whole process, it cannot be used
+// in parallel tests or tests with parallel ancestors.
+func (t *T) Setenv(key, value string) {
+ // Non-parallel subtests that have parallel ancestors may still
+ // run in parallel with other tests: they are only non-parallel
+ // with respect to the other subtests of the same parent.
+ // Since Setenv affects the whole process, we need to disallow it
+ // if the current test or any parent is parallel.
+ isParallel := false
+ for c := &t.common; c != nil; c = c.parent {
+ if c.isParallel {
+ isParallel = true
+ break
+ }
+ }
+ if isParallel {
+ panic("testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests")
+ }
+
+ t.isEnvSet = true
+
+ t.common.Setenv(key, value)
+}
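+
+// A minimal sketch of Setenv (the variable name and value are arbitrary):
+//
+//	func TestLookup(t *testing.T) {
+//		t.Setenv("SERVICE_URL", "http://127.0.0.1:8080") // restored after the test
+//		if got := os.Getenv("SERVICE_URL"); got != "http://127.0.0.1:8080" {
+//			t.Fatalf("SERVICE_URL = %q", got)
+//		}
+//	}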
+
+// InternalTest is an internal type but exported because it is cross-package;
+// it is part of the implementation of the "go test" command.
+type InternalTest struct {
+ Name string
+ F func(*T)
+}
+
+var errNilPanicOrGoexit = errors.New("test executed panic(nil) or runtime.Goexit")
+
+func tRunner(t *T, fn func(t *T)) {
+ t.runner = callerName(0)
+
+ // When this goroutine is done, either because fn(t)
+ // returned normally or because a test failure triggered
+ // a call to runtime.Goexit, record the duration and send
+ // a signal saying that the test is done.
+ defer func() {
+ t.checkRaces()
+
+ // TODO(#61034): This is the wrong place for this check.
+ if t.Failed() {
+ numFailed.Add(1)
+ }
+
+ // Check if the test panicked or Goexited inappropriately.
+ //
+ // If this happens in a normal test, print output but continue panicking.
+ // tRunner is called in its own goroutine, so this terminates the process.
+ //
+ // If this happens while fuzzing, recover from the panic and treat it like a
+ // normal failure. It's important that the process keeps running in order to
+ // find short inputs that cause panics.
+ err := recover()
+ signal := true
+
+ t.mu.RLock()
+ finished := t.finished
+ t.mu.RUnlock()
+ if !finished && err == nil {
+ err = errNilPanicOrGoexit
+ for p := t.parent; p != nil; p = p.parent {
+ p.mu.RLock()
+ finished = p.finished
+ p.mu.RUnlock()
+ if finished {
+ if !t.isParallel {
+ t.Errorf("%v: subtest may have called FailNow on a parent test", err)
+ err = nil
+ }
+ signal = false
+ break
+ }
+ }
+ }
+
+ if err != nil && t.context.isFuzzing {
+ prefix := "panic: "
+ if err == errNilPanicOrGoexit {
+ prefix = ""
+ }
+ t.Errorf("%s%s\n%s\n", prefix, err, string(debug.Stack()))
+ t.mu.Lock()
+ t.finished = true
+ t.mu.Unlock()
+ err = nil
+ }
+
+ // Use a deferred call to ensure that we report that the test is
+ // complete even if a cleanup function calls t.FailNow. See issue 41355.
+ didPanic := false
+ defer func() {
+ // Only report that the test is complete if it doesn't panic,
+ // as otherwise the test binary can exit before the panic is
+ // reported to the user. See issue 41479.
+ if didPanic {
+ return
+ }
+ if err != nil {
+ panic(err)
+ }
+ running.Delete(t.name)
+ t.signal <- signal
+ }()
+
+ doPanic := func(err any) {
+ t.Fail()
+ if r := t.runCleanup(recoverAndReturnPanic); r != nil {
+ t.Logf("cleanup panicked with %v", r)
+ }
+ // Flush the output log up to the root before dying.
+ for root := &t.common; root.parent != nil; root = root.parent {
+ root.mu.Lock()
+ root.duration += time.Since(root.start)
+ d := root.duration
+ root.mu.Unlock()
+ root.flushToParent(root.name, "--- FAIL: %s (%s)\n", root.name, fmtDuration(d))
+ if r := root.parent.runCleanup(recoverAndReturnPanic); r != nil {
+ fmt.Fprintf(root.parent.w, "cleanup panicked with %v", r)
+ }
+ }
+ didPanic = true
+ panic(err)
+ }
+ if err != nil {
+ doPanic(err)
+ }
+
+ t.duration += time.Since(t.start)
+
+ if len(t.sub) > 0 {
+ // Run parallel subtests.
+
+ // Decrease the running count for this test and mark it as no longer running.
+ t.context.release()
+ running.Delete(t.name)
+
+ // Release the parallel subtests.
+ close(t.barrier)
+ // Wait for subtests to complete.
+ for _, sub := range t.sub {
+ <-sub.signal
+ }
+
+ // Run any cleanup callbacks, marking the test as running
+ // in case the cleanup hangs.
+ cleanupStart := time.Now()
+ running.Store(t.name, cleanupStart)
+ err := t.runCleanup(recoverAndReturnPanic)
+ t.duration += time.Since(cleanupStart)
+ if err != nil {
+ doPanic(err)
+ }
+ t.checkRaces()
+ if !t.isParallel {
+ // Reacquire the count for sequential tests. See comment in Run.
+ t.context.waitParallel()
+ }
+ } else if t.isParallel {
+ // Only release the count for this test if it was run as a parallel
+ // test. See comment in Run method.
+ t.context.release()
+ }
+ t.report() // Report after all subtests have finished.
+
+ // Do not lock t.done to allow race detector to detect race in case
+ // the user does not appropriately synchronize a goroutine.
+ t.done = true
+ if t.parent != nil && !t.hasSub.Load() {
+ t.setRan()
+ }
+ }()
+ defer func() {
+ if len(t.sub) == 0 {
+ t.runCleanup(normalPanic)
+ }
+ }()
+
+ t.start = time.Now()
+ t.resetRaces()
+ fn(t)
+
+ // code beyond here will not be executed when FailNow is invoked
+ t.mu.Lock()
+ t.finished = true
+ t.mu.Unlock()
+}
+
+// Run runs f as a subtest of t called name. It runs f in a separate goroutine
+// and blocks until f returns or calls t.Parallel to become a parallel test.
+// Run reports whether f succeeded (or at least did not fail before calling t.Parallel).
+//
+// Run may be called simultaneously from multiple goroutines, but all such calls
+// must return before the outer test function for t returns.
+func (t *T) Run(name string, f func(t *T)) bool {
+ if t.cleanupStarted.Load() {
+ panic("testing: t.Run called during t.Cleanup")
+ }
+
+ t.hasSub.Store(true)
+ testName, ok, _ := t.context.match.fullName(&t.common, name)
+ if !ok || shouldFailFast() {
+ return true
+ }
+ // Record the stack trace at the point of this call so that if the subtest
+ // function - which runs in a separate stack - is marked as a helper, we can
+ // continue walking the stack into the parent test.
+ var pc [maxStackLen]uintptr
+ n := runtime.Callers(2, pc[:])
+ t = &T{
+ common: common{
+ barrier: make(chan bool),
+ signal: make(chan bool, 1),
+ name: testName,
+ parent: &t.common,
+ level: t.level + 1,
+ creator: pc[:n],
+ chatty: t.chatty,
+ },
+ context: t.context,
+ }
+ t.w = indenter{&t.common}
+
+ if t.chatty != nil {
+ t.chatty.Updatef(t.name, "=== RUN %s\n", t.name)
+ }
+ running.Store(t.name, time.Now())
+
+ // Instead of reducing the running count of this test before calling the
+ // tRunner and increasing it afterwards, we rely on tRunner keeping the
+ // count correct. This ensures that a sequence of sequential tests runs
+ // without being preempted, even when their parent is a parallel test. This
+ // may especially reduce surprises if *parallel == 1.
+ go tRunner(t, f)
+
+ // The parent goroutine will block until the subtest either finishes or calls
+ // Parallel, but in general we don't know whether the parent goroutine is the
+ // top-level test function or some other goroutine it has spawned.
+ // To avoid confusing false-negatives, we leave the parent in the running map
+ // even though in the typical case it is blocked.
+
+ if !<-t.signal {
+ // At this point, it is likely that FailNow was called on one of the
+ // parent tests by one of the subtests. Continue aborting up the chain.
+ runtime.Goexit()
+ }
+
+ if t.chatty != nil && t.chatty.json {
+ t.chatty.Updatef(t.parent.name, "=== NAME %s\n", t.parent.name)
+ }
+ return !t.failed
+}
+
+// Deadline reports the time at which the test binary will have
+// exceeded the timeout specified by the -timeout flag.
+//
+// The ok result is false if the -timeout flag indicates “no timeout” (0).
+func (t *T) Deadline() (deadline time.Time, ok bool) {
+ deadline = t.context.deadline
+ return deadline, !deadline.IsZero()
+}
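+
+// A minimal sketch of Deadline, skipping work when little time remains before
+// the test binary's deadline (the ten-second threshold is arbitrary):
+//
+//	func TestSlow(t *testing.T) {
+//		if d, ok := t.Deadline(); ok && time.Until(d) < 10*time.Second {
+//			t.Skip("not enough time before the test binary deadline")
+//		}
+//		// long-running test body
+//	}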
+
+// testContext holds all fields that are common to all tests. This includes
+// synchronization primitives to run at most *parallel tests.
+type testContext struct {
+ match *matcher
+ deadline time.Time
+
+ // isFuzzing is true in the context used when generating random inputs
+ // for fuzz targets. isFuzzing is false when running normal tests and
+ // when running fuzz tests as unit tests (without -fuzz or when -fuzz
+ // does not match).
+ isFuzzing bool
+
+ mu sync.Mutex
+
+ // Channel used to signal tests that are ready to be run in parallel.
+ startParallel chan bool
+
+ // running is the number of tests currently running in parallel.
+ // This does not include tests that are waiting for subtests to complete.
+ running int
+
+ // numWaiting is the number of tests waiting to be run in parallel.
+ numWaiting int
+
+ // maxParallel is a copy of the parallel flag.
+ maxParallel int
+}
+
+func newTestContext(maxParallel int, m *matcher) *testContext {
+ return &testContext{
+ match: m,
+ startParallel: make(chan bool),
+ maxParallel: maxParallel,
+ running: 1, // Set the count to 1 for the main (sequential) test.
+ }
+}
+
+func (c *testContext) waitParallel() {
+ c.mu.Lock()
+ if c.running < c.maxParallel {
+ c.running++
+ c.mu.Unlock()
+ return
+ }
+ c.numWaiting++
+ c.mu.Unlock()
+ <-c.startParallel
+}
+
+func (c *testContext) release() {
+ c.mu.Lock()
+ if c.numWaiting == 0 {
+ c.running--
+ c.mu.Unlock()
+ return
+ }
+ c.numWaiting--
+ c.mu.Unlock()
+ c.startParallel <- true // Pick a waiting test to be run.
+}
+
+// No one should be using func Main anymore.
+// See the doc comment on func Main and use MainStart instead.
+var errMain = errors.New("testing: unexpected use of func Main")
+
+type matchStringOnly func(pat, str string) (bool, error)
+
+func (f matchStringOnly) MatchString(pat, str string) (bool, error) { return f(pat, str) }
+func (f matchStringOnly) StartCPUProfile(w io.Writer) error { return errMain }
+func (f matchStringOnly) StopCPUProfile() {}
+func (f matchStringOnly) WriteProfileTo(string, io.Writer, int) error { return errMain }
+func (f matchStringOnly) ImportPath() string { return "" }
+func (f matchStringOnly) StartTestLog(io.Writer) {}
+func (f matchStringOnly) StopTestLog() error { return errMain }
+func (f matchStringOnly) SetPanicOnExit0(bool) {}
+func (f matchStringOnly) CoordinateFuzzing(time.Duration, int64, time.Duration, int64, int, []corpusEntry, []reflect.Type, string, string) error {
+ return errMain
+}
+func (f matchStringOnly) RunFuzzWorker(func(corpusEntry) error) error { return errMain }
+func (f matchStringOnly) ReadCorpus(string, []reflect.Type) ([]corpusEntry, error) {
+ return nil, errMain
+}
+func (f matchStringOnly) CheckCorpus([]any, []reflect.Type) error { return nil }
+func (f matchStringOnly) ResetCoverage() {}
+func (f matchStringOnly) SnapshotCoverage() {}
+
+// Main is an internal function, part of the implementation of the "go test" command.
+// It was exported because it is cross-package and predates "internal" packages.
+// It is no longer used by "go test" but is preserved, as much as possible, for
+// other systems that simulate "go test" using Main; however, Main sometimes cannot be
+// updated as new functionality is added to the testing package.
+// Systems simulating "go test" should be updated to use MainStart.
+func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {
+ os.Exit(MainStart(matchStringOnly(matchString), tests, benchmarks, nil, examples).Run())
+}
+
+// M is a type passed to a TestMain function to run the actual tests.
+type M struct {
+ deps testDeps
+ tests []InternalTest
+ benchmarks []InternalBenchmark
+ fuzzTargets []InternalFuzzTarget
+ examples []InternalExample
+
+ timer *time.Timer
+ afterOnce sync.Once
+
+ numRun int
+
+ // exitCode is the value to pass to os.Exit; the outer test func main
+ // harness calls os.Exit with this code. See #34129.
+ exitCode int
+}
+
+// testDeps is an internal interface of functionality that is
+// passed into this package by a test's generated main package.
+// The canonical implementation of this interface is
+// testing/internal/testdeps's TestDeps.
+type testDeps interface {
+ ImportPath() string
+ MatchString(pat, str string) (bool, error)
+ SetPanicOnExit0(bool)
+ StartCPUProfile(io.Writer) error
+ StopCPUProfile()
+ StartTestLog(io.Writer)
+ StopTestLog() error
+ WriteProfileTo(string, io.Writer, int) error
+ CoordinateFuzzing(time.Duration, int64, time.Duration, int64, int, []corpusEntry, []reflect.Type, string, string) error
+ RunFuzzWorker(func(corpusEntry) error) error
+ ReadCorpus(string, []reflect.Type) ([]corpusEntry, error)
+ CheckCorpus([]any, []reflect.Type) error
+ ResetCoverage()
+ SnapshotCoverage()
+}
+
+// MainStart is meant for use by tests generated by 'go test'.
+// It is not meant to be called directly and is not subject to the Go 1 compatibility document.
+// It may change signature from release to release.
+func MainStart(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, fuzzTargets []InternalFuzzTarget, examples []InternalExample) *M {
+ Init()
+ return &M{
+ deps: deps,
+ tests: tests,
+ benchmarks: benchmarks,
+ fuzzTargets: fuzzTargets,
+ examples: examples,
+ }
+}
+
+var testingTesting bool
+var realStderr *os.File
+
+// Run runs the tests. It returns an exit code to pass to os.Exit.
+func (m *M) Run() (code int) {
+ defer func() {
+ code = m.exitCode
+ }()
+
+ // Count the number of calls to m.Run.
+ // We only ever expected 1, but we didn't enforce that,
+ // and now there are tests in the wild that call m.Run multiple times.
+ // Sigh. go.dev/issue/23129.
+ m.numRun++
+
+ // TestMain may have already called flag.Parse.
+ if !flag.Parsed() {
+ flag.Parse()
+ }
+
+ if chatty.json {
+ // With -v=json, stdout and stderr are pointing to the same pipe,
+ // which is leading into test2json. In general, operating systems
+ // do a good job of ensuring that writes to the same pipe through
+ // different file descriptors are delivered whole, so that writing
+ // AAA to stdout and BBB to stderr simultaneously produces
+ // AAABBB or BBBAAA on the pipe, not something like AABBBA.
+ // However, the exception to this is when the pipe fills: in that
+ // case, Go's use of non-blocking I/O means that writing AAA
+ // or BBB might be split across multiple system calls, making it
+ // entirely possible to get output like AABBBA. The same problem
+ // happens inside the operating system kernel if we switch to
+ // blocking I/O on the pipe. This interleaved output can do things
+ // like print unrelated messages in the middle of a TestFoo line,
+ // which confuses test2json. Setting os.Stderr = os.Stdout will make
+ // them share a single pfd, which will hold a lock for each program
+ // write, preventing any interleaving.
+ //
+ // It might be nice to set Stderr = Stdout always, or perhaps if
+ // we can tell they are the same file, but for now -v=json is
+ // a very clear signal. Making the two files the same may cause
+ // surprises if programs close os.Stdout but expect to be able
+ // to continue to write to os.Stderr, but it's hard to see why a
+ // test would think it could take over global state that way.
+ //
+ // This fix only helps programs where the output is coming directly
+ // from Go code. It does not help programs in which a subprocess is
+ // writing to stderr or stdout at the same time that a Go test is writing output.
+ // It also does not help when the output is coming from the runtime,
+ // such as when using the print/println functions, since that code writes
+ // directly to fd 2 without any locking.
+ // We keep realStderr around to prevent fd 2 from being closed.
+ //
+ // See go.dev/issue/33419.
+ realStderr = os.Stderr
+ os.Stderr = os.Stdout
+ }
+
+ if *parallel < 1 {
+ fmt.Fprintln(os.Stderr, "testing: -parallel can only be given a positive integer")
+ flag.Usage()
+ m.exitCode = 2
+ return
+ }
+ if *matchFuzz != "" && *fuzzCacheDir == "" {
+ fmt.Fprintln(os.Stderr, "testing: -test.fuzzcachedir must be set if -test.fuzz is set")
+ flag.Usage()
+ m.exitCode = 2
+ return
+ }
+
+ if *matchList != "" {
+ listTests(m.deps.MatchString, m.tests, m.benchmarks, m.fuzzTargets, m.examples)
+ m.exitCode = 0
+ return
+ }
+
+ if *shuffle != "off" {
+ var n int64
+ var err error
+ if *shuffle == "on" {
+ n = time.Now().UnixNano()
+ } else {
+ n, err = strconv.ParseInt(*shuffle, 10, 64)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, `testing: -shuffle should be "off", "on", or a valid integer:`, err)
+ m.exitCode = 2
+ return
+ }
+ }
+ fmt.Println("-test.shuffle", n)
+ rng := rand.New(rand.NewSource(n))
+ rng.Shuffle(len(m.tests), func(i, j int) { m.tests[i], m.tests[j] = m.tests[j], m.tests[i] })
+ rng.Shuffle(len(m.benchmarks), func(i, j int) { m.benchmarks[i], m.benchmarks[j] = m.benchmarks[j], m.benchmarks[i] })
+ }
+
+ parseCpuList()
+
+ m.before()
+ defer m.after()
+
+ // Run tests, examples, and benchmarks unless this is a fuzz worker process.
+ // Workers start after this is done by their parent process, and they should
+ // not repeat this work.
+ if !*isFuzzWorker {
+ deadline := m.startAlarm()
+ haveExamples = len(m.examples) > 0
+ testRan, testOk := runTests(m.deps.MatchString, m.tests, deadline)
+ fuzzTargetsRan, fuzzTargetsOk := runFuzzTests(m.deps, m.fuzzTargets, deadline)
+ exampleRan, exampleOk := runExamples(m.deps.MatchString, m.examples)
+ m.stopAlarm()
+ if !testRan && !exampleRan && !fuzzTargetsRan && *matchBenchmarks == "" && *matchFuzz == "" {
+ fmt.Fprintln(os.Stderr, "testing: warning: no tests to run")
+ if testingTesting && *match != "^$" {
+ // If this happens during testing of package testing it could be that
+ // package testing's own logic for when to run a test is broken,
+ // in which case every test will run nothing and succeed,
+ // with no obvious way to detect this problem (since no tests are running).
+ // So make 'no tests to run' a hard failure when testing package testing itself.
+ fmt.Print(chatty.prefix(), "FAIL: package testing must run tests\n")
+ testOk = false
+ }
+ }
+ anyFailed := !testOk || !exampleOk || !fuzzTargetsOk || !runBenchmarks(m.deps.ImportPath(), m.deps.MatchString, m.benchmarks)
+ if !anyFailed && race.Errors() > 0 {
+ fmt.Print(chatty.prefix(), "testing: race detected outside of test execution\n")
+ anyFailed = true
+ }
+ if anyFailed {
+ fmt.Print(chatty.prefix(), "FAIL\n")
+ m.exitCode = 1
+ return
+ }
+ }
+
+ fuzzingOk := runFuzzing(m.deps, m.fuzzTargets)
+ if !fuzzingOk {
+ fmt.Print(chatty.prefix(), "FAIL\n")
+ if *isFuzzWorker {
+ m.exitCode = fuzzWorkerExitCode
+ } else {
+ m.exitCode = 1
+ }
+ return
+ }
+
+ m.exitCode = 0
+ if !*isFuzzWorker {
+ fmt.Print(chatty.prefix(), "PASS\n")
+ }
+ return
+}
+
+func (t *T) report() {
+ if t.parent == nil {
+ return
+ }
+ dstr := fmtDuration(t.duration)
+ format := "--- %s: %s (%s)\n"
+ if t.Failed() {
+ t.flushToParent(t.name, format, "FAIL", t.name, dstr)
+ } else if t.chatty != nil {
+ if t.Skipped() {
+ t.flushToParent(t.name, format, "SKIP", t.name, dstr)
+ } else {
+ t.flushToParent(t.name, format, "PASS", t.name, dstr)
+ }
+ }
+}
+
+func listTests(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, fuzzTargets []InternalFuzzTarget, examples []InternalExample) {
+ if _, err := matchString(*matchList, "non-empty"); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: invalid regexp in -test.list (%q): %s\n", *matchList, err)
+ os.Exit(1)
+ }
+
+ for _, test := range tests {
+ if ok, _ := matchString(*matchList, test.Name); ok {
+ fmt.Println(test.Name)
+ }
+ }
+ for _, bench := range benchmarks {
+ if ok, _ := matchString(*matchList, bench.Name); ok {
+ fmt.Println(bench.Name)
+ }
+ }
+ for _, fuzzTarget := range fuzzTargets {
+ if ok, _ := matchString(*matchList, fuzzTarget.Name); ok {
+ fmt.Println(fuzzTarget.Name)
+ }
+ }
+ for _, example := range examples {
+ if ok, _ := matchString(*matchList, example.Name); ok {
+ fmt.Println(example.Name)
+ }
+ }
+}
+
+// RunTests is an internal function but exported because it is cross-package;
+// it is part of the implementation of the "go test" command.
+func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {
+ var deadline time.Time
+ if *timeout > 0 {
+ deadline = time.Now().Add(*timeout)
+ }
+ ran, ok := runTests(matchString, tests, deadline)
+ if !ran && !haveExamples {
+ fmt.Fprintln(os.Stderr, "testing: warning: no tests to run")
+ }
+ return ok
+}
+
+func runTests(matchString func(pat, str string) (bool, error), tests []InternalTest, deadline time.Time) (ran, ok bool) {
+ ok = true
+ for _, procs := range cpuList {
+ runtime.GOMAXPROCS(procs)
+ for i := uint(0); i < *count; i++ {
+ if shouldFailFast() {
+ break
+ }
+ if i > 0 && !ran {
+ // There were no tests to run on the first
+ // iteration. This won't change, so no reason
+ // to keep trying.
+ break
+ }
+ ctx := newTestContext(*parallel, newMatcher(matchString, *match, "-test.run", *skip))
+ ctx.deadline = deadline
+ t := &T{
+ common: common{
+ signal: make(chan bool, 1),
+ barrier: make(chan bool),
+ w: os.Stdout,
+ },
+ context: ctx,
+ }
+ if Verbose() {
+ t.chatty = newChattyPrinter(t.w)
+ }
+ tRunner(t, func(t *T) {
+ for _, test := range tests {
+ t.Run(test.Name, test.F)
+ }
+ })
+ select {
+ case <-t.signal:
+ default:
+ panic("internal error: tRunner exited without sending on t.signal")
+ }
+ ok = ok && !t.Failed()
+ ran = ran || t.ran
+ }
+ }
+ return ran, ok
+}
+
+// before runs before all testing.
+func (m *M) before() {
+ if *memProfileRate > 0 {
+ runtime.MemProfileRate = *memProfileRate
+ }
+ if *cpuProfile != "" {
+ f, err := os.Create(toOutputDir(*cpuProfile))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s\n", err)
+ return
+ }
+ if err := m.deps.StartCPUProfile(f); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err)
+ f.Close()
+ return
+ }
+ // Could save f so after can call f.Close; not worth the effort.
+ }
+ if *traceFile != "" {
+ f, err := os.Create(toOutputDir(*traceFile))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s\n", err)
+ return
+ }
+ if err := trace.Start(f); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't start tracing: %s\n", err)
+ f.Close()
+ return
+ }
+ // Could save f so after can call f.Close; not worth the effort.
+ }
+ if *blockProfile != "" && *blockProfileRate >= 0 {
+ runtime.SetBlockProfileRate(*blockProfileRate)
+ }
+ if *mutexProfile != "" && *mutexProfileFraction >= 0 {
+ runtime.SetMutexProfileFraction(*mutexProfileFraction)
+ }
+ if *coverProfile != "" && CoverMode() == "" {
+ fmt.Fprintf(os.Stderr, "testing: cannot use -test.coverprofile because test binary was not built with coverage enabled\n")
+ os.Exit(2)
+ }
+ if *gocoverdir != "" && CoverMode() == "" {
+ fmt.Fprintf(os.Stderr, "testing: cannot use -test.gocoverdir because test binary was not built with coverage enabled\n")
+ os.Exit(2)
+ }
+ if *testlog != "" {
+ // Note: Not using toOutputDir.
+ // This file is for use by cmd/go, not users.
+ var f *os.File
+ var err error
+ if m.numRun == 1 {
+ f, err = os.Create(*testlog)
+ } else {
+ f, err = os.OpenFile(*testlog, os.O_WRONLY, 0)
+ if err == nil {
+ f.Seek(0, io.SeekEnd)
+ }
+ }
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s\n", err)
+ os.Exit(2)
+ }
+ m.deps.StartTestLog(f)
+ testlogFile = f
+ }
+ if *panicOnExit0 {
+ m.deps.SetPanicOnExit0(true)
+ }
+}
+
+// after runs after all testing.
+func (m *M) after() {
+ m.afterOnce.Do(func() {
+ m.writeProfiles()
+ })
+
+ // Restore PanicOnExit0 after every run, because we set it to true before
+ // every run. Otherwise, if m.Run is called multiple times the behavior of
+ // os.Exit(0) will not be restored after the second run.
+ if *panicOnExit0 {
+ m.deps.SetPanicOnExit0(false)
+ }
+}
+
+func (m *M) writeProfiles() {
+ if *testlog != "" {
+ if err := m.deps.StopTestLog(); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *testlog, err)
+ os.Exit(2)
+ }
+ if err := testlogFile.Close(); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *testlog, err)
+ os.Exit(2)
+ }
+ }
+ if *cpuProfile != "" {
+ m.deps.StopCPUProfile() // flushes profile to disk
+ }
+ if *traceFile != "" {
+ trace.Stop() // flushes trace to disk
+ }
+ if *memProfile != "" {
+ f, err := os.Create(toOutputDir(*memProfile))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s\n", err)
+ os.Exit(2)
+ }
+ runtime.GC() // materialize all statistics
+ if err = m.deps.WriteProfileTo("allocs", f, 0); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *memProfile, err)
+ os.Exit(2)
+ }
+ f.Close()
+ }
+ if *blockProfile != "" && *blockProfileRate >= 0 {
+ f, err := os.Create(toOutputDir(*blockProfile))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s\n", err)
+ os.Exit(2)
+ }
+ if err = m.deps.WriteProfileTo("block", f, 0); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, err)
+ os.Exit(2)
+ }
+ f.Close()
+ }
+ if *mutexProfile != "" && *mutexProfileFraction >= 0 {
+ f, err := os.Create(toOutputDir(*mutexProfile))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s\n", err)
+ os.Exit(2)
+ }
+ if err = m.deps.WriteProfileTo("mutex", f, 0); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *mutexProfile, err)
+ os.Exit(2)
+ }
+ f.Close()
+ }
+ if CoverMode() != "" {
+ coverReport()
+ }
+}
+
+// toOutputDir returns the file name relocated, if required, to outputDir.
+// Simple implementation to avoid pulling in path/filepath.
+func toOutputDir(path string) string {
+ if *outputDir == "" || path == "" {
+ return path
+ }
+	// On Windows, it's clumsy, but we can almost always be correct
+ // by just looking for a drive letter and a colon.
+ // Absolute paths always have a drive letter (ignoring UNC).
+ // Problem: if path == "C:A" and outputdir == "C:\Go" it's unclear
+ // what to do, but even then path/filepath doesn't help.
+ // TODO: Worth doing better? Probably not, because we're here only
+ // under the management of go test.
+ if runtime.GOOS == "windows" && len(path) >= 2 {
+ letter, colon := path[0], path[1]
+ if ('a' <= letter && letter <= 'z' || 'A' <= letter && letter <= 'Z') && colon == ':' {
+ // If path starts with a drive letter we're stuck with it regardless.
+ return path
+ }
+ }
+ if os.IsPathSeparator(path[0]) {
+ return path
+ }
+ return fmt.Sprintf("%s%c%s", *outputDir, os.PathSeparator, path)
+}
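
For illustration only: a minimal sketch of the same relocation rule expressed with path/filepath, which the real function deliberately avoids importing. The helper name relocate and the sample paths are assumptions, not part of this patch.

package main

import (
	"fmt"
	"path/filepath"
)

// relocate mirrors the intent of toOutputDir: relative file names are joined
// onto the output directory, while absolute paths (and empty inputs) pass
// through unchanged. Unlike the real code it leans on filepath.IsAbs.
func relocate(outputDir, path string) string {
	if outputDir == "" || path == "" || filepath.IsAbs(path) {
		return path
	}
	return filepath.Join(outputDir, path)
}

func main() {
	fmt.Println(relocate("/tmp/testout", "cpu.prof"))      // /tmp/testout/cpu.prof
	fmt.Println(relocate("/tmp/testout", "/var/cpu.prof")) // /var/cpu.prof
	fmt.Println(relocate("", "cpu.prof"))                  // cpu.prof (no -test.outputdir)
}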
+
+// startAlarm starts an alarm if requested.
+func (m *M) startAlarm() time.Time {
+ if *timeout <= 0 {
+ return time.Time{}
+ }
+
+ deadline := time.Now().Add(*timeout)
+ m.timer = time.AfterFunc(*timeout, func() {
+ m.after()
+ debug.SetTraceback("all")
+ extra := ""
+
+ if list := runningList(); len(list) > 0 {
+ var b strings.Builder
+ b.WriteString("\nrunning tests:")
+ for _, name := range list {
+ b.WriteString("\n\t")
+ b.WriteString(name)
+ }
+ extra = b.String()
+ }
+ panic(fmt.Sprintf("test timed out after %v%s", *timeout, extra))
+ })
+ return deadline
+}
+
+// runningList returns the list of running tests.
+func runningList() []string {
+ var list []string
+ running.Range(func(k, v any) bool {
+ list = append(list, fmt.Sprintf("%s (%v)", k.(string), time.Since(v.(time.Time)).Round(time.Second)))
+ return true
+ })
+ sort.Strings(list)
+ return list
+}
+
+// stopAlarm turns off the alarm.
+func (m *M) stopAlarm() {
+ if *timeout > 0 {
+ m.timer.Stop()
+ }
+}
+
+func parseCpuList() {
+ for _, val := range strings.Split(*cpuListStr, ",") {
+ val = strings.TrimSpace(val)
+ if val == "" {
+ continue
+ }
+ cpu, err := strconv.Atoi(val)
+ if err != nil || cpu <= 0 {
+ fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu\n", val)
+ os.Exit(1)
+ }
+ cpuList = append(cpuList, cpu)
+ }
+ if cpuList == nil {
+ cpuList = append(cpuList, runtime.GOMAXPROCS(-1))
+ }
+}
+
+func shouldFailFast() bool {
+ return *failFast && numFailed.Load() > 0
+}
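
The -test.shuffle handling above echoes the chosen seed ("-test.shuffle <n>") and feeds it to math/rand, so a shuffled run can be reproduced by passing the printed seed back in. Below is a small standalone sketch of that seeding behaviour; the test names and the seed value are illustrative only, not taken from this patch.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	tests := []string{"TestAlpha", "TestBeta", "TestGamma", "TestDelta"}

	// Rerunning with the same seed yields the same order, which is what makes
	// "-test.shuffle <seed>" reproducible across runs.
	const seed = 1700000000000000000
	rng := rand.New(rand.NewSource(seed))
	rng.Shuffle(len(tests), func(i, j int) { tests[i], tests[j] = tests[j], tests[i] })

	fmt.Println(tests)
}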
diff --git a/src/testing/testing_other.go b/src/testing/testing_other.go
new file mode 100644
index 0000000..99a6276
--- /dev/null
+++ b/src/testing/testing_other.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package testing
+
+// isWindowsRetryable reports whether err is a Windows error code
+// that may be fixed by retrying a failed filesystem operation.
+func isWindowsRetryable(err error) bool {
+ return false
+}
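
On non-Windows builds this stub always reports false, so any retry loop built on it degenerates to a single attempt there. A hedged sketch of how such a predicate might be consumed follows; the helper name retryTransient, the placeholder predicate, and the retry policy are assumptions for illustration, not code from this patch.

package main

import (
	"errors"
	"fmt"
	"os"
	"time"
)

// isRetryable stands in for a platform predicate like isWindowsRetryable;
// the os.ErrPermission check is a placeholder, not the real condition.
func isRetryable(err error) bool {
	return errors.Is(err, os.ErrPermission)
}

// retryTransient retries op a few times while the error looks transient.
func retryTransient(op func() error) error {
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		if err = op(); err == nil || !isRetryable(err) {
			return err
		}
		time.Sleep(10 * time.Millisecond)
	}
	return err
}

func main() {
	err := retryTransient(func() error { return os.Remove("no-such-file.txt") })
	fmt.Println(err)
}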
diff --git a/src/testing/testing_test.go b/src/testing/testing_test.go
new file mode 100644
index 0000000..d3822df
--- /dev/null
+++ b/src/testing/testing_test.go
@@ -0,0 +1,814 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing_test
+
+import (
+ "bytes"
+ "fmt"
+ "internal/race"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "slices"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+// This is exactly what a test would do without a TestMain.
+// It's here only so that there is at least one package in the
+// standard library with a TestMain, so that code is executed.
+
+func TestMain(m *testing.M) {
+ if os.Getenv("GO_WANT_RACE_BEFORE_TESTS") == "1" {
+ doRace()
+ }
+
+ m.Run()
+
+ // Note: m.Run currently prints the final "PASS" line, so if any race is
+ // reported here (after m.Run but before the process exits), it will print
+ // "PASS", then print the stack traces for the race, then exit with nonzero
+ // status.
+ //
+ // This is a somewhat fundamental race: because the race detector hooks into
+ // the runtime at a very low level, no matter where we put the printing it
+ // would be possible to report a race that occurs afterward. However, we could
+ // theoretically move the printing after TestMain, which would at least do a
+ // better job of diagnosing races in cleanup functions within TestMain itself.
+}
+
+func TestTempDirInCleanup(t *testing.T) {
+ var dir string
+
+ t.Run("test", func(t *testing.T) {
+ t.Cleanup(func() {
+ dir = t.TempDir()
+ })
+ _ = t.TempDir()
+ })
+
+ fi, err := os.Stat(dir)
+ if fi != nil {
+ t.Fatalf("Directory %q from user Cleanup still exists", dir)
+ }
+ if !os.IsNotExist(err) {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+}
+
+func TestTempDirInBenchmark(t *testing.T) {
+ testing.Benchmark(func(b *testing.B) {
+ if !b.Run("test", func(b *testing.B) {
+ // Add a loop so that the test won't fail. See issue 38677.
+ for i := 0; i < b.N; i++ {
+ _ = b.TempDir()
+ }
+ }) {
+ t.Fatal("Sub test failure in a benchmark")
+ }
+ })
+}
+
+func TestTempDir(t *testing.T) {
+ testTempDir(t)
+ t.Run("InSubtest", testTempDir)
+ t.Run("test/subtest", testTempDir)
+ t.Run("test\\subtest", testTempDir)
+ t.Run("test:subtest", testTempDir)
+ t.Run("test/..", testTempDir)
+ t.Run("../test", testTempDir)
+ t.Run("test[]", testTempDir)
+ t.Run("test*", testTempDir)
+ t.Run("äöüéè", testTempDir)
+}
+
+func testTempDir(t *testing.T) {
+ dirCh := make(chan string, 1)
+ t.Cleanup(func() {
+ // Verify directory has been removed.
+ select {
+ case dir := <-dirCh:
+ fi, err := os.Stat(dir)
+ if os.IsNotExist(err) {
+ // All good
+ return
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Errorf("directory %q still exists: %v, isDir=%v", dir, fi, fi.IsDir())
+ default:
+ if !t.Failed() {
+ t.Fatal("never received dir channel")
+ }
+ }
+ })
+
+ dir := t.TempDir()
+ if dir == "" {
+ t.Fatal("expected dir")
+ }
+ dir2 := t.TempDir()
+ if dir == dir2 {
+ t.Fatal("subsequent calls to TempDir returned the same directory")
+ }
+ if filepath.Dir(dir) != filepath.Dir(dir2) {
+ t.Fatalf("calls to TempDir do not share a parent; got %q, %q", dir, dir2)
+ }
+ dirCh <- dir
+ fi, err := os.Stat(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !fi.IsDir() {
+ t.Errorf("dir %q is not a dir", dir)
+ }
+ files, err := os.ReadDir(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(files) > 0 {
+ t.Errorf("unexpected %d files in TempDir: %v", len(files), files)
+ }
+
+ glob := filepath.Join(dir, "*.txt")
+ if _, err := filepath.Glob(glob); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestSetenv(t *testing.T) {
+ tests := []struct {
+ name string
+ key string
+ initialValueExists bool
+ initialValue string
+ newValue string
+ }{
+ {
+ name: "initial value exists",
+ key: "GO_TEST_KEY_1",
+ initialValueExists: true,
+ initialValue: "111",
+ newValue: "222",
+ },
+ {
+ name: "initial value exists but empty",
+ key: "GO_TEST_KEY_2",
+ initialValueExists: true,
+ initialValue: "",
+ newValue: "222",
+ },
+ {
+			name:               "initial value does not exist",
+ key: "GO_TEST_KEY_3",
+ initialValueExists: false,
+ initialValue: "",
+ newValue: "222",
+ },
+ }
+
+ for _, test := range tests {
+ if test.initialValueExists {
+ if err := os.Setenv(test.key, test.initialValue); err != nil {
+ t.Fatalf("unable to set env: got %v", err)
+ }
+ } else {
+ os.Unsetenv(test.key)
+ }
+
+ t.Run(test.name, func(t *testing.T) {
+ t.Setenv(test.key, test.newValue)
+ if os.Getenv(test.key) != test.newValue {
+ t.Fatalf("unexpected value after t.Setenv: got %s, want %s", os.Getenv(test.key), test.newValue)
+ }
+ })
+
+ got, exists := os.LookupEnv(test.key)
+ if got != test.initialValue {
+ t.Fatalf("unexpected value after t.Setenv cleanup: got %s, want %s", got, test.initialValue)
+ }
+ if exists != test.initialValueExists {
+ t.Fatalf("unexpected value after t.Setenv cleanup: got %t, want %t", exists, test.initialValueExists)
+ }
+ }
+}
+
+func TestSetenvWithParallelAfterSetenv(t *testing.T) {
+ defer func() {
+ want := "testing: t.Parallel called after t.Setenv; cannot set environment variables in parallel tests"
+ if got := recover(); got != want {
+ t.Fatalf("expected panic; got %#v want %q", got, want)
+ }
+ }()
+
+ t.Setenv("GO_TEST_KEY_1", "value")
+
+ t.Parallel()
+}
+
+func TestSetenvWithParallelBeforeSetenv(t *testing.T) {
+ defer func() {
+ want := "testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests"
+ if got := recover(); got != want {
+ t.Fatalf("expected panic; got %#v want %q", got, want)
+ }
+ }()
+
+ t.Parallel()
+
+ t.Setenv("GO_TEST_KEY_1", "value")
+}
+
+func TestSetenvWithParallelParentBeforeSetenv(t *testing.T) {
+ t.Parallel()
+
+ t.Run("child", func(t *testing.T) {
+ defer func() {
+ want := "testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests"
+ if got := recover(); got != want {
+ t.Fatalf("expected panic; got %#v want %q", got, want)
+ }
+ }()
+
+ t.Setenv("GO_TEST_KEY_1", "value")
+ })
+}
+
+func TestSetenvWithParallelGrandParentBeforeSetenv(t *testing.T) {
+ t.Parallel()
+
+ t.Run("child", func(t *testing.T) {
+ t.Run("grand-child", func(t *testing.T) {
+ defer func() {
+ want := "testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests"
+ if got := recover(); got != want {
+ t.Fatalf("expected panic; got %#v want %q", got, want)
+ }
+ }()
+
+ t.Setenv("GO_TEST_KEY_1", "value")
+ })
+ })
+}
+
+// testingTrueInInit is part of TestTesting.
+var testingTrueInInit = false
+
+// testingTrueInPackageVarInit is part of TestTesting.
+var testingTrueInPackageVarInit = testing.Testing()
+
+// init is part of TestTesting.
+func init() {
+ if testing.Testing() {
+ testingTrueInInit = true
+ }
+}
+
+var testingProg = `
+package main
+
+import (
+ "fmt"
+ "testing"
+)
+
+func main() {
+ fmt.Println(testing.Testing())
+}
+`
+
+func TestTesting(t *testing.T) {
+ if !testing.Testing() {
+ t.Errorf("testing.Testing() == %t, want %t", testing.Testing(), true)
+ }
+ if !testingTrueInInit {
+ t.Errorf("testing.Testing() called by init function == %t, want %t", testingTrueInInit, true)
+ }
+ if !testingTrueInPackageVarInit {
+ t.Errorf("testing.Testing() variable initialized as %t, want %t", testingTrueInPackageVarInit, true)
+ }
+
+ if testing.Short() {
+ t.Skip("skipping building a binary in short mode")
+ }
+ testenv.MustHaveGoRun(t)
+
+ fn := filepath.Join(t.TempDir(), "x.go")
+ if err := os.WriteFile(fn, []byte(testingProg), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "run", fn)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("%v failed: %v\n%s", cmd, err, out)
+ }
+
+ s := string(bytes.TrimSpace(out))
+ if s != "false" {
+		t.Errorf("in non-test program, testing.Testing() returned %q, want %q", s, "false")
+ }
+}
+
+// runTest runs a helper test with -test.v, ignoring its exit status.
+// runTest both logs and returns the test output.
+func runTest(t *testing.T, test string) []byte {
+ t.Helper()
+
+ testenv.MustHaveExec(t)
+
+ exe, err := os.Executable()
+ if err != nil {
+ t.Skipf("can't find test executable: %v", err)
+ }
+
+ cmd := testenv.Command(t, exe, "-test.run=^"+test+"$", "-test.bench="+test, "-test.v", "-test.parallel=2", "-test.benchtime=2x")
+ cmd = testenv.CleanCmdEnv(cmd)
+ cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
+ out, err := cmd.CombinedOutput()
+ t.Logf("%v: %v\n%s", cmd, err, out)
+
+ return out
+}
+
+// doRace provokes a data race that generates a race detector report if run
+// under the race detector and is otherwise benign.
+func doRace() {
+ var x int
+ c1 := make(chan bool)
+ go func() {
+ x = 1 // racy write
+ c1 <- true
+ }()
+ _ = x // racy read
+ <-c1
+}
+
+func TestRaceReports(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ // Generate a race detector report in a sub test.
+ t.Run("Sub", func(t *testing.T) {
+ doRace()
+ })
+ return
+ }
+
+ out := runTest(t, "TestRaceReports")
+
+ // We should see at most one race detector report.
+ c := bytes.Count(out, []byte("race detected"))
+ want := 0
+ if race.Enabled {
+ want = 1
+ }
+ if c != want {
+ t.Errorf("got %d race reports, want %d", c, want)
+ }
+}
+
+// Issue #60083. This used to fail on the race builder.
+func TestRaceName(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ doRace()
+ return
+ }
+
+ out := runTest(t, "TestRaceName")
+
+ if regexp.MustCompile(`=== NAME\s*$`).Match(out) {
+ t.Errorf("incorrectly reported test with no name")
+ }
+}
+
+func TestRaceSubReports(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ t.Parallel()
+ c1 := make(chan bool, 1)
+ t.Run("sub", func(t *testing.T) {
+ t.Run("subsub1", func(t *testing.T) {
+ t.Parallel()
+ doRace()
+ c1 <- true
+ })
+ t.Run("subsub2", func(t *testing.T) {
+ t.Parallel()
+ doRace()
+ <-c1
+ })
+ })
+ doRace()
+ return
+ }
+
+ out := runTest(t, "TestRaceSubReports")
+
+ // There should be three race reports: one for each subtest, and one for the
+ // race after the subtests complete. Note that because the subtests run in
+	// parallel, both race stacks may end up interleaved with one or the
+	// other test's logs.
+ cReport := bytes.Count(out, []byte("race detected during execution of test"))
+ wantReport := 0
+ if race.Enabled {
+ wantReport = 3
+ }
+ if cReport != wantReport {
+ t.Errorf("got %d race reports, want %d", cReport, wantReport)
+ }
+
+ // Regardless of when the stacks are printed, we expect each subtest to be
+ // marked as failed, and that failure should propagate up to the parents.
+ cFail := bytes.Count(out, []byte("--- FAIL:"))
+ wantFail := 0
+ if race.Enabled {
+ wantFail = 4
+ }
+ if cFail != wantFail {
+		t.Errorf(`got %d "--- FAIL:" lines, want %d`, cFail, wantFail)
+ }
+}
+
+func TestRaceInCleanup(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ t.Cleanup(doRace)
+ t.Parallel()
+ t.Run("sub", func(t *testing.T) {
+ t.Parallel()
+ // No race should be reported for sub.
+ })
+ return
+ }
+
+ out := runTest(t, "TestRaceInCleanup")
+
+ // There should be one race report, for the parent test only.
+ cReport := bytes.Count(out, []byte("race detected during execution of test"))
+ wantReport := 0
+ if race.Enabled {
+ wantReport = 1
+ }
+ if cReport != wantReport {
+ t.Errorf("got %d race reports, want %d", cReport, wantReport)
+ }
+
+ // Only the parent test should be marked as failed.
+ // (The subtest does not race, and should pass.)
+ cFail := bytes.Count(out, []byte("--- FAIL:"))
+ wantFail := 0
+ if race.Enabled {
+ wantFail = 1
+ }
+ if cFail != wantFail {
+		t.Errorf(`got %d "--- FAIL:" lines, want %d`, cFail, wantFail)
+ }
+}
+
+func TestDeepSubtestRace(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ t.Run("sub", func(t *testing.T) {
+ t.Run("subsub", func(t *testing.T) {
+ t.Run("subsubsub", func(t *testing.T) {
+ doRace()
+ })
+ })
+ doRace()
+ })
+ return
+ }
+
+ out := runTest(t, "TestDeepSubtestRace")
+
+ c := bytes.Count(out, []byte("race detected during execution of test"))
+ want := 0
+ // There should be two race reports.
+ if race.Enabled {
+ want = 2
+ }
+ if c != want {
+ t.Errorf("got %d race reports, want %d", c, want)
+ }
+}
+
+func TestRaceDuringParallelFailsAllSubtests(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ var ready sync.WaitGroup
+ ready.Add(2)
+ done := make(chan struct{})
+ go func() {
+ ready.Wait()
+ doRace() // This race happens while both subtests are running.
+ close(done)
+ }()
+
+ t.Run("sub", func(t *testing.T) {
+ t.Run("subsub1", func(t *testing.T) {
+ t.Parallel()
+ ready.Done()
+ <-done
+ })
+ t.Run("subsub2", func(t *testing.T) {
+ t.Parallel()
+ ready.Done()
+ <-done
+ })
+ })
+
+ return
+ }
+
+ out := runTest(t, "TestRaceDuringParallelFailsAllSubtests")
+
+ c := bytes.Count(out, []byte("race detected during execution of test"))
+ want := 0
+ // Each subtest should report the race independently.
+ if race.Enabled {
+ want = 2
+ }
+ if c != want {
+ t.Errorf("got %d race reports, want %d", c, want)
+ }
+}
+
+func TestRaceBeforeParallel(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ t.Run("sub", func(t *testing.T) {
+ doRace()
+ t.Parallel()
+ })
+ return
+ }
+
+ out := runTest(t, "TestRaceBeforeParallel")
+
+ c := bytes.Count(out, []byte("race detected during execution of test"))
+ want := 0
+ // We should see one race detector report.
+ if race.Enabled {
+ want = 1
+ }
+ if c != want {
+ t.Errorf("got %d race reports, want %d", c, want)
+ }
+}
+
+func TestRaceBeforeTests(t *testing.T) {
+ testenv.MustHaveExec(t)
+
+ exe, err := os.Executable()
+ if err != nil {
+ t.Skipf("can't find test executable: %v", err)
+ }
+
+ cmd := testenv.Command(t, exe, "-test.run=^$")
+ cmd = testenv.CleanCmdEnv(cmd)
+ cmd.Env = append(cmd.Env, "GO_WANT_RACE_BEFORE_TESTS=1")
+ out, _ := cmd.CombinedOutput()
+ t.Logf("%s", out)
+
+ c := bytes.Count(out, []byte("race detected outside of test execution"))
+
+ want := 0
+ if race.Enabled {
+ want = 1
+ }
+ if c != want {
+ t.Errorf("got %d race reports; want %d", c, want)
+ }
+}
+
+func TestBenchmarkRace(t *testing.T) {
+ out := runTest(t, "BenchmarkRacy")
+ c := bytes.Count(out, []byte("race detected during execution of test"))
+
+ want := 0
+ // We should see one race detector report.
+ if race.Enabled {
+ want = 1
+ }
+ if c != want {
+ t.Errorf("got %d race reports; want %d", c, want)
+ }
+}
+
+func BenchmarkRacy(b *testing.B) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ b.Skipf("skipping intentionally-racy benchmark")
+ }
+ for i := 0; i < b.N; i++ {
+ doRace()
+ }
+}
+
+func TestBenchmarkSubRace(t *testing.T) {
+ out := runTest(t, "BenchmarkSubRacy")
+ c := bytes.Count(out, []byte("race detected during execution of test"))
+
+ want := 0
+ // We should see two race detector reports:
+	// one in the sub-benchmark, and one in the parent afterward.
+ if race.Enabled {
+ want = 2
+ }
+ if c != want {
+ t.Errorf("got %d race reports; want %d", c, want)
+ }
+}
+
+func BenchmarkSubRacy(b *testing.B) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ b.Skipf("skipping intentionally-racy benchmark")
+ }
+
+ b.Run("non-racy", func(b *testing.B) {
+ tot := 0
+ for i := 0; i < b.N; i++ {
+ tot++
+ }
+ _ = tot
+ })
+
+ b.Run("racy", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ doRace()
+ }
+ })
+
+ doRace() // should be reported separately
+}
+
+func TestRunningTests(t *testing.T) {
+ t.Parallel()
+
+ // Regression test for https://go.dev/issue/64404:
+ // on timeout, the "running tests" message should not include
+ // tests that are waiting on parked subtests.
+
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ for i := 0; i < 2; i++ {
+ t.Run(fmt.Sprintf("outer%d", i), func(t *testing.T) {
+ t.Parallel()
+ for j := 0; j < 2; j++ {
+ t.Run(fmt.Sprintf("inner%d", j), func(t *testing.T) {
+ t.Parallel()
+ for {
+ time.Sleep(1 * time.Millisecond)
+ }
+ })
+ }
+ })
+ }
+ }
+
+ timeout := 10 * time.Millisecond
+ for {
+ cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$", "-test.timeout="+timeout.String(), "-test.parallel=4")
+ cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
+ out, err := cmd.CombinedOutput()
+ t.Logf("%v:\n%s", cmd, out)
+ if _, ok := err.(*exec.ExitError); !ok {
+ t.Fatal(err)
+ }
+
+ // Because the outer subtests (and TestRunningTests itself) are marked as
+ // parallel, their test functions return (and are no longer “running”)
+ // before the inner subtests are released to run and hang.
+ // Only those inner subtests should be reported as running.
+ want := []string{
+ "TestRunningTests/outer0/inner0",
+ "TestRunningTests/outer0/inner1",
+ "TestRunningTests/outer1/inner0",
+ "TestRunningTests/outer1/inner1",
+ }
+
+ got, ok := parseRunningTests(out)
+ if slices.Equal(got, want) {
+ break
+ }
+ if ok {
+ t.Logf("found running tests:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n"))
+ } else {
+ t.Logf("no running tests found")
+ }
+ t.Logf("retrying with longer timeout")
+ timeout *= 2
+ }
+}
+
+func TestRunningTestsInCleanup(t *testing.T) {
+ t.Parallel()
+
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ for i := 0; i < 2; i++ {
+ t.Run(fmt.Sprintf("outer%d", i), func(t *testing.T) {
+ // Not parallel: we expect to see only one outer test,
+ // stuck in cleanup after its subtest finishes.
+
+ t.Cleanup(func() {
+ for {
+ time.Sleep(1 * time.Millisecond)
+ }
+ })
+
+ for j := 0; j < 2; j++ {
+ t.Run(fmt.Sprintf("inner%d", j), func(t *testing.T) {
+ t.Parallel()
+ })
+ }
+ })
+ }
+ }
+
+ timeout := 10 * time.Millisecond
+ for {
+ cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$", "-test.timeout="+timeout.String())
+ cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
+ out, err := cmd.CombinedOutput()
+ t.Logf("%v:\n%s", cmd, out)
+ if _, ok := err.(*exec.ExitError); !ok {
+ t.Fatal(err)
+ }
+
+ // TestRunningTestsInCleanup is blocked in the call to t.Run,
+ // but its test function has not yet returned so it should still
+ // be considered to be running.
+ // outer1 hasn't even started yet, so only outer0 and the top-level
+ // test function should be reported as running.
+ want := []string{
+ "TestRunningTestsInCleanup",
+ "TestRunningTestsInCleanup/outer0",
+ }
+
+ got, ok := parseRunningTests(out)
+ if slices.Equal(got, want) {
+ break
+ }
+ if ok {
+ t.Logf("found running tests:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n"))
+ } else {
+ t.Logf("no running tests found")
+ }
+ t.Logf("retrying with longer timeout")
+ timeout *= 2
+ }
+}
+
+func parseRunningTests(out []byte) (runningTests []string, ok bool) {
+ inRunningTests := false
+ for _, line := range strings.Split(string(out), "\n") {
+ if inRunningTests {
+ if trimmed, ok := strings.CutPrefix(line, "\t"); ok {
+ if name, _, ok := strings.Cut(trimmed, " "); ok {
+ runningTests = append(runningTests, name)
+ continue
+ }
+ }
+
+ // This line is not the name of a running test.
+ return runningTests, true
+ }
+
+ if strings.TrimSpace(line) == "running tests:" {
+ inRunningTests = true
+ }
+ }
+
+ return nil, false
+}
+
+func TestConcurrentRun(t *testing.T) {
+ // Regression test for https://go.dev/issue/64402:
+ // this deadlocked after https://go.dev/cl/506755.
+
+ block := make(chan struct{})
+ var ready, done sync.WaitGroup
+ for i := 0; i < 2; i++ {
+ ready.Add(1)
+ done.Add(1)
+ go t.Run("", func(*testing.T) {
+ ready.Done()
+ <-block
+ done.Done()
+ })
+ }
+ ready.Wait()
+ close(block)
+ done.Wait()
+}
+
+func TestParentRun(t1 *testing.T) {
+ // Regression test for https://go.dev/issue/64402:
+ // this deadlocked after https://go.dev/cl/506755.
+
+ t1.Run("outer", func(t2 *testing.T) {
+ t2.Log("Hello outer!")
+ t1.Run("not_inner", func(t3 *testing.T) { // Note: this is t1.Run, not t2.Run.
+ t3.Log("Hello inner!")
+ })
+ })
+}
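
Many of the tests above re-execute the test binary with GO_WANT_HELPER_PROCESS=1 set and inspect the child's combined output. A minimal sketch of that helper-process pattern under assumed names (the test name TestSketchHelperPattern and the SKETCH_WANT_HELPER marker are illustrative, not from this file):

package sketch_test

import (
	"os"
	"os/exec"
	"testing"
)

func TestSketchHelperPattern(t *testing.T) {
	if os.Getenv("SKETCH_WANT_HELPER") == "1" {
		// Helper branch: do the work whose output the parent will inspect.
		t.Log("running as helper process")
		return
	}

	exe, err := os.Executable()
	if err != nil {
		t.Skipf("cannot locate test binary: %v", err)
	}

	// Re-run only this test in a child process, with the marker set.
	cmd := exec.Command(exe, "-test.run=^TestSketchHelperPattern$", "-test.v")
	cmd.Env = append(os.Environ(), "SKETCH_WANT_HELPER=1")
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("helper run failed: %v\n%s", err, out)
	}
	t.Logf("helper output:\n%s", out)
}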
diff --git a/src/testing/testing_windows.go b/src/testing/testing_windows.go
new file mode 100644
index 0000000..fd48ae9
--- /dev/null
+++ b/src/testing/testing_windows.go
@@ -0,0 +1,32 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package testing
+
+import (
+ "errors"
+ "internal/syscall/windows"
+ "syscall"
+)
+
+// isWindowsRetryable reports whether err is a Windows error code
+// that may be fixed by retrying a failed filesystem operation.
+func isWindowsRetryable(err error) bool {
+ for {
+ unwrapped := errors.Unwrap(err)
+ if unwrapped == nil {
+ break
+ }
+ err = unwrapped
+ }
+ if err == syscall.ERROR_ACCESS_DENIED {
+ return true // Observed in https://go.dev/issue/50051.
+ }
+ if err == windows.ERROR_SHARING_VIOLATION {
+ return true // Observed in https://go.dev/issue/51442.
+ }
+ return false
+}
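
The manual errors.Unwrap loop above peels wrappers one layer at a time before comparing against sentinel error values. errors.Is walks the same wrap chain itself, and syscall.Errno values compare by value, so an equivalent check can be sketched as below. The numeric ERROR_ACCESS_DENIED value (5) is spelled out so the sketch builds on any platform; it is an illustration, not a drop-in replacement for the code in this patch.

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"syscall"
)

// looksRetryable reports whether err wraps the Windows ERROR_ACCESS_DENIED
// code, using errors.Is instead of an explicit unwrap loop.
func looksRetryable(err error) bool {
	const errorAccessDenied = syscall.Errno(5) // value of windows ERROR_ACCESS_DENIED
	return errors.Is(err, errorAccessDenied)
}

func main() {
	wrapped := &fs.PathError{Op: "remove", Path: "x.txt", Err: syscall.Errno(5)}
	fmt.Println(looksRetryable(wrapped))             // true: errors.Is unwraps *fs.PathError
	fmt.Println(looksRetryable(errors.New("other"))) // false
}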