Diffstat
-rw-r--r--src/runtime/debug.go115
-rw-r--r--src/runtime/debug/debug.s9
-rw-r--r--src/runtime/debug/garbage.go238
-rw-r--r--src/runtime/debug/garbage_test.go238
-rw-r--r--src/runtime/debug/heapdump_test.go95
-rw-r--r--src/runtime/debug/mod.go287
-rw-r--r--src/runtime/debug/mod_test.go75
-rw-r--r--src/runtime/debug/panic_test.go56
-rw-r--r--src/runtime/debug/stack.go30
-rw-r--r--src/runtime/debug/stack_test.go121
-rw-r--r--src/runtime/debug/stubs.go18
-rw-r--r--src/runtime/debug_test.go307
-rw-r--r--src/runtime/debugcall.go252
-rw-r--r--src/runtime/debuglog.go831
-rw-r--r--src/runtime/debuglog_off.go19
-rw-r--r--src/runtime/debuglog_on.go45
-rw-r--r--src/runtime/debuglog_test.go169
17 files changed, 2905 insertions, 0 deletions
diff --git a/src/runtime/debug.go b/src/runtime/debug.go
new file mode 100644
index 0000000..669c36f
--- /dev/null
+++ b/src/runtime/debug.go
@@ -0,0 +1,115 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// GOMAXPROCS sets the maximum number of CPUs that can be executing
+// simultaneously and returns the previous setting. It defaults to
+// the value of runtime.NumCPU. If n < 1, it does not change the current setting.
+// This call will go away when the scheduler improves.
+func GOMAXPROCS(n int) int {
+ if GOARCH == "wasm" && n > 1 {
+ n = 1 // WebAssembly has no threads yet, so only one CPU is possible.
+ }
+
+ lock(&sched.lock)
+ ret := int(gomaxprocs)
+ unlock(&sched.lock)
+ if n <= 0 || n == ret {
+ return ret
+ }
+
+ stopTheWorldGC("GOMAXPROCS")
+
+ // newprocs will be processed by startTheWorld
+ newprocs = int32(n)
+
+ startTheWorldGC()
+ return ret
+}
+
+// NumCPU returns the number of logical CPUs usable by the current process.
+//
+// The set of available CPUs is checked by querying the operating system
+// at process startup. Changes to operating system CPU allocation after
+// process startup are not reflected.
+func NumCPU() int {
+ return int(ncpu)
+}
+
+// NumCgoCall returns the number of cgo calls made by the current process.
+func NumCgoCall() int64 {
+ var n = int64(atomic.Load64(&ncgocall))
+ for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
+ n += int64(mp.ncgocall)
+ }
+ return n
+}
+
+// NumGoroutine returns the number of goroutines that currently exist.
+func NumGoroutine() int {
+ return int(gcount())
+}
+
+//go:linkname debug_modinfo runtime/debug.modinfo
+func debug_modinfo() string {
+ return modinfo
+}
+
+// mayMoreStackPreempt is a maymorestack hook that forces a preemption
+// at every possible cooperative preemption point.
+//
+// This is valuable to apply to the runtime, which can be sensitive to
+// preemption points. To apply this to all preemption points in the
+// runtime and runtime-like code, use the following in bash or zsh:
+//
+// X=(-{gc,asm}flags={runtime/...,reflect,sync}=-d=maymorestack=runtime.mayMoreStackPreempt) GOFLAGS=${X[@]}
+//
+// This must be deeply nosplit because it is called from a function
+// prologue before the stack is set up and because the compiler will
+// call it from any splittable prologue (leading to infinite
+// recursion).
+//
+// Ideally it should also use very little stack because the linker
+// doesn't currently account for this in nosplit stack depth checking.
+//
+// Ensure mayMoreStackPreempt can be called for all ABIs.
+//
+//go:nosplit
+//go:linkname mayMoreStackPreempt
+func mayMoreStackPreempt() {
+ // Don't do anything on the g0 or gsignal stack.
+ gp := getg()
+ if gp == gp.m.g0 || gp == gp.m.gsignal {
+ return
+ }
+ // Force a preemption, unless the stack is already poisoned.
+ if gp.stackguard0 < stackPoisonMin {
+ gp.stackguard0 = stackPreempt
+ }
+}
+
+// mayMoreStackMove is a maymorestack hook that forces stack movement
+// at every possible point.
+//
+// See mayMoreStackPreempt.
+//
+//go:nosplit
+//go:linkname mayMoreStackMove
+func mayMoreStackMove() {
+ // Don't do anything on the g0 or gsignal stack.
+ gp := getg()
+ if gp == gp.m.g0 || gp == gp.m.gsignal {
+ return
+ }
+ // Force stack movement, unless the stack is already poisoned.
+ if gp.stackguard0 < stackPoisonMin {
+ gp.stackguard0 = stackForceMove
+ }
+}
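
A minimal sketch of caller-side usage for the exported functions above (a hypothetical program, not part of this commit):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// With n <= 0, GOMAXPROCS only queries the current setting.
	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
	fmt.Println("NumCPU:", runtime.NumCPU())
	fmt.Println("goroutines:", runtime.NumGoroutine())

	// Temporarily restrict parallelism, restoring the old value on return.
	old := runtime.GOMAXPROCS(1)
	defer runtime.GOMAXPROCS(old)
}
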
diff --git a/src/runtime/debug/debug.s b/src/runtime/debug/debug.s
new file mode 100644
index 0000000..6aae33a
--- /dev/null
+++ b/src/runtime/debug/debug.s
@@ -0,0 +1,9 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Nothing to see here.
+// This file exists so that the go command knows that parts of the
+// package are implemented in C, so that it does not instruct the
+// Go compiler to complain about extern declarations.
+// The actual implementations are in package runtime.
diff --git a/src/runtime/debug/garbage.go b/src/runtime/debug/garbage.go
new file mode 100644
index 0000000..0f53928
--- /dev/null
+++ b/src/runtime/debug/garbage.go
@@ -0,0 +1,238 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "runtime"
+ "sort"
+ "time"
+)
+
+// GCStats collects information about recent garbage collections.
+type GCStats struct {
+ LastGC time.Time // time of last collection
+ NumGC int64 // number of garbage collections
+ PauseTotal time.Duration // total pause for all collections
+ Pause []time.Duration // pause history, most recent first
+ PauseEnd []time.Time // pause end times history, most recent first
+ PauseQuantiles []time.Duration
+}
+
+// ReadGCStats reads statistics about garbage collection into stats.
+// The number of entries in the pause history is system-dependent;
+// stats.Pause slice will be reused if large enough, reallocated otherwise.
+// ReadGCStats may use the full capacity of the stats.Pause slice.
+// If stats.PauseQuantiles is non-empty, ReadGCStats fills it with quantiles
+// summarizing the distribution of pause time. For example, if
+// len(stats.PauseQuantiles) is 5, it will be filled with the minimum,
+// 25%, 50%, 75%, and maximum pause times.
+func ReadGCStats(stats *GCStats) {
+ // Create a buffer with space for at least two copies of the
+ // pause history tracked by the runtime. One will be returned
+ // to the caller and the other will be used as transfer buffer
+ // for end times history and as a temporary buffer for
+ // computing quantiles.
+ const maxPause = len(((*runtime.MemStats)(nil)).PauseNs)
+ if cap(stats.Pause) < 2*maxPause+3 {
+ stats.Pause = make([]time.Duration, 2*maxPause+3)
+ }
+
+ // readGCStats fills in the pause and end times histories (up to
+ // maxPause entries) and then three more: Unix ns time of last GC,
+ // number of GC, and total pause time in nanoseconds. Here we
+ // depend on the fact that time.Duration's native unit is
+ // nanoseconds, so the pauses and the total pause time do not need
+ // any conversion.
+ readGCStats(&stats.Pause)
+ n := len(stats.Pause) - 3
+ stats.LastGC = time.Unix(0, int64(stats.Pause[n]))
+ stats.NumGC = int64(stats.Pause[n+1])
+ stats.PauseTotal = stats.Pause[n+2]
+ n /= 2 // buffer holds pauses and end times
+ stats.Pause = stats.Pause[:n]
+
+ if cap(stats.PauseEnd) < maxPause {
+ stats.PauseEnd = make([]time.Time, 0, maxPause)
+ }
+ stats.PauseEnd = stats.PauseEnd[:0]
+ for _, ns := range stats.Pause[n : n+n] {
+ stats.PauseEnd = append(stats.PauseEnd, time.Unix(0, int64(ns)))
+ }
+
+ if len(stats.PauseQuantiles) > 0 {
+ if n == 0 {
+ for i := range stats.PauseQuantiles {
+ stats.PauseQuantiles[i] = 0
+ }
+ } else {
+ // There's room for a second copy of the data in stats.Pause.
+ // See the allocation at the top of the function.
+ sorted := stats.Pause[n : n+n]
+ copy(sorted, stats.Pause)
+ sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
+ nq := len(stats.PauseQuantiles) - 1
+ for i := 0; i < nq; i++ {
+ stats.PauseQuantiles[i] = sorted[len(sorted)*i/nq]
+ }
+ stats.PauseQuantiles[nq] = sorted[len(sorted)-1]
+ }
+ }
+}
+
+// SetGCPercent sets the garbage collection target percentage:
+// a collection is triggered when the ratio of freshly allocated data
+// to live data remaining after the previous collection reaches this percentage.
+// SetGCPercent returns the previous setting.
+// The initial setting is the value of the GOGC environment variable
+// at startup, or 100 if the variable is not set.
+// This setting may be effectively reduced in order to maintain a memory
+// limit.
+// A negative percentage effectively disables garbage collection, unless
+// the memory limit is reached.
+// See SetMemoryLimit for more details.
+func SetGCPercent(percent int) int {
+ return int(setGCPercent(int32(percent)))
+}
+
+// FreeOSMemory forces a garbage collection followed by an
+// attempt to return as much memory to the operating system
+// as possible. (Even if this is not called, the runtime gradually
+// returns memory to the operating system in a background task.)
+func FreeOSMemory() {
+ freeOSMemory()
+}
+
+// SetMaxStack sets the maximum amount of memory that
+// can be used by a single goroutine stack.
+// If any goroutine exceeds this limit while growing its stack,
+// the program crashes.
+// SetMaxStack returns the previous setting.
+// The initial setting is 1 GB on 64-bit systems, 250 MB on 32-bit systems.
+// There may be a system-imposed maximum stack limit regardless
+// of the value provided to SetMaxStack.
+//
+// SetMaxStack is useful mainly for limiting the damage done by
+// goroutines that enter an infinite recursion. It only limits future
+// stack growth.
+func SetMaxStack(bytes int) int {
+ return setMaxStack(bytes)
+}
+
+// SetMaxThreads sets the maximum number of operating system
+// threads that the Go program can use. If it attempts to use more than
+// this many, the program crashes.
+// SetMaxThreads returns the previous setting.
+// The initial setting is 10,000 threads.
+//
+// The limit controls the number of operating system threads, not the number
+// of goroutines. A Go program creates a new thread only when a goroutine
+// is ready to run but all the existing threads are blocked in system calls, cgo calls,
+// or are locked to other goroutines due to use of runtime.LockOSThread.
+//
+// SetMaxThreads is useful mainly for limiting the damage done by
+// programs that create an unbounded number of threads. The idea is
+// to take down the program before it takes down the operating system.
+func SetMaxThreads(threads int) int {
+ return setMaxThreads(threads)
+}
+
+// SetPanicOnFault controls the runtime's behavior when a program faults
+// at an unexpected (non-nil) address. Such faults are typically caused by
+// bugs such as runtime memory corruption, so the default response is to crash
+// the program. Programs working with memory-mapped files or unsafe
+// manipulation of memory may cause faults at non-nil addresses in less
+// dramatic situations; SetPanicOnFault allows such programs to request
+// that the runtime trigger only a panic, not a crash.
+// The runtime.Error that the runtime panics with may have an additional method:
+//
+// Addr() uintptr
+//
+// If that method exists, it returns the memory address which triggered the fault.
+// The results of Addr are best-effort and the veracity of the result
+// may depend on the platform.
+// SetPanicOnFault applies only to the current goroutine.
+// It returns the previous setting.
+func SetPanicOnFault(enabled bool) bool {
+ return setPanicOnFault(enabled)
+}
+
+// WriteHeapDump writes a description of the heap and the objects in
+// it to the given file descriptor.
+//
+// WriteHeapDump suspends the execution of all goroutines until the heap
+// dump is completely written. Thus, the file descriptor must not be
+// connected to a pipe or socket whose other end is in the same Go
+// process; instead, use a temporary file or network socket.
+//
+// The heap dump format is defined at https://golang.org/s/go15heapdump.
+func WriteHeapDump(fd uintptr)
+
+// SetTraceback sets the amount of detail printed by the runtime in
+// the traceback it prints before exiting due to an unrecovered panic
+// or an internal runtime error.
+// The level argument takes the same values as the GOTRACEBACK
+// environment variable. For example, SetTraceback("all") ensures
+// that the program prints all goroutines when it crashes.
+// See the package runtime documentation for details.
+// If SetTraceback is called with a level lower than that of the
+// environment variable, the call is ignored.
+func SetTraceback(level string)
+
+// SetMemoryLimit provides the runtime with a soft memory limit.
+//
+// The runtime undertakes several processes to try to respect this
+// memory limit, including adjustments to the frequency of garbage
+// collections and returning memory to the underlying system more
+// aggressively. This limit will be respected even if GOGC=off (or
+// if SetGCPercent(-1) is executed).
+//
+// The input limit is provided as bytes, and includes all memory
+// mapped, managed, and not released by the Go runtime. Notably, it
+// does not account for space used by the Go binary and memory
+// external to Go, such as memory managed by the underlying system
+// on behalf of the process, or memory managed by non-Go code inside
+// the same process. Examples of excluded memory sources include: OS
+// kernel memory held on behalf of the process, memory allocated by
+// C code, and memory mapped by syscall.Mmap (because it is not
+// managed by the Go runtime).
+//
+// More specifically, the following expression accurately reflects
+// the value the runtime attempts to maintain as the limit:
+//
+// runtime.MemStats.Sys - runtime.MemStats.HeapReleased
+//
+// or in terms of the runtime/metrics package:
+//
+// /memory/classes/total:bytes - /memory/classes/heap/released:bytes
+//
+// A zero limit or a limit that's lower than the amount of memory
+// used by the Go runtime may cause the garbage collector to run
+// nearly continuously. However, the application may still make
+// progress.
+//
+// The memory limit is always respected by the Go runtime, so to
+// effectively disable this behavior, set the limit very high.
+// math.MaxInt64 is the canonical value for disabling the limit,
+// but values much greater than the available memory on the underlying
+// system work just as well.
+//
+// See https://go.dev/doc/gc-guide for a detailed guide explaining
+// the soft memory limit in more detail, as well as a variety of common
+// use-cases and scenarios.
+//
+// The initial setting is math.MaxInt64 unless the GOMEMLIMIT
+// environment variable is set, in which case it provides the initial
+// setting. GOMEMLIMIT is a numeric value in bytes with an optional
+// unit suffix. The supported suffixes include B, KiB, MiB, GiB, and
+// TiB. These suffixes represent quantities of bytes as defined by
+// the IEC 80000-13 standard. That is, they are based on powers of
+// two: KiB means 2^10 bytes, MiB means 2^20 bytes, and so on.
+//
+// SetMemoryLimit returns the previously set memory limit.
+// A negative input does not adjust the limit, and allows for
+// retrieval of the currently set memory limit.
+func SetMemoryLimit(limit int64) int64 {
+ return setMemoryLimit(limit)
+}
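
Since SetGCPercent and SetMemoryLimit both return the previous setting, callers can query and restore them without separate getters. A short hypothetical sketch of the APIs defined above (not part of this commit):

package main

import (
	"fmt"
	"runtime/debug"
	"time"
)

func main() {
	// Sizing PauseQuantiles to 3 yields the minimum, median, and
	// maximum pause times, per the ReadGCStats documentation.
	var stats debug.GCStats
	stats.PauseQuantiles = make([]time.Duration, 3)
	debug.ReadGCStats(&stats)
	fmt.Println("GCs:", stats.NumGC, "pause quantiles:", stats.PauseQuantiles)

	// SetGCPercent returns the old value, so a set-then-restore pair
	// reads the effective GOGC without leaving it changed.
	old := debug.SetGCPercent(100)
	debug.SetGCPercent(old)

	// A negative input to SetMemoryLimit leaves the limit untouched
	// and simply reports it.
	fmt.Println("soft memory limit:", debug.SetMemoryLimit(-1))
}
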
diff --git a/src/runtime/debug/garbage_test.go b/src/runtime/debug/garbage_test.go
new file mode 100644
index 0000000..7213bbe
--- /dev/null
+++ b/src/runtime/debug/garbage_test.go
@@ -0,0 +1,238 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug_test
+
+import (
+ "internal/testenv"
+ "os"
+ "runtime"
+ . "runtime/debug"
+ "testing"
+ "time"
+)
+
+func TestReadGCStats(t *testing.T) {
+ defer SetGCPercent(SetGCPercent(-1))
+
+ var stats GCStats
+ var mstats runtime.MemStats
+ var min, max time.Duration
+
+ // First ReadGCStats will allocate, second should not,
+ // especially if we follow up with an explicit garbage collection.
+ stats.PauseQuantiles = make([]time.Duration, 10)
+ ReadGCStats(&stats)
+ runtime.GC()
+
+ // Assume these will return the same data: no GC during ReadGCStats.
+ ReadGCStats(&stats)
+ runtime.ReadMemStats(&mstats)
+
+ if stats.NumGC != int64(mstats.NumGC) {
+ t.Errorf("stats.NumGC = %d, but mstats.NumGC = %d", stats.NumGC, mstats.NumGC)
+ }
+ if stats.PauseTotal != time.Duration(mstats.PauseTotalNs) {
+ t.Errorf("stats.PauseTotal = %d, but mstats.PauseTotalNs = %d", stats.PauseTotal, mstats.PauseTotalNs)
+ }
+ if stats.LastGC.UnixNano() != int64(mstats.LastGC) {
+ t.Errorf("stats.LastGC.UnixNano = %d, but mstats.LastGC = %d", stats.LastGC.UnixNano(), mstats.LastGC)
+ }
+ n := int(mstats.NumGC)
+ if n > len(mstats.PauseNs) {
+ n = len(mstats.PauseNs)
+ }
+ if len(stats.Pause) != n {
+ t.Errorf("len(stats.Pause) = %d, want %d", len(stats.Pause), n)
+ } else {
+ off := (int(mstats.NumGC) + len(mstats.PauseNs) - 1) % len(mstats.PauseNs)
+ for i := 0; i < n; i++ {
+ dt := stats.Pause[i]
+ if dt != time.Duration(mstats.PauseNs[off]) {
+ t.Errorf("stats.Pause[%d] = %d, want %d", i, dt, mstats.PauseNs[off])
+ }
+ if max < dt {
+ max = dt
+ }
+ if min > dt || i == 0 {
+ min = dt
+ }
+ off = (off + len(mstats.PauseNs) - 1) % len(mstats.PauseNs)
+ }
+ }
+
+ q := stats.PauseQuantiles
+ nq := len(q)
+ if q[0] != min || q[nq-1] != max {
+ t.Errorf("stats.PauseQuantiles = [%d, ..., %d], want [%d, ..., %d]", q[0], q[nq-1], min, max)
+ }
+
+ for i := 0; i < nq-1; i++ {
+ if q[i] > q[i+1] {
+ t.Errorf("stats.PauseQuantiles[%d]=%d > stats.PauseQuantiles[%d]=%d", i, q[i], i+1, q[i+1])
+ }
+ }
+
+ // compare memory stats with gc stats:
+ if len(stats.PauseEnd) != n {
+ t.Fatalf("len(stats.PauseEnd) = %d, want %d", len(stats.PauseEnd), n)
+ }
+ off := (int(mstats.NumGC) + len(mstats.PauseEnd) - 1) % len(mstats.PauseEnd)
+ for i := 0; i < n; i++ {
+ dt := stats.PauseEnd[i]
+ if dt.UnixNano() != int64(mstats.PauseEnd[off]) {
+ t.Errorf("stats.PauseEnd[%d] = %d, want %d", i, dt.UnixNano(), mstats.PauseEnd[off])
+ }
+ off = (off + len(mstats.PauseEnd) - 1) % len(mstats.PauseEnd)
+ }
+}
+
+var big []byte
+
+func TestFreeOSMemory(t *testing.T) {
+ // Tests FreeOSMemory by making big susceptible to collection
+ // and checking that at least that much memory is returned to
+ // the OS after.
+
+ const bigBytes = 32 << 20
+ big = make([]byte, bigBytes)
+
+ // Make sure any in-progress GCs are complete.
+ runtime.GC()
+
+ var before runtime.MemStats
+ runtime.ReadMemStats(&before)
+
+ // Clear the last reference to the big allocation, making it
+ // susceptible to collection.
+ big = nil
+
+ // FreeOSMemory runs a GC cycle before releasing memory,
+ // so it's fine to skip a GC here.
+ //
+ // It's possible the background scavenger runs concurrently
+ // with this function and does most of the work for it.
+ // If that happens, it's OK. What we want is a test that fails
+ // often if FreeOSMemory does not work correctly, and a test
+ // that passes every time if it does.
+ FreeOSMemory()
+
+ var after runtime.MemStats
+ runtime.ReadMemStats(&after)
+
+ // Check to make sure that the big allocation (now freed)
+ // had its memory shift into HeapReleased as a result of that
+ // FreeOSMemory.
+ if after.HeapReleased <= before.HeapReleased {
+ t.Fatalf("no memory released: %d -> %d", before.HeapReleased, after.HeapReleased)
+ }
+
+ // Check to make sure bigBytes was released, plus some slack. Pages may get
+ // allocated in between the two measurements above for a variety of reasons,
+ // most commonly for GC work bufs. Since this can get fairly high, depending
+ // on scheduling and what GOMAXPROCS is, give a lot of slack up-front.
+ //
+ // Add a little more slack too if the page size is bigger than the runtime page size.
+ // "big" could end up unaligned on its ends, forcing the scavenger to skip at worst
+ // 2x pages.
+ slack := uint64(bigBytes / 2)
+ pageSize := uint64(os.Getpagesize())
+ if pageSize > 8<<10 {
+ slack += pageSize * 2
+ }
+ if slack > bigBytes {
+ // We basically already checked this.
+ return
+ }
+ if after.HeapReleased-before.HeapReleased < bigBytes-slack {
+ t.Fatalf("less than %d released: %d -> %d", bigBytes, before.HeapReleased, after.HeapReleased)
+ }
+}
+
+var (
+ setGCPercentBallast any
+ setGCPercentSink any
+)
+
+func TestSetGCPercent(t *testing.T) {
+ testenv.SkipFlaky(t, 20076)
+
+ // Test that the variable is being set and returned correctly.
+ old := SetGCPercent(123)
+ new := SetGCPercent(old)
+ if new != 123 {
+ t.Errorf("SetGCPercent(123); SetGCPercent(x) = %d, want 123", new)
+ }
+
+ // Test that the percentage is implemented correctly.
+ defer func() {
+ SetGCPercent(old)
+ setGCPercentBallast, setGCPercentSink = nil, nil
+ }()
+ SetGCPercent(100)
+ runtime.GC()
+ // Create 100 MB of live heap as a baseline.
+ const baseline = 100 << 20
+ var ms runtime.MemStats
+ runtime.ReadMemStats(&ms)
+ setGCPercentBallast = make([]byte, baseline-ms.Alloc)
+ runtime.GC()
+ runtime.ReadMemStats(&ms)
+ if abs64(baseline-int64(ms.Alloc)) > 10<<20 {
+ t.Fatalf("failed to set up baseline live heap; got %d MB, want %d MB", ms.Alloc>>20, baseline>>20)
+ }
+ // NextGC should be ~200 MB.
+ const thresh = 20 << 20 // TODO: Figure out why this is so noisy on some builders
+ if want := int64(2 * baseline); abs64(want-int64(ms.NextGC)) > thresh {
+ t.Errorf("NextGC = %d MB, want %d±%d MB", ms.NextGC>>20, want>>20, thresh>>20)
+ }
+ // Create some garbage, but not enough to trigger another GC.
+ for i := 0; i < int(1.2*baseline); i += 1 << 10 {
+ setGCPercentSink = make([]byte, 1<<10)
+ }
+ setGCPercentSink = nil
+ // Adjust GOGC to 50. NextGC should be ~150 MB.
+ SetGCPercent(50)
+ runtime.ReadMemStats(&ms)
+ if want := int64(1.5 * baseline); abs64(want-int64(ms.NextGC)) > thresh {
+ t.Errorf("NextGC = %d MB, want %d±%d MB", ms.NextGC>>20, want>>20, thresh>>20)
+ }
+
+ // Trigger a GC and get back to 100 MB live with GOGC=100.
+ SetGCPercent(100)
+ runtime.GC()
+ // Raise live to 120 MB.
+ setGCPercentSink = make([]byte, int(0.2*baseline))
+ // Lower GOGC to 10. This must force a GC.
+ runtime.ReadMemStats(&ms)
+ ngc1 := ms.NumGC
+ SetGCPercent(10)
+ // It may require an allocation to actually force the GC.
+ setGCPercentSink = make([]byte, 1<<20)
+ runtime.ReadMemStats(&ms)
+ ngc2 := ms.NumGC
+ if ngc1 == ngc2 {
+ t.Errorf("expected GC to run but it did not")
+ }
+}
+
+func abs64(a int64) int64 {
+ if a < 0 {
+ return -a
+ }
+ return a
+}
+
+func TestSetMaxThreadsOvf(t *testing.T) {
+ // Verify that a big threads count will not overflow the int32
+ // maxmcount variable, causing a panic (see Issue 16076).
+ //
+ // This can only happen when ints are 64 bits, since on platforms
+ // with 32 bit ints SetMaxThreads (which takes an int parameter)
+ // cannot be given anything that will overflow an int32.
+ //
+ // Call SetMaxThreads with 1<<31, but only on 64 bit systems.
+ nt := SetMaxThreads(1 << (30 + ^uint(0)>>63))
+ SetMaxThreads(nt) // restore previous value
+}
diff --git a/src/runtime/debug/heapdump_test.go b/src/runtime/debug/heapdump_test.go
new file mode 100644
index 0000000..ee6b054
--- /dev/null
+++ b/src/runtime/debug/heapdump_test.go
@@ -0,0 +1,95 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug_test
+
+import (
+ "os"
+ "runtime"
+ . "runtime/debug"
+ "testing"
+)
+
+func TestWriteHeapDumpNonempty(t *testing.T) {
+ if runtime.GOOS == "js" {
+ t.Skipf("WriteHeapDump is not available on %s.", runtime.GOOS)
+ }
+ f, err := os.CreateTemp("", "heapdumptest")
+ if err != nil {
+ t.Fatalf("TempFile failed: %v", err)
+ }
+ defer os.Remove(f.Name())
+ defer f.Close()
+ WriteHeapDump(f.Fd())
+ fi, err := f.Stat()
+ if err != nil {
+ t.Fatalf("Stat failed: %v", err)
+ }
+ const minSize = 1
+ if size := fi.Size(); size < minSize {
+ t.Fatalf("Heap dump size %d bytes, expected at least %d bytes", size, minSize)
+ }
+}
+
+type Obj struct {
+ x, y int
+}
+
+func objfin(x *Obj) {
+ //println("finalized", x)
+}
+
+func TestWriteHeapDumpFinalizers(t *testing.T) {
+ if runtime.GOOS == "js" {
+ t.Skipf("WriteHeapDump is not available on %s.", runtime.GOOS)
+ }
+ f, err := os.CreateTemp("", "heapdumptest")
+ if err != nil {
+ t.Fatalf("TempFile failed: %v", err)
+ }
+ defer os.Remove(f.Name())
+ defer f.Close()
+
+ // bug 9172: WriteHeapDump couldn't handle more than one finalizer
+ println("allocating objects")
+ x := &Obj{}
+ runtime.SetFinalizer(x, objfin)
+ y := &Obj{}
+ runtime.SetFinalizer(y, objfin)
+
+ // Trigger collection of x and y, queueing of their finalizers.
+ println("starting gc")
+ runtime.GC()
+
+ // Make sure WriteHeapDump doesn't fail with multiple queued finalizers.
+ println("starting dump")
+ WriteHeapDump(f.Fd())
+ println("done dump")
+}
+
+type G[T any] struct{}
+type I interface {
+ M()
+}
+
+//go:noinline
+func (g G[T]) M() {}
+
+var dummy I = G[int]{}
+var dummy2 I = G[G[int]]{}
+
+func TestWriteHeapDumpTypeName(t *testing.T) {
+ if runtime.GOOS == "js" {
+ t.Skipf("WriteHeapDump is not available on %s.", runtime.GOOS)
+ }
+ f, err := os.CreateTemp("", "heapdumptest")
+ if err != nil {
+ t.Fatalf("TempFile failed: %v", err)
+ }
+ defer os.Remove(f.Name())
+ defer f.Close()
+ WriteHeapDump(f.Fd())
+ dummy.M()
+ dummy2.M()
+}
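
Outside of tests, the caveat from the WriteHeapDump documentation still applies: dump to a regular file, never to a pipe or socket whose other end is in the same process. A hypothetical sketch (the path is illustrative):

package main

import (
	"os"
	"runtime/debug"
)

func main() {
	// Use a plain temporary file as the dump target; all goroutines
	// are suspended while the dump is written.
	f, err := os.Create("/tmp/heap.dump") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	debug.WriteHeapDump(f.Fd())
}
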
diff --git a/src/runtime/debug/mod.go b/src/runtime/debug/mod.go
new file mode 100644
index 0000000..8b7a423
--- /dev/null
+++ b/src/runtime/debug/mod.go
@@ -0,0 +1,287 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "fmt"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// exported from runtime.
+func modinfo() string
+
+// ReadBuildInfo returns the build information embedded
+// in the running binary. The information is available only
+// in binaries built with module support.
+func ReadBuildInfo() (info *BuildInfo, ok bool) {
+ data := modinfo()
+ if len(data) < 32 {
+ return nil, false
+ }
+ data = data[16 : len(data)-16]
+ bi, err := ParseBuildInfo(data)
+ if err != nil {
+ return nil, false
+ }
+
+ // The go version is stored separately from other build info, mostly for
+ // historical reasons. It is not part of the modinfo() string, and
+ // ParseBuildInfo does not recognize it. We inject it here to hide this
+ // awkwardness from the user.
+ bi.GoVersion = runtime.Version()
+
+ return bi, true
+}
+
+// BuildInfo represents the build information read from a Go binary.
+type BuildInfo struct {
+ // GoVersion is the version of the Go toolchain that built the binary
+ // (for example, "go1.19.2").
+ GoVersion string
+
+ // Path is the package path of the main package for the binary
+ // (for example, "golang.org/x/tools/cmd/stringer").
+ Path string
+
+ // Main describes the module that contains the main package for the binary.
+ Main Module
+
+ // Deps describes all the dependency modules, both direct and indirect,
+ // that contributed packages to the build of this binary.
+ Deps []*Module
+
+ // Settings describes the build settings used to build the binary.
+ Settings []BuildSetting
+}
+
+// A Module describes a single module included in a build.
+type Module struct {
+ Path string // module path
+ Version string // module version
+ Sum string // checksum
+ Replace *Module // replaced by this module
+}
+
+// A BuildSetting is a key-value pair describing one setting that influenced a build.
+//
+// Defined keys include:
+//
+// - -buildmode: the buildmode flag used (typically "exe")
+// - -compiler: the compiler toolchain flag used (typically "gc")
+// - CGO_ENABLED: the effective CGO_ENABLED environment variable
+// - CGO_CFLAGS: the effective CGO_CFLAGS environment variable
+// - CGO_CPPFLAGS: the effective CGO_CPPFLAGS environment variable
+// - CGO_CXXFLAGS: the effective CGO_CXXFLAGS environment variable
+// - CGO_LDFLAGS: the effective CGO_LDFLAGS environment variable
+// - GOARCH: the architecture target
+// - GOAMD64/GOARM64/GO386/etc: the architecture feature level for GOARCH
+// - GOOS: the operating system target
+// - vcs: the version control system for the source tree where the build ran
+// - vcs.revision: the revision identifier for the current commit or checkout
+// - vcs.time: the modification time associated with vcs.revision, in RFC3339 format
+// - vcs.modified: true or false indicating whether the source tree had local modifications
+type BuildSetting struct {
+ // Key and Value describe the build setting.
+ // Key must not contain an equals sign, space, tab, or newline.
+ // Value must not contain newlines ('\n').
+ Key, Value string
+}
+
+// quoteKey reports whether key is required to be quoted.
+func quoteKey(key string) bool {
+ return len(key) == 0 || strings.ContainsAny(key, "= \t\r\n\"`")
+}
+
+// quoteValue reports whether value is required to be quoted.
+func quoteValue(value string) bool {
+ return strings.ContainsAny(value, " \t\r\n\"`")
+}
+
+func (bi *BuildInfo) String() string {
+ buf := new(strings.Builder)
+ if bi.GoVersion != "" {
+ fmt.Fprintf(buf, "go\t%s\n", bi.GoVersion)
+ }
+ if bi.Path != "" {
+ fmt.Fprintf(buf, "path\t%s\n", bi.Path)
+ }
+ var formatMod func(string, Module)
+ formatMod = func(word string, m Module) {
+ buf.WriteString(word)
+ buf.WriteByte('\t')
+ buf.WriteString(m.Path)
+ buf.WriteByte('\t')
+ buf.WriteString(m.Version)
+ if m.Replace == nil {
+ buf.WriteByte('\t')
+ buf.WriteString(m.Sum)
+ } else {
+ buf.WriteByte('\n')
+ formatMod("=>", *m.Replace)
+ }
+ buf.WriteByte('\n')
+ }
+ if bi.Main != (Module{}) {
+ formatMod("mod", bi.Main)
+ }
+ for _, dep := range bi.Deps {
+ formatMod("dep", *dep)
+ }
+ for _, s := range bi.Settings {
+ key := s.Key
+ if quoteKey(key) {
+ key = strconv.Quote(key)
+ }
+ value := s.Value
+ if quoteValue(value) {
+ value = strconv.Quote(value)
+ }
+ fmt.Fprintf(buf, "build\t%s=%s\n", key, value)
+ }
+
+ return buf.String()
+}
+
+func ParseBuildInfo(data string) (bi *BuildInfo, err error) {
+ lineNum := 1
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("could not parse Go build info: line %d: %w", lineNum, err)
+ }
+ }()
+
+ var (
+ pathLine = "path\t"
+ modLine = "mod\t"
+ depLine = "dep\t"
+ repLine = "=>\t"
+ buildLine = "build\t"
+ newline = "\n"
+ tab = "\t"
+ )
+
+ readModuleLine := func(elem []string) (Module, error) {
+ if len(elem) != 2 && len(elem) != 3 {
+ return Module{}, fmt.Errorf("expected 2 or 3 columns; got %d", len(elem))
+ }
+ version := elem[1]
+ sum := ""
+ if len(elem) == 3 {
+ sum = elem[2]
+ }
+ return Module{
+ Path: elem[0],
+ Version: version,
+ Sum: sum,
+ }, nil
+ }
+
+ bi = new(BuildInfo)
+ var (
+ last *Module
+ line string
+ ok bool
+ )
+ // Reverse of BuildInfo.String(), except for go version.
+ for len(data) > 0 {
+ line, data, ok = strings.Cut(data, newline)
+ if !ok {
+ break
+ }
+ switch {
+ case strings.HasPrefix(line, pathLine):
+ elem := line[len(pathLine):]
+ bi.Path = string(elem)
+ case strings.HasPrefix(line, modLine):
+ elem := strings.Split(line[len(modLine):], tab)
+ last = &bi.Main
+ *last, err = readModuleLine(elem)
+ if err != nil {
+ return nil, err
+ }
+ case strings.HasPrefix(line, depLine):
+ elem := strings.Split(line[len(depLine):], tab)
+ last = new(Module)
+ bi.Deps = append(bi.Deps, last)
+ *last, err = readModuleLine(elem)
+ if err != nil {
+ return nil, err
+ }
+ case strings.HasPrefix(line, repLine):
+ elem := strings.Split(line[len(repLine):], tab)
+ if len(elem) != 3 {
+ return nil, fmt.Errorf("expected 3 columns for replacement; got %d", len(elem))
+ }
+ if last == nil {
+ return nil, fmt.Errorf("replacement with no module on previous line")
+ }
+ last.Replace = &Module{
+ Path: string(elem[0]),
+ Version: string(elem[1]),
+ Sum: string(elem[2]),
+ }
+ last = nil
+ case strings.HasPrefix(line, buildLine):
+ kv := line[len(buildLine):]
+ if len(kv) < 1 {
+ return nil, fmt.Errorf("build line missing '='")
+ }
+
+ var key, rawValue string
+ switch kv[0] {
+ case '=':
+ return nil, fmt.Errorf("build line with missing key")
+
+ case '`', '"':
+ rawKey, err := strconv.QuotedPrefix(kv)
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted key in build line")
+ }
+ if len(kv) == len(rawKey) {
+ return nil, fmt.Errorf("build line missing '=' after quoted key")
+ }
+ if c := kv[len(rawKey)]; c != '=' {
+ return nil, fmt.Errorf("unexpected character after quoted key: %q", c)
+ }
+ key, _ = strconv.Unquote(rawKey)
+ rawValue = kv[len(rawKey)+1:]
+
+ default:
+ var ok bool
+ key, rawValue, ok = strings.Cut(kv, "=")
+ if !ok {
+ return nil, fmt.Errorf("build line missing '=' after key")
+ }
+ if quoteKey(key) {
+ return nil, fmt.Errorf("unquoted key %q must be quoted", key)
+ }
+ }
+
+ var value string
+ if len(rawValue) > 0 {
+ switch rawValue[0] {
+ case '`', '"':
+ var err error
+ value, err = strconv.Unquote(rawValue)
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted value in build line")
+ }
+
+ default:
+ value = rawValue
+ if quoteValue(value) {
+ return nil, fmt.Errorf("unquoted value %q must be quoted", value)
+ }
+ }
+ }
+
+ bi.Settings = append(bi.Settings, BuildSetting{Key: key, Value: value})
+ }
+ lineNum++
+ }
+ return bi, nil
+}
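
ReadBuildInfo is the usual entry point; ParseBuildInfo is the lower-level parser it wraps. A hypothetical consumer (not part of this commit) that reports the vcs.revision setting documented above:

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	bi, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("binary was built without module support")
		return
	}
	fmt.Println("go:", bi.GoVersion, "main module:", bi.Main.Path, bi.Main.Version)
	for _, s := range bi.Settings {
		if s.Key == "vcs.revision" {
			fmt.Println("built from commit:", s.Value)
		}
	}
}
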
diff --git a/src/runtime/debug/mod_test.go b/src/runtime/debug/mod_test.go
new file mode 100644
index 0000000..b291769
--- /dev/null
+++ b/src/runtime/debug/mod_test.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug_test
+
+import (
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "testing"
+)
+
+// strip removes two leading tabs after each newline of s.
+func strip(s string) string {
+ replaced := strings.ReplaceAll(s, "\n\t\t", "\n")
+ if len(replaced) > 0 && replaced[0] == '\n' {
+ replaced = replaced[1:]
+ }
+ return replaced
+}
+
+func FuzzParseBuildInfoRoundTrip(f *testing.F) {
+ // Package built from outside a module, missing some fields.
+ f.Add(strip(`
+ path rsc.io/fortune
+ mod rsc.io/fortune v1.0.0
+ `))
+
+ // Package built from the standard library, missing some fields.
+ f.Add(`path cmd/test2json`)
+
+ // Package built from inside a module.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ mod example.com/m (devel)
+ build -compiler=gc
+ `))
+
+ // Package built in GOPATH mode.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ build -compiler=gc
+ `))
+
+ // Escaped build info.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ build CRAZY_ENV="requires\nescaping"
+ `))
+
+ f.Fuzz(func(t *testing.T, s string) {
+ bi, err := debug.ParseBuildInfo(s)
+ if err != nil {
+ // Not a round-trippable BuildInfo string.
+ t.Log(err)
+ return
+ }
+
+ // s2 could have different escaping from s.
+ // However, it should parse to exactly the same contents.
+ s2 := bi.String()
+ bi2, err := debug.ParseBuildInfo(s2)
+ if err != nil {
+ t.Fatalf("%v:\n%s", err, s2)
+ }
+
+ if !reflect.DeepEqual(bi2, bi) {
+ t.Fatalf("Parsed representation differs.\ninput:\n%s\noutput:\n%s", s, s2)
+ }
+ })
+}
diff --git a/src/runtime/debug/panic_test.go b/src/runtime/debug/panic_test.go
new file mode 100644
index 0000000..ec5294c
--- /dev/null
+++ b/src/runtime/debug/panic_test.go
@@ -0,0 +1,56 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd
+
+// TODO: test on Windows?
+
+package debug_test
+
+import (
+ "runtime"
+ "runtime/debug"
+ "syscall"
+ "testing"
+ "unsafe"
+)
+
+func TestPanicOnFault(t *testing.T) {
+ if runtime.GOARCH == "s390x" {
+ t.Skip("s390x fault addresses are missing the low order bits")
+ }
+ if runtime.GOOS == "ios" {
+ t.Skip("iOS doesn't provide fault addresses")
+ }
+ if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm" {
+ t.Skip("netbsd-arm doesn't provide fault address (golang.org/issue/45026)")
+ }
+ m, err := syscall.Mmap(-1, 0, 0x1000, syscall.PROT_READ /* Note: no PROT_WRITE */, syscall.MAP_SHARED|syscall.MAP_ANON)
+ if err != nil {
+ t.Fatalf("can't map anonymous memory: %s", err)
+ }
+ defer syscall.Munmap(m)
+ old := debug.SetPanicOnFault(true)
+ defer debug.SetPanicOnFault(old)
+ const lowBits = 0x3e7
+ defer func() {
+ r := recover()
+ if r == nil {
+ t.Fatalf("write did not fault")
+ }
+ type addressable interface {
+ Addr() uintptr
+ }
+ a, ok := r.(addressable)
+ if !ok {
+ t.Fatalf("fault does not contain address")
+ }
+ want := uintptr(unsafe.Pointer(&m[lowBits]))
+ got := a.Addr()
+ if got != want {
+ t.Fatalf("fault address %x, want %x", got, want)
+ }
+ }()
+ m[lowBits] = 1 // will fault
+}
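
The test above exercises the documented Addr method on the fault panic. In application code, the same SetPanicOnFault pattern converts faults on memory-mapped data into recoverable errors; a hedged sketch (the helper below is hypothetical, and the slice would normally come from syscall.Mmap):

package main

import (
	"fmt"
	"runtime/debug"
)

// readGuarded reads b[i] with fault-to-panic conversion enabled, so a
// fault in memory-mapped data becomes a recoverable panic rather than
// a crash. SetPanicOnFault applies only to the current goroutine.
func readGuarded(b []byte, i int) (v byte, err error) {
	old := debug.SetPanicOnFault(true)
	defer debug.SetPanicOnFault(old)
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("fault reading mapped memory: %v", r)
		}
	}()
	return b[i], nil
}

func main() {
	v, err := readGuarded([]byte{42}, 0)
	fmt.Println(v, err)
}
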
diff --git a/src/runtime/debug/stack.go b/src/runtime/debug/stack.go
new file mode 100644
index 0000000..5d810af
--- /dev/null
+++ b/src/runtime/debug/stack.go
@@ -0,0 +1,30 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package debug contains facilities for programs to debug themselves while
+// they are running.
+package debug
+
+import (
+ "os"
+ "runtime"
+)
+
+// PrintStack prints to standard error the stack trace returned by runtime.Stack.
+func PrintStack() {
+ os.Stderr.Write(Stack())
+}
+
+// Stack returns a formatted stack trace of the goroutine that calls it.
+// It calls runtime.Stack with a large enough buffer to capture the entire trace.
+func Stack() []byte {
+ buf := make([]byte, 1024)
+ for {
+ n := runtime.Stack(buf, false)
+ if n < len(buf) {
+ return buf[:n]
+ }
+ buf = make([]byte, 2*len(buf))
+ }
+}
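
Stack is most often called from a deferred function during panic recovery, where the runtime's automatic traceback would otherwise be lost to recover. A minimal illustrative sketch (not part of this commit):

package main

import (
	"log"
	"runtime/debug"
)

func work() {
	defer func() {
		if r := recover(); r != nil {
			// Stack reports the current goroutine's trace, including
			// the frames of this deferred function itself.
			log.Printf("recovered: %v\n%s", r, debug.Stack())
		}
	}()
	panic("boom")
}

func main() { work() }
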
diff --git a/src/runtime/debug/stack_test.go b/src/runtime/debug/stack_test.go
new file mode 100644
index 0000000..671057c
--- /dev/null
+++ b/src/runtime/debug/stack_test.go
@@ -0,0 +1,121 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug_test
+
+import (
+ "bytes"
+ "fmt"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ . "runtime/debug"
+ "strings"
+ "testing"
+)
+
+func TestMain(m *testing.M) {
+ if os.Getenv("GO_RUNTIME_DEBUG_TEST_DUMP_GOROOT") != "" {
+ fmt.Println(runtime.GOROOT())
+ os.Exit(0)
+ }
+ os.Exit(m.Run())
+}
+
+type T int
+
+func (t *T) ptrmethod() []byte {
+ return Stack()
+}
+func (t T) method() []byte {
+ return t.ptrmethod()
+}
+
+/*
+The traceback should look something like this, modulo line numbers and hex constants.
+Don't worry much about the base levels, but check the ones in our own package.
+
+ goroutine 10 [running]:
+ runtime/debug.Stack(0x0, 0x0, 0x0)
+ /Users/r/go/src/runtime/debug/stack.go:28 +0x80
+ runtime/debug.(*T).ptrmethod(0xc82005ee70, 0x0, 0x0, 0x0)
+ /Users/r/go/src/runtime/debug/stack_test.go:15 +0x29
+ runtime/debug.T.method(0x0, 0x0, 0x0, 0x0)
+ /Users/r/go/src/runtime/debug/stack_test.go:18 +0x32
+ runtime/debug.TestStack(0xc8201ce000)
+ /Users/r/go/src/runtime/debug/stack_test.go:37 +0x38
+ testing.tRunner(0xc8201ce000, 0x664b58)
+ /Users/r/go/src/testing/testing.go:456 +0x98
+ created by testing.RunTests
+ /Users/r/go/src/testing/testing.go:561 +0x86d
+*/
+func TestStack(t *testing.T) {
+ b := T(0).method()
+ lines := strings.Split(string(b), "\n")
+ if len(lines) < 6 {
+ t.Fatal("too few lines")
+ }
+
+ // If built with -trimpath, file locations should start with package paths.
+ // Otherwise, file locations should start with a GOROOT/src prefix
+ // (for whatever value of GOROOT is baked into the binary, not the one
+ // that may be set in the environment).
+ fileGoroot := ""
+ if envGoroot := os.Getenv("GOROOT"); envGoroot != "" {
+ // Since GOROOT is set explicitly in the environment, we can't be certain
+ // that it is the same GOROOT value baked into the binary, and we can't
+ // change the value in-process because runtime.GOROOT uses the value from
+ // initial (not current) environment. Spawn a subprocess to determine the
+ // real baked-in GOROOT.
+ t.Logf("found GOROOT %q from environment; checking embedded GOROOT value", envGoroot)
+ testenv.MustHaveExec(t)
+ exe, err := os.Executable()
+ if err != nil {
+ t.Fatal(err)
+ }
+ cmd := exec.Command(exe)
+ cmd.Env = append(os.Environ(), "GOROOT=", "GO_RUNTIME_DEBUG_TEST_DUMP_GOROOT=1")
+ out, err := cmd.Output()
+ if err != nil {
+ t.Fatal(err)
+ }
+ fileGoroot = string(bytes.TrimSpace(out))
+ } else {
+ // Since GOROOT is not set in the environment, its value (if any) must come
+ // from the path embedded in the binary.
+ fileGoroot = runtime.GOROOT()
+ }
+ filePrefix := ""
+ if fileGoroot != "" {
+ filePrefix = filepath.ToSlash(fileGoroot) + "/src/"
+ }
+
+ n := 0
+ frame := func(file, code string) {
+ t.Helper()
+
+ line := lines[n]
+ if !strings.Contains(line, code) {
+ t.Errorf("expected %q in %q", code, line)
+ }
+ n++
+
+ line = lines[n]
+
+ wantPrefix := "\t" + filePrefix + file
+ if !strings.HasPrefix(line, wantPrefix) {
+ t.Errorf("in line %q, expected prefix %q", line, wantPrefix)
+ }
+ n++
+ }
+ n++
+
+ frame("runtime/debug/stack.go", "runtime/debug.Stack")
+ frame("runtime/debug/stack_test.go", "runtime/debug_test.(*T).ptrmethod")
+ frame("runtime/debug/stack_test.go", "runtime/debug_test.T.method")
+ frame("runtime/debug/stack_test.go", "runtime/debug_test.TestStack")
+ frame("testing/testing.go", "")
+}
diff --git a/src/runtime/debug/stubs.go b/src/runtime/debug/stubs.go
new file mode 100644
index 0000000..913d4b9
--- /dev/null
+++ b/src/runtime/debug/stubs.go
@@ -0,0 +1,18 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "time"
+)
+
+// Implemented in package runtime.
+func readGCStats(*[]time.Duration)
+func freeOSMemory()
+func setMaxStack(int) int
+func setGCPercent(int32) int32
+func setPanicOnFault(bool) bool
+func setMaxThreads(int) int
+func setMemoryLimit(int64) int64
diff --git a/src/runtime/debug_test.go b/src/runtime/debug_test.go
new file mode 100644
index 0000000..75fe07e
--- /dev/null
+++ b/src/runtime/debug_test.go
@@ -0,0 +1,307 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: This test could be implemented on all (most?) UNIXes if we
+// added syscall.Tgkill more widely.
+
+// We skip all of these tests under race mode because our test thread
+// spends all of its time in the race runtime, which isn't a safe
+// point.
+
+//go:build (amd64 || arm64) && linux && !race
+
+package runtime_test
+
+import (
+ "fmt"
+ "internal/abi"
+ "math"
+ "os"
+ "regexp"
+ "runtime"
+ "runtime/debug"
+ "sync/atomic"
+ "syscall"
+ "testing"
+)
+
+func startDebugCallWorker(t *testing.T) (g *runtime.G, after func()) {
+ // This can deadlock if run under a debugger because it
+ // depends on catching SIGTRAP, which is usually swallowed by
+ // a debugger.
+ skipUnderDebugger(t)
+
+ // This can deadlock if there aren't enough threads or if a GC
+ // tries to interrupt an atomic loop (see issue #10958). Execute
+ // an extra GC to ensure even the sweep phase is done (out of
+ // caution to prevent #49370 from happening).
+ // TODO(mknyszek): This extra GC cycle is likely unnecessary
+ // because preemption (which may happen during the sweep phase)
+ // isn't much of an issue anymore thanks to asynchronous preemption.
+ // The biggest risk is having a write barrier in the debug call
+ // injection test code fire, because it runs in a signal handler
+ // and may not have a P.
+ //
+ // We use 8 Ps so there's room for the debug call worker,
+ // something that's trying to preempt the call worker, and the
+ // goroutine that's trying to stop the call worker.
+ ogomaxprocs := runtime.GOMAXPROCS(8)
+ ogcpercent := debug.SetGCPercent(-1)
+ runtime.GC()
+
+ // ready is a buffered channel so debugCallWorker won't block
+ // on sending to it. This makes it less likely we'll catch
+ // debugCallWorker while it's in the runtime.
+ ready := make(chan *runtime.G, 1)
+ var stop uint32
+ done := make(chan error)
+ go debugCallWorker(ready, &stop, done)
+ g = <-ready
+ return g, func() {
+ atomic.StoreUint32(&stop, 1)
+ err := <-done
+ if err != nil {
+ t.Fatal(err)
+ }
+ runtime.GOMAXPROCS(ogomaxprocs)
+ debug.SetGCPercent(ogcpercent)
+ }
+}
+
+func debugCallWorker(ready chan<- *runtime.G, stop *uint32, done chan<- error) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ ready <- runtime.Getg()
+
+ x := 2
+ debugCallWorker2(stop, &x)
+ if x != 1 {
+ done <- fmt.Errorf("want x = 1, got %d; register pointer not adjusted?", x)
+ }
+ close(done)
+}
+
+// Don't inline this function, since we want to test adjusting
+// pointers in the arguments.
+//
+//go:noinline
+func debugCallWorker2(stop *uint32, x *int) {
+ for atomic.LoadUint32(stop) == 0 {
+ // Strongly encourage x to live in a register so we
+ // can test pointer register adjustment.
+ *x++
+ }
+ *x = 1
+}
+
+func debugCallTKill(tid int) error {
+ return syscall.Tgkill(syscall.Getpid(), tid, syscall.SIGTRAP)
+}
+
+// skipUnderDebugger skips the current test when running under a
+// debugger (specifically if this process has a tracer). This is
+// Linux-specific.
+func skipUnderDebugger(t *testing.T) {
+ pid := syscall.Getpid()
+ status, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
+ if err != nil {
+ t.Logf("couldn't get proc tracer: %s", err)
+ return
+ }
+ re := regexp.MustCompile(`TracerPid:\s+([0-9]+)`)
+ sub := re.FindSubmatch(status)
+ if sub == nil {
+ t.Logf("couldn't find proc tracer PID")
+ return
+ }
+ if string(sub[1]) == "0" {
+ return
+ }
+ t.Skip("test will deadlock under a debugger")
+}
+
+func TestDebugCall(t *testing.T) {
+ g, after := startDebugCallWorker(t)
+ defer after()
+
+ type stackArgs struct {
+ x0 int
+ x1 float64
+ y0Ret int
+ y1Ret float64
+ }
+
+ // Inject a call into the debugCallWorker goroutine and test
+ // basic argument and result passing.
+ fn := func(x int, y float64) (y0Ret int, y1Ret float64) {
+ return x + 1, y + 1.0
+ }
+ var args *stackArgs
+ var regs abi.RegArgs
+ intRegs := regs.Ints[:]
+ floatRegs := regs.Floats[:]
+ fval := float64(42.0)
+ if len(intRegs) > 0 {
+ intRegs[0] = 42
+ floatRegs[0] = math.Float64bits(fval)
+ } else {
+ args = &stackArgs{
+ x0: 42,
+ x1: 42.0,
+ }
+ }
+
+ if _, err := runtime.InjectDebugCall(g, fn, &regs, args, debugCallTKill, false); err != nil {
+ t.Fatal(err)
+ }
+ var result0 int
+ var result1 float64
+ if len(intRegs) > 0 {
+ result0 = int(intRegs[0])
+ result1 = math.Float64frombits(floatRegs[0])
+ } else {
+ result0 = args.y0Ret
+ result1 = args.y1Ret
+ }
+ if result0 != 43 {
+ t.Errorf("want 43, got %d", result0)
+ }
+ if result1 != fval+1 {
+ t.Errorf("want 43, got %f", result1)
+ }
+}
+
+func TestDebugCallLarge(t *testing.T) {
+ g, after := startDebugCallWorker(t)
+ defer after()
+
+ // Inject a call with a large call frame.
+ const N = 128
+ var args struct {
+ in [N]int
+ out [N]int
+ }
+ fn := func(in [N]int) (out [N]int) {
+ for i := range in {
+ out[i] = in[i] + 1
+ }
+ return
+ }
+ var want [N]int
+ for i := range args.in {
+ args.in[i] = i
+ want[i] = i + 1
+ }
+ if _, err := runtime.InjectDebugCall(g, fn, nil, &args, debugCallTKill, false); err != nil {
+ t.Fatal(err)
+ }
+ if want != args.out {
+ t.Fatalf("want %v, got %v", want, args.out)
+ }
+}
+
+func TestDebugCallGC(t *testing.T) {
+ g, after := startDebugCallWorker(t)
+ defer after()
+
+ // Inject a call that performs a GC.
+ if _, err := runtime.InjectDebugCall(g, runtime.GC, nil, nil, debugCallTKill, false); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestDebugCallGrowStack(t *testing.T) {
+ g, after := startDebugCallWorker(t)
+ defer after()
+
+ // Inject a call that grows the stack. debugCallWorker checks
+ // for stack pointer breakage.
+ if _, err := runtime.InjectDebugCall(g, func() { growStack(nil) }, nil, nil, debugCallTKill, false); err != nil {
+ t.Fatal(err)
+ }
+}
+
+//go:nosplit
+func debugCallUnsafePointWorker(gpp **runtime.G, ready, stop *uint32) {
+ // The nosplit causes this function to not contain safe-points
+ // except at calls.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ *gpp = runtime.Getg()
+
+ for atomic.LoadUint32(stop) == 0 {
+ atomic.StoreUint32(ready, 1)
+ }
+}
+
+func TestDebugCallUnsafePoint(t *testing.T) {
+ skipUnderDebugger(t)
+
+ // This can deadlock if there aren't enough threads or if a GC
+ // tries to interrupt an atomic loop (see issue #10958).
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
+
+ // InjectDebugCall cannot be executed while a GC is actively in
+ // progress. Wait until the current GC is done, and turn it off.
+ //
+ // See #49370.
+ runtime.GC()
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+
+ // Test that the runtime refuses call injection at unsafe points.
+ var g *runtime.G
+ var ready, stop uint32
+ defer atomic.StoreUint32(&stop, 1)
+ go debugCallUnsafePointWorker(&g, &ready, &stop)
+ for atomic.LoadUint32(&ready) == 0 {
+ runtime.Gosched()
+ }
+
+ _, err := runtime.InjectDebugCall(g, func() {}, nil, nil, debugCallTKill, true)
+ if msg := "call not at safe point"; err == nil || err.Error() != msg {
+ t.Fatalf("want %q, got %s", msg, err)
+ }
+}
+
+func TestDebugCallPanic(t *testing.T) {
+ skipUnderDebugger(t)
+
+ // This can deadlock if there aren't enough threads.
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
+
+ // InjectDebugCall cannot be executed while a GC is actively in
+ // progress. Wait until the current GC is done, and turn it off.
+ //
+ // See #10958 and #49370.
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+ // TODO(mknyszek): This extra GC cycle is likely unnecessary
+ // because preemption (which may happen during the sweep phase)
+ // isn't much of an issue anymore thanks to asynchronous preemption.
+ // The biggest risk is having a write barrier in the debug call
+ // injection test code fire, because it runs in a signal handler
+ // and may not have a P.
+ runtime.GC()
+
+ ready := make(chan *runtime.G)
+ var stop uint32
+ defer atomic.StoreUint32(&stop, 1)
+ go func() {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ ready <- runtime.Getg()
+ for atomic.LoadUint32(&stop) == 0 {
+ }
+ }()
+ g := <-ready
+
+ p, err := runtime.InjectDebugCall(g, func() { panic("test") }, nil, nil, debugCallTKill, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ps, ok := p.(string); !ok || ps != "test" {
+ t.Fatalf("wanted panic %v, got %v", "test", p)
+ }
+}
diff --git a/src/runtime/debugcall.go b/src/runtime/debugcall.go
new file mode 100644
index 0000000..a4393b1
--- /dev/null
+++ b/src/runtime/debugcall.go
@@ -0,0 +1,252 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64
+
+package runtime
+
+import "unsafe"
+
+const (
+ debugCallSystemStack = "executing on Go runtime stack"
+ debugCallUnknownFunc = "call from unknown function"
+ debugCallRuntime = "call from within the Go runtime"
+ debugCallUnsafePoint = "call not at safe point"
+)
+
+func debugCallV2()
+func debugCallPanicked(val any)
+
+// debugCallCheck checks whether it is safe to inject a debugger
+// function call with return PC pc. If not, it returns a string
+// explaining why.
+//
+//go:nosplit
+func debugCallCheck(pc uintptr) string {
+ // No user calls from the system stack.
+ if getg() != getg().m.curg {
+ return debugCallSystemStack
+ }
+ if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
+ // Fast syscalls (nanotime) and racecall switch to the
+ // g0 stack without switching g. We can't safely make
+ // a call in this state. (We can't even safely
+ // systemstack.)
+ return debugCallSystemStack
+ }
+
+ // Switch to the system stack to avoid overflowing the user
+ // stack.
+ var ret string
+ systemstack(func() {
+ f := findfunc(pc)
+ if !f.valid() {
+ ret = debugCallUnknownFunc
+ return
+ }
+
+ name := funcname(f)
+
+ switch name {
+ case "debugCall32",
+ "debugCall64",
+ "debugCall128",
+ "debugCall256",
+ "debugCall512",
+ "debugCall1024",
+ "debugCall2048",
+ "debugCall4096",
+ "debugCall8192",
+ "debugCall16384",
+ "debugCall32768",
+ "debugCall65536":
+ // These functions are allowed so that the debugger can initiate multiple function calls.
+ // See: https://golang.org/cl/161137/
+ return
+ }
+
+ // Disallow calls from the runtime. We could
+ // potentially make this condition tighter (e.g., not
+ // when locks are held), but there are enough tightly
+ // coded sequences (e.g., defer handling) that it's
+ // better to play it safe.
+ if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
+ ret = debugCallRuntime
+ return
+ }
+
+ // Check that this isn't an unsafe-point.
+ if pc != f.entry() {
+ pc--
+ }
+ up := pcdatavalue(f, _PCDATA_UnsafePoint, pc, nil)
+ if up != _PCDATA_UnsafePointSafe {
+ // Not at a safe point.
+ ret = debugCallUnsafePoint
+ }
+ })
+ return ret
+}
+
+// debugCallWrap starts a new goroutine to run a debug call and blocks
+// the calling goroutine. On the goroutine, it prepares to recover
+// panics from the debug call, and then calls the call dispatching
+// function at PC dispatch.
+//
+// This must be deeply nosplit because there are untyped values on the
+// stack from debugCallV2.
+//
+//go:nosplit
+func debugCallWrap(dispatch uintptr) {
+ var lockedm bool
+ var lockedExt uint32
+ callerpc := getcallerpc()
+ gp := getg()
+
+ // Create a new goroutine to execute the call on. Run this on
+ // the system stack to avoid growing our stack.
+ systemstack(func() {
+ // TODO(mknyszek): It would be nice to wrap these arguments in an allocated
+ // closure and start the goroutine with that closure, but the compiler disallows
+ // implicit closure allocation in the runtime.
+ fn := debugCallWrap1
+ newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
+ args := &debugCallWrapArgs{
+ dispatch: dispatch,
+ callingG: gp,
+ }
+ newg.param = unsafe.Pointer(args)
+
+ // If the current G is locked, then transfer that
+ // locked-ness to the new goroutine.
+ if gp.lockedm != 0 {
+ // Save lock state to restore later.
+ mp := gp.m
+ if mp != gp.lockedm.ptr() {
+ throw("inconsistent lockedm")
+ }
+
+ lockedm = true
+ lockedExt = mp.lockedExt
+
+ // Transfer external lock count to internal so
+ // it can't be unlocked from the debug call.
+ mp.lockedInt++
+ mp.lockedExt = 0
+
+ mp.lockedg.set(newg)
+ newg.lockedm.set(mp)
+ gp.lockedm = 0
+ }
+
+ // Mark the calling goroutine as being at an async
+ // safe-point, since it has a few conservative frames
+ // at the bottom of the stack. This also prevents
+ // stack shrinks.
+ gp.asyncSafePoint = true
+
+ // Stash newg away so we can execute it below (mcall's
+ // closure can't capture anything).
+ gp.schedlink.set(newg)
+ })
+
+ // Switch to the new goroutine.
+ mcall(func(gp *g) {
+ // Get newg.
+ newg := gp.schedlink.ptr()
+ gp.schedlink = 0
+
+ // Park the calling goroutine.
+ if trace.enabled {
+ traceGoPark(traceEvGoBlock, 1)
+ }
+ casGToWaiting(gp, _Grunning, waitReasonDebugCall)
+ dropg()
+
+ // Directly execute the new goroutine. The debug
+ // protocol will continue on the new goroutine, so
+ // it's important we not just let the scheduler do
+ // this or it may resume a different goroutine.
+ execute(newg, true)
+ })
+
+ // We'll resume here when the call returns.
+
+ // Restore locked state.
+ if lockedm {
+ mp := gp.m
+ mp.lockedExt = lockedExt
+ mp.lockedInt--
+ mp.lockedg.set(gp)
+ gp.lockedm.set(mp)
+ }
+
+ gp.asyncSafePoint = false
+}
+
+type debugCallWrapArgs struct {
+ dispatch uintptr
+ callingG *g
+}
+
+// debugCallWrap1 is the continuation of debugCallWrap on the callee
+// goroutine.
+func debugCallWrap1() {
+ gp := getg()
+ args := (*debugCallWrapArgs)(gp.param)
+ dispatch, callingG := args.dispatch, args.callingG
+ gp.param = nil
+
+ // Dispatch call and trap panics.
+ debugCallWrap2(dispatch)
+
+ // Resume the caller goroutine.
+ getg().schedlink.set(callingG)
+ mcall(func(gp *g) {
+ callingG := gp.schedlink.ptr()
+ gp.schedlink = 0
+
+ // Unlock this goroutine from the M if necessary. The
+ // calling G will relock.
+ if gp.lockedm != 0 {
+ gp.lockedm = 0
+ gp.m.lockedg = 0
+ }
+
+ // Switch back to the calling goroutine. At some point
+ // the scheduler will schedule us again and we'll
+ // finish exiting.
+ if trace.enabled {
+ traceGoSched()
+ }
+ casgstatus(gp, _Grunning, _Grunnable)
+ dropg()
+ lock(&sched.lock)
+ globrunqput(gp)
+ unlock(&sched.lock)
+
+ if trace.enabled {
+ traceGoUnpark(callingG, 0)
+ }
+ casgstatus(callingG, _Gwaiting, _Grunnable)
+ execute(callingG, true)
+ })
+}
+
+func debugCallWrap2(dispatch uintptr) {
+ // Call the dispatch function and trap panics.
+ var dispatchF func()
+ dispatchFV := funcval{dispatch}
+ *(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))
+
+ var ok bool
+ defer func() {
+ if !ok {
+ err := recover()
+ debugCallPanicked(err)
+ }
+ }()
+ dispatchF()
+ ok = true
+}
diff --git a/src/runtime/debuglog.go b/src/runtime/debuglog.go
new file mode 100644
index 0000000..b18774e
--- /dev/null
+++ b/src/runtime/debuglog.go
@@ -0,0 +1,831 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides an internal debug logging facility. The debug
+// log is a lightweight, in-memory, per-M ring buffer. By default, the
+// runtime prints the debug log on panic.
+//
+// To print something to the debug log, call dlog to obtain a dlogger
+// and use the methods on that to add values. The values will be
+// space-separated in the output (much like println).
+//
+// This facility can be enabled by passing -tags debuglog when
+// building. Without this tag, dlog calls compile to nothing.
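+//
+// For example, a call site might look like this (a sketch; "reason"
+// and n are placeholders, but s, i, and end are the real dlogger
+// methods defined below):
+//
+//	dlog().s("reason").i(n).end()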
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// debugLogBytes is the size of each per-M ring buffer. This is
+// allocated off-heap to avoid blowing up the M and hence the GC'd
+// heap size.
+const debugLogBytes = 16 << 10
+
+// debugLogStringLimit is the maximum number of bytes in a string.
+// Above this, the string will be truncated with "..(n more bytes).."
+const debugLogStringLimit = debugLogBytes / 8
+
+// dlog returns a debug logger. The caller can use methods on the
+// returned logger to add values, which will be space-separated in the
+// final output, much like println. The caller must call end() to
+// finish the message.
+//
+// dlog can be used from highly-constrained corners of the runtime: it
+// is safe to use in the signal handler, from within the write
+// barrier, from within the stack implementation, and in places that
+// must be recursively nosplit.
+//
+// This will be compiled away if built without the debuglog build tag.
+// However, argument construction may not be. If any of the arguments
+// are not literals or trivial expressions, consider protecting the
+// call with "if dlogEnabled".
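+//
+// For example (a sketch; expensiveStat is a placeholder for any
+// non-trivial argument expression):
+//
+//	if dlogEnabled {
+//		dlog().s("stat").i64(expensiveStat()).end()
+//	}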
+//
+//go:nosplit
+//go:nowritebarrierrec
+func dlog() *dlogger {
+ if !dlogEnabled {
+ return nil
+ }
+
+ // Get the time.
+ tick, nano := uint64(cputicks()), uint64(nanotime())
+
+ // Try to get a cached logger.
+ l := getCachedDlogger()
+
+ // If we couldn't get a cached logger, try to get one from the
+ // global pool.
+ if l == nil {
+ allp := (*uintptr)(unsafe.Pointer(&allDloggers))
+ all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
+ for l1 := all; l1 != nil; l1 = l1.allLink {
+ if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
+ l = l1
+ break
+ }
+ }
+ }
+
+ // If that failed, allocate a new logger.
+ if l == nil {
+ // Use sysAllocOS instead of sysAlloc because we want to interfere
+ // with the runtime as little as possible, and sysAlloc updates accounting.
+ l = (*dlogger)(sysAllocOS(unsafe.Sizeof(dlogger{})))
+ if l == nil {
+ throw("failed to allocate debug log")
+ }
+ l.w.r.data = &l.w.data
+ l.owned.Store(1)
+
+ // Prepend to allDloggers list.
+ headp := (*uintptr)(unsafe.Pointer(&allDloggers))
+ for {
+ head := atomic.Loaduintptr(headp)
+ l.allLink = (*dlogger)(unsafe.Pointer(head))
+ if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
+ break
+ }
+ }
+ }
+
+ // If the time delta is getting too high, write a new sync
+ // packet. We set the limit so we don't write more than 6
+ // bytes of delta in the record header.
+ const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
+ if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
+ l.w.writeSync(tick, nano)
+ }
+
+ // Reserve space for framing header.
+ l.w.ensure(debugLogHeaderSize)
+ l.w.write += debugLogHeaderSize
+
+ // Write record header.
+ l.w.uvarint(tick - l.w.tick)
+ l.w.uvarint(nano - l.w.nano)
+ gp := getg()
+ if gp != nil && gp.m != nil && gp.m.p != 0 {
+ l.w.varint(int64(gp.m.p.ptr().id))
+ } else {
+ l.w.varint(-1)
+ }
+
+ return l
+}
+
+// A dlogger writes to the debug log.
+//
+// To obtain a dlogger, call dlog(). When done with the dlogger, call
+// end().
+type dlogger struct {
+ _ sys.NotInHeap
+ w debugLogWriter
+
+ // allLink is the next dlogger in the allDloggers list.
+ allLink *dlogger
+
+ // owned indicates that this dlogger is owned by an M. This is
+ // accessed atomically.
+ owned atomic.Uint32
+}
+
+// allDloggers is a list of all dloggers, linked through
+// dlogger.allLink. This is accessed atomically. This is prepend-only,
+// so it doesn't need to protect against ABA races.
+var allDloggers *dlogger
+
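+
+// end finishes the current record by filling in its framing header
+// and committing it, then returns the logger to the per-M cache or
+// the global pool.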
+//go:nosplit
+func (l *dlogger) end() {
+ if !dlogEnabled {
+ return
+ }
+
+ // Fill in framing header.
+ size := l.w.write - l.w.r.end
+ if !l.w.writeFrameAt(l.w.r.end, size) {
+ throw("record too large")
+ }
+
+ // Commit the record.
+ l.w.r.end = l.w.write
+
+ // Attempt to return this logger to the cache.
+ if putCachedDlogger(l) {
+ return
+ }
+
+ // Return the logger to the global pool.
+ l.owned.Store(0)
+}
+
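+
+// Tags identifying the type of each encoded field. Every field in a
+// record starts with one of these bytes.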
+const (
+ debugLogUnknown = 1 + iota
+ debugLogBoolTrue
+ debugLogBoolFalse
+ debugLogInt
+ debugLogUint
+ debugLogHex
+ debugLogPtr
+ debugLogString
+ debugLogConstString
+ debugLogStringOverflow
+
+ debugLogPC
+ debugLogTraceback
+)
+
+//go:nosplit
+func (l *dlogger) b(x bool) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ if x {
+ l.w.byte(debugLogBoolTrue)
+ } else {
+ l.w.byte(debugLogBoolFalse)
+ }
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) i(x int) *dlogger {
+ return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i8(x int8) *dlogger {
+ return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i16(x int16) *dlogger {
+ return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i32(x int32) *dlogger {
+ return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i64(x int64) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogInt)
+ l.w.varint(x)
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) u(x uint) *dlogger {
+ return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) uptr(x uintptr) *dlogger {
+ return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u8(x uint8) *dlogger {
+ return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u16(x uint16) *dlogger {
+ return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u32(x uint32) *dlogger {
+ return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u64(x uint64) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogUint)
+ l.w.uvarint(x)
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) hex(x uint64) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogHex)
+ l.w.uvarint(x)
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) p(x any) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogPtr)
+ if x == nil {
+ l.w.uvarint(0)
+ } else {
+ v := efaceOf(&x)
+ switch v._type.kind & kindMask {
+ case kindChan, kindFunc, kindMap, kindPtr, kindUnsafePointer:
+ l.w.uvarint(uint64(uintptr(v.data)))
+ default:
+ throw("not a pointer type")
+ }
+ }
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) s(x string) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+
+ strData := unsafe.StringData(x)
+ datap := &firstmoduledata
+ if len(x) > 4 && datap.etext <= uintptr(unsafe.Pointer(strData)) && uintptr(unsafe.Pointer(strData)) < datap.end {
+ // String constants are in the rodata section, which
+ // isn't recorded in moduledata. But it has to be
+ // somewhere between etext and end.
+ l.w.byte(debugLogConstString)
+ l.w.uvarint(uint64(len(x)))
+ l.w.uvarint(uint64(uintptr(unsafe.Pointer(strData)) - datap.etext))
+ } else {
+ l.w.byte(debugLogString)
+ // We can't use unsafe.Slice as it may panic, which isn't safe
+ // in this (potentially) nowritebarrier context.
+ var b []byte
+ bb := (*slice)(unsafe.Pointer(&b))
+ bb.array = unsafe.Pointer(strData)
+ bb.len, bb.cap = len(x), len(x)
+ if len(b) > debugLogStringLimit {
+ b = b[:debugLogStringLimit]
+ }
+ l.w.uvarint(uint64(len(b)))
+ l.w.bytes(b)
+ if len(b) != len(x) {
+ l.w.byte(debugLogStringOverflow)
+ l.w.uvarint(uint64(len(x) - len(b)))
+ }
+ }
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) pc(x uintptr) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogPC)
+ l.w.uvarint(uint64(x))
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) traceback(x []uintptr) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogTraceback)
+ l.w.uvarint(uint64(len(x)))
+ for _, pc := range x {
+ l.w.uvarint(uint64(pc))
+ }
+ return l
+}
+
+// A debugLogWriter is a ring buffer of binary debug log records.
+//
+// A log record consists of a 2-byte framing header and a sequence of
+// fields. The framing header gives the size of the record as a little
+// endian 16-bit value. Each field starts with a byte indicating its
+// type, followed by type-specific data. If the size in the framing
+// header is 0, it's a sync record consisting of two little endian
+// 64-bit values giving a new time base.
+//
+// Because this is a ring buffer, new records will eventually
+// overwrite old records. Hence, it maintains a reader that consumes
+// the log as it gets overwritten. That reader state is where an
+// actual log reader would start.
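+//
+// Schematically (sizes in bytes; deltas are relative to the last
+// sync record):
+//
+//	record: size (2, LE) | tick delta (uvarint) | nano delta (uvarint) | P (varint) | fields...
+//	sync:   size=0 (2, LE) | tick (8, LE) | nano (8, LE)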
+type debugLogWriter struct {
+ _ sys.NotInHeap
+ write uint64
+ data debugLogBuf
+
+ // tick and nano are the time bases from the most recently
+ // written sync record.
+ tick, nano uint64
+
+ // r is a reader that consumes records as they get overwritten
+ // by the writer. It also acts as the initial reader state
+ // when printing the log.
+ r debugLogReader
+
+ // buf is a scratch buffer for encoding. This is here to
+ // reduce stack usage.
+ buf [10]byte
+}
+
+type debugLogBuf struct {
+ _ sys.NotInHeap
+ b [debugLogBytes]byte
+}
+
+const (
+ // debugLogHeaderSize is the number of bytes in the framing
+ // header of every dlog record.
+ debugLogHeaderSize = 2
+
+ // debugLogSyncSize is the number of bytes in a sync record.
+ debugLogSyncSize = debugLogHeaderSize + 2*8
+)
+
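+
+// ensure makes room to write n more bytes by advancing the reader
+// past records that the write would otherwise overwrite.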
+//go:nosplit
+func (l *debugLogWriter) ensure(n uint64) {
+ for l.write+n >= l.r.begin+uint64(len(l.data.b)) {
+ // Consume record at begin.
+ if l.r.skip() == ^uint64(0) {
+ // Wrapped around within a record.
+ //
+ // TODO(austin): It would be better to just
+ // eat the whole buffer at this point, but we
+ // have to communicate that to the reader
+ // somehow.
+ throw("record wrapped around")
+ }
+ }
+}
+
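+
+// writeFrameAt writes size as the 16-bit little-endian framing header
+// at pos and reports whether size fits in the header.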
+//go:nosplit
+func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
+ l.data.b[pos%uint64(len(l.data.b))] = uint8(size)
+ l.data.b[(pos+1)%uint64(len(l.data.b))] = uint8(size >> 8)
+ return size <= 0xFFFF
+}
+
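+
+// writeSync writes and immediately commits a sync record establishing
+// (tick, nano) as the new time base.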
+//go:nosplit
+func (l *debugLogWriter) writeSync(tick, nano uint64) {
+ l.tick, l.nano = tick, nano
+ l.ensure(debugLogHeaderSize)
+ l.writeFrameAt(l.write, 0)
+ l.write += debugLogHeaderSize
+ l.writeUint64LE(tick)
+ l.writeUint64LE(nano)
+ l.r.end = l.write
+}
+
+//go:nosplit
+func (l *debugLogWriter) writeUint64LE(x uint64) {
+ var b [8]byte
+ b[0] = byte(x)
+ b[1] = byte(x >> 8)
+ b[2] = byte(x >> 16)
+ b[3] = byte(x >> 24)
+ b[4] = byte(x >> 32)
+ b[5] = byte(x >> 40)
+ b[6] = byte(x >> 48)
+ b[7] = byte(x >> 56)
+ l.bytes(b[:])
+}
+
+//go:nosplit
+func (l *debugLogWriter) byte(x byte) {
+ l.ensure(1)
+ pos := l.write
+ l.write++
+ l.data.b[pos%uint64(len(l.data.b))] = x
+}
+
+//go:nosplit
+func (l *debugLogWriter) bytes(x []byte) {
+ l.ensure(uint64(len(x)))
+ pos := l.write
+ l.write += uint64(len(x))
+ for len(x) > 0 {
+ n := copy(l.data.b[pos%uint64(len(l.data.b)):], x)
+ pos += uint64(n)
+ x = x[n:]
+ }
+}
+
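+
+// varint writes x zig-zag encoded: 0, -1, 1, -2, 2, ... map to
+// 0, 1, 2, 3, 4, ..., so values of small magnitude use few bytes.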
+//go:nosplit
+func (l *debugLogWriter) varint(x int64) {
+ var u uint64
+ if x < 0 {
+		u = (^uint64(x) << 1) | 1 // complement x, bit 0 is 1
+ } else {
+		u = (uint64(x) << 1) // do not complement x, bit 0 is 0
+ }
+ l.uvarint(u)
+}
+
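+
+// uvarint writes u in base-128 groups, least significant first, with
+// the high bit of each byte marking continuation; e.g. 300 is written
+// as 0xAC 0x02.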
+//go:nosplit
+func (l *debugLogWriter) uvarint(u uint64) {
+ i := 0
+ for u >= 0x80 {
+ l.buf[i] = byte(u) | 0x80
+ u >>= 7
+ i++
+ }
+ l.buf[i] = byte(u)
+ i++
+ l.bytes(l.buf[:i])
+}
+
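+
+// A debugLogReader decodes records from a debugLogBuf between
+// positions begin and end.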
+type debugLogReader struct {
+ data *debugLogBuf
+
+ // begin and end are the positions in the log of the beginning
+ // and end of the log data, modulo len(data).
+ begin, end uint64
+
+ // tick and nano are the current time base at begin.
+ tick, nano uint64
+}
+
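+
+// skip consumes the record at begin and returns its size in bytes, or
+// ^uint64(0) if only a partial record is available because the writer
+// wrapped around within it.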
+//go:nosplit
+func (r *debugLogReader) skip() uint64 {
+ // Read size at pos.
+ if r.begin+debugLogHeaderSize > r.end {
+ return ^uint64(0)
+ }
+ size := uint64(r.readUint16LEAt(r.begin))
+ if size == 0 {
+ // Sync packet.
+ r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
+ r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
+ size = debugLogSyncSize
+ }
+ if r.begin+size > r.end {
+ return ^uint64(0)
+ }
+ r.begin += size
+ return size
+}
+
+//go:nosplit
+func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
+ return uint16(r.data.b[pos%uint64(len(r.data.b))]) |
+ uint16(r.data.b[(pos+1)%uint64(len(r.data.b))])<<8
+}
+
+//go:nosplit
+func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
+ var b [8]byte
+ for i := range b {
+ b[i] = r.data.b[pos%uint64(len(r.data.b))]
+ pos++
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 |
+ uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 |
+ uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
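+
+// peek consumes any sync records at begin and returns the absolute
+// tick of the next record without consuming it, or ^uint64(0) if no
+// complete record remains.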
+func (r *debugLogReader) peek() (tick uint64) {
+ // Consume any sync records.
+ size := uint64(0)
+ for size == 0 {
+ if r.begin+debugLogHeaderSize > r.end {
+ return ^uint64(0)
+ }
+ size = uint64(r.readUint16LEAt(r.begin))
+ if size != 0 {
+ break
+ }
+ if r.begin+debugLogSyncSize > r.end {
+ return ^uint64(0)
+ }
+ // Sync packet.
+ r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
+ r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
+ r.begin += debugLogSyncSize
+ }
+
+ // Peek tick delta.
+ if r.begin+size > r.end {
+ return ^uint64(0)
+ }
+ pos := r.begin + debugLogHeaderSize
+ var u uint64
+ for i := uint(0); ; i += 7 {
+ b := r.data.b[pos%uint64(len(r.data.b))]
+ pos++
+ u |= uint64(b&^0x80) << i
+ if b&0x80 == 0 {
+ break
+ }
+ }
+ if pos > r.begin+size {
+ return ^uint64(0)
+ }
+ return r.tick + u
+}
+
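+
+// header decodes the framing and record header at begin, returning
+// the position just past the record's end, its absolute tick and nano
+// times, and its P ID.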
+func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
+ // Read size. We've already skipped sync packets and checked
+ // bounds in peek.
+ size := uint64(r.readUint16LEAt(r.begin))
+ end = r.begin + size
+ r.begin += debugLogHeaderSize
+
+ // Read tick, nano, and p.
+ tick = r.uvarint() + r.tick
+ nano = r.uvarint() + r.nano
+ p = int(r.varint())
+
+ return
+}
+
+func (r *debugLogReader) uvarint() uint64 {
+ var u uint64
+ for i := uint(0); ; i += 7 {
+ b := r.data.b[r.begin%uint64(len(r.data.b))]
+ r.begin++
+ u |= uint64(b&^0x80) << i
+ if b&0x80 == 0 {
+ break
+ }
+ }
+ return u
+}
+
+func (r *debugLogReader) varint() int64 {
+ u := r.uvarint()
+ var v int64
+ if u&1 == 0 {
+ v = int64(u >> 1)
+ } else {
+ v = ^int64(u >> 1)
+ }
+ return v
+}
+
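+
+// printVal decodes and prints the next field of the current record
+// and reports whether decoding can continue.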
+func (r *debugLogReader) printVal() bool {
+ typ := r.data.b[r.begin%uint64(len(r.data.b))]
+ r.begin++
+
+ switch typ {
+ default:
+ print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
+ return false
+
+ case debugLogUnknown:
+ print("<unknown kind>")
+
+ case debugLogBoolTrue:
+ print(true)
+
+ case debugLogBoolFalse:
+ print(false)
+
+ case debugLogInt:
+ print(r.varint())
+
+ case debugLogUint:
+ print(r.uvarint())
+
+ case debugLogHex, debugLogPtr:
+ print(hex(r.uvarint()))
+
+ case debugLogString:
+ sl := r.uvarint()
+ if r.begin+sl > r.end {
+ r.begin = r.end
+ print("<string length corrupted>")
+ break
+ }
+ for sl > 0 {
+ b := r.data.b[r.begin%uint64(len(r.data.b)):]
+ if uint64(len(b)) > sl {
+ b = b[:sl]
+ }
+ r.begin += uint64(len(b))
+ sl -= uint64(len(b))
+ gwrite(b)
+ }
+
+ case debugLogConstString:
+ len, ptr := int(r.uvarint()), uintptr(r.uvarint())
+ ptr += firstmoduledata.etext
+ // We can't use unsafe.String as it may panic, which isn't safe
+ // in this (potentially) nowritebarrier context.
+ str := stringStruct{
+ str: unsafe.Pointer(ptr),
+ len: len,
+ }
+ s := *(*string)(unsafe.Pointer(&str))
+ print(s)
+
+ case debugLogStringOverflow:
+ print("..(", r.uvarint(), " more bytes)..")
+
+ case debugLogPC:
+ printDebugLogPC(uintptr(r.uvarint()), false)
+
+ case debugLogTraceback:
+ n := int(r.uvarint())
+ for i := 0; i < n; i++ {
+ print("\n\t")
+ // gentraceback PCs are always return PCs.
+ // Convert them to call PCs.
+ //
+ // TODO(austin): Expand inlined frames.
+ printDebugLogPC(uintptr(r.uvarint()), true)
+ }
+ }
+
+ return true
+}
+
+// printDebugLog prints the debug log.
+func printDebugLog() {
+ if !dlogEnabled {
+ return
+ }
+
+ // This function should not panic or throw since it is used in
+ // the fatal panic path and this may deadlock.
+
+ printlock()
+
+ // Get the list of all debug logs.
+ allp := (*uintptr)(unsafe.Pointer(&allDloggers))
+ all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
+
+ // Count the logs.
+ n := 0
+ for l := all; l != nil; l = l.allLink {
+ n++
+ }
+ if n == 0 {
+ printunlock()
+ return
+ }
+
+ // Prepare read state for all logs.
+ type readState struct {
+ debugLogReader
+ first bool
+ lost uint64
+ nextTick uint64
+ }
+ // Use sysAllocOS instead of sysAlloc because we want to interfere
+ // with the runtime as little as possible, and sysAlloc updates accounting.
+ state1 := sysAllocOS(unsafe.Sizeof(readState{}) * uintptr(n))
+ if state1 == nil {
+ println("failed to allocate read state for", n, "logs")
+ printunlock()
+ return
+ }
+ state := (*[1 << 20]readState)(state1)[:n]
+ {
+ l := all
+ for i := range state {
+ s := &state[i]
+ s.debugLogReader = l.w.r
+ s.first = true
+ s.lost = l.w.r.begin
+ s.nextTick = s.peek()
+ l = l.allLink
+ }
+ }
+
+ // Print records.
+ for {
+ // Find the next record.
+ var best struct {
+ tick uint64
+ i int
+ }
+ best.tick = ^uint64(0)
+ for i := range state {
+ if state[i].nextTick < best.tick {
+ best.tick = state[i].nextTick
+ best.i = i
+ }
+ }
+ if best.tick == ^uint64(0) {
+ break
+ }
+
+ // Print record.
+ s := &state[best.i]
+ if s.first {
+ print(">> begin log ", best.i)
+ if s.lost != 0 {
+ print("; lost first ", s.lost>>10, "KB")
+ }
+ print(" <<\n")
+ s.first = false
+ }
+
+ end, _, nano, p := s.header()
+ oldEnd := s.end
+ s.end = end
+
+ print("[")
+ var tmpbuf [21]byte
+ pnano := int64(nano) - runtimeInitTime
+ if pnano < 0 {
+ // Logged before runtimeInitTime was set.
+ pnano = 0
+ }
+ pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9)
+ print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes)))
+ print(" P ", p, "] ")
+
+ for i := 0; s.begin < s.end; i++ {
+ if i > 0 {
+ print(" ")
+ }
+ if !s.printVal() {
+ // Abort this P log.
+ print("<aborting P log>")
+ end = oldEnd
+ break
+ }
+ }
+ println()
+
+ // Move on to the next record.
+ s.begin = end
+ s.end = oldEnd
+ s.nextTick = s.peek()
+ }
+
+ printunlock()
+}
+
+// printDebugLogPC prints a single symbolized PC. If returnPC is true,
+// pc is a return PC that must first be converted to a call PC.
+func printDebugLogPC(pc uintptr, returnPC bool) {
+ fn := findfunc(pc)
+ if returnPC && (!fn.valid() || pc > fn.entry()) {
+ // TODO(austin): Don't back up if the previous frame
+ // was a sigpanic.
+ pc--
+ }
+
+ print(hex(pc))
+ if !fn.valid() {
+ print(" [unknown PC]")
+ } else {
+ name := funcname(fn)
+ file, line := funcline(fn, pc)
+ print(" [", name, "+", hex(pc-fn.entry()),
+ " ", file, ":", line, "]")
+ }
+}
diff --git a/src/runtime/debuglog_off.go b/src/runtime/debuglog_off.go
new file mode 100644
index 0000000..fa3be39
--- /dev/null
+++ b/src/runtime/debuglog_off.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !debuglog
+
+package runtime
+
+const dlogEnabled = false
+
+type dlogPerM struct{}
+
+func getCachedDlogger() *dlogger {
+ return nil
+}
+
+func putCachedDlogger(l *dlogger) bool {
+ return false
+}
diff --git a/src/runtime/debuglog_on.go b/src/runtime/debuglog_on.go
new file mode 100644
index 0000000..b815020
--- /dev/null
+++ b/src/runtime/debuglog_on.go
@@ -0,0 +1,45 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build debuglog
+
+package runtime
+
+const dlogEnabled = true
+
+// dlogPerM is the per-M debug log data. This is embedded in the m
+// struct.
+type dlogPerM struct {
+ dlogCache *dlogger
+}
+
+// getCachedDlogger returns a cached dlogger if it can do so
+// efficiently, or nil otherwise. The returned dlogger will be owned.
+func getCachedDlogger() *dlogger {
+ mp := acquirem()
+ // We don't return a cached dlogger if we're running on the
+ // signal stack in case the signal arrived while in
+ // get/putCachedDlogger. (Too bad we don't have non-atomic
+ // exchange!)
+ var l *dlogger
+ if getg() != mp.gsignal {
+ l = mp.dlogCache
+ mp.dlogCache = nil
+ }
+ releasem(mp)
+ return l
+}
+
+// putCachedDlogger attempts to return l to the local cache. It
+// returns false if this fails.
+func putCachedDlogger(l *dlogger) bool {
+ mp := acquirem()
+ if getg() != mp.gsignal && mp.dlogCache == nil {
+ mp.dlogCache = l
+ releasem(mp)
+ return true
+ }
+ releasem(mp)
+ return false
+}
diff --git a/src/runtime/debuglog_test.go b/src/runtime/debuglog_test.go
new file mode 100644
index 0000000..18c54a8
--- /dev/null
+++ b/src/runtime/debuglog_test.go
@@ -0,0 +1,169 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(austin): All of these tests are skipped if the debuglog build
+// tag isn't provided. That means we basically never test debuglog.
+// There are two potential ways around this:
+//
+// 1. Make these tests re-build the runtime test with the debuglog
+// build tag and re-invoke themselves.
+//
+// 2. Always build the whole debuglog infrastructure and depend on
+// linker dead-code elimination to drop it. This is easy for dlog()
+// since there won't be any calls to it. For printDebugLog, we can
+// make panic call a wrapper that calls printDebugLog if the
+// debuglog build tag is set, or otherwise do nothing. Then tests
+// could call printDebugLog directly. This is the right answer in
+// principle, but currently our linker reads in all symbols
+// regardless, so this would slow down and bloat all links. If the
+// linker gets more efficient about this, we should revisit this
+// approach.
+
+package runtime_test
+
+import (
+ "fmt"
+ "internal/testenv"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+)
+
+func skipDebugLog(t *testing.T) {
+ if !runtime.DlogEnabled {
+ t.Skip("debug log disabled (rebuild with -tags debuglog)")
+ }
+}
+
+func dlogCanonicalize(x string) string {
+ begin := regexp.MustCompile(`(?m)^>> begin log \d+ <<\n`)
+ x = begin.ReplaceAllString(x, "")
+ prefix := regexp.MustCompile(`(?m)^\[[^]]+\]`)
+ x = prefix.ReplaceAllString(x, "[]")
+ return x
+}
+
+func TestDebugLog(t *testing.T) {
+ skipDebugLog(t)
+ runtime.ResetDebugLog()
+ runtime.Dlog().S("testing").End()
+ got := dlogCanonicalize(runtime.DumpDebugLog())
+ if want := "[] testing\n"; got != want {
+ t.Fatalf("want %q, got %q", want, got)
+ }
+}
+
+func TestDebugLogTypes(t *testing.T) {
+ skipDebugLog(t)
+ runtime.ResetDebugLog()
+ var varString = strings.Repeat("a", 4)
+ runtime.Dlog().B(true).B(false).I(-42).I16(0x7fff).U64(^uint64(0)).Hex(0xfff).P(nil).S(varString).S("const string").End()
+ got := dlogCanonicalize(runtime.DumpDebugLog())
+ if want := "[] true false -42 32767 18446744073709551615 0xfff 0x0 aaaa const string\n"; got != want {
+ t.Fatalf("want %q, got %q", want, got)
+ }
+}
+
+func TestDebugLogSym(t *testing.T) {
+ skipDebugLog(t)
+ runtime.ResetDebugLog()
+ pc, _, _, _ := runtime.Caller(0)
+ runtime.Dlog().PC(pc).End()
+ got := dlogCanonicalize(runtime.DumpDebugLog())
+ want := regexp.MustCompile(`\[\] 0x[0-9a-f]+ \[runtime_test\.TestDebugLogSym\+0x[0-9a-f]+ .*/debuglog_test\.go:[0-9]+\]\n`)
+ if !want.MatchString(got) {
+ t.Fatalf("want matching %s, got %q", want, got)
+ }
+}
+
+func TestDebugLogInterleaving(t *testing.T) {
+ skipDebugLog(t)
+ runtime.ResetDebugLog()
+ var wg sync.WaitGroup
+ done := int32(0)
+ wg.Add(1)
+ go func() {
+ // Encourage main goroutine to move around to
+ // different Ms and Ps.
+ for atomic.LoadInt32(&done) == 0 {
+ runtime.Gosched()
+ }
+ wg.Done()
+ }()
+ var want strings.Builder
+ for i := 0; i < 1000; i++ {
+ runtime.Dlog().I(i).End()
+ fmt.Fprintf(&want, "[] %d\n", i)
+ runtime.Gosched()
+ }
+ atomic.StoreInt32(&done, 1)
+ wg.Wait()
+
+ gotFull := runtime.DumpDebugLog()
+ got := dlogCanonicalize(gotFull)
+ if got != want.String() {
+		// Since the timestamps are useful in understanding
+ // failures of this test, we print the uncanonicalized
+ // output.
+ t.Fatalf("want %q, got (uncanonicalized) %q", want.String(), gotFull)
+ }
+}
+
+func TestDebugLogWraparound(t *testing.T) {
+ skipDebugLog(t)
+
+ // Make sure we don't switch logs so it's easier to fill one up.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ runtime.ResetDebugLog()
+ var longString = strings.Repeat("a", 128)
+ var want strings.Builder
+ for i, j := 0, 0; j < 2*runtime.DebugLogBytes; i, j = i+1, j+len(longString) {
+ runtime.Dlog().I(i).S(longString).End()
+ fmt.Fprintf(&want, "[] %d %s\n", i, longString)
+ }
+ log := runtime.DumpDebugLog()
+
+ // Check for "lost" message.
+ lost := regexp.MustCompile(`^>> begin log \d+; lost first \d+KB <<\n`)
+ if !lost.MatchString(log) {
+ t.Fatalf("want matching %s, got %q", lost, log)
+ }
+ idx := lost.FindStringIndex(log)
+ // Strip lost message.
+ log = dlogCanonicalize(log[idx[1]:])
+
+ // Check log.
+ if !strings.HasSuffix(want.String(), log) {
+ t.Fatalf("wrong suffix:\n%s", log)
+ }
+}
+
+func TestDebugLogLongString(t *testing.T) {
+ skipDebugLog(t)
+
+ runtime.ResetDebugLog()
+ var longString = strings.Repeat("a", runtime.DebugLogStringLimit+1)
+ runtime.Dlog().S(longString).End()
+ got := dlogCanonicalize(runtime.DumpDebugLog())
+ want := "[] " + strings.Repeat("a", runtime.DebugLogStringLimit) + " ..(1 more bytes)..\n"
+ if got != want {
+ t.Fatalf("want %q, got %q", want, got)
+ }
+}
+
+// TestDebugLogBuild verifies that the runtime builds with -tags=debuglog.
+func TestDebugLogBuild(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+	// It doesn't matter which program we build; anything will rebuild the
+ // runtime.
+ if _, err := buildTestProg(t, "testprog", "-tags=debuglog"); err != nil {
+ t.Fatal(err)
+ }
+}