Diffstat (limited to 'src/runtime/testdata/testprog')
27 files changed, 2387 insertions, 0 deletions
diff --git a/src/runtime/testdata/testprog/abort.go b/src/runtime/testdata/testprog/abort.go
new file mode 100644
index 0000000..9e79d4d
--- /dev/null
+++ b/src/runtime/testdata/testprog/abort.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import _ "unsafe" // for go:linkname
+
+func init() {
+	register("Abort", Abort)
+}
+
+//go:linkname runtimeAbort runtime.abort
+func runtimeAbort()
+
+func Abort() {
+	defer func() {
+		recover()
+		panic("BAD: recovered from abort")
+	}()
+	runtimeAbort()
+	println("BAD: after abort")
+}
diff --git a/src/runtime/testdata/testprog/badtraceback.go b/src/runtime/testdata/testprog/badtraceback.go
new file mode 100644
index 0000000..09aa2b8
--- /dev/null
+++ b/src/runtime/testdata/testprog/badtraceback.go
@@ -0,0 +1,50 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"runtime"
+	"runtime/debug"
+	"unsafe"
+)
+
+func init() {
+	register("BadTraceback", BadTraceback)
+}
+
+func BadTraceback() {
+	// Disable GC to prevent traceback at unexpected time.
+	debug.SetGCPercent(-1)
+	// Out of an abundance of caution, also make sure that there are
+	// no GCs actively in progress.
+	runtime.GC()
+
+	// Run badLR1 on its own stack to minimize the stack size and
+	// exercise the stack bounds logic in the hex dump.
+	go badLR1()
+	select {}
+}
+
+//go:noinline
+func badLR1() {
+	// We need two frames on LR machines because we'll smash this
+	// frame's saved LR.
+	badLR2(0)
+}
+
+//go:noinline
+func badLR2(arg int) {
+	// Smash the return PC or saved LR.
+	lrOff := unsafe.Sizeof(uintptr(0))
+	if runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" {
+		lrOff = 32 // FIXED_FRAME or sys.MinFrameSize
+	}
+	lrPtr := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&arg)) - lrOff))
+	*lrPtr = 0xbad
+
+	// Print a backtrace. This should include diagnostics for the
+	// bad return PC and a hex dump.
+	panic("backtrace")
+}
diff --git a/src/runtime/testdata/testprog/checkptr.go b/src/runtime/testdata/testprog/checkptr.go
new file mode 100644
index 0000000..60e71e6
--- /dev/null
+++ b/src/runtime/testdata/testprog/checkptr.go
@@ -0,0 +1,119 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package main + +import ( + "runtime" + "time" + "unsafe" +) + +func init() { + register("CheckPtrAlignmentNoPtr", CheckPtrAlignmentNoPtr) + register("CheckPtrAlignmentPtr", CheckPtrAlignmentPtr) + register("CheckPtrAlignmentNilPtr", CheckPtrAlignmentNilPtr) + register("CheckPtrArithmetic", CheckPtrArithmetic) + register("CheckPtrArithmetic2", CheckPtrArithmetic2) + register("CheckPtrSize", CheckPtrSize) + register("CheckPtrSmall", CheckPtrSmall) + register("CheckPtrSliceOK", CheckPtrSliceOK) + register("CheckPtrSliceFail", CheckPtrSliceFail) + register("CheckPtrStringOK", CheckPtrStringOK) + register("CheckPtrStringFail", CheckPtrStringFail) + register("CheckPtrAlignmentNested", CheckPtrAlignmentNested) +} + +func CheckPtrAlignmentNoPtr() { + var x [2]int64 + p := unsafe.Pointer(&x[0]) + sink2 = (*int64)(unsafe.Pointer(uintptr(p) + 1)) +} + +func CheckPtrAlignmentPtr() { + var x [2]int64 + p := unsafe.Pointer(&x[0]) + sink2 = (**int64)(unsafe.Pointer(uintptr(p) + 1)) +} + +// CheckPtrAlignmentNilPtr tests that checkptrAlignment doesn't crash +// on nil pointers (#47430). +func CheckPtrAlignmentNilPtr() { + var do func(int) + do = func(n int) { + // Inflate the stack so runtime.shrinkstack gets called during GC + if n > 0 { + do(n - 1) + } + + var p unsafe.Pointer + _ = (*int)(p) + } + + go func() { + for { + runtime.GC() + } + }() + + go func() { + for i := 0; ; i++ { + do(i % 1024) + } + }() + + time.Sleep(time.Second) +} + +func CheckPtrArithmetic() { + var x int + i := uintptr(unsafe.Pointer(&x)) + sink2 = (*int)(unsafe.Pointer(i)) +} + +func CheckPtrArithmetic2() { + var x [2]int64 + p := unsafe.Pointer(&x[1]) + var one uintptr = 1 + sink2 = unsafe.Pointer(uintptr(p) & ^one) +} + +func CheckPtrSize() { + p := new(int64) + sink2 = p + sink2 = (*[100]int64)(unsafe.Pointer(p)) +} + +func CheckPtrSmall() { + sink2 = unsafe.Pointer(uintptr(1)) +} + +func CheckPtrSliceOK() { + p := new([4]int64) + sink2 = unsafe.Slice(&p[1], 3) +} + +func CheckPtrSliceFail() { + p := new(int64) + sink2 = p + sink2 = unsafe.Slice(p, 100) +} + +func CheckPtrStringOK() { + p := new([4]byte) + sink2 = unsafe.String(&p[1], 3) +} + +func CheckPtrStringFail() { + p := new(byte) + sink2 = p + sink2 = unsafe.String(p, 100) +} + +func CheckPtrAlignmentNested() { + s := make([]int8, 100) + p := unsafe.Pointer(&s[0]) + n := 9 + _ = ((*[10]int8)(unsafe.Pointer((*[10]int64)(unsafe.Pointer(&p)))))[:n:n] +} diff --git a/src/runtime/testdata/testprog/crash.go b/src/runtime/testdata/testprog/crash.go new file mode 100644 index 0000000..38c8f6a --- /dev/null +++ b/src/runtime/testdata/testprog/crash.go @@ -0,0 +1,139 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "runtime" +) + +func init() { + register("Crash", Crash) + register("DoublePanic", DoublePanic) + register("ErrorPanic", ErrorPanic) + register("StringerPanic", StringerPanic) + register("DoubleErrorPanic", DoubleErrorPanic) + register("DoubleStringerPanic", DoubleStringerPanic) + register("StringPanic", StringPanic) + register("NilPanic", NilPanic) + register("CircularPanic", CircularPanic) +} + +func test(name string) { + defer func() { + if x := recover(); x != nil { + fmt.Printf(" recovered") + } + fmt.Printf(" done\n") + }() + fmt.Printf("%s:", name) + var s *string + _ = *s + fmt.Print("SHOULD NOT BE HERE") +} + +func testInNewThread(name string) { + c := make(chan bool) + go func() { + runtime.LockOSThread() + test(name) + c <- true + }() + <-c +} + +func Crash() { + runtime.LockOSThread() + test("main") + testInNewThread("new-thread") + testInNewThread("second-new-thread") + test("main-again") +} + +type P string + +func (p P) String() string { + // Try to free the "YYY" string header when the "XXX" + // panic is stringified. + runtime.GC() + runtime.GC() + runtime.GC() + return string(p) +} + +// Test that panic message is not clobbered. +// See issue 30150. +func DoublePanic() { + defer func() { + panic(P("YYY")) + }() + panic(P("XXX")) +} + +// Test that panic while panicking discards error message +// See issue 52257 +type exampleError struct{} + +func (e exampleError) Error() string { + panic("important error message") +} + +func ErrorPanic() { + panic(exampleError{}) +} + +type examplePanicError struct{} + +func (e examplePanicError) Error() string { + panic(exampleError{}) +} + +func DoubleErrorPanic() { + panic(examplePanicError{}) +} + +type exampleStringer struct{} + +func (s exampleStringer) String() string { + panic("important stringer message") +} + +func StringerPanic() { + panic(exampleStringer{}) +} + +type examplePanicStringer struct{} + +func (s examplePanicStringer) String() string { + panic(exampleStringer{}) +} + +func DoubleStringerPanic() { + panic(examplePanicStringer{}) +} + +func StringPanic() { + panic("important string message") +} + +func NilPanic() { + panic(nil) +} + +type exampleCircleStartError struct{} + +func (e exampleCircleStartError) Error() string { + panic(exampleCircleEndError{}) +} + +type exampleCircleEndError struct{} + +func (e exampleCircleEndError) Error() string { + panic(exampleCircleStartError{}) +} + +func CircularPanic() { + panic(exampleCircleStartError{}) +} diff --git a/src/runtime/testdata/testprog/crashdump.go b/src/runtime/testdata/testprog/crashdump.go new file mode 100644 index 0000000..bced397 --- /dev/null +++ b/src/runtime/testdata/testprog/crashdump.go @@ -0,0 +1,47 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" + "runtime" +) + +func init() { + register("CrashDumpsAllThreads", CrashDumpsAllThreads) +} + +func CrashDumpsAllThreads() { + const count = 4 + runtime.GOMAXPROCS(count + 1) + + chans := make([]chan bool, count) + for i := range chans { + chans[i] = make(chan bool) + go crashDumpsAllThreadsLoop(i, chans[i]) + } + + // Wait for all the goroutines to start executing. + for _, c := range chans { + <-c + } + + // Tell our parent that all the goroutines are executing. 
+ if _, err := os.NewFile(3, "pipe").WriteString("x"); err != nil { + fmt.Fprintf(os.Stderr, "write to pipe failed: %v\n", err) + os.Exit(2) + } + + select {} +} + +func crashDumpsAllThreadsLoop(i int, c chan bool) { + close(c) + for { + for j := 0; j < 0x7fffffff; j++ { + } + } +} diff --git a/src/runtime/testdata/testprog/deadlock.go b/src/runtime/testdata/testprog/deadlock.go new file mode 100644 index 0000000..781acbd --- /dev/null +++ b/src/runtime/testdata/testprog/deadlock.go @@ -0,0 +1,363 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +func init() { + registerInit("InitDeadlock", InitDeadlock) + registerInit("NoHelperGoroutines", NoHelperGoroutines) + + register("SimpleDeadlock", SimpleDeadlock) + register("LockedDeadlock", LockedDeadlock) + register("LockedDeadlock2", LockedDeadlock2) + register("GoexitDeadlock", GoexitDeadlock) + register("StackOverflow", StackOverflow) + register("ThreadExhaustion", ThreadExhaustion) + register("RecursivePanic", RecursivePanic) + register("RecursivePanic2", RecursivePanic2) + register("RecursivePanic3", RecursivePanic3) + register("RecursivePanic4", RecursivePanic4) + register("RecursivePanic5", RecursivePanic5) + register("GoexitExit", GoexitExit) + register("GoNil", GoNil) + register("MainGoroutineID", MainGoroutineID) + register("Breakpoint", Breakpoint) + register("GoexitInPanic", GoexitInPanic) + register("PanicAfterGoexit", PanicAfterGoexit) + register("RecoveredPanicAfterGoexit", RecoveredPanicAfterGoexit) + register("RecoverBeforePanicAfterGoexit", RecoverBeforePanicAfterGoexit) + register("RecoverBeforePanicAfterGoexit2", RecoverBeforePanicAfterGoexit2) + register("PanicTraceback", PanicTraceback) + register("GoschedInPanic", GoschedInPanic) + register("SyscallInPanic", SyscallInPanic) + register("PanicLoop", PanicLoop) +} + +func SimpleDeadlock() { + select {} + panic("not reached") +} + +func InitDeadlock() { + select {} + panic("not reached") +} + +func LockedDeadlock() { + runtime.LockOSThread() + select {} +} + +func LockedDeadlock2() { + go func() { + runtime.LockOSThread() + select {} + }() + time.Sleep(time.Millisecond) + select {} +} + +func GoexitDeadlock() { + F := func() { + for i := 0; i < 10; i++ { + } + } + + go F() + go F() + runtime.Goexit() +} + +func StackOverflow() { + var f func() byte + f = func() byte { + var buf [64 << 10]byte + return buf[0] + f() + } + debug.SetMaxStack(1474560) + f() +} + +func ThreadExhaustion() { + debug.SetMaxThreads(10) + c := make(chan int) + for i := 0; i < 100; i++ { + go func() { + runtime.LockOSThread() + c <- 0 + select {} + }() + <-c + } +} + +func RecursivePanic() { + func() { + defer func() { + fmt.Println(recover()) + }() + var x [8192]byte + func(x [8192]byte) { + defer func() { + if err := recover(); err != nil { + panic("wrap: " + err.(string)) + } + }() + panic("bad") + }(x) + }() + panic("again") +} + +// Same as RecursivePanic, but do the first recover and the second panic in +// separate defers, and make sure they are executed in the correct order. 
+func RecursivePanic2() { + func() { + defer func() { + fmt.Println(recover()) + }() + var x [8192]byte + func(x [8192]byte) { + defer func() { + panic("second panic") + }() + defer func() { + fmt.Println(recover()) + }() + panic("first panic") + }(x) + }() + panic("third panic") +} + +// Make sure that the first panic finished as a panic, even though the second +// panic was recovered +func RecursivePanic3() { + defer func() { + defer func() { + recover() + }() + panic("second panic") + }() + panic("first panic") +} + +// Test case where a single defer recovers one panic but starts another panic. If +// the second panic is never recovered, then the recovered first panic will still +// appear on the panic stack (labeled '[recovered]') and the runtime stack. +func RecursivePanic4() { + defer func() { + recover() + panic("second panic") + }() + panic("first panic") +} + +// Test case where we have an open-coded defer higher up the stack (in two), and +// in the current function (three) we recover in a defer while we still have +// another defer to be processed. +func RecursivePanic5() { + one() + panic("third panic") +} + +//go:noinline +func one() { + two() +} + +//go:noinline +func two() { + defer func() { + }() + + three() +} + +//go:noinline +func three() { + defer func() { + }() + + defer func() { + fmt.Println(recover()) + }() + + defer func() { + fmt.Println(recover()) + panic("second panic") + }() + + panic("first panic") +} + +func GoexitExit() { + println("t1") + go func() { + time.Sleep(time.Millisecond) + }() + i := 0 + println("t2") + runtime.SetFinalizer(&i, func(p *int) {}) + println("t3") + runtime.GC() + println("t4") + runtime.Goexit() +} + +func GoNil() { + defer func() { + recover() + }() + var f func() + go f() + select {} +} + +func MainGoroutineID() { + panic("test") +} + +func NoHelperGoroutines() { + i := 0 + runtime.SetFinalizer(&i, func(p *int) {}) + time.AfterFunc(time.Hour, func() {}) + panic("oops") +} + +func Breakpoint() { + runtime.Breakpoint() +} + +func GoexitInPanic() { + go func() { + defer func() { + runtime.Goexit() + }() + panic("hello") + }() + runtime.Goexit() +} + +type errorThatGosched struct{} + +func (errorThatGosched) Error() string { + runtime.Gosched() + return "errorThatGosched" +} + +func GoschedInPanic() { + panic(errorThatGosched{}) +} + +type errorThatPrint struct{} + +func (errorThatPrint) Error() string { + fmt.Println("1") + fmt.Println("2") + return "3" +} + +func SyscallInPanic() { + panic(errorThatPrint{}) +} + +func PanicAfterGoexit() { + defer func() { + panic("hello") + }() + runtime.Goexit() +} + +func RecoveredPanicAfterGoexit() { + defer func() { + defer func() { + r := recover() + if r == nil { + panic("bad recover") + } + }() + panic("hello") + }() + runtime.Goexit() +} + +func RecoverBeforePanicAfterGoexit() { + // 1. defer a function that recovers + // 2. defer a function that panics + // 3. call goexit + // Goexit runs the #2 defer. Its panic + // is caught by the #1 defer. For Goexit, we explicitly + // resume execution in the Goexit loop, instead of resuming + // execution in the caller (which would make the Goexit disappear!) + defer func() { + r := recover() + if r == nil { + panic("bad recover") + } + }() + defer func() { + panic("hello") + }() + runtime.Goexit() +} + +func RecoverBeforePanicAfterGoexit2() { + for i := 0; i < 2; i++ { + defer func() { + }() + } + // 1. defer a function that recovers + // 2. defer a function that panics + // 3. call goexit + // Goexit runs the #2 defer. 
Its panic + // is caught by the #1 defer. For Goexit, we explicitly + // resume execution in the Goexit loop, instead of resuming + // execution in the caller (which would make the Goexit disappear!) + defer func() { + r := recover() + if r == nil { + panic("bad recover") + } + }() + defer func() { + panic("hello") + }() + runtime.Goexit() +} + +func PanicTraceback() { + pt1() +} + +func pt1() { + defer func() { + panic("panic pt1") + }() + pt2() +} + +func pt2() { + defer func() { + panic("panic pt2") + }() + panic("hello") +} + +type panicError struct{} + +func (*panicError) Error() string { + panic("double error") +} + +func PanicLoop() { + panic(&panicError{}) +} diff --git a/src/runtime/testdata/testprog/gc.go b/src/runtime/testdata/testprog/gc.go new file mode 100644 index 0000000..5dc85fb --- /dev/null +++ b/src/runtime/testdata/testprog/gc.go @@ -0,0 +1,420 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "math" + "os" + "runtime" + "runtime/debug" + "runtime/metrics" + "sync" + "sync/atomic" + "time" + "unsafe" +) + +func init() { + register("GCFairness", GCFairness) + register("GCFairness2", GCFairness2) + register("GCSys", GCSys) + register("GCPhys", GCPhys) + register("DeferLiveness", DeferLiveness) + register("GCZombie", GCZombie) + register("GCMemoryLimit", GCMemoryLimit) + register("GCMemoryLimitNoGCPercent", GCMemoryLimitNoGCPercent) +} + +func GCSys() { + runtime.GOMAXPROCS(1) + memstats := new(runtime.MemStats) + runtime.GC() + runtime.ReadMemStats(memstats) + sys := memstats.Sys + + runtime.MemProfileRate = 0 // disable profiler + + itercount := 100000 + for i := 0; i < itercount; i++ { + workthegc() + } + + // Should only be using a few MB. + // We allocated 100 MB or (if not short) 1 GB. + runtime.ReadMemStats(memstats) + if sys > memstats.Sys { + sys = 0 + } else { + sys = memstats.Sys - sys + } + if sys > 16<<20 { + fmt.Printf("using too much memory: %d bytes\n", sys) + return + } + fmt.Printf("OK\n") +} + +var sink []byte + +func workthegc() []byte { + sink = make([]byte, 1029) + return sink +} + +func GCFairness() { + runtime.GOMAXPROCS(1) + f, err := os.Open("/dev/null") + if os.IsNotExist(err) { + // This test tests what it is intended to test only if writes are fast. + // If there is no /dev/null, we just don't execute the test. + fmt.Println("OK") + return + } + if err != nil { + fmt.Println(err) + os.Exit(1) + } + for i := 0; i < 2; i++ { + go func() { + for { + f.Write([]byte(".")) + } + }() + } + time.Sleep(10 * time.Millisecond) + fmt.Println("OK") +} + +func GCFairness2() { + // Make sure user code can't exploit the GC's high priority + // scheduling to make scheduling of user code unfair. See + // issue #15706. + runtime.GOMAXPROCS(1) + debug.SetGCPercent(1) + var count [3]int64 + var sink [3]any + for i := range count { + go func(i int) { + for { + sink[i] = make([]byte, 1024) + atomic.AddInt64(&count[i], 1) + } + }(i) + } + // Note: If the unfairness is really bad, it may not even get + // past the sleep. + // + // If the scheduling rules change, this may not be enough time + // to let all goroutines run, but for now we cycle through + // them rapidly. + // + // OpenBSD's scheduler makes every usleep() take at least + // 20ms, so we need a long time to ensure all goroutines have + // run. If they haven't run after 30ms, give it another 1000ms + // and check again. 
+ time.Sleep(30 * time.Millisecond) + var fail bool + for i := range count { + if atomic.LoadInt64(&count[i]) == 0 { + fail = true + } + } + if fail { + time.Sleep(1 * time.Second) + for i := range count { + if atomic.LoadInt64(&count[i]) == 0 { + fmt.Printf("goroutine %d did not run\n", i) + return + } + } + } + fmt.Println("OK") +} + +func GCPhys() { + // This test ensures that heap-growth scavenging is working as intended. + // + // It attempts to construct a sizeable "swiss cheese" heap, with many + // allocChunk-sized holes. Then, it triggers a heap growth by trying to + // allocate as much memory as would fit in those holes. + // + // The heap growth should cause a large number of those holes to be + // returned to the OS. + + const ( + // The total amount of memory we're willing to allocate. + allocTotal = 32 << 20 + + // The page cache could hide 64 8-KiB pages from the scavenger today. + maxPageCache = (8 << 10) * 64 + ) + + // How big the allocations are needs to depend on the page size. + // If the page size is too big and the allocations are too small, + // they might not be aligned to the physical page size, so the scavenger + // will gloss over them. + pageSize := os.Getpagesize() + var allocChunk int + if pageSize <= 8<<10 { + allocChunk = 64 << 10 + } else { + allocChunk = 512 << 10 + } + allocs := allocTotal / allocChunk + + // Set GC percent just so this test is a little more consistent in the + // face of varying environments. + debug.SetGCPercent(100) + + // Set GOMAXPROCS to 1 to minimize the amount of memory held in the page cache, + // and to reduce the chance that the background scavenger gets scheduled. + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) + + // Allocate allocTotal bytes of memory in allocChunk byte chunks. + // Alternate between whether the chunk will be held live or will be + // condemned to GC to create holes in the heap. + saved := make([][]byte, allocs/2+1) + condemned := make([][]byte, allocs/2) + for i := 0; i < allocs; i++ { + b := make([]byte, allocChunk) + if i%2 == 0 { + saved = append(saved, b) + } else { + condemned = append(condemned, b) + } + } + + // Run a GC cycle just so we're at a consistent state. + runtime.GC() + + // Drop the only reference to all the condemned memory. + condemned = nil + + // Clear the condemned memory. + runtime.GC() + + // At this point, the background scavenger is likely running + // and could pick up the work, so the next line of code doesn't + // end up doing anything. That's fine. What's important is that + // this test fails somewhat regularly if the runtime doesn't + // scavenge on heap growth, and doesn't fail at all otherwise. + + // Make a large allocation that in theory could fit, but won't + // because we turned the heap into swiss cheese. + saved = append(saved, make([]byte, allocTotal/2)) + + // heapBacked is an estimate of the amount of physical memory used by + // this test. HeapSys is an estimate of the size of the mapped virtual + // address space (which may or may not be backed by physical pages) + // whereas HeapReleased is an estimate of the amount of bytes returned + // to the OS. Their difference then roughly corresponds to the amount + // of virtual address space that is backed by physical pages. + // + // heapBacked also subtracts out maxPageCache bytes of memory because + // this is memory that may be hidden from the scavenger per-P. Since + // GOMAXPROCS=1 here, subtracting it out once is fine. 
+ var stats runtime.MemStats + runtime.ReadMemStats(&stats) + heapBacked := stats.HeapSys - stats.HeapReleased - maxPageCache + // If heapBacked does not exceed the heap goal by more than retainExtraPercent + // then the scavenger is working as expected; the newly-created holes have been + // scavenged immediately as part of the allocations which cannot fit in the holes. + // + // Since the runtime should scavenge the entirety of the remaining holes, + // theoretically there should be no more free and unscavenged memory. However due + // to other allocations that happen during this test we may still see some physical + // memory over-use. + overuse := (float64(heapBacked) - float64(stats.HeapAlloc)) / float64(stats.HeapAlloc) + // Check against our overuse threshold, which is what the scavenger always reserves + // to encourage allocation of memory that doesn't need to be faulted in. + // + // Add additional slack in case the page size is large and the scavenger + // can't reach that memory because it doesn't constitute a complete aligned + // physical page. Assume the worst case: a full physical page out of each + // allocation. + threshold := 0.1 + float64(pageSize)/float64(allocChunk) + if overuse <= threshold { + fmt.Println("OK") + return + } + // Physical memory utilization exceeds the threshold, so heap-growth scavenging + // did not operate as expected. + // + // In the context of this test, this indicates a large amount of + // fragmentation with physical pages that are otherwise unused but not + // returned to the OS. + fmt.Printf("exceeded physical memory overuse threshold of %3.2f%%: %3.2f%%\n"+ + "(alloc: %d, goal: %d, sys: %d, rel: %d, objs: %d)\n", threshold*100, overuse*100, + stats.HeapAlloc, stats.NextGC, stats.HeapSys, stats.HeapReleased, len(saved)) + runtime.KeepAlive(saved) + runtime.KeepAlive(condemned) +} + +// Test that defer closure is correctly scanned when the stack is scanned. +func DeferLiveness() { + var x [10]int + escape(&x) + fn := func() { + if x[0] != 42 { + panic("FAIL") + } + } + defer fn() + + x[0] = 42 + runtime.GC() + runtime.GC() + runtime.GC() +} + +//go:noinline +func escape(x any) { sink2 = x; sink2 = nil } + +var sink2 any + +// Test zombie object detection and reporting. +func GCZombie() { + // Allocate several objects of unusual size (so free slots are + // unlikely to all be re-allocated by the runtime). + const size = 190 + const count = 8192 / size + keep := make([]*byte, 0, (count+1)/2) + free := make([]uintptr, 0, (count+1)/2) + zombies := make([]*byte, 0, len(free)) + for i := 0; i < count; i++ { + obj := make([]byte, size) + p := &obj[0] + if i%2 == 0 { + keep = append(keep, p) + } else { + free = append(free, uintptr(unsafe.Pointer(p))) + } + } + + // Free the unreferenced objects. + runtime.GC() + + // Bring the free objects back to life. + for _, p := range free { + zombies = append(zombies, (*byte)(unsafe.Pointer(p))) + } + + // GC should detect the zombie objects. + runtime.GC() + println("failed") + runtime.KeepAlive(keep) + runtime.KeepAlive(zombies) +} + +func GCMemoryLimit() { + gcMemoryLimit(100) +} + +func GCMemoryLimitNoGCPercent() { + gcMemoryLimit(-1) +} + +// Test SetMemoryLimit functionality. +// +// This test lives here instead of runtime/debug because the entire +// implementation is in the runtime, and testprog gives us a more +// consistent testing environment to help avoid flakiness. 
+func gcMemoryLimit(gcPercent int) { + if oldProcs := runtime.GOMAXPROCS(4); oldProcs < 4 { + // Fail if the default GOMAXPROCS isn't at least 4. + // Whatever invokes this should check and do a proper t.Skip. + println("insufficient CPUs") + return + } + debug.SetGCPercent(gcPercent) + + const myLimit = 256 << 20 + if limit := debug.SetMemoryLimit(-1); limit != math.MaxInt64 { + print("expected MaxInt64 limit, got ", limit, " bytes instead\n") + return + } + if limit := debug.SetMemoryLimit(myLimit); limit != math.MaxInt64 { + print("expected MaxInt64 limit, got ", limit, " bytes instead\n") + return + } + if limit := debug.SetMemoryLimit(-1); limit != myLimit { + print("expected a ", myLimit, "-byte limit, got ", limit, " bytes instead\n") + return + } + + target := make(chan int64) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + + sinkSize := int(<-target / memLimitUnit) + for { + if len(memLimitSink) != sinkSize { + memLimitSink = make([]*[memLimitUnit]byte, sinkSize) + } + for i := 0; i < len(memLimitSink); i++ { + memLimitSink[i] = new([memLimitUnit]byte) + // Write to this memory to slow down the allocator, otherwise + // we get flaky behavior. See #52433. + for j := range memLimitSink[i] { + memLimitSink[i][j] = 9 + } + } + // Again, Gosched to slow down the allocator. + runtime.Gosched() + select { + case newTarget := <-target: + if newTarget == math.MaxInt64 { + return + } + sinkSize = int(newTarget / memLimitUnit) + default: + } + } + }() + var m [2]metrics.Sample + m[0].Name = "/memory/classes/total:bytes" + m[1].Name = "/memory/classes/heap/released:bytes" + + // Don't set this too high, because this is a *live heap* target which + // is not directly comparable to a total memory limit. + maxTarget := int64((myLimit / 10) * 8) + increment := int64((myLimit / 10) * 1) + for i := increment; i < maxTarget; i += increment { + target <- i + + // Check to make sure the memory limit is maintained. + // We're just sampling here so if it transiently goes over we might miss it. + // The internal accounting is inconsistent anyway, so going over by a few + // pages is certainly possible. Just make sure we're within some bound. + // Note that to avoid flakiness due to #52433 (especially since we're allocating + // somewhat heavily here) this bound is kept loose. In practice the Go runtime + // should do considerably better than this bound. + bound := int64(myLimit + 16<<20) + start := time.Now() + for time.Since(start) < 200*time.Millisecond { + metrics.Read(m[:]) + retained := int64(m[0].Value.Uint64() - m[1].Value.Uint64()) + if retained > bound { + print("retained=", retained, " limit=", myLimit, " bound=", bound, "\n") + panic("exceeded memory limit by more than bound allows") + } + runtime.Gosched() + } + } + + if limit := debug.SetMemoryLimit(math.MaxInt64); limit != myLimit { + print("expected a ", myLimit, "-byte limit, got ", limit, " bytes instead\n") + return + } + println("OK") +} + +// Pick a value close to the page size. We want to m +const memLimitUnit = 8000 + +var memLimitSink []*[memLimitUnit]byte diff --git a/src/runtime/testdata/testprog/lockosthread.go b/src/runtime/testdata/testprog/lockosthread.go new file mode 100644 index 0000000..e9d7fdb --- /dev/null +++ b/src/runtime/testdata/testprog/lockosthread.go @@ -0,0 +1,246 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "os" + "runtime" + "sync" + "time" +) + +var mainTID int + +func init() { + registerInit("LockOSThreadMain", func() { + // init is guaranteed to run on the main thread. + mainTID = gettid() + }) + register("LockOSThreadMain", LockOSThreadMain) + + registerInit("LockOSThreadAlt", func() { + // Lock the OS thread now so main runs on the main thread. + runtime.LockOSThread() + }) + register("LockOSThreadAlt", LockOSThreadAlt) + + registerInit("LockOSThreadAvoidsStatePropagation", func() { + // Lock the OS thread now so main runs on the main thread. + runtime.LockOSThread() + }) + register("LockOSThreadAvoidsStatePropagation", LockOSThreadAvoidsStatePropagation) + register("LockOSThreadTemplateThreadRace", LockOSThreadTemplateThreadRace) +} + +func LockOSThreadMain() { + // gettid only works on Linux, so on other platforms this just + // checks that the runtime doesn't do anything terrible. + + // This requires GOMAXPROCS=1 from the beginning to reliably + // start a goroutine on the main thread. + if runtime.GOMAXPROCS(-1) != 1 { + println("requires GOMAXPROCS=1") + os.Exit(1) + } + + ready := make(chan bool, 1) + go func() { + // Because GOMAXPROCS=1, this *should* be on the main + // thread. Stay there. + runtime.LockOSThread() + if mainTID != 0 && gettid() != mainTID { + println("failed to start goroutine on main thread") + os.Exit(1) + } + // Exit with the thread locked, which should exit the + // main thread. + ready <- true + }() + <-ready + time.Sleep(1 * time.Millisecond) + // Check that this goroutine is still running on a different + // thread. + if mainTID != 0 && gettid() == mainTID { + println("goroutine migrated to locked thread") + os.Exit(1) + } + println("OK") +} + +func LockOSThreadAlt() { + // This is running locked to the main OS thread. + + var subTID int + ready := make(chan bool, 1) + go func() { + // This goroutine must be running on a new thread. + runtime.LockOSThread() + subTID = gettid() + ready <- true + // Exit with the thread locked. + }() + <-ready + runtime.UnlockOSThread() + for i := 0; i < 100; i++ { + time.Sleep(1 * time.Millisecond) + // Check that this goroutine is running on a different thread. + if subTID != 0 && gettid() == subTID { + println("locked thread reused") + os.Exit(1) + } + exists, supported := tidExists(subTID) + if !supported || !exists { + goto ok + } + } + println("sub thread", subTID, "still running") + return +ok: + println("OK") +} + +func LockOSThreadAvoidsStatePropagation() { + // This test is similar to LockOSThreadAlt in that it will detect if a thread + // which should have died is still running. However, rather than do this with + // thread IDs, it does this by unsharing state on that thread. This way, it + // also detects whether new threads were cloned from the dead thread, and not + // from a clean thread. Cloning from a locked thread is undesirable since + // cloned threads will inherit potentially unwanted OS state. + // + // unshareFs, getcwd, and chdir("/tmp") are only guaranteed to work on + // Linux, so on other platforms this just checks that the runtime doesn't + // do anything terrible. + // + // This is running locked to the main OS thread. + + // GOMAXPROCS=1 makes this fail much more reliably if a tainted thread is + // cloned from. + if runtime.GOMAXPROCS(-1) != 1 { + println("requires GOMAXPROCS=1") + os.Exit(1) + } + + if err := chdir("/"); err != nil { + println("failed to chdir:", err.Error()) + os.Exit(1) + } + // On systems other than Linux, cwd == "". 
+ cwd, err := getcwd() + if err != nil { + println("failed to get cwd:", err.Error()) + os.Exit(1) + } + if cwd != "" && cwd != "/" { + println("unexpected cwd", cwd, " wanted /") + os.Exit(1) + } + + ready := make(chan bool, 1) + go func() { + // This goroutine must be running on a new thread. + runtime.LockOSThread() + + // Unshare details about the FS, like the CWD, with + // the rest of the process on this thread. + // On systems other than Linux, this is a no-op. + if err := unshareFs(); err != nil { + if err == errNotPermitted { + println("unshare not permitted") + os.Exit(0) + } + println("failed to unshare fs:", err.Error()) + os.Exit(1) + } + // Chdir to somewhere else on this thread. + // On systems other than Linux, this is a no-op. + if err := chdir("/tmp"); err != nil { + println("failed to chdir:", err.Error()) + os.Exit(1) + } + + // The state on this thread is now considered "tainted", but it + // should no longer be observable in any other context. + + ready <- true + // Exit with the thread locked. + }() + <-ready + + // Spawn yet another goroutine and lock it. Since GOMAXPROCS=1, if + // for some reason state from the (hopefully dead) locked thread above + // propagated into a newly created thread (via clone), or that thread + // is actually being re-used, then we should get scheduled on such a + // thread with high likelihood. + done := make(chan bool) + go func() { + runtime.LockOSThread() + + // Get the CWD and check if this is the same as the main thread's + // CWD. Every thread should share the same CWD. + // On systems other than Linux, wd == "". + wd, err := getcwd() + if err != nil { + println("failed to get cwd:", err.Error()) + os.Exit(1) + } + if wd != cwd { + println("bad state from old thread propagated after it should have died") + os.Exit(1) + } + <-done + + runtime.UnlockOSThread() + }() + done <- true + runtime.UnlockOSThread() + println("OK") +} + +func LockOSThreadTemplateThreadRace() { + // This test attempts to reproduce the race described in + // golang.org/issue/38931. To do so, we must have a stop-the-world + // (achieved via ReadMemStats) racing with two LockOSThread calls. + // + // While this test attempts to line up the timing, it is only expected + // to fail (and thus hang) around 2% of the time if the race is + // present. + + // Ensure enough Ps to actually run everything in parallel. Though on + // <4 core machines, we are still at the whim of the kernel scheduler. + runtime.GOMAXPROCS(4) + + go func() { + // Stop the world; race with LockOSThread below. + var m runtime.MemStats + for { + runtime.ReadMemStats(&m) + } + }() + + // Try to synchronize both LockOSThreads. + start := time.Now().Add(10 * time.Millisecond) + + var wg sync.WaitGroup + wg.Add(2) + + for i := 0; i < 2; i++ { + go func() { + for time.Now().Before(start) { + } + + // Add work to the local runq to trigger early startm + // in handoffp. + go func() {}() + + runtime.LockOSThread() + runtime.Gosched() // add a preemption point. + wg.Done() + }() + } + + wg.Wait() + // If both LockOSThreads completed then we did not hit the race. + println("OK") +} diff --git a/src/runtime/testdata/testprog/main.go b/src/runtime/testdata/testprog/main.go new file mode 100644 index 0000000..ae491a2 --- /dev/null +++ b/src/runtime/testdata/testprog/main.go @@ -0,0 +1,35 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "os" + +var cmds = map[string]func(){} + +func register(name string, f func()) { + if cmds[name] != nil { + panic("duplicate registration: " + name) + } + cmds[name] = f +} + +func registerInit(name string, f func()) { + if len(os.Args) >= 2 && os.Args[1] == name { + f() + } +} + +func main() { + if len(os.Args) < 2 { + println("usage: " + os.Args[0] + " name-of-test") + return + } + f := cmds[os.Args[1]] + if f == nil { + println("unknown function: " + os.Args[1]) + return + } + f() +} diff --git a/src/runtime/testdata/testprog/map.go b/src/runtime/testdata/testprog/map.go new file mode 100644 index 0000000..5524289 --- /dev/null +++ b/src/runtime/testdata/testprog/map.go @@ -0,0 +1,77 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "runtime" + +func init() { + register("concurrentMapWrites", concurrentMapWrites) + register("concurrentMapReadWrite", concurrentMapReadWrite) + register("concurrentMapIterateWrite", concurrentMapIterateWrite) +} + +func concurrentMapWrites() { + m := map[int]int{} + c := make(chan struct{}) + go func() { + for i := 0; i < 10000; i++ { + m[5] = 0 + runtime.Gosched() + } + c <- struct{}{} + }() + go func() { + for i := 0; i < 10000; i++ { + m[6] = 0 + runtime.Gosched() + } + c <- struct{}{} + }() + <-c + <-c +} + +func concurrentMapReadWrite() { + m := map[int]int{} + c := make(chan struct{}) + go func() { + for i := 0; i < 10000; i++ { + m[5] = 0 + runtime.Gosched() + } + c <- struct{}{} + }() + go func() { + for i := 0; i < 10000; i++ { + _ = m[6] + runtime.Gosched() + } + c <- struct{}{} + }() + <-c + <-c +} + +func concurrentMapIterateWrite() { + m := map[int]int{} + c := make(chan struct{}) + go func() { + for i := 0; i < 10000; i++ { + m[5] = 0 + runtime.Gosched() + } + c <- struct{}{} + }() + go func() { + for i := 0; i < 10000; i++ { + for range m { + } + runtime.Gosched() + } + c <- struct{}{} + }() + <-c + <-c +} diff --git a/src/runtime/testdata/testprog/memprof.go b/src/runtime/testdata/testprog/memprof.go new file mode 100644 index 0000000..0392e60 --- /dev/null +++ b/src/runtime/testdata/testprog/memprof.go @@ -0,0 +1,51 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("MemProf", MemProf) +} + +var memProfBuf bytes.Buffer +var memProfStr string + +func MemProf() { + // Force heap sampling for determinism. + runtime.MemProfileRate = 1 + + for i := 0; i < 10; i++ { + fmt.Fprintf(&memProfBuf, "%*d\n", i, i) + } + memProfStr = memProfBuf.String() + + runtime.GC() + + f, err := os.CreateTemp("", "memprof") + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + if err := pprof.WriteHeapProfile(f); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + name := f.Name() + if err := f.Close(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + fmt.Println(name) +} diff --git a/src/runtime/testdata/testprog/misc.go b/src/runtime/testdata/testprog/misc.go new file mode 100644 index 0000000..7ccd389 --- /dev/null +++ b/src/runtime/testdata/testprog/misc.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "runtime" + +func init() { + register("NumGoroutine", NumGoroutine) +} + +func NumGoroutine() { + println(runtime.NumGoroutine()) +} diff --git a/src/runtime/testdata/testprog/numcpu_freebsd.go b/src/runtime/testdata/testprog/numcpu_freebsd.go new file mode 100644 index 0000000..310c212 --- /dev/null +++ b/src/runtime/testdata/testprog/numcpu_freebsd.go @@ -0,0 +1,140 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strconv" + "strings" + "syscall" +) + +var ( + cpuSetRE = regexp.MustCompile(`(\d,?)+`) +) + +func init() { + register("FreeBSDNumCPU", FreeBSDNumCPU) + register("FreeBSDNumCPUHelper", FreeBSDNumCPUHelper) +} + +func FreeBSDNumCPUHelper() { + fmt.Printf("%d\n", runtime.NumCPU()) +} + +func FreeBSDNumCPU() { + _, err := exec.LookPath("cpuset") + if err != nil { + // Can not test without cpuset command. + fmt.Println("OK") + return + } + _, err = exec.LookPath("sysctl") + if err != nil { + // Can not test without sysctl command. + fmt.Println("OK") + return + } + cmd := exec.Command("sysctl", "-n", "kern.smp.active") + output, err := cmd.CombinedOutput() + if err != nil { + fmt.Printf("fail to launch '%s', error: %s, output: %s\n", strings.Join(cmd.Args, " "), err, output) + return + } + if !bytes.Equal(output, []byte("1\n")) { + // SMP mode deactivated in kernel. + fmt.Println("OK") + return + } + + list, err := getList() + if err != nil { + fmt.Printf("%s\n", err) + return + } + err = checkNCPU(list) + if err != nil { + fmt.Printf("%s\n", err) + return + } + if len(list) >= 2 { + err = checkNCPU(list[:len(list)-1]) + if err != nil { + fmt.Printf("%s\n", err) + return + } + } + fmt.Println("OK") + return +} + +func getList() ([]string, error) { + pid := syscall.Getpid() + + // Launch cpuset to print a list of available CPUs: pid <PID> mask: 0, 1, 2, 3. + cmd := exec.Command("cpuset", "-g", "-p", strconv.Itoa(pid)) + cmdline := strings.Join(cmd.Args, " ") + output, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("fail to execute '%s': %s", cmdline, err) + } + output, _, ok := bytes.Cut(output, []byte("\n")) + if !ok { + return nil, fmt.Errorf("invalid output from '%s', '\\n' not found: %s", cmdline, output) + } + + _, cpus, ok := bytes.Cut(output, []byte(":")) + if !ok { + return nil, fmt.Errorf("invalid output from '%s', ':' not found: %s", cmdline, output) + } + + var list []string + for _, val := range bytes.Split(cpus, []byte(",")) { + index := string(bytes.TrimSpace(val)) + if len(index) == 0 { + continue + } + list = append(list, index) + } + if len(list) == 0 { + return nil, fmt.Errorf("empty CPU list from '%s': %s", cmdline, output) + } + return list, nil +} + +func checkNCPU(list []string) error { + listString := strings.Join(list, ",") + if len(listString) == 0 { + return fmt.Errorf("could not check against an empty CPU list") + } + + cListString := cpuSetRE.FindString(listString) + if len(cListString) == 0 { + return fmt.Errorf("invalid cpuset output '%s'", listString) + } + // Launch FreeBSDNumCPUHelper() with specified CPUs list. 
+ cmd := exec.Command("cpuset", "-l", cListString, os.Args[0], "FreeBSDNumCPUHelper") + cmdline := strings.Join(cmd.Args, " ") + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("fail to launch child '%s', error: %s, output: %s", cmdline, err, output) + } + + // NumCPU from FreeBSDNumCPUHelper come with '\n'. + output = bytes.TrimSpace(output) + n, err := strconv.Atoi(string(output)) + if err != nil { + return fmt.Errorf("fail to parse output from child '%s', error: %s, output: %s", cmdline, err, output) + } + if n != len(list) { + return fmt.Errorf("runtime.NumCPU() expected to %d, got %d when run with CPU list %s", len(list), n, cListString) + } + return nil +} diff --git a/src/runtime/testdata/testprog/panicprint.go b/src/runtime/testdata/testprog/panicprint.go new file mode 100644 index 0000000..c8deabe --- /dev/null +++ b/src/runtime/testdata/testprog/panicprint.go @@ -0,0 +1,111 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +type MyBool bool +type MyComplex128 complex128 +type MyComplex64 complex64 +type MyFloat32 float32 +type MyFloat64 float64 +type MyInt int +type MyInt8 int8 +type MyInt16 int16 +type MyInt32 int32 +type MyInt64 int64 +type MyString string +type MyUint uint +type MyUint8 uint8 +type MyUint16 uint16 +type MyUint32 uint32 +type MyUint64 uint64 +type MyUintptr uintptr + +func panicCustomComplex64() { + panic(MyComplex64(0.11 + 3i)) +} + +func panicCustomComplex128() { + panic(MyComplex128(32.1 + 10i)) +} + +func panicCustomString() { + panic(MyString("Panic")) +} + +func panicCustomBool() { + panic(MyBool(true)) +} + +func panicCustomInt() { + panic(MyInt(93)) +} + +func panicCustomInt8() { + panic(MyInt8(93)) +} + +func panicCustomInt16() { + panic(MyInt16(93)) +} + +func panicCustomInt32() { + panic(MyInt32(93)) +} + +func panicCustomInt64() { + panic(MyInt64(93)) +} + +func panicCustomUint() { + panic(MyUint(93)) +} + +func panicCustomUint8() { + panic(MyUint8(93)) +} + +func panicCustomUint16() { + panic(MyUint16(93)) +} + +func panicCustomUint32() { + panic(MyUint32(93)) +} + +func panicCustomUint64() { + panic(MyUint64(93)) +} + +func panicCustomUintptr() { + panic(MyUintptr(93)) +} + +func panicCustomFloat64() { + panic(MyFloat64(-93.70)) +} + +func panicCustomFloat32() { + panic(MyFloat32(-93.70)) +} + +func init() { + register("panicCustomComplex64", panicCustomComplex64) + register("panicCustomComplex128", panicCustomComplex128) + register("panicCustomBool", panicCustomBool) + register("panicCustomFloat32", panicCustomFloat32) + register("panicCustomFloat64", panicCustomFloat64) + register("panicCustomInt", panicCustomInt) + register("panicCustomInt8", panicCustomInt8) + register("panicCustomInt16", panicCustomInt16) + register("panicCustomInt32", panicCustomInt32) + register("panicCustomInt64", panicCustomInt64) + register("panicCustomString", panicCustomString) + register("panicCustomUint", panicCustomUint) + register("panicCustomUint8", panicCustomUint8) + register("panicCustomUint16", panicCustomUint16) + register("panicCustomUint32", panicCustomUint32) + register("panicCustomUint64", panicCustomUint64) + register("panicCustomUintptr", panicCustomUintptr) +} diff --git a/src/runtime/testdata/testprog/panicrace.go b/src/runtime/testdata/testprog/panicrace.go new file mode 100644 index 0000000..f058994 --- /dev/null +++ b/src/runtime/testdata/testprog/panicrace.go @@ -0,0 +1,27 @@ +// Copyright 
2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "runtime" + "sync" +) + +func init() { + register("PanicRace", PanicRace) +} + +func PanicRace() { + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer func() { + wg.Done() + runtime.Gosched() + }() + panic("crash") + }() + wg.Wait() +} diff --git a/src/runtime/testdata/testprog/preempt.go b/src/runtime/testdata/testprog/preempt.go new file mode 100644 index 0000000..fb6755a --- /dev/null +++ b/src/runtime/testdata/testprog/preempt.go @@ -0,0 +1,75 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "runtime" + "runtime/debug" + "sync/atomic" +) + +func init() { + register("AsyncPreempt", AsyncPreempt) +} + +func AsyncPreempt() { + // Run with just 1 GOMAXPROCS so the runtime is required to + // use scheduler preemption. + runtime.GOMAXPROCS(1) + // Disable GC so we have complete control of what we're testing. + debug.SetGCPercent(-1) + // Out of an abundance of caution, also make sure that there are + // no GCs actively in progress. The sweep phase of a GC cycle + // for instance tries to preempt Ps at the very beginning. + runtime.GC() + + // Start a goroutine with no sync safe-points. + var ready, ready2 uint32 + go func() { + for { + atomic.StoreUint32(&ready, 1) + dummy() + dummy() + } + }() + // Also start one with a frameless function. + // This is an especially interesting case for + // LR machines. + go func() { + atomic.AddUint32(&ready2, 1) + frameless() + }() + // Also test empty infinite loop. + go func() { + atomic.AddUint32(&ready2, 1) + for { + } + }() + + // Wait for the goroutine to stop passing through sync + // safe-points. + for atomic.LoadUint32(&ready) == 0 || atomic.LoadUint32(&ready2) < 2 { + runtime.Gosched() + } + + // Run a GC, which will have to stop the goroutine for STW and + // for stack scanning. If this doesn't work, the test will + // deadlock and timeout. + runtime.GC() + + println("OK") +} + +//go:noinline +func frameless() { + for i := int64(0); i < 1<<62; i++ { + out += i * i * i * i * i * 12345 + } +} + +var out int64 + +//go:noinline +func dummy() {} diff --git a/src/runtime/testdata/testprog/signal.go b/src/runtime/testdata/testprog/signal.go new file mode 100644 index 0000000..cc5ac8a --- /dev/null +++ b/src/runtime/testdata/testprog/signal.go @@ -0,0 +1,30 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !plan9 +// +build !windows,!plan9 + +package main + +import ( + "syscall" + "time" +) + +func init() { + register("SignalExitStatus", SignalExitStatus) +} + +func SignalExitStatus() { + syscall.Kill(syscall.Getpid(), syscall.SIGTERM) + + // Should die immediately, but we've seen flakiness on various + // systems (see issue 14063). It's possible that the signal is + // being delivered to a different thread and we are returning + // and exiting before that thread runs again. Give the program + // a little while to die to make sure we pick up the signal + // before we return and exit the program. The time here + // shouldn't matter--we'll never really sleep this long. 
+ time.Sleep(time.Second) +} diff --git a/src/runtime/testdata/testprog/sleep.go b/src/runtime/testdata/testprog/sleep.go new file mode 100644 index 0000000..b230e60 --- /dev/null +++ b/src/runtime/testdata/testprog/sleep.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "time" +) + +// for golang.org/issue/27250 + +func init() { + register("After1", After1) +} + +func After1() { + os.Stdout.WriteString("ready\n") + os.Stdout.Close() + <-time.After(1 * time.Second) +} diff --git a/src/runtime/testdata/testprog/stringconcat.go b/src/runtime/testdata/testprog/stringconcat.go new file mode 100644 index 0000000..f233e66 --- /dev/null +++ b/src/runtime/testdata/testprog/stringconcat.go @@ -0,0 +1,20 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +func init() { + register("stringconcat", stringconcat) +} + +func stringconcat() { + s0 := strings.Repeat("0", 1<<10) + s1 := strings.Repeat("1", 1<<10) + s2 := strings.Repeat("2", 1<<10) + s3 := strings.Repeat("3", 1<<10) + s := s0 + s1 + s2 + s3 + panic(s) +} diff --git a/src/runtime/testdata/testprog/syscall_windows.go b/src/runtime/testdata/testprog/syscall_windows.go new file mode 100644 index 0000000..71bf384 --- /dev/null +++ b/src/runtime/testdata/testprog/syscall_windows.go @@ -0,0 +1,73 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "internal/syscall/windows" + "runtime" + "sync" + "syscall" + "unsafe" +) + +func init() { + register("RaiseException", RaiseException) + register("ZeroDivisionException", ZeroDivisionException) + register("StackMemory", StackMemory) +} + +func RaiseException() { + const EXCEPTION_NONCONTINUABLE = 1 + mod := syscall.MustLoadDLL("kernel32.dll") + proc := mod.MustFindProc("RaiseException") + proc.Call(0xbad, EXCEPTION_NONCONTINUABLE, 0, 0) + println("RaiseException should not return") +} + +func ZeroDivisionException() { + x := 1 + y := 0 + z := x / y + println(z) +} + +func getPagefileUsage() (uintptr, error) { + p, err := syscall.GetCurrentProcess() + if err != nil { + return 0, err + } + var m windows.PROCESS_MEMORY_COUNTERS + err = windows.GetProcessMemoryInfo(p, &m, uint32(unsafe.Sizeof(m))) + if err != nil { + return 0, err + } + return m.PagefileUsage, nil +} + +func StackMemory() { + mem1, err := getPagefileUsage() + if err != nil { + panic(err) + } + const threadCount = 100 + var wg sync.WaitGroup + for i := 0; i < threadCount; i++ { + wg.Add(1) + go func() { + runtime.LockOSThread() + wg.Done() + select {} + }() + } + wg.Wait() + mem2, err := getPagefileUsage() + if err != nil { + panic(err) + } + // assumes that this process creates 1 thread for each + // thread locked goroutine plus extra 5 threads + // like sysmon and others + print((mem2 - mem1) / (threadCount + 5)) +} diff --git a/src/runtime/testdata/testprog/syscalls.go b/src/runtime/testdata/testprog/syscalls.go new file mode 100644 index 0000000..098d5ca --- /dev/null +++ b/src/runtime/testdata/testprog/syscalls.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "errors" +) + +var errNotPermitted = errors.New("operation not permitted") diff --git a/src/runtime/testdata/testprog/syscalls_linux.go b/src/runtime/testdata/testprog/syscalls_linux.go new file mode 100644 index 0000000..48f8014 --- /dev/null +++ b/src/runtime/testdata/testprog/syscalls_linux.go @@ -0,0 +1,58 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "os" + "syscall" +) + +func gettid() int { + return syscall.Gettid() +} + +func tidExists(tid int) (exists, supported bool) { + stat, err := os.ReadFile(fmt.Sprintf("/proc/self/task/%d/stat", tid)) + if os.IsNotExist(err) { + return false, true + } + // Check if it's a zombie thread. + state := bytes.Fields(stat)[2] + return !(len(state) == 1 && state[0] == 'Z'), true +} + +func getcwd() (string, error) { + if !syscall.ImplementsGetwd { + return "", nil + } + // Use the syscall to get the current working directory. + // This is imperative for checking for OS thread state + // after an unshare since os.Getwd might just check the + // environment, or use some other mechanism. + var buf [4096]byte + n, err := syscall.Getcwd(buf[:]) + if err != nil { + return "", err + } + // Subtract one for null terminator. + return string(buf[:n-1]), nil +} + +func unshareFs() error { + err := syscall.Unshare(syscall.CLONE_FS) + if err != nil { + errno, ok := err.(syscall.Errno) + if ok && errno == syscall.EPERM { + return errNotPermitted + } + } + return err +} + +func chdir(path string) error { + return syscall.Chdir(path) +} diff --git a/src/runtime/testdata/testprog/syscalls_none.go b/src/runtime/testdata/testprog/syscalls_none.go new file mode 100644 index 0000000..068bb59 --- /dev/null +++ b/src/runtime/testdata/testprog/syscalls_none.go @@ -0,0 +1,28 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux +// +build !linux + +package main + +func gettid() int { + return 0 +} + +func tidExists(tid int) (exists, supported bool) { + return false, false +} + +func getcwd() (string, error) { + return "", nil +} + +func unshareFs() error { + return nil +} + +func chdir(path string) error { + return nil +} diff --git a/src/runtime/testdata/testprog/timeprof.go b/src/runtime/testdata/testprog/timeprof.go new file mode 100644 index 0000000..1e90af4 --- /dev/null +++ b/src/runtime/testdata/testprog/timeprof.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("TimeProf", TimeProf) +} + +func TimeProf() { + f, err := os.CreateTemp("", "timeprof") + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + if err := pprof.StartCPUProfile(f); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + t0 := time.Now() + // We should get a profiling signal 100 times a second, + // so running for 1/10 second should be sufficient. 
+	for time.Since(t0) < time.Second/10 {
+	}
+
+	pprof.StopCPUProfile()
+
+	name := f.Name()
+	if err := f.Close(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(2)
+	}
+
+	fmt.Println(name)
+}
diff --git a/src/runtime/testdata/testprog/traceback_ancestors.go b/src/runtime/testdata/testprog/traceback_ancestors.go
new file mode 100644
index 0000000..8fc1aa7
--- /dev/null
+++ b/src/runtime/testdata/testprog/traceback_ancestors.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+func init() {
+	register("TracebackAncestors", TracebackAncestors)
+}
+
+const numGoroutines = 3
+const numFrames = 2
+
+func TracebackAncestors() {
+	w := make(chan struct{})
+	recurseThenCallGo(w, numGoroutines, numFrames, true)
+	<-w
+	printStack()
+	close(w)
+}
+
+var ignoreGoroutines = make(map[string]bool)
+
+func printStack() {
+	buf := make([]byte, 1024)
+	for {
+		n := runtime.Stack(buf, true)
+		if n < len(buf) {
+			all := string(buf[:n])
+			var saved string
+
+			// Delete any ignored goroutines, if present.
+			for all != "" {
+				var g string
+				g, all, _ = strings.Cut(all, "\n\n")
+
+				if strings.HasPrefix(g, "goroutine ") {
+					id, _, _ := strings.Cut(strings.TrimPrefix(g, "goroutine "), " ")
+					if ignoreGoroutines[id] {
+						continue
+					}
+				}
+				if saved != "" {
+					saved += "\n\n"
+				}
+				saved += g
+			}
+
+			fmt.Print(saved)
+			return
+		}
+		buf = make([]byte, 2*len(buf))
+	}
+}
+
+func recurseThenCallGo(w chan struct{}, frames int, goroutines int, main bool) {
+	if frames == 0 {
+		// Signal to TracebackAncestors that we are done recursing and starting goroutines.
+		w <- struct{}{}
+		<-w
+		return
+	}
+	if goroutines == 0 {
+		// Record which goroutine this is so we can ignore it
+		// in the traceback if it hasn't finished exiting by
+		// the time we printStack.
+		if !main {
+			ignoreGoroutines[goroutineID()] = true
+		}
+
+		// Start the next goroutine now that there are no more recursions left
+		// for this current goroutine.
+		go recurseThenCallGo(w, frames-1, numFrames, false)
+		return
+	}
+	recurseThenCallGo(w, frames, goroutines-1, main)
+}
+
+func goroutineID() string {
+	buf := make([]byte, 128)
+	runtime.Stack(buf, false)
+	prefix := []byte("goroutine ")
+	var found bool
+	if buf, found = bytes.CutPrefix(buf, prefix); !found {
+		panic(fmt.Sprintf("expected %q at beginning of traceback:\n%s", prefix, buf))
+	}
+	id, _, _ := bytes.Cut(buf, []byte(" "))
+	return string(id)
+}
diff --git a/src/runtime/testdata/testprog/unsafe.go b/src/runtime/testdata/testprog/unsafe.go
new file mode 100644
index 0000000..021b08f
--- /dev/null
+++ b/src/runtime/testdata/testprog/unsafe.go
@@ -0,0 +1,12 @@
+package main
+
+import "unsafe"
+
+func init() {
+	register("panicOnNilAndEleSizeIsZero", panicOnNilAndEleSizeIsZero)
+}
+
+func panicOnNilAndEleSizeIsZero() {
+	var p *struct{}
+	_ = unsafe.Slice(p, 5)
+}
diff --git a/src/runtime/testdata/testprog/vdso.go b/src/runtime/testdata/testprog/vdso.go
new file mode 100644
index 0000000..b18bc74
--- /dev/null
+++ b/src/runtime/testdata/testprog/vdso.go
@@ -0,0 +1,54 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Invoke signal handler in the VDSO context (see issue 32912).
+
+package main
+
+import (
+	"fmt"
+	"os"
+	"runtime/pprof"
+	"time"
+)
+
+func init() {
+	register("SignalInVDSO", signalInVDSO)
+}
+
+func signalInVDSO() {
+	f, err := os.CreateTemp("", "timeprofnow")
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(2)
+	}
+
+	if err := pprof.StartCPUProfile(f); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(2)
+	}
+
+	t0 := time.Now()
+	t1 := t0
+	// We should get a profiling signal 100 times a second,
+	// so running for 1 second should be sufficient.
+	for t1.Sub(t0) < time.Second {
+		t1 = time.Now()
+	}
+
+	pprof.StopCPUProfile()
+
+	name := f.Name()
+	if err := f.Close(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(2)
+	}
+
+	if err := os.Remove(name); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(2)
+	}
+
+	fmt.Println("success")
+}