From 73df946d56c74384511a194dd01dbe099584fd1a Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 28 Apr 2024 15:14:23 +0200
Subject: Adding upstream version 1.16.10.

Signed-off-by: Daniel Baumann
---
 src/runtime/testdata/testprog/abort.go           |  23 ++
 src/runtime/testdata/testprog/badtraceback.go    |  47 +++
 src/runtime/testdata/testprog/checkptr.go        |  51 +++
 src/runtime/testdata/testprog/crash.go           |  66 ++++
 src/runtime/testdata/testprog/deadlock.go        | 363 +++++++++++++++++++++
 src/runtime/testdata/testprog/gc.go              | 302 +++++++++++++++++
 src/runtime/testdata/testprog/lockosthread.go    | 246 ++++++++++++++
 src/runtime/testdata/testprog/main.go            |  35 ++
 src/runtime/testdata/testprog/map.go             |  77 +++++
 src/runtime/testdata/testprog/memprof.go         |  51 +++
 src/runtime/testdata/testprog/misc.go            |  15 +
 src/runtime/testdata/testprog/numcpu_freebsd.go  | 141 ++++++++
 src/runtime/testdata/testprog/panicprint.go      | 111 +++++++
 src/runtime/testdata/testprog/panicrace.go       |  27 ++
 src/runtime/testdata/testprog/preempt.go         |  71 ++++
 src/runtime/testdata/testprog/signal.go          |  29 ++
 src/runtime/testdata/testprog/sleep.go           |  17 +
 src/runtime/testdata/testprog/stringconcat.go    |  20 ++
 src/runtime/testdata/testprog/syscall_windows.go |  70 ++++
 src/runtime/testdata/testprog/syscalls.go        |  11 +
 src/runtime/testdata/testprog/syscalls_linux.go  |  58 ++++
 src/runtime/testdata/testprog/syscalls_none.go   |  27 ++
 src/runtime/testdata/testprog/timeprof.go        |  45 +++
 .../testdata/testprog/traceback_ancestors.go     |  99 ++++++
 src/runtime/testdata/testprog/vdso.go            |  54 +++
 25 files changed, 2056 insertions(+)
 create mode 100644 src/runtime/testdata/testprog/abort.go
 create mode 100644 src/runtime/testdata/testprog/badtraceback.go
 create mode 100644 src/runtime/testdata/testprog/checkptr.go
 create mode 100644 src/runtime/testdata/testprog/crash.go
 create mode 100644 src/runtime/testdata/testprog/deadlock.go
 create mode 100644 src/runtime/testdata/testprog/gc.go
 create mode 100644 src/runtime/testdata/testprog/lockosthread.go
 create mode 100644 src/runtime/testdata/testprog/main.go
 create mode 100644 src/runtime/testdata/testprog/map.go
 create mode 100644 src/runtime/testdata/testprog/memprof.go
 create mode 100644 src/runtime/testdata/testprog/misc.go
 create mode 100644 src/runtime/testdata/testprog/numcpu_freebsd.go
 create mode 100644 src/runtime/testdata/testprog/panicprint.go
 create mode 100644 src/runtime/testdata/testprog/panicrace.go
 create mode 100644 src/runtime/testdata/testprog/preempt.go
 create mode 100644 src/runtime/testdata/testprog/signal.go
 create mode 100644 src/runtime/testdata/testprog/sleep.go
 create mode 100644 src/runtime/testdata/testprog/stringconcat.go
 create mode 100644 src/runtime/testdata/testprog/syscall_windows.go
 create mode 100644 src/runtime/testdata/testprog/syscalls.go
 create mode 100644 src/runtime/testdata/testprog/syscalls_linux.go
 create mode 100644 src/runtime/testdata/testprog/syscalls_none.go
 create mode 100644 src/runtime/testdata/testprog/timeprof.go
 create mode 100644 src/runtime/testdata/testprog/traceback_ancestors.go
 create mode 100644 src/runtime/testdata/testprog/vdso.go

(limited to 'src/runtime/testdata/testprog')

diff --git a/src/runtime/testdata/testprog/abort.go b/src/runtime/testdata/testprog/abort.go
new file mode 100644
index 0000000..9e79d4d
--- /dev/null
+++ b/src/runtime/testdata/testprog/abort.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package main + +import _ "unsafe" // for go:linkname + +func init() { + register("Abort", Abort) +} + +//go:linkname runtimeAbort runtime.abort +func runtimeAbort() + +func Abort() { + defer func() { + recover() + panic("BAD: recovered from abort") + }() + runtimeAbort() + println("BAD: after abort") +} diff --git a/src/runtime/testdata/testprog/badtraceback.go b/src/runtime/testdata/testprog/badtraceback.go new file mode 100644 index 0000000..d558adc --- /dev/null +++ b/src/runtime/testdata/testprog/badtraceback.go @@ -0,0 +1,47 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "runtime" + "runtime/debug" + "unsafe" +) + +func init() { + register("BadTraceback", BadTraceback) +} + +func BadTraceback() { + // Disable GC to prevent traceback at unexpected time. + debug.SetGCPercent(-1) + + // Run badLR1 on its own stack to minimize the stack size and + // exercise the stack bounds logic in the hex dump. + go badLR1() + select {} +} + +//go:noinline +func badLR1() { + // We need two frames on LR machines because we'll smash this + // frame's saved LR. + badLR2(0) +} + +//go:noinline +func badLR2(arg int) { + // Smash the return PC or saved LR. + lrOff := unsafe.Sizeof(uintptr(0)) + if runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" { + lrOff = 32 // FIXED_FRAME or sys.MinFrameSize + } + lrPtr := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&arg)) - lrOff)) + *lrPtr = 0xbad + + // Print a backtrace. This should include diagnostics for the + // bad return PC and a hex dump. + panic("backtrace") +} diff --git a/src/runtime/testdata/testprog/checkptr.go b/src/runtime/testdata/testprog/checkptr.go new file mode 100644 index 0000000..e0a2794 --- /dev/null +++ b/src/runtime/testdata/testprog/checkptr.go @@ -0,0 +1,51 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "unsafe" + +func init() { + register("CheckPtrAlignmentNoPtr", CheckPtrAlignmentNoPtr) + register("CheckPtrAlignmentPtr", CheckPtrAlignmentPtr) + register("CheckPtrArithmetic", CheckPtrArithmetic) + register("CheckPtrArithmetic2", CheckPtrArithmetic2) + register("CheckPtrSize", CheckPtrSize) + register("CheckPtrSmall", CheckPtrSmall) +} + +func CheckPtrAlignmentNoPtr() { + var x [2]int64 + p := unsafe.Pointer(&x[0]) + sink2 = (*int64)(unsafe.Pointer(uintptr(p) + 1)) +} + +func CheckPtrAlignmentPtr() { + var x [2]int64 + p := unsafe.Pointer(&x[0]) + sink2 = (**int64)(unsafe.Pointer(uintptr(p) + 1)) +} + +func CheckPtrArithmetic() { + var x int + i := uintptr(unsafe.Pointer(&x)) + sink2 = (*int)(unsafe.Pointer(i)) +} + +func CheckPtrArithmetic2() { + var x [2]int64 + p := unsafe.Pointer(&x[1]) + var one uintptr = 1 + sink2 = unsafe.Pointer(uintptr(p) & ^one) +} + +func CheckPtrSize() { + p := new(int64) + sink2 = p + sink2 = (*[100]int64)(unsafe.Pointer(p)) +} + +func CheckPtrSmall() { + sink2 = unsafe.Pointer(uintptr(1)) +} diff --git a/src/runtime/testdata/testprog/crash.go b/src/runtime/testdata/testprog/crash.go new file mode 100644 index 0000000..c4990cd --- /dev/null +++ b/src/runtime/testdata/testprog/crash.go @@ -0,0 +1,66 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "runtime" +) + +func init() { + register("Crash", Crash) + register("DoublePanic", DoublePanic) +} + +func test(name string) { + defer func() { + if x := recover(); x != nil { + fmt.Printf(" recovered") + } + fmt.Printf(" done\n") + }() + fmt.Printf("%s:", name) + var s *string + _ = *s + fmt.Print("SHOULD NOT BE HERE") +} + +func testInNewThread(name string) { + c := make(chan bool) + go func() { + runtime.LockOSThread() + test(name) + c <- true + }() + <-c +} + +func Crash() { + runtime.LockOSThread() + test("main") + testInNewThread("new-thread") + testInNewThread("second-new-thread") + test("main-again") +} + +type P string + +func (p P) String() string { + // Try to free the "YYY" string header when the "XXX" + // panic is stringified. + runtime.GC() + runtime.GC() + runtime.GC() + return string(p) +} + +// Test that panic message is not clobbered. +// See issue 30150. +func DoublePanic() { + defer func() { + panic(P("YYY")) + }() + panic(P("XXX")) +} diff --git a/src/runtime/testdata/testprog/deadlock.go b/src/runtime/testdata/testprog/deadlock.go new file mode 100644 index 0000000..781acbd --- /dev/null +++ b/src/runtime/testdata/testprog/deadlock.go @@ -0,0 +1,363 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +func init() { + registerInit("InitDeadlock", InitDeadlock) + registerInit("NoHelperGoroutines", NoHelperGoroutines) + + register("SimpleDeadlock", SimpleDeadlock) + register("LockedDeadlock", LockedDeadlock) + register("LockedDeadlock2", LockedDeadlock2) + register("GoexitDeadlock", GoexitDeadlock) + register("StackOverflow", StackOverflow) + register("ThreadExhaustion", ThreadExhaustion) + register("RecursivePanic", RecursivePanic) + register("RecursivePanic2", RecursivePanic2) + register("RecursivePanic3", RecursivePanic3) + register("RecursivePanic4", RecursivePanic4) + register("RecursivePanic5", RecursivePanic5) + register("GoexitExit", GoexitExit) + register("GoNil", GoNil) + register("MainGoroutineID", MainGoroutineID) + register("Breakpoint", Breakpoint) + register("GoexitInPanic", GoexitInPanic) + register("PanicAfterGoexit", PanicAfterGoexit) + register("RecoveredPanicAfterGoexit", RecoveredPanicAfterGoexit) + register("RecoverBeforePanicAfterGoexit", RecoverBeforePanicAfterGoexit) + register("RecoverBeforePanicAfterGoexit2", RecoverBeforePanicAfterGoexit2) + register("PanicTraceback", PanicTraceback) + register("GoschedInPanic", GoschedInPanic) + register("SyscallInPanic", SyscallInPanic) + register("PanicLoop", PanicLoop) +} + +func SimpleDeadlock() { + select {} + panic("not reached") +} + +func InitDeadlock() { + select {} + panic("not reached") +} + +func LockedDeadlock() { + runtime.LockOSThread() + select {} +} + +func LockedDeadlock2() { + go func() { + runtime.LockOSThread() + select {} + }() + time.Sleep(time.Millisecond) + select {} +} + +func GoexitDeadlock() { + F := func() { + for i := 0; i < 10; i++ { + } + } + + go F() + go F() + runtime.Goexit() +} + +func StackOverflow() { + var f func() byte + f = func() byte { + var buf [64 << 10]byte + return buf[0] + f() + } + debug.SetMaxStack(1474560) + f() +} + +func ThreadExhaustion() { + debug.SetMaxThreads(10) + c := make(chan int) + for i := 0; i < 100; i++ { + go func() { + runtime.LockOSThread() + c <- 0 + select {} + }() + <-c + } +} + +func RecursivePanic() { + func() 
{ + defer func() { + fmt.Println(recover()) + }() + var x [8192]byte + func(x [8192]byte) { + defer func() { + if err := recover(); err != nil { + panic("wrap: " + err.(string)) + } + }() + panic("bad") + }(x) + }() + panic("again") +} + +// Same as RecursivePanic, but do the first recover and the second panic in +// separate defers, and make sure they are executed in the correct order. +func RecursivePanic2() { + func() { + defer func() { + fmt.Println(recover()) + }() + var x [8192]byte + func(x [8192]byte) { + defer func() { + panic("second panic") + }() + defer func() { + fmt.Println(recover()) + }() + panic("first panic") + }(x) + }() + panic("third panic") +} + +// Make sure that the first panic finished as a panic, even though the second +// panic was recovered +func RecursivePanic3() { + defer func() { + defer func() { + recover() + }() + panic("second panic") + }() + panic("first panic") +} + +// Test case where a single defer recovers one panic but starts another panic. If +// the second panic is never recovered, then the recovered first panic will still +// appear on the panic stack (labeled '[recovered]') and the runtime stack. +func RecursivePanic4() { + defer func() { + recover() + panic("second panic") + }() + panic("first panic") +} + +// Test case where we have an open-coded defer higher up the stack (in two), and +// in the current function (three) we recover in a defer while we still have +// another defer to be processed. +func RecursivePanic5() { + one() + panic("third panic") +} + +//go:noinline +func one() { + two() +} + +//go:noinline +func two() { + defer func() { + }() + + three() +} + +//go:noinline +func three() { + defer func() { + }() + + defer func() { + fmt.Println(recover()) + }() + + defer func() { + fmt.Println(recover()) + panic("second panic") + }() + + panic("first panic") +} + +func GoexitExit() { + println("t1") + go func() { + time.Sleep(time.Millisecond) + }() + i := 0 + println("t2") + runtime.SetFinalizer(&i, func(p *int) {}) + println("t3") + runtime.GC() + println("t4") + runtime.Goexit() +} + +func GoNil() { + defer func() { + recover() + }() + var f func() + go f() + select {} +} + +func MainGoroutineID() { + panic("test") +} + +func NoHelperGoroutines() { + i := 0 + runtime.SetFinalizer(&i, func(p *int) {}) + time.AfterFunc(time.Hour, func() {}) + panic("oops") +} + +func Breakpoint() { + runtime.Breakpoint() +} + +func GoexitInPanic() { + go func() { + defer func() { + runtime.Goexit() + }() + panic("hello") + }() + runtime.Goexit() +} + +type errorThatGosched struct{} + +func (errorThatGosched) Error() string { + runtime.Gosched() + return "errorThatGosched" +} + +func GoschedInPanic() { + panic(errorThatGosched{}) +} + +type errorThatPrint struct{} + +func (errorThatPrint) Error() string { + fmt.Println("1") + fmt.Println("2") + return "3" +} + +func SyscallInPanic() { + panic(errorThatPrint{}) +} + +func PanicAfterGoexit() { + defer func() { + panic("hello") + }() + runtime.Goexit() +} + +func RecoveredPanicAfterGoexit() { + defer func() { + defer func() { + r := recover() + if r == nil { + panic("bad recover") + } + }() + panic("hello") + }() + runtime.Goexit() +} + +func RecoverBeforePanicAfterGoexit() { + // 1. defer a function that recovers + // 2. defer a function that panics + // 3. call goexit + // Goexit runs the #2 defer. Its panic + // is caught by the #1 defer. For Goexit, we explicitly + // resume execution in the Goexit loop, instead of resuming + // execution in the caller (which would make the Goexit disappear!) 
+ defer func() { + r := recover() + if r == nil { + panic("bad recover") + } + }() + defer func() { + panic("hello") + }() + runtime.Goexit() +} + +func RecoverBeforePanicAfterGoexit2() { + for i := 0; i < 2; i++ { + defer func() { + }() + } + // 1. defer a function that recovers + // 2. defer a function that panics + // 3. call goexit + // Goexit runs the #2 defer. Its panic + // is caught by the #1 defer. For Goexit, we explicitly + // resume execution in the Goexit loop, instead of resuming + // execution in the caller (which would make the Goexit disappear!) + defer func() { + r := recover() + if r == nil { + panic("bad recover") + } + }() + defer func() { + panic("hello") + }() + runtime.Goexit() +} + +func PanicTraceback() { + pt1() +} + +func pt1() { + defer func() { + panic("panic pt1") + }() + pt2() +} + +func pt2() { + defer func() { + panic("panic pt2") + }() + panic("hello") +} + +type panicError struct{} + +func (*panicError) Error() string { + panic("double error") +} + +func PanicLoop() { + panic(&panicError{}) +} diff --git a/src/runtime/testdata/testprog/gc.go b/src/runtime/testdata/testprog/gc.go new file mode 100644 index 0000000..74732cd --- /dev/null +++ b/src/runtime/testdata/testprog/gc.go @@ -0,0 +1,302 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" + "runtime" + "runtime/debug" + "sync/atomic" + "time" + "unsafe" +) + +func init() { + register("GCFairness", GCFairness) + register("GCFairness2", GCFairness2) + register("GCSys", GCSys) + register("GCPhys", GCPhys) + register("DeferLiveness", DeferLiveness) + register("GCZombie", GCZombie) +} + +func GCSys() { + runtime.GOMAXPROCS(1) + memstats := new(runtime.MemStats) + runtime.GC() + runtime.ReadMemStats(memstats) + sys := memstats.Sys + + runtime.MemProfileRate = 0 // disable profiler + + itercount := 100000 + for i := 0; i < itercount; i++ { + workthegc() + } + + // Should only be using a few MB. + // We allocated 100 MB or (if not short) 1 GB. + runtime.ReadMemStats(memstats) + if sys > memstats.Sys { + sys = 0 + } else { + sys = memstats.Sys - sys + } + if sys > 16<<20 { + fmt.Printf("using too much memory: %d bytes\n", sys) + return + } + fmt.Printf("OK\n") +} + +var sink []byte + +func workthegc() []byte { + sink = make([]byte, 1029) + return sink +} + +func GCFairness() { + runtime.GOMAXPROCS(1) + f, err := os.Open("/dev/null") + if os.IsNotExist(err) { + // This test tests what it is intended to test only if writes are fast. + // If there is no /dev/null, we just don't execute the test. + fmt.Println("OK") + return + } + if err != nil { + fmt.Println(err) + os.Exit(1) + } + for i := 0; i < 2; i++ { + go func() { + for { + f.Write([]byte(".")) + } + }() + } + time.Sleep(10 * time.Millisecond) + fmt.Println("OK") +} + +func GCFairness2() { + // Make sure user code can't exploit the GC's high priority + // scheduling to make scheduling of user code unfair. See + // issue #15706. + runtime.GOMAXPROCS(1) + debug.SetGCPercent(1) + var count [3]int64 + var sink [3]interface{} + for i := range count { + go func(i int) { + for { + sink[i] = make([]byte, 1024) + atomic.AddInt64(&count[i], 1) + } + }(i) + } + // Note: If the unfairness is really bad, it may not even get + // past the sleep. + // + // If the scheduling rules change, this may not be enough time + // to let all goroutines run, but for now we cycle through + // them rapidly. 
+ // + // OpenBSD's scheduler makes every usleep() take at least + // 20ms, so we need a long time to ensure all goroutines have + // run. If they haven't run after 30ms, give it another 1000ms + // and check again. + time.Sleep(30 * time.Millisecond) + var fail bool + for i := range count { + if atomic.LoadInt64(&count[i]) == 0 { + fail = true + } + } + if fail { + time.Sleep(1 * time.Second) + for i := range count { + if atomic.LoadInt64(&count[i]) == 0 { + fmt.Printf("goroutine %d did not run\n", i) + return + } + } + } + fmt.Println("OK") +} + +func GCPhys() { + // This test ensures that heap-growth scavenging is working as intended. + // + // It sets up a specific scenario: it allocates two pairs of objects whose + // sizes sum to size. One object in each pair is "small" (though must be + // large enough to be considered a large object by the runtime) and one is + // large. The small objects are kept while the large objects are freed, + // creating two large unscavenged holes in the heap. The heap goal should + // also be small as a result (so size must be at least as large as the + // minimum heap size). We then allocate one large object, bigger than both + // pairs of objects combined. This allocation, because it will tip + // HeapSys-HeapReleased well above the heap goal, should trigger heap-growth + // scavenging and scavenge most, if not all, of the large holes we created + // earlier. + const ( + // Size must be also large enough to be considered a large + // object (not in any size-segregated span). + size = 4 << 20 + split = 64 << 10 + objects = 2 + + // The page cache could hide 64 8-KiB pages from the scavenger today. + maxPageCache = (8 << 10) * 64 + + // Reduce GOMAXPROCS down to 4 if it's greater. We need to bound the amount + // of memory held in the page cache because the scavenger can't reach it. + // The page cache will hold at most maxPageCache of memory per-P, so this + // bounds the amount of memory hidden from the scavenger to 4*maxPageCache + // at most. + maxProcs = 4 + ) + // Set GOGC so that this test operates under consistent assumptions. + debug.SetGCPercent(100) + procs := runtime.GOMAXPROCS(-1) + if procs > maxProcs { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs)) + procs = runtime.GOMAXPROCS(-1) + } + // Save objects which we want to survive, and condemn objects which we don't. + // Note that we condemn objects in this way and release them all at once in + // order to avoid having the GC start freeing up these objects while the loop + // is still running and filling in the holes we intend to make. + saved := make([][]byte, 0, objects+1) + condemned := make([][]byte, 0, objects) + for i := 0; i < 2*objects; i++ { + if i%2 == 0 { + saved = append(saved, make([]byte, split)) + } else { + condemned = append(condemned, make([]byte, size-split)) + } + } + condemned = nil + // Clean up the heap. This will free up every other object created above + // (i.e. everything in condemned) creating holes in the heap. + // Also, if the condemned objects are still being swept, its possible that + // the scavenging that happens as a result of the next allocation won't see + // the holes at all. We call runtime.GC() twice here so that when we allocate + // our large object there's no race with sweeping. + runtime.GC() + runtime.GC() + // Perform one big allocation which should also scavenge any holes. 
+ // + // The heap goal will rise after this object is allocated, so it's very + // important that we try to do all the scavenging in a single allocation + // that exceeds the heap goal. Otherwise the rising heap goal could foil our + // test. + saved = append(saved, make([]byte, objects*size)) + // Clean up the heap again just to put it in a known state. + runtime.GC() + // heapBacked is an estimate of the amount of physical memory used by + // this test. HeapSys is an estimate of the size of the mapped virtual + // address space (which may or may not be backed by physical pages) + // whereas HeapReleased is an estimate of the amount of bytes returned + // to the OS. Their difference then roughly corresponds to the amount + // of virtual address space that is backed by physical pages. + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + heapBacked := stats.HeapSys - stats.HeapReleased + // If heapBacked does not exceed the heap goal by more than retainExtraPercent + // then the scavenger is working as expected; the newly-created holes have been + // scavenged immediately as part of the allocations which cannot fit in the holes. + // + // Since the runtime should scavenge the entirety of the remaining holes, + // theoretically there should be no more free and unscavenged memory. However due + // to other allocations that happen during this test we may still see some physical + // memory over-use. + overuse := (float64(heapBacked) - float64(stats.HeapAlloc)) / float64(stats.HeapAlloc) + // Compute the threshold. + // + // In theory, this threshold should just be zero, but that's not possible in practice. + // Firstly, the runtime's page cache can hide up to maxPageCache of free memory from the + // scavenger per P. To account for this, we increase the threshold by the ratio between the + // total amount the runtime could hide from the scavenger to the amount of memory we expect + // to be able to scavenge here, which is (size-split)*objects. This computation is the crux + // GOMAXPROCS above; if GOMAXPROCS is too high the threshold just becomes 100%+ since the + // amount of memory being allocated is fixed. Then we add 5% to account for noise, such as + // other allocations this test may have performed that we don't explicitly account for The + // baseline threshold here is around 11% for GOMAXPROCS=1, capping out at around 30% for + // GOMAXPROCS=4. + threshold := 0.05 + float64(procs)*maxPageCache/float64((size-split)*objects) + if overuse <= threshold { + fmt.Println("OK") + return + } + // Physical memory utilization exceeds the threshold, so heap-growth scavenging + // did not operate as expected. + // + // In the context of this test, this indicates a large amount of + // fragmentation with physical pages that are otherwise unused but not + // returned to the OS. + fmt.Printf("exceeded physical memory overuse threshold of %3.2f%%: %3.2f%%\n"+ + "(alloc: %d, goal: %d, sys: %d, rel: %d, objs: %d)\n", threshold*100, overuse*100, + stats.HeapAlloc, stats.NextGC, stats.HeapSys, stats.HeapReleased, len(saved)) + runtime.KeepAlive(saved) +} + +// Test that defer closure is correctly scanned when the stack is scanned. +func DeferLiveness() { + var x [10]int + escape(&x) + fn := func() { + if x[0] != 42 { + panic("FAIL") + } + } + defer fn() + + x[0] = 42 + runtime.GC() + runtime.GC() + runtime.GC() +} + +//go:noinline +func escape(x interface{}) { sink2 = x; sink2 = nil } + +var sink2 interface{} + +// Test zombie object detection and reporting. 
+func GCZombie() { + // Allocate several objects of unusual size (so free slots are + // unlikely to all be re-allocated by the runtime). + const size = 190 + const count = 8192 / size + keep := make([]*byte, 0, (count+1)/2) + free := make([]uintptr, 0, (count+1)/2) + zombies := make([]*byte, 0, len(free)) + for i := 0; i < count; i++ { + obj := make([]byte, size) + p := &obj[0] + if i%2 == 0 { + keep = append(keep, p) + } else { + free = append(free, uintptr(unsafe.Pointer(p))) + } + } + + // Free the unreferenced objects. + runtime.GC() + + // Bring the free objects back to life. + for _, p := range free { + zombies = append(zombies, (*byte)(unsafe.Pointer(p))) + } + + // GC should detect the zombie objects. + runtime.GC() + println("failed") + runtime.KeepAlive(keep) + runtime.KeepAlive(zombies) +} diff --git a/src/runtime/testdata/testprog/lockosthread.go b/src/runtime/testdata/testprog/lockosthread.go new file mode 100644 index 0000000..e9d7fdb --- /dev/null +++ b/src/runtime/testdata/testprog/lockosthread.go @@ -0,0 +1,246 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "sync" + "time" +) + +var mainTID int + +func init() { + registerInit("LockOSThreadMain", func() { + // init is guaranteed to run on the main thread. + mainTID = gettid() + }) + register("LockOSThreadMain", LockOSThreadMain) + + registerInit("LockOSThreadAlt", func() { + // Lock the OS thread now so main runs on the main thread. + runtime.LockOSThread() + }) + register("LockOSThreadAlt", LockOSThreadAlt) + + registerInit("LockOSThreadAvoidsStatePropagation", func() { + // Lock the OS thread now so main runs on the main thread. + runtime.LockOSThread() + }) + register("LockOSThreadAvoidsStatePropagation", LockOSThreadAvoidsStatePropagation) + register("LockOSThreadTemplateThreadRace", LockOSThreadTemplateThreadRace) +} + +func LockOSThreadMain() { + // gettid only works on Linux, so on other platforms this just + // checks that the runtime doesn't do anything terrible. + + // This requires GOMAXPROCS=1 from the beginning to reliably + // start a goroutine on the main thread. + if runtime.GOMAXPROCS(-1) != 1 { + println("requires GOMAXPROCS=1") + os.Exit(1) + } + + ready := make(chan bool, 1) + go func() { + // Because GOMAXPROCS=1, this *should* be on the main + // thread. Stay there. + runtime.LockOSThread() + if mainTID != 0 && gettid() != mainTID { + println("failed to start goroutine on main thread") + os.Exit(1) + } + // Exit with the thread locked, which should exit the + // main thread. + ready <- true + }() + <-ready + time.Sleep(1 * time.Millisecond) + // Check that this goroutine is still running on a different + // thread. + if mainTID != 0 && gettid() == mainTID { + println("goroutine migrated to locked thread") + os.Exit(1) + } + println("OK") +} + +func LockOSThreadAlt() { + // This is running locked to the main OS thread. + + var subTID int + ready := make(chan bool, 1) + go func() { + // This goroutine must be running on a new thread. + runtime.LockOSThread() + subTID = gettid() + ready <- true + // Exit with the thread locked. + }() + <-ready + runtime.UnlockOSThread() + for i := 0; i < 100; i++ { + time.Sleep(1 * time.Millisecond) + // Check that this goroutine is running on a different thread. 
+ if subTID != 0 && gettid() == subTID { + println("locked thread reused") + os.Exit(1) + } + exists, supported := tidExists(subTID) + if !supported || !exists { + goto ok + } + } + println("sub thread", subTID, "still running") + return +ok: + println("OK") +} + +func LockOSThreadAvoidsStatePropagation() { + // This test is similar to LockOSThreadAlt in that it will detect if a thread + // which should have died is still running. However, rather than do this with + // thread IDs, it does this by unsharing state on that thread. This way, it + // also detects whether new threads were cloned from the dead thread, and not + // from a clean thread. Cloning from a locked thread is undesirable since + // cloned threads will inherit potentially unwanted OS state. + // + // unshareFs, getcwd, and chdir("/tmp") are only guaranteed to work on + // Linux, so on other platforms this just checks that the runtime doesn't + // do anything terrible. + // + // This is running locked to the main OS thread. + + // GOMAXPROCS=1 makes this fail much more reliably if a tainted thread is + // cloned from. + if runtime.GOMAXPROCS(-1) != 1 { + println("requires GOMAXPROCS=1") + os.Exit(1) + } + + if err := chdir("/"); err != nil { + println("failed to chdir:", err.Error()) + os.Exit(1) + } + // On systems other than Linux, cwd == "". + cwd, err := getcwd() + if err != nil { + println("failed to get cwd:", err.Error()) + os.Exit(1) + } + if cwd != "" && cwd != "/" { + println("unexpected cwd", cwd, " wanted /") + os.Exit(1) + } + + ready := make(chan bool, 1) + go func() { + // This goroutine must be running on a new thread. + runtime.LockOSThread() + + // Unshare details about the FS, like the CWD, with + // the rest of the process on this thread. + // On systems other than Linux, this is a no-op. + if err := unshareFs(); err != nil { + if err == errNotPermitted { + println("unshare not permitted") + os.Exit(0) + } + println("failed to unshare fs:", err.Error()) + os.Exit(1) + } + // Chdir to somewhere else on this thread. + // On systems other than Linux, this is a no-op. + if err := chdir("/tmp"); err != nil { + println("failed to chdir:", err.Error()) + os.Exit(1) + } + + // The state on this thread is now considered "tainted", but it + // should no longer be observable in any other context. + + ready <- true + // Exit with the thread locked. + }() + <-ready + + // Spawn yet another goroutine and lock it. Since GOMAXPROCS=1, if + // for some reason state from the (hopefully dead) locked thread above + // propagated into a newly created thread (via clone), or that thread + // is actually being re-used, then we should get scheduled on such a + // thread with high likelihood. + done := make(chan bool) + go func() { + runtime.LockOSThread() + + // Get the CWD and check if this is the same as the main thread's + // CWD. Every thread should share the same CWD. + // On systems other than Linux, wd == "". + wd, err := getcwd() + if err != nil { + println("failed to get cwd:", err.Error()) + os.Exit(1) + } + if wd != cwd { + println("bad state from old thread propagated after it should have died") + os.Exit(1) + } + <-done + + runtime.UnlockOSThread() + }() + done <- true + runtime.UnlockOSThread() + println("OK") +} + +func LockOSThreadTemplateThreadRace() { + // This test attempts to reproduce the race described in + // golang.org/issue/38931. To do so, we must have a stop-the-world + // (achieved via ReadMemStats) racing with two LockOSThread calls. 
+ // + // While this test attempts to line up the timing, it is only expected + // to fail (and thus hang) around 2% of the time if the race is + // present. + + // Ensure enough Ps to actually run everything in parallel. Though on + // <4 core machines, we are still at the whim of the kernel scheduler. + runtime.GOMAXPROCS(4) + + go func() { + // Stop the world; race with LockOSThread below. + var m runtime.MemStats + for { + runtime.ReadMemStats(&m) + } + }() + + // Try to synchronize both LockOSThreads. + start := time.Now().Add(10 * time.Millisecond) + + var wg sync.WaitGroup + wg.Add(2) + + for i := 0; i < 2; i++ { + go func() { + for time.Now().Before(start) { + } + + // Add work to the local runq to trigger early startm + // in handoffp. + go func() {}() + + runtime.LockOSThread() + runtime.Gosched() // add a preemption point. + wg.Done() + }() + } + + wg.Wait() + // If both LockOSThreads completed then we did not hit the race. + println("OK") +} diff --git a/src/runtime/testdata/testprog/main.go b/src/runtime/testdata/testprog/main.go new file mode 100644 index 0000000..ae491a2 --- /dev/null +++ b/src/runtime/testdata/testprog/main.go @@ -0,0 +1,35 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "os" + +var cmds = map[string]func(){} + +func register(name string, f func()) { + if cmds[name] != nil { + panic("duplicate registration: " + name) + } + cmds[name] = f +} + +func registerInit(name string, f func()) { + if len(os.Args) >= 2 && os.Args[1] == name { + f() + } +} + +func main() { + if len(os.Args) < 2 { + println("usage: " + os.Args[0] + " name-of-test") + return + } + f := cmds[os.Args[1]] + if f == nil { + println("unknown function: " + os.Args[1]) + return + } + f() +} diff --git a/src/runtime/testdata/testprog/map.go b/src/runtime/testdata/testprog/map.go new file mode 100644 index 0000000..5524289 --- /dev/null +++ b/src/runtime/testdata/testprog/map.go @@ -0,0 +1,77 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "runtime" + +func init() { + register("concurrentMapWrites", concurrentMapWrites) + register("concurrentMapReadWrite", concurrentMapReadWrite) + register("concurrentMapIterateWrite", concurrentMapIterateWrite) +} + +func concurrentMapWrites() { + m := map[int]int{} + c := make(chan struct{}) + go func() { + for i := 0; i < 10000; i++ { + m[5] = 0 + runtime.Gosched() + } + c <- struct{}{} + }() + go func() { + for i := 0; i < 10000; i++ { + m[6] = 0 + runtime.Gosched() + } + c <- struct{}{} + }() + <-c + <-c +} + +func concurrentMapReadWrite() { + m := map[int]int{} + c := make(chan struct{}) + go func() { + for i := 0; i < 10000; i++ { + m[5] = 0 + runtime.Gosched() + } + c <- struct{}{} + }() + go func() { + for i := 0; i < 10000; i++ { + _ = m[6] + runtime.Gosched() + } + c <- struct{}{} + }() + <-c + <-c +} + +func concurrentMapIterateWrite() { + m := map[int]int{} + c := make(chan struct{}) + go func() { + for i := 0; i < 10000; i++ { + m[5] = 0 + runtime.Gosched() + } + c <- struct{}{} + }() + go func() { + for i := 0; i < 10000; i++ { + for range m { + } + runtime.Gosched() + } + c <- struct{}{} + }() + <-c + <-c +} diff --git a/src/runtime/testdata/testprog/memprof.go b/src/runtime/testdata/testprog/memprof.go new file mode 100644 index 0000000..0392e60 --- /dev/null +++ b/src/runtime/testdata/testprog/memprof.go @@ -0,0 +1,51 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("MemProf", MemProf) +} + +var memProfBuf bytes.Buffer +var memProfStr string + +func MemProf() { + // Force heap sampling for determinism. + runtime.MemProfileRate = 1 + + for i := 0; i < 10; i++ { + fmt.Fprintf(&memProfBuf, "%*d\n", i, i) + } + memProfStr = memProfBuf.String() + + runtime.GC() + + f, err := os.CreateTemp("", "memprof") + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + if err := pprof.WriteHeapProfile(f); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + name := f.Name() + if err := f.Close(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + fmt.Println(name) +} diff --git a/src/runtime/testdata/testprog/misc.go b/src/runtime/testdata/testprog/misc.go new file mode 100644 index 0000000..7ccd389 --- /dev/null +++ b/src/runtime/testdata/testprog/misc.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "runtime" + +func init() { + register("NumGoroutine", NumGoroutine) +} + +func NumGoroutine() { + println(runtime.NumGoroutine()) +} diff --git a/src/runtime/testdata/testprog/numcpu_freebsd.go b/src/runtime/testdata/testprog/numcpu_freebsd.go new file mode 100644 index 0000000..aff36ec --- /dev/null +++ b/src/runtime/testdata/testprog/numcpu_freebsd.go @@ -0,0 +1,141 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strconv" + "strings" + "syscall" +) + +var ( + cpuSetRE = regexp.MustCompile(`(\d,?)+`) +) + +func init() { + register("FreeBSDNumCPU", FreeBSDNumCPU) + register("FreeBSDNumCPUHelper", FreeBSDNumCPUHelper) +} + +func FreeBSDNumCPUHelper() { + fmt.Printf("%d\n", runtime.NumCPU()) +} + +func FreeBSDNumCPU() { + _, err := exec.LookPath("cpuset") + if err != nil { + // Can not test without cpuset command. + fmt.Println("OK") + return + } + _, err = exec.LookPath("sysctl") + if err != nil { + // Can not test without sysctl command. + fmt.Println("OK") + return + } + cmd := exec.Command("sysctl", "-n", "kern.smp.active") + output, err := cmd.CombinedOutput() + if err != nil { + fmt.Printf("fail to launch '%s', error: %s, output: %s\n", strings.Join(cmd.Args, " "), err, output) + return + } + if bytes.Equal(output, []byte("1\n")) == false { + // SMP mode deactivated in kernel. + fmt.Println("OK") + return + } + + list, err := getList() + if err != nil { + fmt.Printf("%s\n", err) + return + } + err = checkNCPU(list) + if err != nil { + fmt.Printf("%s\n", err) + return + } + if len(list) >= 2 { + err = checkNCPU(list[:len(list)-1]) + if err != nil { + fmt.Printf("%s\n", err) + return + } + } + fmt.Println("OK") + return +} + +func getList() ([]string, error) { + pid := syscall.Getpid() + + // Launch cpuset to print a list of available CPUs: pid mask: 0, 1, 2, 3. + cmd := exec.Command("cpuset", "-g", "-p", strconv.Itoa(pid)) + cmdline := strings.Join(cmd.Args, " ") + output, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("fail to execute '%s': %s", cmdline, err) + } + pos := bytes.IndexRune(output, '\n') + if pos == -1 { + return nil, fmt.Errorf("invalid output from '%s', '\\n' not found: %s", cmdline, output) + } + output = output[0:pos] + + pos = bytes.IndexRune(output, ':') + if pos == -1 { + return nil, fmt.Errorf("invalid output from '%s', ':' not found: %s", cmdline, output) + } + + var list []string + for _, val := range bytes.Split(output[pos+1:], []byte(",")) { + index := string(bytes.TrimSpace(val)) + if len(index) == 0 { + continue + } + list = append(list, index) + } + if len(list) == 0 { + return nil, fmt.Errorf("empty CPU list from '%s': %s", cmdline, output) + } + return list, nil +} + +func checkNCPU(list []string) error { + listString := strings.Join(list, ",") + if len(listString) == 0 { + return fmt.Errorf("could not check against an empty CPU list") + } + + cListString := cpuSetRE.FindString(listString) + if len(cListString) == 0 { + return fmt.Errorf("invalid cpuset output '%s'", listString) + } + // Launch FreeBSDNumCPUHelper() with specified CPUs list. + cmd := exec.Command("cpuset", "-l", cListString, os.Args[0], "FreeBSDNumCPUHelper") + cmdline := strings.Join(cmd.Args, " ") + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("fail to launch child '%s', error: %s, output: %s", cmdline, err, output) + } + + // NumCPU from FreeBSDNumCPUHelper come with '\n'. 
+ output = bytes.TrimSpace(output) + n, err := strconv.Atoi(string(output)) + if err != nil { + return fmt.Errorf("fail to parse output from child '%s', error: %s, output: %s", cmdline, err, output) + } + if n != len(list) { + return fmt.Errorf("runtime.NumCPU() expected to %d, got %d when run with CPU list %s", len(list), n, cListString) + } + return nil +} diff --git a/src/runtime/testdata/testprog/panicprint.go b/src/runtime/testdata/testprog/panicprint.go new file mode 100644 index 0000000..c8deabe --- /dev/null +++ b/src/runtime/testdata/testprog/panicprint.go @@ -0,0 +1,111 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +type MyBool bool +type MyComplex128 complex128 +type MyComplex64 complex64 +type MyFloat32 float32 +type MyFloat64 float64 +type MyInt int +type MyInt8 int8 +type MyInt16 int16 +type MyInt32 int32 +type MyInt64 int64 +type MyString string +type MyUint uint +type MyUint8 uint8 +type MyUint16 uint16 +type MyUint32 uint32 +type MyUint64 uint64 +type MyUintptr uintptr + +func panicCustomComplex64() { + panic(MyComplex64(0.11 + 3i)) +} + +func panicCustomComplex128() { + panic(MyComplex128(32.1 + 10i)) +} + +func panicCustomString() { + panic(MyString("Panic")) +} + +func panicCustomBool() { + panic(MyBool(true)) +} + +func panicCustomInt() { + panic(MyInt(93)) +} + +func panicCustomInt8() { + panic(MyInt8(93)) +} + +func panicCustomInt16() { + panic(MyInt16(93)) +} + +func panicCustomInt32() { + panic(MyInt32(93)) +} + +func panicCustomInt64() { + panic(MyInt64(93)) +} + +func panicCustomUint() { + panic(MyUint(93)) +} + +func panicCustomUint8() { + panic(MyUint8(93)) +} + +func panicCustomUint16() { + panic(MyUint16(93)) +} + +func panicCustomUint32() { + panic(MyUint32(93)) +} + +func panicCustomUint64() { + panic(MyUint64(93)) +} + +func panicCustomUintptr() { + panic(MyUintptr(93)) +} + +func panicCustomFloat64() { + panic(MyFloat64(-93.70)) +} + +func panicCustomFloat32() { + panic(MyFloat32(-93.70)) +} + +func init() { + register("panicCustomComplex64", panicCustomComplex64) + register("panicCustomComplex128", panicCustomComplex128) + register("panicCustomBool", panicCustomBool) + register("panicCustomFloat32", panicCustomFloat32) + register("panicCustomFloat64", panicCustomFloat64) + register("panicCustomInt", panicCustomInt) + register("panicCustomInt8", panicCustomInt8) + register("panicCustomInt16", panicCustomInt16) + register("panicCustomInt32", panicCustomInt32) + register("panicCustomInt64", panicCustomInt64) + register("panicCustomString", panicCustomString) + register("panicCustomUint", panicCustomUint) + register("panicCustomUint8", panicCustomUint8) + register("panicCustomUint16", panicCustomUint16) + register("panicCustomUint32", panicCustomUint32) + register("panicCustomUint64", panicCustomUint64) + register("panicCustomUintptr", panicCustomUintptr) +} diff --git a/src/runtime/testdata/testprog/panicrace.go b/src/runtime/testdata/testprog/panicrace.go new file mode 100644 index 0000000..f058994 --- /dev/null +++ b/src/runtime/testdata/testprog/panicrace.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "runtime" + "sync" +) + +func init() { + register("PanicRace", PanicRace) +} + +func PanicRace() { + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer func() { + wg.Done() + runtime.Gosched() + }() + panic("crash") + }() + wg.Wait() +} diff --git a/src/runtime/testdata/testprog/preempt.go b/src/runtime/testdata/testprog/preempt.go new file mode 100644 index 0000000..1c74d0e --- /dev/null +++ b/src/runtime/testdata/testprog/preempt.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "runtime" + "runtime/debug" + "sync/atomic" +) + +func init() { + register("AsyncPreempt", AsyncPreempt) +} + +func AsyncPreempt() { + // Run with just 1 GOMAXPROCS so the runtime is required to + // use scheduler preemption. + runtime.GOMAXPROCS(1) + // Disable GC so we have complete control of what we're testing. + debug.SetGCPercent(-1) + + // Start a goroutine with no sync safe-points. + var ready, ready2 uint32 + go func() { + for { + atomic.StoreUint32(&ready, 1) + dummy() + dummy() + } + }() + // Also start one with a frameless function. + // This is an especially interesting case for + // LR machines. + go func() { + atomic.AddUint32(&ready2, 1) + frameless() + }() + // Also test empty infinite loop. + go func() { + atomic.AddUint32(&ready2, 1) + for { + } + }() + + // Wait for the goroutine to stop passing through sync + // safe-points. + for atomic.LoadUint32(&ready) == 0 || atomic.LoadUint32(&ready2) < 2 { + runtime.Gosched() + } + + // Run a GC, which will have to stop the goroutine for STW and + // for stack scanning. If this doesn't work, the test will + // deadlock and timeout. + runtime.GC() + + println("OK") +} + +//go:noinline +func frameless() { + for i := int64(0); i < 1<<62; i++ { + out += i * i * i * i * i * 12345 + } +} + +var out int64 + +//go:noinline +func dummy() {} diff --git a/src/runtime/testdata/testprog/signal.go b/src/runtime/testdata/testprog/signal.go new file mode 100644 index 0000000..417e105 --- /dev/null +++ b/src/runtime/testdata/testprog/signal.go @@ -0,0 +1,29 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !windows,!plan9 + +package main + +import ( + "syscall" + "time" +) + +func init() { + register("SignalExitStatus", SignalExitStatus) +} + +func SignalExitStatus() { + syscall.Kill(syscall.Getpid(), syscall.SIGTERM) + + // Should die immediately, but we've seen flakiness on various + // systems (see issue 14063). It's possible that the signal is + // being delivered to a different thread and we are returning + // and exiting before that thread runs again. Give the program + // a little while to die to make sure we pick up the signal + // before we return and exit the program. The time here + // shouldn't matter--we'll never really sleep this long. + time.Sleep(time.Second) +} diff --git a/src/runtime/testdata/testprog/sleep.go b/src/runtime/testdata/testprog/sleep.go new file mode 100644 index 0000000..86e2f6c --- /dev/null +++ b/src/runtime/testdata/testprog/sleep.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "time" + +// for golang.org/issue/27250 + +func init() { + register("After1", After1) +} + +func After1() { + <-time.After(1 * time.Second) +} diff --git a/src/runtime/testdata/testprog/stringconcat.go b/src/runtime/testdata/testprog/stringconcat.go new file mode 100644 index 0000000..f233e66 --- /dev/null +++ b/src/runtime/testdata/testprog/stringconcat.go @@ -0,0 +1,20 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +func init() { + register("stringconcat", stringconcat) +} + +func stringconcat() { + s0 := strings.Repeat("0", 1<<10) + s1 := strings.Repeat("1", 1<<10) + s2 := strings.Repeat("2", 1<<10) + s3 := strings.Repeat("3", 1<<10) + s := s0 + s1 + s2 + s3 + panic(s) +} diff --git a/src/runtime/testdata/testprog/syscall_windows.go b/src/runtime/testdata/testprog/syscall_windows.go new file mode 100644 index 0000000..b4b6644 --- /dev/null +++ b/src/runtime/testdata/testprog/syscall_windows.go @@ -0,0 +1,70 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "internal/syscall/windows" + "runtime" + "sync" + "syscall" + "unsafe" +) + +func init() { + register("RaiseException", RaiseException) + register("ZeroDivisionException", ZeroDivisionException) + register("StackMemory", StackMemory) +} + +func RaiseException() { + const EXCEPTION_NONCONTINUABLE = 1 + mod := syscall.MustLoadDLL("kernel32.dll") + proc := mod.MustFindProc("RaiseException") + proc.Call(0xbad, EXCEPTION_NONCONTINUABLE, 0, 0) + println("RaiseException should not return") +} + +func ZeroDivisionException() { + x := 1 + y := 0 + z := x / y + println(z) +} + +func getPagefileUsage() (uintptr, error) { + p, err := syscall.GetCurrentProcess() + if err != nil { + return 0, err + } + var m windows.PROCESS_MEMORY_COUNTERS + err = windows.GetProcessMemoryInfo(p, &m, uint32(unsafe.Sizeof(m))) + if err != nil { + return 0, err + } + return m.PagefileUsage, nil +} + +func StackMemory() { + mem1, err := getPagefileUsage() + if err != nil { + panic(err) + } + const threadCount = 100 + var wg sync.WaitGroup + for i := 0; i < threadCount; i++ { + wg.Add(1) + go func() { + runtime.LockOSThread() + wg.Done() + select {} + }() + } + wg.Wait() + mem2, err := getPagefileUsage() + if err != nil { + panic(err) + } + print((mem2 - mem1) / threadCount) +} diff --git a/src/runtime/testdata/testprog/syscalls.go b/src/runtime/testdata/testprog/syscalls.go new file mode 100644 index 0000000..098d5ca --- /dev/null +++ b/src/runtime/testdata/testprog/syscalls.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "errors" +) + +var errNotPermitted = errors.New("operation not permitted") diff --git a/src/runtime/testdata/testprog/syscalls_linux.go b/src/runtime/testdata/testprog/syscalls_linux.go new file mode 100644 index 0000000..48f8014 --- /dev/null +++ b/src/runtime/testdata/testprog/syscalls_linux.go @@ -0,0 +1,58 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "fmt" + "os" + "syscall" +) + +func gettid() int { + return syscall.Gettid() +} + +func tidExists(tid int) (exists, supported bool) { + stat, err := os.ReadFile(fmt.Sprintf("/proc/self/task/%d/stat", tid)) + if os.IsNotExist(err) { + return false, true + } + // Check if it's a zombie thread. + state := bytes.Fields(stat)[2] + return !(len(state) == 1 && state[0] == 'Z'), true +} + +func getcwd() (string, error) { + if !syscall.ImplementsGetwd { + return "", nil + } + // Use the syscall to get the current working directory. + // This is imperative for checking for OS thread state + // after an unshare since os.Getwd might just check the + // environment, or use some other mechanism. + var buf [4096]byte + n, err := syscall.Getcwd(buf[:]) + if err != nil { + return "", err + } + // Subtract one for null terminator. + return string(buf[:n-1]), nil +} + +func unshareFs() error { + err := syscall.Unshare(syscall.CLONE_FS) + if err != nil { + errno, ok := err.(syscall.Errno) + if ok && errno == syscall.EPERM { + return errNotPermitted + } + } + return err +} + +func chdir(path string) error { + return syscall.Chdir(path) +} diff --git a/src/runtime/testdata/testprog/syscalls_none.go b/src/runtime/testdata/testprog/syscalls_none.go new file mode 100644 index 0000000..7f8ded3 --- /dev/null +++ b/src/runtime/testdata/testprog/syscalls_none.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package main + +func gettid() int { + return 0 +} + +func tidExists(tid int) (exists, supported bool) { + return false, false +} + +func getcwd() (string, error) { + return "", nil +} + +func unshareFs() error { + return nil +} + +func chdir(path string) error { + return nil +} diff --git a/src/runtime/testdata/testprog/timeprof.go b/src/runtime/testdata/testprog/timeprof.go new file mode 100644 index 0000000..1e90af4 --- /dev/null +++ b/src/runtime/testdata/testprog/timeprof.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("TimeProf", TimeProf) +} + +func TimeProf() { + f, err := os.CreateTemp("", "timeprof") + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + if err := pprof.StartCPUProfile(f); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + t0 := time.Now() + // We should get a profiling signal 100 times a second, + // so running for 1/10 second should be sufficient. + for time.Since(t0) < time.Second/10 { + } + + pprof.StopCPUProfile() + + name := f.Name() + if err := f.Close(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + fmt.Println(name) +} diff --git a/src/runtime/testdata/testprog/traceback_ancestors.go b/src/runtime/testdata/testprog/traceback_ancestors.go new file mode 100644 index 0000000..0ee402c --- /dev/null +++ b/src/runtime/testdata/testprog/traceback_ancestors.go @@ -0,0 +1,99 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "fmt" + "runtime" + "strings" +) + +func init() { + register("TracebackAncestors", TracebackAncestors) +} + +const numGoroutines = 3 +const numFrames = 2 + +func TracebackAncestors() { + w := make(chan struct{}) + recurseThenCallGo(w, numGoroutines, numFrames, true) + <-w + printStack() + close(w) +} + +var ignoreGoroutines = make(map[string]bool) + +func printStack() { + buf := make([]byte, 1024) + for { + n := runtime.Stack(buf, true) + if n < len(buf) { + tb := string(buf[:n]) + + // Delete any ignored goroutines, if present. + pos := 0 + for pos < len(tb) { + next := pos + strings.Index(tb[pos:], "\n\n") + if next < pos { + next = len(tb) + } else { + next += len("\n\n") + } + + if strings.HasPrefix(tb[pos:], "goroutine ") { + id := tb[pos+len("goroutine "):] + id = id[:strings.IndexByte(id, ' ')] + if ignoreGoroutines[id] { + tb = tb[:pos] + tb[next:] + next = pos + } + } + pos = next + } + + fmt.Print(tb) + return + } + buf = make([]byte, 2*len(buf)) + } +} + +func recurseThenCallGo(w chan struct{}, frames int, goroutines int, main bool) { + if frames == 0 { + // Signal to TracebackAncestors that we are done recursing and starting goroutines. + w <- struct{}{} + <-w + return + } + if goroutines == 0 { + // Record which goroutine this is so we can ignore it + // in the traceback if it hasn't finished exiting by + // the time we printStack. + if !main { + ignoreGoroutines[goroutineID()] = true + } + + // Start the next goroutine now that there are no more recursions left + // for this current goroutine. + go recurseThenCallGo(w, frames-1, numFrames, false) + return + } + recurseThenCallGo(w, frames, goroutines-1, main) +} + +func goroutineID() string { + buf := make([]byte, 128) + runtime.Stack(buf, false) + const prefix = "goroutine " + if !bytes.HasPrefix(buf, []byte(prefix)) { + panic(fmt.Sprintf("expected %q at beginning of traceback:\n%s", prefix, buf)) + } + buf = buf[len(prefix):] + n := bytes.IndexByte(buf, ' ') + return string(buf[:n]) +} diff --git a/src/runtime/testdata/testprog/vdso.go b/src/runtime/testdata/testprog/vdso.go new file mode 100644 index 0000000..d2a300d --- /dev/null +++ b/src/runtime/testdata/testprog/vdso.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Invoke signal hander in the VDSO context (see issue 32912). + +package main + +import ( + "fmt" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("SignalInVDSO", signalInVDSO) +} + +func signalInVDSO() { + f, err := os.CreateTemp("", "timeprofnow") + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + if err := pprof.StartCPUProfile(f); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + t0 := time.Now() + t1 := t0 + // We should get a profiling signal 100 times a second, + // so running for 1 second should be sufficient. + for t1.Sub(t0) < time.Second { + t1 = time.Now() + } + + pprof.StopCPUProfile() + + name := f.Name() + if err := f.Close(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + if err := os.Remove(name); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + fmt.Println("success") +} -- cgit v1.2.3