author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:23:18 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:23:18 +0000
commit    43a123c1ae6613b3efeed291fa552ecd909d3acf (patch)
tree      fd92518b7024bc74031f78a1cf9e454b65e73665 /src/runtime/race
parent    Initial commit. (diff)
Adding upstream version 1.20.14. (upstream/1.20.14, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--  src/runtime/race.go                                   649
-rw-r--r--  src/runtime/race/README                               17
-rw-r--r--  src/runtime/race/doc.go                               11
-rw-r--r--  src/runtime/race/internal/amd64v1/doc.go              10
-rw-r--r--  src/runtime/race/internal/amd64v1/race_darwin.syso    bin 0 -> 541464 bytes
-rw-r--r--  src/runtime/race/internal/amd64v1/race_freebsd.syso   bin 0 -> 712464 bytes
-rw-r--r--  src/runtime/race/internal/amd64v1/race_linux.syso     bin 0 -> 563848 bytes
-rw-r--r--  src/runtime/race/internal/amd64v1/race_netbsd.syso    bin 0 -> 714520 bytes
-rw-r--r--  src/runtime/race/internal/amd64v1/race_openbsd.syso   bin 0 -> 688784 bytes
-rw-r--r--  src/runtime/race/internal/amd64v1/race_windows.syso   bin 0 -> 461185 bytes
-rw-r--r--  src/runtime/race/internal/amd64v3/doc.go              10
-rw-r--r--  src/runtime/race/internal/amd64v3/race_linux.syso     bin 0 -> 563664 bytes
-rwxr-xr-x  src/runtime/race/mkcgo.sh                             20
-rw-r--r--  src/runtime/race/output_test.go                       442
-rw-r--r--  src/runtime/race/race.go                              20
-rw-r--r--  src/runtime/race/race_darwin_amd64.go                 101
-rw-r--r--  src/runtime/race/race_darwin_arm64.go                 95
-rw-r--r--  src/runtime/race/race_darwin_arm64.syso               bin 0 -> 484988 bytes
-rw-r--r--  src/runtime/race/race_linux_arm64.syso                bin 0 -> 530736 bytes
-rw-r--r--  src/runtime/race/race_linux_ppc64le.syso              bin 0 -> 669736 bytes
-rw-r--r--  src/runtime/race/race_linux_s390x.syso                bin 0 -> 565472 bytes
-rw-r--r--  src/runtime/race/race_linux_test.go                   65
-rw-r--r--  src/runtime/race/race_test.go                         250
-rw-r--r--  src/runtime/race/race_unix_test.go                    29
-rw-r--r--  src/runtime/race/race_v1_amd64.go                     9
-rw-r--r--  src/runtime/race/race_v3_amd64.go                     9
-rw-r--r--  src/runtime/race/race_windows_test.go                 46
-rw-r--r--  src/runtime/race/sched_test.go                        48
-rw-r--r--  src/runtime/race/syso_test.go                         33
-rw-r--r--  src/runtime/race/testdata/atomic_test.go              325
-rw-r--r--  src/runtime/race/testdata/cgo_test.go                 21
-rw-r--r--  src/runtime/race/testdata/cgo_test_main.go            30
-rw-r--r--  src/runtime/race/testdata/chan_test.go                787
-rw-r--r--  src/runtime/race/testdata/comp_test.go                186
-rw-r--r--  src/runtime/race/testdata/finalizer_test.go           68
-rw-r--r--  src/runtime/race/testdata/io_test.go                  75
-rw-r--r--  src/runtime/race/testdata/issue12225_test.go          20
-rw-r--r--  src/runtime/race/testdata/issue12664_test.go          76
-rw-r--r--  src/runtime/race/testdata/issue13264_test.go          13
-rw-r--r--  src/runtime/race/testdata/map_test.go                 335
-rw-r--r--  src/runtime/race/testdata/mop_test.go                 2131
-rw-r--r--  src/runtime/race/testdata/mutex_test.go               150
-rw-r--r--  src/runtime/race/testdata/pool_test.go                47
-rw-r--r--  src/runtime/race/testdata/reflect_test.go             46
-rw-r--r--  src/runtime/race/testdata/regression_test.go          189
-rw-r--r--  src/runtime/race/testdata/rwmutex_test.go             154
-rw-r--r--  src/runtime/race/testdata/select_test.go              293
-rw-r--r--  src/runtime/race/testdata/slice_test.go               608
-rw-r--r--  src/runtime/race/testdata/sync_test.go                202
-rw-r--r--  src/runtime/race/testdata/waitgroup_test.go           360
-rw-r--r--  src/runtime/race/timer_test.go                        33
-rw-r--r--  src/runtime/race0.go                                  44
-rw-r--r--  src/runtime/race_amd64.s                              457
-rw-r--r--  src/runtime/race_arm64.s                              498
-rw-r--r--  src/runtime/race_ppc64le.s                            601
-rw-r--r--  src/runtime/race_s390x.s                              391
56 files changed, 10004 insertions, 0 deletions
diff --git a/src/runtime/race.go b/src/runtime/race.go
new file mode 100644
index 0000000..f83a04d
--- /dev/null
+++ b/src/runtime/race.go
@@ -0,0 +1,649 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package runtime
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+// Public race detection API, present iff built with -race.
+
+func RaceRead(addr unsafe.Pointer)
+func RaceWrite(addr unsafe.Pointer)
+func RaceReadRange(addr unsafe.Pointer, len int)
+func RaceWriteRange(addr unsafe.Pointer, len int)
+
+func RaceErrors() int {
+ var n uint64
+ racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
+ return int(n)
+}
+
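
Usage sketch (not part of this diff; the `example_test` package and TestMain wiring are hypothetical): RaceErrors is exported from package runtime only in -race builds, so a test harness could fail the binary whenever the detector reported anything.

```go
//go:build race

package example_test

import (
	"os"
	"runtime"
	"testing"
)

// TestMain exits non-zero if any data race was reported during the run,
// even if every individual test otherwise passed.
func TestMain(m *testing.M) {
	code := m.Run()
	if code == 0 && runtime.RaceErrors() > 0 {
		code = 66 // mirror the race detector's default exit code
	}
	os.Exit(code)
}
```
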
+//go:nosplit
+
+// RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
+// between goroutines. These inform the race detector about actual synchronization
+// that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
+// sections of code).
+// RaceAcquire establishes a happens-before relation with the preceding
+// RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
+// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
+// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
+func RaceAcquire(addr unsafe.Pointer) {
+ raceacquire(addr)
+}
+
+//go:nosplit
+
+// RaceRelease performs a release operation on addr that
+// can synchronize with a later RaceAcquire on addr.
+//
+// In terms of the C memory model, RaceRelease is equivalent to
+// atomic_store(memory_order_release).
+func RaceRelease(addr unsafe.Pointer) {
+ racerelease(addr)
+}
+
+//go:nosplit
+
+// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
+// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
+//
+// In terms of the C memory model, RaceReleaseMerge is equivalent to
+// atomic_exchange(memory_order_release).
+func RaceReleaseMerge(addr unsafe.Pointer) {
+ racereleasemerge(addr)
+}
+
+//go:nosplit
+
+// RaceDisable disables handling of race synchronization events in the current goroutine.
+// Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
+// Non-synchronization events (memory accesses, function entry/exit) still affect
+// the race detector.
+func RaceDisable() {
+ gp := getg()
+ if gp.raceignore == 0 {
+ racecall(&__tsan_go_ignore_sync_begin, gp.racectx, 0, 0, 0)
+ }
+ gp.raceignore++
+}
+
+//go:nosplit
+
+// RaceEnable re-enables handling of race events in the current goroutine.
+func RaceEnable() {
+ gp := getg()
+ gp.raceignore--
+ if gp.raceignore == 0 {
+ racecall(&__tsan_go_ignore_sync_end, gp.racectx, 0, 0, 0)
+ }
+}
+
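
Usage sketch (not part of this diff; everything except the runtime.Race* calls is hypothetical): the typical pairing is RaceRelease where data is published and RaceAcquire where it is consumed, with the real synchronization (invisible to the detector) happening in between.

```go
//go:build race

package example

import (
	"runtime"
	"unsafe"
)

// syncAddr stands in for a synchronization object the detector cannot see
// (e.g. one implemented in foreign code or inside a RaceDisable section).
var syncAddr = unsafe.Pointer(new(int))

var shared int

// publish writes the data, then tells the detector a release happened on
// syncAddr; the real (invisible) synchronization must still occur elsewhere.
func publish() {
	shared = 42
	runtime.RaceRelease(syncAddr)
}

// consume pairs with the preceding RaceRelease, so the plain read of
// shared is not reported as a race.
func consume() int {
	runtime.RaceAcquire(syncAddr)
	return shared
}
```
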
+// Private interface for the runtime.
+
+const raceenabled = true
+
+// For all functions accepting callerpc and pc,
+// callerpc is the return PC of the function that calls this function,
+// and pc is the start PC of the function that calls this function.
+func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
+ kind := t.kind & kindMask
+ if kind == kindArray || kind == kindStruct {
+ // for composite objects we have to read every address
+ // because a write might happen to any subobject.
+ racereadrangepc(addr, t.size, callerpc, pc)
+ } else {
+ // for non-composite objects we can read just the start
+ // address, as any write must write the first byte.
+ racereadpc(addr, callerpc, pc)
+ }
+}
+
+func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
+ kind := t.kind & kindMask
+ if kind == kindArray || kind == kindStruct {
+ // for composite objects we have to write every address
+ // because a write might happen to any subobject.
+ racewriterangepc(addr, t.size, callerpc, pc)
+ } else {
+ // for non-composite objects we can write just the start
+ // address, as any write must write the first byte.
+ racewritepc(addr, callerpc, pc)
+ }
+}
+
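
A hedged illustration (hypothetical types, not part of this diff) of why composites need the range form: a whole-value read overlaps every field, so instrumenting only its first byte would miss a conflicting write to a later field.

```go
package example

type pair struct{ a, b int }

var p pair

// snapshot reads all 16 bytes of p, so it is instrumented as a range read;
// it therefore conflicts with any concurrent write to p.a or p.b.
func snapshot() pair { return p }

// bump writes only bytes 8..15 (the b field); a first-byte-only read
// instrumentation of snapshot would never overlap this access.
func bump() { p.b++ }
```
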
+//go:noescape
+func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)
+
+//go:noescape
+func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
+
+type symbolizeCodeContext struct {
+ pc uintptr
+ fn *byte
+ file *byte
+ line uintptr
+ off uintptr
+ res uintptr
+}
+
+var qq = [...]byte{'?', '?', 0}
+var dash = [...]byte{'-', 0}
+
+const (
+ raceGetProcCmd = iota
+ raceSymbolizeCodeCmd
+ raceSymbolizeDataCmd
+)
+
+// Callback from C into Go, runs on g0.
+func racecallback(cmd uintptr, ctx unsafe.Pointer) {
+ switch cmd {
+ case raceGetProcCmd:
+ throw("should have been handled by racecallbackthunk")
+ case raceSymbolizeCodeCmd:
+ raceSymbolizeCode((*symbolizeCodeContext)(ctx))
+ case raceSymbolizeDataCmd:
+ raceSymbolizeData((*symbolizeDataContext)(ctx))
+ default:
+ throw("unknown command")
+ }
+}
+
+// raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
+// information about the code at that pc.
+//
+// The race detector has already subtracted 1 from pcs, so they point to the last
+// byte of call instructions (including calls to runtime.racewrite and friends).
+//
+// If the incoming pc is part of an inlined function, *ctx is populated
+// with information about the inlined function, and on return ctx.pc is set
+// to a pc in the logically containing function. (The race detector should call this
+// function again with that pc.)
+//
+// If the incoming pc is not part of an inlined function, the return pc is unchanged.
+func raceSymbolizeCode(ctx *symbolizeCodeContext) {
+ pc := ctx.pc
+ fi := findfunc(pc)
+ f := fi._Func()
+ if f != nil {
+ file, line := f.FileLine(pc)
+ if line != 0 {
+ if inldata := funcdata(fi, _FUNCDATA_InlTree); inldata != nil {
+ inltree := (*[1 << 20]inlinedCall)(inldata)
+ for {
+ ix := pcdatavalue(fi, _PCDATA_InlTreeIndex, pc, nil)
+ if ix >= 0 {
+ if inltree[ix].funcID == funcID_wrapper {
+ // ignore wrappers
+ // Back up to an instruction in the "caller".
+ pc = f.Entry() + uintptr(inltree[ix].parentPc)
+ continue
+ }
+ ctx.pc = f.Entry() + uintptr(inltree[ix].parentPc) // "caller" pc
+ ctx.fn = cfuncnameFromNameOff(fi, inltree[ix].nameOff)
+ ctx.line = uintptr(line)
+ ctx.file = &bytes(file)[0] // assume NUL-terminated
+ ctx.off = pc - f.Entry()
+ ctx.res = 1
+ return
+ }
+ break
+ }
+ }
+ ctx.fn = cfuncname(fi)
+ ctx.line = uintptr(line)
+ ctx.file = &bytes(file)[0] // assume NUL-terminated
+ ctx.off = pc - f.Entry()
+ ctx.res = 1
+ return
+ }
+ }
+ ctx.fn = &qq[0]
+ ctx.file = &dash[0]
+ ctx.line = 0
+ ctx.off = ctx.pc
+ ctx.res = 1
+}
+
+type symbolizeDataContext struct {
+ addr uintptr
+ heap uintptr
+ start uintptr
+ size uintptr
+ name *byte
+ file *byte
+ line uintptr
+ res uintptr
+}
+
+func raceSymbolizeData(ctx *symbolizeDataContext) {
+ if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
+ ctx.heap = 1
+ ctx.start = base
+ ctx.size = span.elemsize
+ ctx.res = 1
+ }
+}
+
+// Race runtime functions called via runtime·racecall.
+//
+//go:linkname __tsan_init __tsan_init
+var __tsan_init byte
+
+//go:linkname __tsan_fini __tsan_fini
+var __tsan_fini byte
+
+//go:linkname __tsan_proc_create __tsan_proc_create
+var __tsan_proc_create byte
+
+//go:linkname __tsan_proc_destroy __tsan_proc_destroy
+var __tsan_proc_destroy byte
+
+//go:linkname __tsan_map_shadow __tsan_map_shadow
+var __tsan_map_shadow byte
+
+//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
+var __tsan_finalizer_goroutine byte
+
+//go:linkname __tsan_go_start __tsan_go_start
+var __tsan_go_start byte
+
+//go:linkname __tsan_go_end __tsan_go_end
+var __tsan_go_end byte
+
+//go:linkname __tsan_malloc __tsan_malloc
+var __tsan_malloc byte
+
+//go:linkname __tsan_free __tsan_free
+var __tsan_free byte
+
+//go:linkname __tsan_acquire __tsan_acquire
+var __tsan_acquire byte
+
+//go:linkname __tsan_release __tsan_release
+var __tsan_release byte
+
+//go:linkname __tsan_release_acquire __tsan_release_acquire
+var __tsan_release_acquire byte
+
+//go:linkname __tsan_release_merge __tsan_release_merge
+var __tsan_release_merge byte
+
+//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
+var __tsan_go_ignore_sync_begin byte
+
+//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
+var __tsan_go_ignore_sync_end byte
+
+//go:linkname __tsan_report_count __tsan_report_count
+var __tsan_report_count byte
+
+// Mimic what cmd/cgo would do.
+//
+//go:cgo_import_static __tsan_init
+//go:cgo_import_static __tsan_fini
+//go:cgo_import_static __tsan_proc_create
+//go:cgo_import_static __tsan_proc_destroy
+//go:cgo_import_static __tsan_map_shadow
+//go:cgo_import_static __tsan_finalizer_goroutine
+//go:cgo_import_static __tsan_go_start
+//go:cgo_import_static __tsan_go_end
+//go:cgo_import_static __tsan_malloc
+//go:cgo_import_static __tsan_free
+//go:cgo_import_static __tsan_acquire
+//go:cgo_import_static __tsan_release
+//go:cgo_import_static __tsan_release_acquire
+//go:cgo_import_static __tsan_release_merge
+//go:cgo_import_static __tsan_go_ignore_sync_begin
+//go:cgo_import_static __tsan_go_ignore_sync_end
+//go:cgo_import_static __tsan_report_count
+
+// These are called from race_amd64.s.
+//
+//go:cgo_import_static __tsan_read
+//go:cgo_import_static __tsan_read_pc
+//go:cgo_import_static __tsan_read_range
+//go:cgo_import_static __tsan_write
+//go:cgo_import_static __tsan_write_pc
+//go:cgo_import_static __tsan_write_range
+//go:cgo_import_static __tsan_func_enter
+//go:cgo_import_static __tsan_func_exit
+
+//go:cgo_import_static __tsan_go_atomic32_load
+//go:cgo_import_static __tsan_go_atomic64_load
+//go:cgo_import_static __tsan_go_atomic32_store
+//go:cgo_import_static __tsan_go_atomic64_store
+//go:cgo_import_static __tsan_go_atomic32_exchange
+//go:cgo_import_static __tsan_go_atomic64_exchange
+//go:cgo_import_static __tsan_go_atomic32_fetch_add
+//go:cgo_import_static __tsan_go_atomic64_fetch_add
+//go:cgo_import_static __tsan_go_atomic32_compare_exchange
+//go:cgo_import_static __tsan_go_atomic64_compare_exchange
+
+// start/end of global data (data+bss).
+var racedatastart uintptr
+var racedataend uintptr
+
+// start/end of heap for race_amd64.s
+var racearenastart uintptr
+var racearenaend uintptr
+
+func racefuncenter(callpc uintptr)
+func racefuncenterfp(fp uintptr)
+func racefuncexit()
+func raceread(addr uintptr)
+func racewrite(addr uintptr)
+func racereadrange(addr, size uintptr)
+func racewriterange(addr, size uintptr)
+func racereadrangepc1(addr, size, pc uintptr)
+func racewriterangepc1(addr, size, pc uintptr)
+func racecallbackthunk(uintptr)
+
+// racecall allows calling an arbitrary function fn from C race runtime
+// with up to 4 uintptr arguments.
+func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)
+
+// checks if the address has shadow (i.e. heap or data/bss).
+//
+//go:nosplit
+func isvalidaddr(addr unsafe.Pointer) bool {
+ return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
+ racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
+}
+
+//go:nosplit
+func raceinit() (gctx, pctx uintptr) {
+ // On most machines, cgo is required to initialize libc, which is used by race runtime.
+ if !iscgo && GOOS != "darwin" {
+ throw("raceinit: race build must use cgo")
+ }
+
+ racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)
+
+ // Round data segment to page boundaries, because it's used in mmap().
+ start := ^uintptr(0)
+ end := uintptr(0)
+ if start > firstmoduledata.noptrdata {
+ start = firstmoduledata.noptrdata
+ }
+ if start > firstmoduledata.data {
+ start = firstmoduledata.data
+ }
+ if start > firstmoduledata.noptrbss {
+ start = firstmoduledata.noptrbss
+ }
+ if start > firstmoduledata.bss {
+ start = firstmoduledata.bss
+ }
+ if end < firstmoduledata.enoptrdata {
+ end = firstmoduledata.enoptrdata
+ }
+ if end < firstmoduledata.edata {
+ end = firstmoduledata.edata
+ }
+ if end < firstmoduledata.enoptrbss {
+ end = firstmoduledata.enoptrbss
+ }
+ if end < firstmoduledata.ebss {
+ end = firstmoduledata.ebss
+ }
+ size := alignUp(end-start, _PageSize)
+ racecall(&__tsan_map_shadow, start, size, 0, 0)
+ racedatastart = start
+ racedataend = start + size
+
+ return
+}
+
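
For reference, alignUp above is the runtime's usual power-of-two round-up; a standalone sketch of its semantics (assuming, as in the caller, that the alignment is a power of two):

```go
package example

// alignUp rounds n up to the next multiple of a, which must be a power of two.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// With 4 KiB pages: alignUp(5000, 4096) == 8192 and alignUp(4096, 4096) == 4096.
```
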
+var raceFiniLock mutex
+
+//go:nosplit
+func racefini() {
+ // racefini() can only be called once to avoid races.
+ // This eventually (via __tsan_fini) calls C.exit which has
+ // undefined behavior if called more than once. If the lock is
+ // already held it's assumed that the first caller exits the program
+ // so other calls can hang forever without an issue.
+ lock(&raceFiniLock)
+ // We're entering external code that may call ExitProcess on
+ // Windows.
+ osPreemptExtEnter(getg().m)
+ racecall(&__tsan_fini, 0, 0, 0, 0)
+}
+
+//go:nosplit
+func raceproccreate() uintptr {
+ var ctx uintptr
+ racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
+ return ctx
+}
+
+//go:nosplit
+func raceprocdestroy(ctx uintptr) {
+ racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
+}
+
+//go:nosplit
+func racemapshadow(addr unsafe.Pointer, size uintptr) {
+ if racearenastart == 0 {
+ racearenastart = uintptr(addr)
+ }
+ if racearenaend < uintptr(addr)+size {
+ racearenaend = uintptr(addr) + size
+ }
+ racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
+}
+
+//go:nosplit
+func racemalloc(p unsafe.Pointer, sz uintptr) {
+ racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
+}
+
+//go:nosplit
+func racefree(p unsafe.Pointer, sz uintptr) {
+ racecall(&__tsan_free, uintptr(p), sz, 0, 0)
+}
+
+//go:nosplit
+func racegostart(pc uintptr) uintptr {
+ gp := getg()
+ var spawng *g
+ if gp.m.curg != nil {
+ spawng = gp.m.curg
+ } else {
+ spawng = gp
+ }
+
+ var racectx uintptr
+ racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
+ return racectx
+}
+
+//go:nosplit
+func racegoend() {
+ racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
+}
+
+//go:nosplit
+func racectxend(racectx uintptr) {
+ racecall(&__tsan_go_end, racectx, 0, 0, 0)
+}
+
+//go:nosplit
+func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
+ gp := getg()
+ if gp != gp.m.curg {
+ // The call is coming from manual instrumentation of Go code running on g0/gsignal.
+ // Not interesting.
+ return
+ }
+ if callpc != 0 {
+ racefuncenter(callpc)
+ }
+ racewriterangepc1(uintptr(addr), sz, pc)
+ if callpc != 0 {
+ racefuncexit()
+ }
+}
+
+//go:nosplit
+func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
+ gp := getg()
+ if gp != gp.m.curg {
+ // The call is coming from manual instrumentation of Go code running on g0/gsignal.
+ // Not interesting.
+ return
+ }
+ if callpc != 0 {
+ racefuncenter(callpc)
+ }
+ racereadrangepc1(uintptr(addr), sz, pc)
+ if callpc != 0 {
+ racefuncexit()
+ }
+}
+
+//go:nosplit
+func raceacquire(addr unsafe.Pointer) {
+ raceacquireg(getg(), addr)
+}
+
+//go:nosplit
+func raceacquireg(gp *g, addr unsafe.Pointer) {
+ if getg().raceignore != 0 || !isvalidaddr(addr) {
+ return
+ }
+ racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
+}
+
+//go:nosplit
+func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
+ if !isvalidaddr(addr) {
+ return
+ }
+ racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)
+}
+
+//go:nosplit
+func racerelease(addr unsafe.Pointer) {
+ racereleaseg(getg(), addr)
+}
+
+//go:nosplit
+func racereleaseg(gp *g, addr unsafe.Pointer) {
+ if getg().raceignore != 0 || !isvalidaddr(addr) {
+ return
+ }
+ racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
+}
+
+//go:nosplit
+func racereleaseacquire(addr unsafe.Pointer) {
+ racereleaseacquireg(getg(), addr)
+}
+
+//go:nosplit
+func racereleaseacquireg(gp *g, addr unsafe.Pointer) {
+ if getg().raceignore != 0 || !isvalidaddr(addr) {
+ return
+ }
+ racecall(&__tsan_release_acquire, gp.racectx, uintptr(addr), 0, 0)
+}
+
+//go:nosplit
+func racereleasemerge(addr unsafe.Pointer) {
+ racereleasemergeg(getg(), addr)
+}
+
+//go:nosplit
+func racereleasemergeg(gp *g, addr unsafe.Pointer) {
+ if getg().raceignore != 0 || !isvalidaddr(addr) {
+ return
+ }
+ racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
+}
+
+//go:nosplit
+func racefingo() {
+ racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
+}
+
+// The declarations below generate ABI wrappers for functions
+// implemented in assembly in this package but declared in another
+// package.
+
+//go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
+func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)
+
+//go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
+func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)
+
+//go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
+func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)
+
+//go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
+func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)
+
+//go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
+func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)
+
+//go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
+func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+
+//go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
+func abigen_sync_atomic_StoreInt32(addr *int32, val int32)
+
+//go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
+func abigen_sync_atomic_StoreInt64(addr *int64, val int64)
+
+//go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
+func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)
+
+//go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
+func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)
+
+//go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
+func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)
+
+//go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
+func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)
+
+//go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
+func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)
+
+//go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
+func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)
+
+//go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
+func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)
+
+//go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
+func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)
+
+//go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
+func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)
+
+//go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
+func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)
+
+//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
+func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
+
+//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
+func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
+
+//go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
+func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)
+
+//go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
+func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
+
+//go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
+func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
diff --git a/src/runtime/race/README b/src/runtime/race/README
new file mode 100644
index 0000000..596700a
--- /dev/null
+++ b/src/runtime/race/README
@@ -0,0 +1,17 @@
+The runtime/race package contains the data race detector runtime library.
+It is based on the ThreadSanitizer race detector, which is currently part of
+the LLVM project (https://github.com/llvm/llvm-project/tree/main/compiler-rt).
+
+To update the .syso files use golang.org/x/build/cmd/racebuild.
+
+race_darwin_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6.
+race_freebsd_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6.
+race_linux_ppc64le.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
+race_netbsd_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
+race_windows_amd64.syso built with LLVM 89f7ccea6f6488c443655880229c54db1f180153 and Go f62d3202bf9dbb3a00ad2a2c63ff4fa4188c5d3b.
+race_linux_arm64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
+race_darwin_arm64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
+race_openbsd_amd64.syso built with LLVM fcf6ae2f070eba73074b6ec8d8281e54d29dbeeb and Go 8f2db14cd35bbd674cb2988a508306de6655e425.
+race_linux_s390x.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
+internal/amd64v3/race_linux.syso built with LLVM 74c2d4f6024c8f160871a2baa928d0b42415f183 and Go c0f27eb3d580c8b9efd73802678eba4c6c9461be.
+internal/amd64v1/race_linux.syso built with LLVM 74c2d4f6024c8f160871a2baa928d0b42415f183 and Go c0f27eb3d580c8b9efd73802678eba4c6c9461be.
diff --git a/src/runtime/race/doc.go b/src/runtime/race/doc.go
new file mode 100644
index 0000000..60a20df
--- /dev/null
+++ b/src/runtime/race/doc.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package race implements data race detection logic.
+// No public interface is provided.
+// For details about the race detector see
+// https://golang.org/doc/articles/race_detector.html
+package race
+
+//go:generate ./mkcgo.sh
diff --git a/src/runtime/race/internal/amd64v1/doc.go b/src/runtime/race/internal/amd64v1/doc.go
new file mode 100644
index 0000000..ccb088c
--- /dev/null
+++ b/src/runtime/race/internal/amd64v1/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This package holds the race detector .syso for
+// amd64 architectures with GOAMD64<v3.
+
+//go:build amd64 && ((linux && !amd64.v3) || darwin || freebsd || netbsd || openbsd || windows)
+
+package amd64v1
diff --git a/src/runtime/race/internal/amd64v1/race_darwin.syso b/src/runtime/race/internal/amd64v1/race_darwin.syso
new file mode 100644
index 0000000..e5d848c
--- /dev/null
+++ b/src/runtime/race/internal/amd64v1/race_darwin.syso
Binary files differ
diff --git a/src/runtime/race/internal/amd64v1/race_freebsd.syso b/src/runtime/race/internal/amd64v1/race_freebsd.syso
new file mode 100644
index 0000000..b3a4383
--- /dev/null
+++ b/src/runtime/race/internal/amd64v1/race_freebsd.syso
Binary files differ
diff --git a/src/runtime/race/internal/amd64v1/race_linux.syso b/src/runtime/race/internal/amd64v1/race_linux.syso
new file mode 100644
index 0000000..68f1508
--- /dev/null
+++ b/src/runtime/race/internal/amd64v1/race_linux.syso
Binary files differ
diff --git a/src/runtime/race/internal/amd64v1/race_netbsd.syso b/src/runtime/race/internal/amd64v1/race_netbsd.syso
new file mode 100644
index 0000000..e6cc4bf
--- /dev/null
+++ b/src/runtime/race/internal/amd64v1/race_netbsd.syso
Binary files differ
diff --git a/src/runtime/race/internal/amd64v1/race_openbsd.syso b/src/runtime/race/internal/amd64v1/race_openbsd.syso
new file mode 100644
index 0000000..9fefd87
--- /dev/null
+++ b/src/runtime/race/internal/amd64v1/race_openbsd.syso
Binary files differ
diff --git a/src/runtime/race/internal/amd64v1/race_windows.syso b/src/runtime/race/internal/amd64v1/race_windows.syso
new file mode 100644
index 0000000..9fbf9b4
--- /dev/null
+++ b/src/runtime/race/internal/amd64v1/race_windows.syso
Binary files differ
diff --git a/src/runtime/race/internal/amd64v3/doc.go b/src/runtime/race/internal/amd64v3/doc.go
new file mode 100644
index 0000000..215998a
--- /dev/null
+++ b/src/runtime/race/internal/amd64v3/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This package holds the race detector .syso for
+// amd64 architectures with GOAMD64>=v3.
+
+//go:build amd64 && linux && amd64.v3
+
+package amd64v3
diff --git a/src/runtime/race/internal/amd64v3/race_linux.syso b/src/runtime/race/internal/amd64v3/race_linux.syso
new file mode 100644
index 0000000..33c3e76
--- /dev/null
+++ b/src/runtime/race/internal/amd64v3/race_linux.syso
Binary files differ
diff --git a/src/runtime/race/mkcgo.sh b/src/runtime/race/mkcgo.sh
new file mode 100755
index 0000000..6ebe5a4
--- /dev/null
+++ b/src/runtime/race/mkcgo.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+hdr='
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by mkcgo.sh. DO NOT EDIT.
+
+//go:build race
+
+'
+
+convert() {
+ (echo "$hdr"; go tool cgo -dynpackage race -dynimport $1) | gofmt
+}
+
+convert race_darwin_arm64.syso >race_darwin_arm64.go
+convert internal/amd64v1/race_darwin.syso >race_darwin_amd64.go
+
diff --git a/src/runtime/race/output_test.go b/src/runtime/race/output_test.go
new file mode 100644
index 0000000..0dcdabe
--- /dev/null
+++ b/src/runtime/race/output_test.go
@@ -0,0 +1,442 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package race_test
+
+import (
+ "fmt"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestOutput(t *testing.T) {
+ pkgdir := t.TempDir()
+ out, err := exec.Command(testenv.GoToolPath(t), "install", "-race", "-pkgdir="+pkgdir, "testing").CombinedOutput()
+ if err != nil {
+ t.Fatalf("go install -race: %v\n%s", err, out)
+ }
+
+ for _, test := range tests {
+ if test.goos != "" && test.goos != runtime.GOOS {
+			t.Logf("test %v runs only on %v, skipping", test.name, test.goos)
+ continue
+ }
+ dir := t.TempDir()
+ source := "main.go"
+ if test.run == "test" {
+ source = "main_test.go"
+ }
+ src := filepath.Join(dir, source)
+ f, err := os.Create(src)
+ if err != nil {
+ t.Fatalf("failed to create file: %v", err)
+ }
+ _, err = f.WriteString(test.source)
+ if err != nil {
+ f.Close()
+ t.Fatalf("failed to write: %v", err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatalf("failed to close file: %v", err)
+ }
+
+ cmd := exec.Command(testenv.GoToolPath(t), test.run, "-race", "-pkgdir="+pkgdir, src)
+ // GODEBUG spoils program output, GOMAXPROCS makes it flaky.
+ for _, env := range os.Environ() {
+ if strings.HasPrefix(env, "GODEBUG=") ||
+ strings.HasPrefix(env, "GOMAXPROCS=") ||
+ strings.HasPrefix(env, "GORACE=") {
+ continue
+ }
+ cmd.Env = append(cmd.Env, env)
+ }
+ cmd.Env = append(cmd.Env,
+ "GOMAXPROCS=1", // see comment in race_test.go
+ "GORACE="+test.gorace,
+ )
+ got, _ := cmd.CombinedOutput()
+ matched := false
+ for _, re := range test.re {
+ if regexp.MustCompile(re).MatchString(string(got)) {
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ exp := fmt.Sprintf("expect:\n%v\n", test.re[0])
+ if len(test.re) > 1 {
+ exp = fmt.Sprintf("expected one of %d patterns:\n",
+ len(test.re))
+ for k, re := range test.re {
+ exp += fmt.Sprintf("pattern %d:\n%v\n", k, re)
+ }
+ }
+ t.Fatalf("failed test case %v, %sgot:\n%s",
+ test.name, exp, got)
+ }
+ }
+}
+
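
The environment-scrubbing loop above (and its twin in race_test.go) follows a pattern that could be factored into a helper; a hedged sketch, not part of this diff:

```go
package example

import "strings"

// filterEnv returns environ without any variable whose name is in names,
// so a child process starts from a known-clean environment.
func filterEnv(environ []string, names ...string) []string {
	var out []string
	for _, kv := range environ {
		keep := true
		for _, name := range names {
			if strings.HasPrefix(kv, name+"=") {
				keep = false
				break
			}
		}
		if keep {
			out = append(out, kv)
		}
	}
	return out
}
```

A caller would then write cmd.Env = append(filterEnv(os.Environ(), "GODEBUG", "GOMAXPROCS", "GORACE"), "GOMAXPROCS=1", "GORACE="+test.gorace).
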
+var tests = []struct {
+ name string
+ run string
+ goos string
+ gorace string
+ source string
+ re []string
+}{
+ {"simple", "run", "", "atexit_sleep_ms=0", `
+package main
+import "time"
+var xptr *int
+var donechan chan bool
+func main() {
+ done := make(chan bool)
+ x := 0
+ startRacer(&x, done)
+ store(&x, 43)
+ <-done
+}
+func store(x *int, v int) {
+ *x = v
+}
+func startRacer(x *int, done chan bool) {
+ xptr = x
+ donechan = done
+ go racer()
+}
+func racer() {
+ time.Sleep(10*time.Millisecond)
+ store(xptr, 42)
+ donechan <- true
+}
+`, []string{`==================
+WARNING: DATA RACE
+Write at 0x[0-9,a-f]+ by goroutine [0-9]:
+ main\.store\(\)
+ .+/main\.go:14 \+0x[0-9,a-f]+
+ main\.racer\(\)
+ .+/main\.go:23 \+0x[0-9,a-f]+
+
+Previous write at 0x[0-9,a-f]+ by main goroutine:
+ main\.store\(\)
+ .+/main\.go:14 \+0x[0-9,a-f]+
+ main\.main\(\)
+ .+/main\.go:10 \+0x[0-9,a-f]+
+
+Goroutine [0-9] \(running\) created at:
+ main\.startRacer\(\)
+ .+/main\.go:19 \+0x[0-9,a-f]+
+ main\.main\(\)
+ .+/main\.go:9 \+0x[0-9,a-f]+
+==================
+Found 1 data race\(s\)
+exit status 66
+`}},
+
+ {"exitcode", "run", "", "atexit_sleep_ms=0 exitcode=13", `
+package main
+func main() {
+ done := make(chan bool)
+ x := 0; _ = x
+ go func() {
+ x = 42
+ done <- true
+ }()
+ x = 43
+ <-done
+}
+`, []string{`exit status 13`}},
+
+ {"strip_path_prefix", "run", "", "atexit_sleep_ms=0 strip_path_prefix=/main.", `
+package main
+func main() {
+ done := make(chan bool)
+ x := 0; _ = x
+ go func() {
+ x = 42
+ done <- true
+ }()
+ x = 43
+ <-done
+}
+`, []string{`
+ go:7 \+0x[0-9,a-f]+
+`}},
+
+ {"halt_on_error", "run", "", "atexit_sleep_ms=0 halt_on_error=1", `
+package main
+func main() {
+ done := make(chan bool)
+ x := 0; _ = x
+ go func() {
+ x = 42
+ done <- true
+ }()
+ x = 43
+ <-done
+}
+`, []string{`
+==================
+exit status 66
+`}},
+
+ {"test_fails_on_race", "test", "", "atexit_sleep_ms=0", `
+package main_test
+import "testing"
+func TestFail(t *testing.T) {
+ done := make(chan bool)
+ x := 0
+ _ = x
+ go func() {
+ x = 42
+ done <- true
+ }()
+ x = 43
+ <-done
+ t.Log(t.Failed())
+}
+`, []string{`
+==================
+--- FAIL: TestFail \([0-9.]+s\)
+.*main_test.go:14: true
+.*testing.go:.*: race detected during execution of test
+FAIL`}},
+
+ {"slicebytetostring_pc", "run", "", "atexit_sleep_ms=0", `
+package main
+func main() {
+ done := make(chan string)
+ data := make([]byte, 10)
+ go func() {
+ done <- string(data)
+ }()
+ data[0] = 1
+ <-done
+}
+`, []string{`
+ runtime\.slicebytetostring\(\)
+ .*/runtime/string\.go:.*
+ main\.main\.func1\(\)
+ .*/main.go:7`}},
+
+ // Test for https://golang.org/issue/33309
+ {"midstack_inlining_traceback", "run", "linux", "atexit_sleep_ms=0", `
+package main
+
+var x int
+var c chan int
+func main() {
+ c = make(chan int)
+ go f()
+ x = 1
+ <-c
+}
+
+func f() {
+ g(c)
+}
+
+func g(c chan int) {
+ h(c)
+}
+
+func h(c chan int) {
+ c <- x
+}
+`, []string{`==================
+WARNING: DATA RACE
+Read at 0x[0-9,a-f]+ by goroutine [0-9]:
+ main\.h\(\)
+ .+/main\.go:22 \+0x[0-9,a-f]+
+ main\.g\(\)
+ .+/main\.go:18 \+0x[0-9,a-f]+
+ main\.f\(\)
+ .+/main\.go:14 \+0x[0-9,a-f]+
+
+Previous write at 0x[0-9,a-f]+ by main goroutine:
+ main\.main\(\)
+ .+/main\.go:9 \+0x[0-9,a-f]+
+
+Goroutine [0-9] \(running\) created at:
+ main\.main\(\)
+ .+/main\.go:8 \+0x[0-9,a-f]+
+==================
+Found 1 data race\(s\)
+exit status 66
+`}},
+
+ // Test for https://golang.org/issue/17190
+ {"external_cgo_thread", "run", "linux", "atexit_sleep_ms=0", `
+package main
+
+/*
+#include <pthread.h>
+typedef struct cb {
+ int foo;
+} cb;
+extern void goCallback();
+static inline void *threadFunc(void *p) {
+ goCallback();
+ return 0;
+}
+static inline void startThread(cb* c) {
+ pthread_t th;
+ pthread_create(&th, 0, threadFunc, 0);
+}
+*/
+import "C"
+
+var done chan bool
+var racy int
+
+//export goCallback
+func goCallback() {
+ racy++
+ done <- true
+}
+
+func main() {
+ done = make(chan bool)
+ var c C.cb
+ C.startThread(&c)
+ racy++
+ <- done
+}
+`, []string{`==================
+WARNING: DATA RACE
+Read at 0x[0-9,a-f]+ by main goroutine:
+ main\.main\(\)
+ .*/main\.go:34 \+0x[0-9,a-f]+
+
+Previous write at 0x[0-9,a-f]+ by goroutine [0-9]:
+ main\.goCallback\(\)
+ .*/main\.go:27 \+0x[0-9,a-f]+
+ _cgoexp_[0-9a-z]+_goCallback\(\)
+ .*_cgo_gotypes\.go:[0-9]+ \+0x[0-9,a-f]+
+ _cgoexp_[0-9a-z]+_goCallback\(\)
+ <autogenerated>:1 \+0x[0-9,a-f]+
+
+Goroutine [0-9] \(running\) created at:
+ runtime\.newextram\(\)
+ .*/runtime/proc.go:[0-9]+ \+0x[0-9,a-f]+
+==================`,
+ `==================
+WARNING: DATA RACE
+Read at 0x[0-9,a-f]+ by .*:
+ main\..*
+ .*/main\.go:[0-9]+ \+0x[0-9,a-f]+(?s).*
+
+Previous write at 0x[0-9,a-f]+ by .*:
+ main\..*
+ .*/main\.go:[0-9]+ \+0x[0-9,a-f]+(?s).*
+
+Goroutine [0-9] \(running\) created at:
+ runtime\.newextram\(\)
+ .*/runtime/proc.go:[0-9]+ \+0x[0-9,a-f]+
+==================`}},
+ {"second_test_passes", "test", "", "atexit_sleep_ms=0", `
+package main_test
+import "testing"
+func TestFail(t *testing.T) {
+ done := make(chan bool)
+ x := 0
+ _ = x
+ go func() {
+ x = 42
+ done <- true
+ }()
+ x = 43
+ <-done
+}
+
+func TestPass(t *testing.T) {
+}
+`, []string{`
+==================
+--- FAIL: TestFail \([0-9.]+s\)
+.*testing.go:.*: race detected during execution of test
+FAIL`}},
+ {"mutex", "run", "", "atexit_sleep_ms=0", `
+package main
+import (
+ "sync"
+ "fmt"
+)
+func main() {
+ c := make(chan bool, 1)
+ threads := 1
+ iterations := 20000
+ data := 0
+ var wg sync.WaitGroup
+ for i := 0; i < threads; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < iterations; i++ {
+ c <- true
+ data += 1
+ <- c
+ }
+ }()
+ }
+ for i := 0; i < iterations; i++ {
+ c <- true
+ data += 1
+ <- c
+ }
+ wg.Wait()
+ if (data == iterations*(threads+1)) { fmt.Println("pass") }
+}`, []string{`pass`}},
+ // Test for https://github.com/golang/go/issues/37355
+ {"chanmm", "run", "", "atexit_sleep_ms=0", `
+package main
+import (
+ "sync"
+ "time"
+)
+func main() {
+ c := make(chan bool, 1)
+ var data uint64
+ var wg sync.WaitGroup
+ wg.Add(2)
+ c <- true
+ go func() {
+ defer wg.Done()
+ c <- true
+ }()
+ go func() {
+ defer wg.Done()
+ time.Sleep(time.Second)
+ <-c
+ data = 2
+ }()
+ data = 1
+ <-c
+ wg.Wait()
+ _ = data
+}
+`, []string{`==================
+WARNING: DATA RACE
+Write at 0x[0-9,a-f]+ by goroutine [0-9]:
+ main\.main\.func2\(\)
+ .*/main\.go:21 \+0x[0-9,a-f]+
+
+Previous write at 0x[0-9,a-f]+ by main goroutine:
+ main\.main\(\)
+ .*/main\.go:23 \+0x[0-9,a-f]+
+
+Goroutine [0-9] \(running\) created at:
+ main\.main\(\)
+ .*/main.go:[0-9]+ \+0x[0-9,a-f]+
+==================`}},
+}
diff --git a/src/runtime/race/race.go b/src/runtime/race/race.go
new file mode 100644
index 0000000..9c508eb
--- /dev/null
+++ b/src/runtime/race/race.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race && ((linux && (amd64 || arm64 || ppc64le || s390x)) || ((freebsd || netbsd || openbsd || windows) && amd64))
+
+package race
+
+// This file merely ensures that we link in runtime/cgo in race builds;
+// this in turn ensures that the runtime uses pthread_create to create threads.
+// The prebuilt race runtime lives in race_GOOS_GOARCH.syso.
+// Calls to the runtime are done directly from src/runtime/race.go.
+
+// On darwin we always use system DLLs to create threads,
+// so we use race_darwin_$GOARCH.go to provide the syso-derived
+// symbol information without needing to invoke cgo.
+// This allows -race to be used on Mac systems without a C toolchain.
+
+// void __race_unused_func(void);
+import "C"
diff --git a/src/runtime/race/race_darwin_amd64.go b/src/runtime/race/race_darwin_amd64.go
new file mode 100644
index 0000000..fbb838a
--- /dev/null
+++ b/src/runtime/race/race_darwin_amd64.go
@@ -0,0 +1,101 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by mkcgo.sh. DO NOT EDIT.
+
+//go:build race
+
+package race
+
+//go:cgo_import_dynamic _Block_object_assign _Block_object_assign ""
+//go:cgo_import_dynamic _Block_object_dispose _Block_object_dispose ""
+//go:cgo_import_dynamic _NSConcreteStackBlock _NSConcreteStackBlock ""
+//go:cgo_import_dynamic _NSGetArgv _NSGetArgv ""
+//go:cgo_import_dynamic _NSGetEnviron _NSGetEnviron ""
+//go:cgo_import_dynamic _NSGetExecutablePath _NSGetExecutablePath ""
+//go:cgo_import_dynamic __bzero __bzero ""
+//go:cgo_import_dynamic __error __error ""
+//go:cgo_import_dynamic __fork __fork ""
+//go:cgo_import_dynamic __mmap __mmap ""
+//go:cgo_import_dynamic __munmap __munmap ""
+//go:cgo_import_dynamic __stack_chk_fail __stack_chk_fail ""
+//go:cgo_import_dynamic __stack_chk_guard __stack_chk_guard ""
+//go:cgo_import_dynamic _dyld_get_image_header _dyld_get_image_header ""
+//go:cgo_import_dynamic _dyld_get_image_name _dyld_get_image_name ""
+//go:cgo_import_dynamic _dyld_get_image_vmaddr_slide _dyld_get_image_vmaddr_slide ""
+//go:cgo_import_dynamic _dyld_get_shared_cache_range _dyld_get_shared_cache_range ""
+//go:cgo_import_dynamic _dyld_get_shared_cache_uuid _dyld_get_shared_cache_uuid ""
+//go:cgo_import_dynamic _dyld_image_count _dyld_image_count ""
+//go:cgo_import_dynamic _exit _exit ""
+//go:cgo_import_dynamic abort abort ""
+//go:cgo_import_dynamic arc4random_buf arc4random_buf ""
+//go:cgo_import_dynamic close close ""
+//go:cgo_import_dynamic dlsym dlsym ""
+//go:cgo_import_dynamic dup dup ""
+//go:cgo_import_dynamic dup2 dup2 ""
+//go:cgo_import_dynamic dyld_shared_cache_iterate_text dyld_shared_cache_iterate_text ""
+//go:cgo_import_dynamic execve execve ""
+//go:cgo_import_dynamic exit exit ""
+//go:cgo_import_dynamic fstat$INODE64 fstat$INODE64 ""
+//go:cgo_import_dynamic ftruncate ftruncate ""
+//go:cgo_import_dynamic getpid getpid ""
+//go:cgo_import_dynamic getrlimit getrlimit ""
+//go:cgo_import_dynamic gettimeofday gettimeofday ""
+//go:cgo_import_dynamic getuid getuid ""
+//go:cgo_import_dynamic grantpt grantpt ""
+//go:cgo_import_dynamic ioctl ioctl ""
+//go:cgo_import_dynamic isatty isatty ""
+//go:cgo_import_dynamic lstat$INODE64 lstat$INODE64 ""
+//go:cgo_import_dynamic mach_absolute_time mach_absolute_time ""
+//go:cgo_import_dynamic mach_task_self_ mach_task_self_ ""
+//go:cgo_import_dynamic mach_timebase_info mach_timebase_info ""
+//go:cgo_import_dynamic mach_vm_region_recurse mach_vm_region_recurse ""
+//go:cgo_import_dynamic madvise madvise ""
+//go:cgo_import_dynamic malloc_num_zones malloc_num_zones ""
+//go:cgo_import_dynamic malloc_zones malloc_zones ""
+//go:cgo_import_dynamic memcpy memcpy ""
+//go:cgo_import_dynamic memset_pattern16 memset_pattern16 ""
+//go:cgo_import_dynamic mkdir mkdir ""
+//go:cgo_import_dynamic mprotect mprotect ""
+//go:cgo_import_dynamic open open ""
+//go:cgo_import_dynamic pipe pipe ""
+//go:cgo_import_dynamic posix_openpt posix_openpt ""
+//go:cgo_import_dynamic posix_spawn posix_spawn ""
+//go:cgo_import_dynamic posix_spawn_file_actions_addclose posix_spawn_file_actions_addclose ""
+//go:cgo_import_dynamic posix_spawn_file_actions_adddup2 posix_spawn_file_actions_adddup2 ""
+//go:cgo_import_dynamic posix_spawn_file_actions_destroy posix_spawn_file_actions_destroy ""
+//go:cgo_import_dynamic posix_spawn_file_actions_init posix_spawn_file_actions_init ""
+//go:cgo_import_dynamic posix_spawnattr_destroy posix_spawnattr_destroy ""
+//go:cgo_import_dynamic posix_spawnattr_init posix_spawnattr_init ""
+//go:cgo_import_dynamic posix_spawnattr_setflags posix_spawnattr_setflags ""
+//go:cgo_import_dynamic pthread_attr_getstack pthread_attr_getstack ""
+//go:cgo_import_dynamic pthread_create pthread_create ""
+//go:cgo_import_dynamic pthread_get_stackaddr_np pthread_get_stackaddr_np ""
+//go:cgo_import_dynamic pthread_get_stacksize_np pthread_get_stacksize_np ""
+//go:cgo_import_dynamic pthread_getspecific pthread_getspecific ""
+//go:cgo_import_dynamic pthread_join pthread_join ""
+//go:cgo_import_dynamic pthread_self pthread_self ""
+//go:cgo_import_dynamic pthread_sigmask pthread_sigmask ""
+//go:cgo_import_dynamic pthread_threadid_np pthread_threadid_np ""
+//go:cgo_import_dynamic read read ""
+//go:cgo_import_dynamic readlink readlink ""
+//go:cgo_import_dynamic realpath$DARWIN_EXTSN realpath$DARWIN_EXTSN ""
+//go:cgo_import_dynamic rename rename ""
+//go:cgo_import_dynamic sched_yield sched_yield ""
+//go:cgo_import_dynamic setrlimit setrlimit ""
+//go:cgo_import_dynamic sigaction sigaction ""
+//go:cgo_import_dynamic stat$INODE64 stat$INODE64 ""
+//go:cgo_import_dynamic sysconf sysconf ""
+//go:cgo_import_dynamic sysctl sysctl ""
+//go:cgo_import_dynamic sysctlbyname sysctlbyname ""
+//go:cgo_import_dynamic task_info task_info ""
+//go:cgo_import_dynamic tcgetattr tcgetattr ""
+//go:cgo_import_dynamic tcsetattr tcsetattr ""
+//go:cgo_import_dynamic unlink unlink ""
+//go:cgo_import_dynamic unlockpt unlockpt ""
+//go:cgo_import_dynamic usleep usleep ""
+//go:cgo_import_dynamic vm_region_64 vm_region_64 ""
+//go:cgo_import_dynamic vm_region_recurse_64 vm_region_recurse_64 ""
+//go:cgo_import_dynamic waitpid waitpid ""
+//go:cgo_import_dynamic write write ""
diff --git a/src/runtime/race/race_darwin_arm64.go b/src/runtime/race/race_darwin_arm64.go
new file mode 100644
index 0000000..fe8584c
--- /dev/null
+++ b/src/runtime/race/race_darwin_arm64.go
@@ -0,0 +1,95 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by mkcgo.sh. DO NOT EDIT.
+
+//go:build race
+
+package race
+
+//go:cgo_import_dynamic _NSGetArgv _NSGetArgv ""
+//go:cgo_import_dynamic _NSGetEnviron _NSGetEnviron ""
+//go:cgo_import_dynamic _NSGetExecutablePath _NSGetExecutablePath ""
+//go:cgo_import_dynamic __error __error ""
+//go:cgo_import_dynamic __fork __fork ""
+//go:cgo_import_dynamic __mmap __mmap ""
+//go:cgo_import_dynamic __munmap __munmap ""
+//go:cgo_import_dynamic __stack_chk_fail __stack_chk_fail ""
+//go:cgo_import_dynamic __stack_chk_guard __stack_chk_guard ""
+//go:cgo_import_dynamic _dyld_get_image_header _dyld_get_image_header ""
+//go:cgo_import_dynamic _dyld_get_image_name _dyld_get_image_name ""
+//go:cgo_import_dynamic _dyld_get_image_vmaddr_slide _dyld_get_image_vmaddr_slide ""
+//go:cgo_import_dynamic _dyld_image_count _dyld_image_count ""
+//go:cgo_import_dynamic _exit _exit ""
+//go:cgo_import_dynamic abort abort ""
+//go:cgo_import_dynamic arc4random_buf arc4random_buf ""
+//go:cgo_import_dynamic bzero bzero ""
+//go:cgo_import_dynamic close close ""
+//go:cgo_import_dynamic dlsym dlsym ""
+//go:cgo_import_dynamic dup dup ""
+//go:cgo_import_dynamic dup2 dup2 ""
+//go:cgo_import_dynamic execve execve ""
+//go:cgo_import_dynamic exit exit ""
+//go:cgo_import_dynamic fstat fstat ""
+//go:cgo_import_dynamic ftruncate ftruncate ""
+//go:cgo_import_dynamic getpid getpid ""
+//go:cgo_import_dynamic getrlimit getrlimit ""
+//go:cgo_import_dynamic gettimeofday gettimeofday ""
+//go:cgo_import_dynamic getuid getuid ""
+//go:cgo_import_dynamic grantpt grantpt ""
+//go:cgo_import_dynamic ioctl ioctl ""
+//go:cgo_import_dynamic isatty isatty ""
+//go:cgo_import_dynamic lstat lstat ""
+//go:cgo_import_dynamic mach_absolute_time mach_absolute_time ""
+//go:cgo_import_dynamic mach_task_self_ mach_task_self_ ""
+//go:cgo_import_dynamic mach_timebase_info mach_timebase_info ""
+//go:cgo_import_dynamic mach_vm_region_recurse mach_vm_region_recurse ""
+//go:cgo_import_dynamic madvise madvise ""
+//go:cgo_import_dynamic malloc_num_zones malloc_num_zones ""
+//go:cgo_import_dynamic malloc_zones malloc_zones ""
+//go:cgo_import_dynamic memcpy memcpy ""
+//go:cgo_import_dynamic memset_pattern16 memset_pattern16 ""
+//go:cgo_import_dynamic mkdir mkdir ""
+//go:cgo_import_dynamic mprotect mprotect ""
+//go:cgo_import_dynamic open open ""
+//go:cgo_import_dynamic pipe pipe ""
+//go:cgo_import_dynamic posix_openpt posix_openpt ""
+//go:cgo_import_dynamic posix_spawn posix_spawn ""
+//go:cgo_import_dynamic posix_spawn_file_actions_addclose posix_spawn_file_actions_addclose ""
+//go:cgo_import_dynamic posix_spawn_file_actions_adddup2 posix_spawn_file_actions_adddup2 ""
+//go:cgo_import_dynamic posix_spawn_file_actions_destroy posix_spawn_file_actions_destroy ""
+//go:cgo_import_dynamic posix_spawn_file_actions_init posix_spawn_file_actions_init ""
+//go:cgo_import_dynamic posix_spawnattr_destroy posix_spawnattr_destroy ""
+//go:cgo_import_dynamic posix_spawnattr_init posix_spawnattr_init ""
+//go:cgo_import_dynamic posix_spawnattr_setflags posix_spawnattr_setflags ""
+//go:cgo_import_dynamic pthread_attr_getstack pthread_attr_getstack ""
+//go:cgo_import_dynamic pthread_create pthread_create ""
+//go:cgo_import_dynamic pthread_get_stackaddr_np pthread_get_stackaddr_np ""
+//go:cgo_import_dynamic pthread_get_stacksize_np pthread_get_stacksize_np ""
+//go:cgo_import_dynamic pthread_getspecific pthread_getspecific ""
+//go:cgo_import_dynamic pthread_join pthread_join ""
+//go:cgo_import_dynamic pthread_self pthread_self ""
+//go:cgo_import_dynamic pthread_sigmask pthread_sigmask ""
+//go:cgo_import_dynamic pthread_threadid_np pthread_threadid_np ""
+//go:cgo_import_dynamic read read ""
+//go:cgo_import_dynamic readlink readlink ""
+//go:cgo_import_dynamic realpath$DARWIN_EXTSN realpath$DARWIN_EXTSN ""
+//go:cgo_import_dynamic rename rename ""
+//go:cgo_import_dynamic sched_yield sched_yield ""
+//go:cgo_import_dynamic setrlimit setrlimit ""
+//go:cgo_import_dynamic sigaction sigaction ""
+//go:cgo_import_dynamic stat stat ""
+//go:cgo_import_dynamic sysconf sysconf ""
+//go:cgo_import_dynamic sysctl sysctl ""
+//go:cgo_import_dynamic sysctlbyname sysctlbyname ""
+//go:cgo_import_dynamic task_info task_info ""
+//go:cgo_import_dynamic tcgetattr tcgetattr ""
+//go:cgo_import_dynamic tcsetattr tcsetattr ""
+//go:cgo_import_dynamic unlink unlink ""
+//go:cgo_import_dynamic unlockpt unlockpt ""
+//go:cgo_import_dynamic usleep usleep ""
+//go:cgo_import_dynamic vm_region_64 vm_region_64 ""
+//go:cgo_import_dynamic vm_region_recurse_64 vm_region_recurse_64 ""
+//go:cgo_import_dynamic waitpid waitpid ""
+//go:cgo_import_dynamic write write ""
diff --git a/src/runtime/race/race_darwin_arm64.syso b/src/runtime/race/race_darwin_arm64.syso
new file mode 100644
index 0000000..4a23df2
--- /dev/null
+++ b/src/runtime/race/race_darwin_arm64.syso
Binary files differ
diff --git a/src/runtime/race/race_linux_arm64.syso b/src/runtime/race/race_linux_arm64.syso
new file mode 100644
index 0000000..c8b3f48
--- /dev/null
+++ b/src/runtime/race/race_linux_arm64.syso
Binary files differ
diff --git a/src/runtime/race/race_linux_ppc64le.syso b/src/runtime/race/race_linux_ppc64le.syso
new file mode 100644
index 0000000..1939f29
--- /dev/null
+++ b/src/runtime/race/race_linux_ppc64le.syso
Binary files differ
diff --git a/src/runtime/race/race_linux_s390x.syso b/src/runtime/race/race_linux_s390x.syso
new file mode 100644
index 0000000..ed4a300
--- /dev/null
+++ b/src/runtime/race/race_linux_s390x.syso
Binary files differ
diff --git a/src/runtime/race/race_linux_test.go b/src/runtime/race/race_linux_test.go
new file mode 100644
index 0000000..947ed7c
--- /dev/null
+++ b/src/runtime/race/race_linux_test.go
@@ -0,0 +1,65 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && race
+
+package race_test
+
+import (
+ "sync/atomic"
+ "syscall"
+ "testing"
+ "unsafe"
+)
+
+func TestAtomicMmap(t *testing.T) {
+ // Test that atomic operations work on "external" memory. Previously they crashed (#16206).
+	// Also do a basic correctness check: under the race detector, atomic
+	// operations are implemented inside the race runtime.
+ mem, err := syscall.Mmap(-1, 0, 1<<20, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE)
+ if err != nil {
+ t.Fatalf("mmap failed: %v", err)
+ }
+ defer syscall.Munmap(mem)
+ a := (*uint64)(unsafe.Pointer(&mem[0]))
+ if *a != 0 {
+ t.Fatalf("bad atomic value: %v, want 0", *a)
+ }
+ atomic.AddUint64(a, 1)
+ if *a != 1 {
+ t.Fatalf("bad atomic value: %v, want 1", *a)
+ }
+ atomic.AddUint64(a, 1)
+ if *a != 2 {
+ t.Fatalf("bad atomic value: %v, want 2", *a)
+ }
+}
+
+func TestAtomicPageBoundary(t *testing.T) {
+	// Test that atomic access near (but not crossing) a page boundary
+ // doesn't fault. See issue 60825.
+
+ // Mmap two pages of memory, and make the second page inaccessible,
+ // so we have an address at the end of a page.
+ pagesize := syscall.Getpagesize()
+ b, err := syscall.Mmap(0, 0, 2*pagesize, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE)
+ if err != nil {
+ t.Fatalf("mmap failed %s", err)
+ }
+ defer syscall.Munmap(b)
+ err = syscall.Mprotect(b[pagesize:], syscall.PROT_NONE)
+ if err != nil {
+ t.Fatalf("mprotect high failed %s\n", err)
+ }
+
+ // This should not fault.
+ a := (*uint32)(unsafe.Pointer(&b[pagesize-4]))
+ atomic.StoreUint32(a, 1)
+ if x := atomic.LoadUint32(a); x != 1 {
+ t.Fatalf("bad atomic value: %v, want 1", x)
+ }
+ if x := atomic.AddUint32(a, 1); x != 2 {
+ t.Fatalf("bad atomic value: %v, want 2", x)
+ }
+}
diff --git a/src/runtime/race/race_test.go b/src/runtime/race/race_test.go
new file mode 100644
index 0000000..4fe6168
--- /dev/null
+++ b/src/runtime/race/race_test.go
@@ -0,0 +1,250 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+// This program is used to verify the race detector
+// by running the tests and parsing their output.
+// It does not check stack correctness, completeness or anything else:
+// it merely verifies that if a test is expected to be racy
+// then the race is detected.
+package race_test
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "internal/testenv"
+ "io"
+ "log"
+ "math/rand"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+)
+
+var (
+ passedTests = 0
+ totalTests = 0
+ falsePos = 0
+ falseNeg = 0
+ failingPos = 0
+ failingNeg = 0
+ failed = false
+)
+
+const (
+ visibleLen = 40
+ testPrefix = "=== RUN Test"
+)
+
+func TestRace(t *testing.T) {
+ testOutput, err := runTests(t)
+ if err != nil {
+ t.Fatalf("Failed to run tests: %v\n%v", err, string(testOutput))
+ }
+ reader := bufio.NewReader(bytes.NewReader(testOutput))
+
+ funcName := ""
+ var tsanLog []string
+ for {
+ s, err := nextLine(reader)
+ if err != nil {
+ fmt.Printf("%s\n", processLog(funcName, tsanLog))
+ break
+ }
+ if strings.HasPrefix(s, testPrefix) {
+ fmt.Printf("%s\n", processLog(funcName, tsanLog))
+ tsanLog = make([]string, 0, 100)
+ funcName = s[len(testPrefix):]
+ } else {
+ tsanLog = append(tsanLog, s)
+ }
+ }
+
+ if totalTests == 0 {
+ t.Fatalf("failed to parse test output:\n%s", testOutput)
+ }
+ fmt.Printf("\nPassed %d of %d tests (%.02f%%, %d+, %d-)\n",
+ passedTests, totalTests, 100*float64(passedTests)/float64(totalTests), falsePos, falseNeg)
+	fmt.Printf("%d expected failures (%d did not fail)\n", failingPos+failingNeg, failingNeg)
+ if failed {
+ t.Fail()
+ }
+}
+
+// nextLine is a wrapper around bufio.Reader.ReadString.
+// It reads a line up to the next '\n' character. The returned
+// error is non-nil if there are no lines left, and nil
+// otherwise.
+func nextLine(r *bufio.Reader) (string, error) {
+ s, err := r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ log.Fatalf("nextLine: expected EOF, received %v", err)
+ }
+ return s, err
+ }
+ return s[:len(s)-1], nil
+}
+
+// processLog checks whether the given ThreadSanitizer log
+// contains a race report, compares that against the expectation
+// encoded in the name of the test case, and returns the result
+// of the comparison.
+func processLog(testName string, tsanLog []string) string {
+ if !strings.HasPrefix(testName, "Race") && !strings.HasPrefix(testName, "NoRace") {
+ return ""
+ }
+ gotRace := false
+ for _, s := range tsanLog {
+ if strings.Contains(s, "DATA RACE") {
+ gotRace = true
+ break
+ }
+ }
+
+ failing := strings.Contains(testName, "Failing")
+ expRace := !strings.HasPrefix(testName, "No")
+ for len(testName) < visibleLen {
+ testName += " "
+ }
+ if expRace == gotRace {
+ passedTests++
+ totalTests++
+ if failing {
+ failed = true
+ failingNeg++
+ }
+ return fmt.Sprintf("%s .", testName)
+ }
+ pos := ""
+ if expRace {
+ falseNeg++
+ } else {
+ falsePos++
+ pos = "+"
+ }
+ if failing {
+ failingPos++
+ } else {
+ failed = true
+ }
+ totalTests++
+ return fmt.Sprintf("%s %s%s", testName, "FAILED", pos)
+}
+
+// runTests ensures that the package and its dependencies are
+// built with instrumentation enabled and returns the output of 'go test',
+// which includes possible data race reports from ThreadSanitizer.
+func runTests(t *testing.T) ([]byte, error) {
+ tests, err := filepath.Glob("./testdata/*_test.go")
+ if err != nil {
+ return nil, err
+ }
+ args := []string{"test", "-race", "-v"}
+ args = append(args, tests...)
+ cmd := exec.Command(testenv.GoToolPath(t), args...)
+	// The GORACE flags set below turn off heuristics that suppress
+	// seemingly identical reports. This is required because the tests
+	// contain many data races on the same addresses (the tests are
+	// simple and the memory is constantly reused).
+ for _, env := range os.Environ() {
+ if strings.HasPrefix(env, "GOMAXPROCS=") ||
+ strings.HasPrefix(env, "GODEBUG=") ||
+ strings.HasPrefix(env, "GORACE=") {
+ continue
+ }
+ cmd.Env = append(cmd.Env, env)
+ }
+ // We set GOMAXPROCS=1 to prevent test flakiness.
+ // There are two sources of flakiness:
+ // 1. Some tests rely on particular execution order.
+ // If the order is different, race does not happen at all.
+	// 2. Ironically, the ThreadSanitizer runtime contains a logical race condition
+	// that can lead to false negatives if racy accesses happen literally at the same time.
+ // Tests used to work reliably in the good old days of GOMAXPROCS=1.
+ // So let's set it for now. A more reliable solution is to explicitly annotate tests
+ // with required execution order by means of a special "invisible" synchronization primitive
+ // (that's what is done for C++ ThreadSanitizer tests). This is issue #14119.
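+	// Note that such a primitive would have to be invisible to the race
+	// detector itself: an ordinary channel or mutex would create
+	// happens-before edges and mask the very races these tests provoke.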
+ cmd.Env = append(cmd.Env,
+ "GOMAXPROCS=1",
+ "GORACE=suppress_equal_stacks=0 suppress_equal_addresses=0",
+ )
+ // There are races: we expect tests to fail and the exit code to be non-zero.
+ out, _ := cmd.CombinedOutput()
+ if bytes.Contains(out, []byte("fatal error:")) {
+ // But don't expect runtime to crash.
+ return out, fmt.Errorf("runtime fatal error")
+ }
+ return out, nil
+}
+
+func TestIssue8102(t *testing.T) {
+ // If this compiles with -race, the test passes.
+ type S struct {
+ x any
+ i int
+ }
+ c := make(chan int)
+ a := [2]*int{}
+ for ; ; c <- *a[S{}.i] {
+ if t != nil {
+ break
+ }
+ }
+}
+
+func TestIssue9137(t *testing.T) {
+ a := []string{"a"}
+ i := 0
+ a[i], a[len(a)-1], a = a[len(a)-1], "", a[:len(a)-1]
+ if len(a) != 0 || a[:1][0] != "" {
+ t.Errorf("mangled a: %q %q", a, a[:1])
+ }
+}
+
+func BenchmarkSyncLeak(b *testing.B) {
+ const (
+ G = 1000
+ S = 1000
+ H = 10
+ )
+ var wg sync.WaitGroup
+ wg.Add(G)
+ for g := 0; g < G; g++ {
+ go func() {
+ defer wg.Done()
+ hold := make([][]uint32, H)
+ for i := 0; i < b.N; i++ {
+ a := make([]uint32, S)
+ atomic.AddUint32(&a[rand.Intn(len(a))], 1)
+ hold[rand.Intn(len(hold))] = a
+ }
+ _ = hold
+ }()
+ }
+ wg.Wait()
+}
+
+func BenchmarkStackLeak(b *testing.B) {
+ done := make(chan bool, 1)
+ for i := 0; i < b.N; i++ {
+ go func() {
+ growStack(rand.Intn(100))
+ done <- true
+ }()
+ <-done
+ }
+}
+
+func growStack(i int) {
+ if i == 0 {
+ return
+ }
+ growStack(i - 1)
+}
diff --git a/src/runtime/race/race_unix_test.go b/src/runtime/race/race_unix_test.go
new file mode 100644
index 0000000..3cf53b0
--- /dev/null
+++ b/src/runtime/race/race_unix_test.go
@@ -0,0 +1,29 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race && (darwin || freebsd || linux)
+
+package race_test
+
+import (
+ "sync/atomic"
+ "syscall"
+ "testing"
+ "unsafe"
+)
+
+// Test that race detector does not crash when accessing non-Go allocated memory (issue 9136).
+func TestNonGoMemory(t *testing.T) {
+ data, err := syscall.Mmap(-1, 0, 4096, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE)
+ if err != nil {
+ t.Fatalf("failed to mmap memory: %v", err)
+ }
+ defer syscall.Munmap(data)
+ p := (*uint32)(unsafe.Pointer(&data[0]))
+ atomic.AddUint32(p, 1)
+ (*p)++
+ if *p != 2 {
+ t.Fatalf("data[0] = %v, expect 2", *p)
+ }
+}
diff --git a/src/runtime/race/race_v1_amd64.go b/src/runtime/race/race_v1_amd64.go
new file mode 100644
index 0000000..7c40db1
--- /dev/null
+++ b/src/runtime/race/race_v1_amd64.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (linux && !amd64.v3) || darwin || freebsd || netbsd || openbsd || windows
+
+package race
+
+import _ "runtime/race/internal/amd64v1"
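+
+// The v1 object is used on all supported non-Linux amd64 targets, and
+// on Linux when the amd64.v3 build tag (set by building with
+// GOAMD64=v3 or higher) is absent; race_v3_amd64.go covers that case.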
diff --git a/src/runtime/race/race_v3_amd64.go b/src/runtime/race/race_v3_amd64.go
new file mode 100644
index 0000000..80728d8
--- /dev/null
+++ b/src/runtime/race/race_v3_amd64.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && amd64.v3
+
+package race
+
+import _ "runtime/race/internal/amd64v3"
diff --git a/src/runtime/race/race_windows_test.go b/src/runtime/race/race_windows_test.go
new file mode 100644
index 0000000..143b483
--- /dev/null
+++ b/src/runtime/race/race_windows_test.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows && race
+
+package race_test
+
+import (
+ "sync/atomic"
+ "syscall"
+ "testing"
+ "unsafe"
+)
+
+func TestAtomicMmap(t *testing.T) {
+ // Test that atomic operations work on "external" memory. Previously they crashed (#16206).
+	// Also do a sanity correctness check: under the race detector,
+	// atomic operations are implemented inside the race runtime.
+ kernel32 := syscall.NewLazyDLL("kernel32.dll")
+ VirtualAlloc := kernel32.NewProc("VirtualAlloc")
+ VirtualFree := kernel32.NewProc("VirtualFree")
+ const (
+ MEM_COMMIT = 0x00001000
+ MEM_RESERVE = 0x00002000
+ MEM_RELEASE = 0x8000
+ PAGE_READWRITE = 0x04
+ )
+ mem, _, err := syscall.Syscall6(VirtualAlloc.Addr(), 4, 0, 1<<20, MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE, 0, 0)
+ if err != 0 {
+ t.Fatalf("VirtualAlloc failed: %v", err)
+ }
+ defer syscall.Syscall(VirtualFree.Addr(), 3, mem, 1<<20, MEM_RELEASE)
+ a := (*uint64)(unsafe.Pointer(mem))
+ if *a != 0 {
+ t.Fatalf("bad atomic value: %v, want 0", *a)
+ }
+ atomic.AddUint64(a, 1)
+ if *a != 1 {
+ t.Fatalf("bad atomic value: %v, want 1", *a)
+ }
+ atomic.AddUint64(a, 1)
+ if *a != 2 {
+ t.Fatalf("bad atomic value: %v, want 2", *a)
+ }
+}
diff --git a/src/runtime/race/sched_test.go b/src/runtime/race/sched_test.go
new file mode 100644
index 0000000..a66860c
--- /dev/null
+++ b/src/runtime/race/sched_test.go
@@ -0,0 +1,48 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package race_test
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+)
+
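+// TestRandomScheduling checks that the race-enabled runtime randomizes
+// goroutine scheduling: ten rounds of starting N goroutines should not
+// all complete in the same order.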
+func TestRandomScheduling(t *testing.T) {
+ // Scheduler is most consistent with GOMAXPROCS=1.
+ // Use that to make the test most likely to fail.
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+ const N = 10
+ out := make([][]int, N)
+ for i := 0; i < N; i++ {
+ c := make(chan int, N)
+ for j := 0; j < N; j++ {
+ go func(j int) {
+ c <- j
+ }(j)
+ }
+ row := make([]int, N)
+ for j := 0; j < N; j++ {
+ row[j] = <-c
+ }
+ out[i] = row
+ }
+
+ for i := 0; i < N; i++ {
+ if !reflect.DeepEqual(out[0], out[i]) {
+ return // found a different order
+ }
+ }
+
+ var buf strings.Builder
+ for i := 0; i < N; i++ {
+ fmt.Fprintf(&buf, "%v\n", out[i])
+ }
+ t.Fatalf("consistent goroutine execution order:\n%v", buf.String())
+}
diff --git a/src/runtime/race/syso_test.go b/src/runtime/race/syso_test.go
new file mode 100644
index 0000000..2f1a91c
--- /dev/null
+++ b/src/runtime/race/syso_test.go
@@ -0,0 +1,33 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package race
+
+import (
+ "bytes"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+func TestIssue37485(t *testing.T) {
+ files, err := filepath.Glob("./*.syso")
+ if err != nil {
+ t.Fatalf("can't find syso files: %s", err)
+ }
+ for _, f := range files {
+ cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), "tool", "nm", f)
+ res, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Errorf("nm of %s failed: %s", f, err)
+ continue
+ }
+ if bytes.Contains(res, []byte("getauxval")) {
+ t.Errorf("%s contains getauxval", f)
+ }
+ }
+}
diff --git a/src/runtime/race/testdata/atomic_test.go b/src/runtime/race/testdata/atomic_test.go
new file mode 100644
index 0000000..4ce7260
--- /dev/null
+++ b/src/runtime/race/testdata/atomic_test.go
@@ -0,0 +1,325 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "unsafe"
+)
+
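+// In the AtomicAdd tests below, the atomic operations are the only
+// synchronization. The race detector models sync/atomic calls as
+// synchronization events, so the goroutine whose add returns 2 is
+// ordered after the other goroutine's add and hence after its plain
+// write; checking for a result of 1 instead, as TestRaceAtomicAddInt64
+// does, provides no such ordering for the write it then performs.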
+func TestNoRaceAtomicAddInt64(t *testing.T) {
+ var x1, x2 int8
+ _ = x1 + x2
+ var s int64
+ ch := make(chan bool, 2)
+ go func() {
+ x1 = 1
+ if atomic.AddInt64(&s, 1) == 2 {
+ x2 = 1
+ }
+ ch <- true
+ }()
+ go func() {
+ x2 = 1
+ if atomic.AddInt64(&s, 1) == 2 {
+ x1 = 1
+ }
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceAtomicAddInt64(t *testing.T) {
+ var x1, x2 int8
+ _ = x1 + x2
+ var s int64
+ ch := make(chan bool, 2)
+ go func() {
+ x1 = 1
+ if atomic.AddInt64(&s, 1) == 1 {
+ x2 = 1
+ }
+ ch <- true
+ }()
+ go func() {
+ x2 = 1
+ if atomic.AddInt64(&s, 1) == 1 {
+ x1 = 1
+ }
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
+func TestNoRaceAtomicAddInt32(t *testing.T) {
+ var x1, x2 int8
+ _ = x1 + x2
+ var s int32
+ ch := make(chan bool, 2)
+ go func() {
+ x1 = 1
+ if atomic.AddInt32(&s, 1) == 2 {
+ x2 = 1
+ }
+ ch <- true
+ }()
+ go func() {
+ x2 = 1
+ if atomic.AddInt32(&s, 1) == 2 {
+ x1 = 1
+ }
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
+func TestNoRaceAtomicLoadAddInt32(t *testing.T) {
+ var x int64
+ _ = x
+ var s int32
+ go func() {
+ x = 2
+ atomic.AddInt32(&s, 1)
+ }()
+ for atomic.LoadInt32(&s) != 1 {
+ runtime.Gosched()
+ }
+ x = 1
+}
+
+func TestNoRaceAtomicLoadStoreInt32(t *testing.T) {
+ var x int64
+ _ = x
+ var s int32
+ go func() {
+ x = 2
+ atomic.StoreInt32(&s, 1)
+ }()
+ for atomic.LoadInt32(&s) != 1 {
+ runtime.Gosched()
+ }
+ x = 1
+}
+
+func TestNoRaceAtomicStoreCASInt32(t *testing.T) {
+ var x int64
+ _ = x
+ var s int32
+ go func() {
+ x = 2
+ atomic.StoreInt32(&s, 1)
+ }()
+ for !atomic.CompareAndSwapInt32(&s, 1, 0) {
+ runtime.Gosched()
+ }
+ x = 1
+}
+
+func TestNoRaceAtomicCASLoadInt32(t *testing.T) {
+ var x int64
+ _ = x
+ var s int32
+ go func() {
+ x = 2
+ if !atomic.CompareAndSwapInt32(&s, 0, 1) {
+ panic("")
+ }
+ }()
+ for atomic.LoadInt32(&s) != 1 {
+ runtime.Gosched()
+ }
+ x = 1
+}
+
+func TestNoRaceAtomicCASCASInt32(t *testing.T) {
+ var x int64
+ _ = x
+ var s int32
+ go func() {
+ x = 2
+ if !atomic.CompareAndSwapInt32(&s, 0, 1) {
+ panic("")
+ }
+ }()
+ for !atomic.CompareAndSwapInt32(&s, 1, 0) {
+ runtime.Gosched()
+ }
+ x = 1
+}
+
+func TestNoRaceAtomicCASCASInt32_2(t *testing.T) {
+ var x1, x2 int8
+ _ = x1 + x2
+ var s int32
+ ch := make(chan bool, 2)
+ go func() {
+ x1 = 1
+ if !atomic.CompareAndSwapInt32(&s, 0, 1) {
+ x2 = 1
+ }
+ ch <- true
+ }()
+ go func() {
+ x2 = 1
+ if !atomic.CompareAndSwapInt32(&s, 0, 1) {
+ x1 = 1
+ }
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
+func TestNoRaceAtomicLoadInt64(t *testing.T) {
+ var x int32
+ _ = x
+ var s int64
+ go func() {
+ x = 2
+ atomic.AddInt64(&s, 1)
+ }()
+ for atomic.LoadInt64(&s) != 1 {
+ runtime.Gosched()
+ }
+ x = 1
+}
+
+func TestNoRaceAtomicCASCASUInt64(t *testing.T) {
+ var x int64
+ _ = x
+ var s uint64
+ go func() {
+ x = 2
+ if !atomic.CompareAndSwapUint64(&s, 0, 1) {
+ panic("")
+ }
+ }()
+ for !atomic.CompareAndSwapUint64(&s, 1, 0) {
+ runtime.Gosched()
+ }
+ x = 1
+}
+
+func TestNoRaceAtomicLoadStorePointer(t *testing.T) {
+ var x int64
+ _ = x
+ var s unsafe.Pointer
+ var y int = 2
+ var p unsafe.Pointer = unsafe.Pointer(&y)
+ go func() {
+ x = 2
+ atomic.StorePointer(&s, p)
+ }()
+ for atomic.LoadPointer(&s) != p {
+ runtime.Gosched()
+ }
+ x = 1
+}
+
+func TestNoRaceAtomicStoreCASUint64(t *testing.T) {
+ var x int64
+ _ = x
+ var s uint64
+ go func() {
+ x = 2
+ atomic.StoreUint64(&s, 1)
+ }()
+ for !atomic.CompareAndSwapUint64(&s, 1, 0) {
+ runtime.Gosched()
+ }
+ x = 1
+}
+
+func TestRaceAtomicStoreLoad(t *testing.T) {
+ c := make(chan bool)
+ var a uint64
+ go func() {
+ atomic.StoreUint64(&a, 1)
+ c <- true
+ }()
+ _ = a
+ <-c
+}
+
+func TestRaceAtomicLoadStore(t *testing.T) {
+ c := make(chan bool)
+ var a uint64
+ go func() {
+ _ = atomic.LoadUint64(&a)
+ c <- true
+ }()
+ a = 1
+ <-c
+}
+
+func TestRaceAtomicAddLoad(t *testing.T) {
+ c := make(chan bool)
+ var a uint64
+ go func() {
+ atomic.AddUint64(&a, 1)
+ c <- true
+ }()
+ _ = a
+ <-c
+}
+
+func TestRaceAtomicAddStore(t *testing.T) {
+ c := make(chan bool)
+ var a uint64
+ go func() {
+ atomic.AddUint64(&a, 1)
+ c <- true
+ }()
+ a = 42
+ <-c
+}
+
+// A nil pointer in an atomic operation should not deadlock
+// the rest of the program. It used to hang indefinitely.
+func TestNoRaceAtomicCrash(t *testing.T) {
+ var mutex sync.Mutex
+ var nilptr *int32
+ panics := 0
+ defer func() {
+ if x := recover(); x != nil {
+ mutex.Lock()
+ panics++
+ mutex.Unlock()
+ } else {
+ panic("no panic")
+ }
+ }()
+ atomic.AddInt32(nilptr, 1)
+}
+
+func TestNoRaceDeferAtomicStore(t *testing.T) {
+ // Test that when an atomic function is deferred directly, the
+ // GC scans it correctly. See issue 42599.
+ type foo struct {
+ bar int64
+ }
+
+ var doFork func(f *foo, depth int)
+ doFork = func(f *foo, depth int) {
+ atomic.StoreInt64(&f.bar, 1)
+ defer atomic.StoreInt64(&f.bar, 0)
+ if depth > 0 {
+ for i := 0; i < 2; i++ {
+ f2 := &foo{}
+ go doFork(f2, depth-1)
+ }
+ }
+ runtime.GC()
+ }
+
+ f := &foo{}
+ doFork(f, 11)
+}
diff --git a/src/runtime/race/testdata/cgo_test.go b/src/runtime/race/testdata/cgo_test.go
new file mode 100644
index 0000000..211ef7d
--- /dev/null
+++ b/src/runtime/race/testdata/cgo_test.go
@@ -0,0 +1,21 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "testing"
+)
+
+func TestNoRaceCgoSync(t *testing.T) {
+ cmd := exec.Command(testenv.GoToolPath(t), "run", "-race", "cgo_test_main.go")
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("program exited with error: %v\n", err)
+ }
+}
diff --git a/src/runtime/race/testdata/cgo_test_main.go b/src/runtime/race/testdata/cgo_test_main.go
new file mode 100644
index 0000000..620cea1
--- /dev/null
+++ b/src/runtime/race/testdata/cgo_test_main.go
@@ -0,0 +1,30 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+/*
+int sync;
+
+void Notify(void)
+{
+ __sync_fetch_and_add(&sync, 1);
+}
+
+void Wait(void)
+{
+ while(__sync_fetch_and_add(&sync, 0) == 0) {}
+}
+*/
+import "C"
+
+func main() {
+ data := 0
+ go func() {
+ data = 1
+ C.Notify()
+ }()
+ C.Wait()
+ _ = data
+}
diff --git a/src/runtime/race/testdata/chan_test.go b/src/runtime/race/testdata/chan_test.go
new file mode 100644
index 0000000..e39ad4f
--- /dev/null
+++ b/src/runtime/race/testdata/chan_test.go
@@ -0,0 +1,787 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "runtime"
+ "testing"
+ "time"
+)
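+
+// The tests in this file exercise the happens-before edges that the Go
+// memory model defines for channels: a send happens before the
+// corresponding receive completes, a close happens before a receive
+// that observes the close, and a receive from an unbuffered channel
+// happens before the corresponding send completes.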
+
+func TestNoRaceChanSync(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int)
+ go func() {
+ v = 1
+ c <- 0
+ }()
+ <-c
+ v = 2
+}
+
+func TestNoRaceChanSyncRev(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int)
+ go func() {
+ c <- 0
+ v = 2
+ }()
+ v = 1
+ <-c
+}
+
+func TestNoRaceChanAsync(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int, 10)
+ go func() {
+ v = 1
+ c <- 0
+ }()
+ <-c
+ v = 2
+}
+
+func TestRaceChanAsyncRev(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int, 10)
+ go func() {
+ c <- 0
+ v = 1
+ }()
+ v = 2
+ <-c
+}
+
+func TestNoRaceChanAsyncCloseRecv(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int, 10)
+ go func() {
+ v = 1
+ close(c)
+ }()
+ func() {
+ defer func() {
+ recover()
+ v = 2
+ }()
+ <-c
+ }()
+}
+
+func TestNoRaceChanAsyncCloseRecv2(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int, 10)
+ go func() {
+ v = 1
+ close(c)
+ }()
+ _, _ = <-c
+ v = 2
+}
+
+func TestNoRaceChanAsyncCloseRecv3(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int, 10)
+ go func() {
+ v = 1
+ close(c)
+ }()
+ for range c {
+ }
+ v = 2
+}
+
+func TestNoRaceChanSyncCloseRecv(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int)
+ go func() {
+ v = 1
+ close(c)
+ }()
+ func() {
+ defer func() {
+ recover()
+ v = 2
+ }()
+ <-c
+ }()
+}
+
+func TestNoRaceChanSyncCloseRecv2(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int)
+ go func() {
+ v = 1
+ close(c)
+ }()
+ _, _ = <-c
+ v = 2
+}
+
+func TestNoRaceChanSyncCloseRecv3(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int)
+ go func() {
+ v = 1
+ close(c)
+ }()
+ for range c {
+ }
+ v = 2
+}
+
+func TestRaceChanSyncCloseSend(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int)
+ go func() {
+ v = 1
+ close(c)
+ }()
+ func() {
+ defer func() {
+ recover()
+ }()
+ c <- 0
+ }()
+ v = 2
+}
+
+func TestRaceChanAsyncCloseSend(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int, 10)
+ go func() {
+ v = 1
+ close(c)
+ }()
+ func() {
+ defer func() {
+ recover()
+ }()
+ for {
+ c <- 0
+ }
+ }()
+ v = 2
+}
+
+func TestRaceChanCloseClose(t *testing.T) {
+ compl := make(chan bool, 2)
+ v1 := 0
+ v2 := 0
+ _ = v1 + v2
+ c := make(chan int)
+ go func() {
+ defer func() {
+ if recover() != nil {
+ v2 = 2
+ }
+ compl <- true
+ }()
+ v1 = 1
+ close(c)
+ }()
+ go func() {
+ defer func() {
+ if recover() != nil {
+ v1 = 2
+ }
+ compl <- true
+ }()
+ v2 = 1
+ close(c)
+ }()
+ <-compl
+ <-compl
+}
+
+func TestRaceChanSendLen(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int, 10)
+ go func() {
+ v = 1
+ c <- 1
+ }()
+ for len(c) == 0 {
+ runtime.Gosched()
+ }
+ v = 2
+}
+
+func TestRaceChanRecvLen(t *testing.T) {
+ v := 0
+ _ = v
+ c := make(chan int, 10)
+ c <- 1
+ go func() {
+ v = 1
+ <-c
+ }()
+ for len(c) != 0 {
+ runtime.Gosched()
+ }
+ v = 2
+}
+
+func TestRaceChanSendSend(t *testing.T) {
+ compl := make(chan bool, 2)
+ v1 := 0
+ v2 := 0
+ _ = v1 + v2
+ c := make(chan int, 1)
+ go func() {
+ v1 = 1
+ select {
+ case c <- 1:
+ default:
+ v2 = 2
+ }
+ compl <- true
+ }()
+ go func() {
+ v2 = 1
+ select {
+ case c <- 1:
+ default:
+ v1 = 2
+ }
+ compl <- true
+ }()
+ <-compl
+ <-compl
+}
+
+func TestNoRaceChanPtr(t *testing.T) {
+ type msg struct {
+ x int
+ }
+ c := make(chan *msg)
+ go func() {
+ c <- &msg{1}
+ }()
+ m := <-c
+ m.x = 2
+}
+
+func TestRaceChanWrongSend(t *testing.T) {
+ v1 := 0
+ v2 := 0
+ _ = v1 + v2
+ c := make(chan int, 2)
+ go func() {
+ v1 = 1
+ c <- 1
+ }()
+ go func() {
+ v2 = 2
+ c <- 2
+ }()
+ time.Sleep(1e7)
+ if <-c == 1 {
+ v2 = 3
+ } else {
+ v1 = 3
+ }
+}
+
+func TestRaceChanWrongClose(t *testing.T) {
+ v1 := 0
+ v2 := 0
+ _ = v1 + v2
+ c := make(chan int, 1)
+ done := make(chan bool)
+ go func() {
+ defer func() {
+ recover()
+ }()
+ v1 = 1
+ c <- 1
+ done <- true
+ }()
+ go func() {
+ time.Sleep(1e7)
+ v2 = 2
+ close(c)
+ done <- true
+ }()
+ time.Sleep(2e7)
+ if _, who := <-c; who {
+ v2 = 2
+ } else {
+ v1 = 2
+ }
+ <-done
+ <-done
+}
+
+func TestRaceChanSendClose(t *testing.T) {
+ compl := make(chan bool, 2)
+ c := make(chan int, 1)
+ go func() {
+ defer func() {
+ recover()
+ compl <- true
+ }()
+ c <- 1
+ }()
+ go func() {
+ time.Sleep(10 * time.Millisecond)
+ close(c)
+ compl <- true
+ }()
+ <-compl
+ <-compl
+}
+
+func TestRaceChanSendSelectClose(t *testing.T) {
+ compl := make(chan bool, 2)
+ c := make(chan int, 1)
+ c1 := make(chan int)
+ go func() {
+ defer func() {
+ recover()
+ compl <- true
+ }()
+ time.Sleep(10 * time.Millisecond)
+ select {
+ case c <- 1:
+ case <-c1:
+ }
+ }()
+ go func() {
+ close(c)
+ compl <- true
+ }()
+ <-compl
+ <-compl
+}
+
+func TestRaceSelectReadWriteAsync(t *testing.T) {
+ done := make(chan bool)
+ x := 0
+ c1 := make(chan int, 10)
+ c2 := make(chan int, 10)
+ c3 := make(chan int)
+ c2 <- 1
+ go func() {
+ select {
+ case c1 <- x: // read of x races with...
+ case c3 <- 1:
+ }
+ done <- true
+ }()
+ select {
+ case x = <-c2: // ... write to x here
+ case c3 <- 1:
+ }
+ <-done
+}
+
+func TestRaceSelectReadWriteSync(t *testing.T) {
+ done := make(chan bool)
+ x := 0
+ c1 := make(chan int)
+ c2 := make(chan int)
+ c3 := make(chan int)
+ // make c1 and c2 ready for communication
+ go func() {
+ <-c1
+ }()
+ go func() {
+ c2 <- 1
+ }()
+ go func() {
+ select {
+ case c1 <- x: // read of x races with...
+ case c3 <- 1:
+ }
+ done <- true
+ }()
+ select {
+ case x = <-c2: // ... write to x here
+ case c3 <- 1:
+ }
+ <-done
+}
+
+func TestNoRaceSelectReadWriteAsync(t *testing.T) {
+ done := make(chan bool)
+ x := 0
+ c1 := make(chan int)
+ c2 := make(chan int)
+ go func() {
+ select {
+ case c1 <- x: // read of x does not race with...
+ case c2 <- 1:
+ }
+ done <- true
+ }()
+ select {
+ case x = <-c1: // ... write to x here
+ case c2 <- 1:
+ }
+ <-done
+}
+
+func TestRaceChanReadWriteAsync(t *testing.T) {
+ done := make(chan bool)
+ c1 := make(chan int, 10)
+ c2 := make(chan int, 10)
+ c2 <- 10
+ x := 0
+ go func() {
+ c1 <- x // read of x races with...
+ done <- true
+ }()
+ x = <-c2 // ... write to x here
+ <-done
+}
+
+func TestRaceChanReadWriteSync(t *testing.T) {
+ done := make(chan bool)
+ c1 := make(chan int)
+ c2 := make(chan int)
+ // make c1 and c2 ready for communication
+ go func() {
+ <-c1
+ }()
+ go func() {
+ c2 <- 10
+ }()
+ x := 0
+ go func() {
+ c1 <- x // read of x races with...
+ done <- true
+ }()
+ x = <-c2 // ... write to x here
+ <-done
+}
+
+func TestNoRaceChanReadWriteAsync(t *testing.T) {
+ done := make(chan bool)
+ c1 := make(chan int, 10)
+ x := 0
+ go func() {
+ c1 <- x // read of x does not race with...
+ done <- true
+ }()
+ x = <-c1 // ... write to x here
+ <-done
+}
+
+func TestNoRaceProducerConsumerUnbuffered(t *testing.T) {
+ type Task struct {
+ f func()
+ done chan bool
+ }
+
+ queue := make(chan Task)
+
+ go func() {
+ t := <-queue
+ t.f()
+ t.done <- true
+ }()
+
+ doit := func(f func()) {
+ done := make(chan bool, 1)
+ queue <- Task{f, done}
+ <-done
+ }
+
+ x := 0
+ doit(func() {
+ x = 1
+ })
+ _ = x
+}
+
+func TestRaceChanItselfSend(t *testing.T) {
+ compl := make(chan bool, 1)
+ c := make(chan int, 10)
+ go func() {
+ c <- 0
+ compl <- true
+ }()
+ c = make(chan int, 20)
+ <-compl
+}
+
+func TestRaceChanItselfRecv(t *testing.T) {
+ compl := make(chan bool, 1)
+ c := make(chan int, 10)
+ c <- 1
+ go func() {
+ <-c
+ compl <- true
+ }()
+ time.Sleep(1e7)
+ c = make(chan int, 20)
+ <-compl
+}
+
+func TestRaceChanItselfNil(t *testing.T) {
+ c := make(chan int, 10)
+ go func() {
+ c <- 0
+ }()
+ time.Sleep(1e7)
+ c = nil
+ _ = c
+}
+
+func TestRaceChanItselfClose(t *testing.T) {
+ compl := make(chan bool, 1)
+ c := make(chan int)
+ go func() {
+ close(c)
+ compl <- true
+ }()
+ c = make(chan int)
+ <-compl
+}
+
+func TestRaceChanItselfLen(t *testing.T) {
+ compl := make(chan bool, 1)
+ c := make(chan int)
+ go func() {
+ _ = len(c)
+ compl <- true
+ }()
+ c = make(chan int)
+ <-compl
+}
+
+func TestRaceChanItselfCap(t *testing.T) {
+ compl := make(chan bool, 1)
+ c := make(chan int)
+ go func() {
+ _ = cap(c)
+ compl <- true
+ }()
+ c = make(chan int)
+ <-compl
+}
+
+func TestNoRaceChanCloseLen(t *testing.T) {
+ c := make(chan int, 10)
+ r := make(chan int, 10)
+ go func() {
+ r <- len(c)
+ }()
+ go func() {
+ close(c)
+ r <- 0
+ }()
+ <-r
+ <-r
+}
+
+func TestNoRaceChanCloseCap(t *testing.T) {
+ c := make(chan int, 10)
+ r := make(chan int, 10)
+ go func() {
+ r <- cap(c)
+ }()
+ go func() {
+ close(c)
+ r <- 0
+ }()
+ <-r
+ <-r
+}
+
+func TestRaceChanCloseSend(t *testing.T) {
+ compl := make(chan bool, 1)
+ c := make(chan int, 10)
+ go func() {
+ close(c)
+ compl <- true
+ }()
+ c <- 0
+ <-compl
+}
+
+func TestNoRaceChanMutex(t *testing.T) {
+ done := make(chan struct{})
+ mtx := make(chan struct{}, 1)
+ data := 0
+ _ = data
+ go func() {
+ mtx <- struct{}{}
+ data = 42
+ <-mtx
+ done <- struct{}{}
+ }()
+ mtx <- struct{}{}
+ data = 43
+ <-mtx
+ <-done
+}
+
+func TestNoRaceSelectMutex(t *testing.T) {
+ done := make(chan struct{})
+ mtx := make(chan struct{}, 1)
+ aux := make(chan bool)
+ data := 0
+ _ = data
+ go func() {
+ select {
+ case mtx <- struct{}{}:
+ case <-aux:
+ }
+ data = 42
+ select {
+ case <-mtx:
+ case <-aux:
+ }
+ done <- struct{}{}
+ }()
+ select {
+ case mtx <- struct{}{}:
+ case <-aux:
+ }
+ data = 43
+ select {
+ case <-mtx:
+ case <-aux:
+ }
+ <-done
+}
+
+func TestRaceChanSem(t *testing.T) {
+ done := make(chan struct{})
+ mtx := make(chan bool, 2)
+ data := 0
+ _ = data
+ go func() {
+ mtx <- true
+ data = 42
+ <-mtx
+ done <- struct{}{}
+ }()
+ mtx <- true
+ data = 43
+ <-mtx
+ <-done
+}
+
+func TestNoRaceChanWaitGroup(t *testing.T) {
+ const N = 10
+ chanWg := make(chan bool, N/2)
+ data := make([]int, N)
+ for i := 0; i < N; i++ {
+ chanWg <- true
+ go func(i int) {
+ data[i] = 42
+ <-chanWg
+ }(i)
+ }
+ for i := 0; i < cap(chanWg); i++ {
+ chanWg <- true
+ }
+ for i := 0; i < N; i++ {
+ _ = data[i]
+ }
+}
+
+// Test that sender synchronizes with receiver even if the sender was blocked.
+func TestNoRaceBlockedSendSync(t *testing.T) {
+ c := make(chan *int, 1)
+ c <- nil
+ go func() {
+ i := 42
+ c <- &i
+ }()
+ // Give the sender time to actually block.
+	// This sleep is completely optional: a race report must not be printed
+	// regardless of whether the sender actually blocks or not,
+	// so it cannot lead to flakiness.
+ time.Sleep(10 * time.Millisecond)
+ <-c
+ p := <-c
+ if *p != 42 {
+ t.Fatal()
+ }
+}
+
+// The same as TestNoRaceBlockedSendSync above, but sender unblock happens in a select.
+func TestNoRaceBlockedSelectSendSync(t *testing.T) {
+ c := make(chan *int, 1)
+ c <- nil
+ go func() {
+ i := 42
+ c <- &i
+ }()
+ time.Sleep(10 * time.Millisecond)
+ <-c
+ select {
+ case p := <-c:
+ if *p != 42 {
+ t.Fatal()
+ }
+ case <-make(chan int):
+ }
+}
+
+// Test that close synchronizes with a read from the empty closed channel.
+// See https://golang.org/issue/36714.
+func TestNoRaceCloseHappensBeforeRead(t *testing.T) {
+ for i := 0; i < 100; i++ {
+ var loc int
+ var write = make(chan struct{})
+ var read = make(chan struct{})
+
+ go func() {
+ select {
+ case <-write:
+ _ = loc
+ default:
+ }
+ close(read)
+ }()
+
+ go func() {
+ loc = 1
+ close(write)
+ }()
+
+ <-read
+ }
+}
+
+// Test that we call the proper race detector function when c.elemsize==0.
+// See https://github.com/golang/go/issues/42598
+func TestNoRaceElementSize0(t *testing.T) {
+ var x, y int
+ var c = make(chan struct{}, 2)
+ c <- struct{}{}
+ c <- struct{}{}
+ go func() {
+ x += 1
+ <-c
+ }()
+ go func() {
+ y += 1
+ <-c
+ }()
+ time.Sleep(10 * time.Millisecond)
+ c <- struct{}{}
+ c <- struct{}{}
+ x += 1
+ y += 1
+}
diff --git a/src/runtime/race/testdata/comp_test.go b/src/runtime/race/testdata/comp_test.go
new file mode 100644
index 0000000..27b2d00
--- /dev/null
+++ b/src/runtime/race/testdata/comp_test.go
@@ -0,0 +1,186 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "testing"
+)
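+
+// These tests check instrumentation of accesses to fields of composite
+// (struct) values, including values reached through conversions between
+// types that share an underlying layout.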
+
+type P struct {
+ x, y int
+}
+
+type S struct {
+ s1, s2 P
+}
+
+func TestNoRaceComp(t *testing.T) {
+ c := make(chan bool, 1)
+ var s S
+ go func() {
+ s.s2.x = 1
+ c <- true
+ }()
+ s.s2.y = 2
+ <-c
+}
+
+func TestNoRaceComp2(t *testing.T) {
+ c := make(chan bool, 1)
+ var s S
+ go func() {
+ s.s1.x = 1
+ c <- true
+ }()
+ s.s1.y = 2
+ <-c
+}
+
+func TestRaceComp(t *testing.T) {
+ c := make(chan bool, 1)
+ var s S
+ go func() {
+ s.s2.y = 1
+ c <- true
+ }()
+ s.s2.y = 2
+ <-c
+}
+
+func TestRaceComp2(t *testing.T) {
+ c := make(chan bool, 1)
+ var s S
+ go func() {
+ s.s1.x = 1
+ c <- true
+ }()
+ s = S{}
+ <-c
+}
+
+func TestRaceComp3(t *testing.T) {
+ c := make(chan bool, 1)
+ var s S
+ go func() {
+ s.s2.y = 1
+ c <- true
+ }()
+ s = S{}
+ <-c
+}
+
+func TestRaceCompArray(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]S, 10)
+ x := 4
+ go func() {
+ s[x].s2.y = 1
+ c <- true
+ }()
+ x = 5
+ <-c
+}
+
+type P2 P
+type S2 S
+
+func TestRaceConv1(t *testing.T) {
+ c := make(chan bool, 1)
+ var p P2
+ go func() {
+ p.x = 1
+ c <- true
+ }()
+ _ = P(p).x
+ <-c
+}
+
+func TestRaceConv2(t *testing.T) {
+ c := make(chan bool, 1)
+ var p P2
+ go func() {
+ p.x = 1
+ c <- true
+ }()
+ ptr := &p
+ _ = P(*ptr).x
+ <-c
+}
+
+func TestRaceConv3(t *testing.T) {
+ c := make(chan bool, 1)
+ var s S2
+ go func() {
+ s.s1.x = 1
+ c <- true
+ }()
+ _ = P2(S(s).s1).x
+ <-c
+}
+
+type X struct {
+ V [4]P
+}
+
+type X2 X
+
+func TestRaceConv4(t *testing.T) {
+ c := make(chan bool, 1)
+ var x X2
+ go func() {
+ x.V[1].x = 1
+ c <- true
+ }()
+ _ = P2(X(x).V[1]).x
+ <-c
+}
+
+type Ptr struct {
+ s1, s2 *P
+}
+
+func TestNoRaceCompPtr(t *testing.T) {
+ c := make(chan bool, 1)
+ p := Ptr{&P{}, &P{}}
+ go func() {
+ p.s1.x = 1
+ c <- true
+ }()
+ p.s1.y = 2
+ <-c
+}
+
+func TestNoRaceCompPtr2(t *testing.T) {
+ c := make(chan bool, 1)
+ p := Ptr{&P{}, &P{}}
+ go func() {
+ p.s1.x = 1
+ c <- true
+ }()
+ _ = p
+ <-c
+}
+
+func TestRaceCompPtr(t *testing.T) {
+ c := make(chan bool, 1)
+ p := Ptr{&P{}, &P{}}
+ go func() {
+ p.s2.x = 1
+ c <- true
+ }()
+ p.s2.x = 2
+ <-c
+}
+
+func TestRaceCompPtr2(t *testing.T) {
+ c := make(chan bool, 1)
+ p := Ptr{&P{}, &P{}}
+ go func() {
+ p.s2.x = 1
+ c <- true
+ }()
+ p.s2 = &P{}
+ <-c
+}
diff --git a/src/runtime/race/testdata/finalizer_test.go b/src/runtime/race/testdata/finalizer_test.go
new file mode 100644
index 0000000..3ac33d2
--- /dev/null
+++ b/src/runtime/race/testdata/finalizer_test.go
@@ -0,0 +1,68 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "runtime"
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestNoRaceFin(t *testing.T) {
+ c := make(chan bool)
+ go func() {
+ x := new(string)
+ runtime.SetFinalizer(x, func(x *string) {
+ *x = "foo"
+ })
+ *x = "bar"
+ c <- true
+ }()
+ <-c
+ runtime.GC()
+ time.Sleep(100 * time.Millisecond)
+}
+
+var finVar struct {
+ sync.Mutex
+ cnt int
+}
+
+func TestNoRaceFinGlobal(t *testing.T) {
+ c := make(chan bool)
+ go func() {
+ x := new(string)
+ runtime.SetFinalizer(x, func(x *string) {
+ finVar.Lock()
+ finVar.cnt++
+ finVar.Unlock()
+ })
+ c <- true
+ }()
+ <-c
+ runtime.GC()
+ time.Sleep(100 * time.Millisecond)
+ finVar.Lock()
+ finVar.cnt++
+ finVar.Unlock()
+}
+
+func TestRaceFin(t *testing.T) {
+ c := make(chan bool)
+ y := 0
+ _ = y
+ go func() {
+ x := new(string)
+ runtime.SetFinalizer(x, func(x *string) {
+ y = 42
+ })
+ c <- true
+ }()
+ <-c
+ runtime.GC()
+ time.Sleep(100 * time.Millisecond)
+ y = 66
+}
diff --git a/src/runtime/race/testdata/io_test.go b/src/runtime/race/testdata/io_test.go
new file mode 100644
index 0000000..3303cb0
--- /dev/null
+++ b/src/runtime/race/testdata/io_test.go
@@ -0,0 +1,75 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestNoRaceIOFile(t *testing.T) {
+ x := 0
+ path := t.TempDir()
+ fname := filepath.Join(path, "data")
+ go func() {
+ x = 42
+ f, _ := os.Create(fname)
+ f.Write([]byte("done"))
+ f.Close()
+ }()
+ for {
+ f, err := os.Open(fname)
+ if err != nil {
+ time.Sleep(1e6)
+ continue
+ }
+ buf := make([]byte, 100)
+ count, err := f.Read(buf)
+ if count == 0 {
+ time.Sleep(1e6)
+ continue
+ }
+ break
+ }
+ _ = x
+}
+
+var (
+ regHandler sync.Once
+ handlerData int
+)
+
+func TestNoRaceIOHttp(t *testing.T) {
+ regHandler.Do(func() {
+ http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ handlerData++
+ fmt.Fprintf(w, "test")
+ handlerData++
+ })
+ })
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("net.Listen: %v", err)
+ }
+ defer ln.Close()
+ go http.Serve(ln, nil)
+ handlerData++
+ _, err = http.Get("http://" + ln.Addr().String())
+ if err != nil {
+ t.Fatalf("http.Get: %v", err)
+ }
+ handlerData++
+ _, err = http.Get("http://" + ln.Addr().String())
+ if err != nil {
+ t.Fatalf("http.Get: %v", err)
+ }
+ handlerData++
+}
diff --git a/src/runtime/race/testdata/issue12225_test.go b/src/runtime/race/testdata/issue12225_test.go
new file mode 100644
index 0000000..0494493
--- /dev/null
+++ b/src/runtime/race/testdata/issue12225_test.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import "unsafe"
+
+// golang.org/issue/12225
+// The test is that this compiles at all.
+
+//go:noinline
+func convert(s string) []byte {
+ return []byte(s)
+}
+
+func issue12225() {
+ println(*(*int)(unsafe.Pointer(&convert("")[0])))
+ println(*(*int)(unsafe.Pointer(&[]byte("")[0])))
+}
diff --git a/src/runtime/race/testdata/issue12664_test.go b/src/runtime/race/testdata/issue12664_test.go
new file mode 100644
index 0000000..714e83d
--- /dev/null
+++ b/src/runtime/race/testdata/issue12664_test.go
@@ -0,0 +1,76 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "fmt"
+ "testing"
+)
+
+var issue12664 = "hi"
+
+func TestRaceIssue12664(t *testing.T) {
+ c := make(chan struct{})
+ go func() {
+ issue12664 = "bye"
+ close(c)
+ }()
+ fmt.Println(issue12664)
+ <-c
+}
+
+type MyI interface {
+ foo()
+}
+
+type MyT int
+
+func (MyT) foo() {
+}
+
+var issue12664_2 MyT = 0
+
+func TestRaceIssue12664_2(t *testing.T) {
+ c := make(chan struct{})
+ go func() {
+ issue12664_2 = 1
+ close(c)
+ }()
+ func(x MyI) {
+ // Never true, but prevents inlining.
+ if x.(MyT) == -1 {
+ close(c)
+ }
+ }(issue12664_2)
+ <-c
+}
+
+var issue12664_3 MyT = 0
+
+func TestRaceIssue12664_3(t *testing.T) {
+ c := make(chan struct{})
+ go func() {
+ issue12664_3 = 1
+ close(c)
+ }()
+ var r MyT
+ var i any = r
+ issue12664_3 = i.(MyT)
+ <-c
+}
+
+var issue12664_4 MyT = 0
+
+func TestRaceIssue12664_4(t *testing.T) {
+ c := make(chan struct{})
+ go func() {
+ issue12664_4 = 1
+ close(c)
+ }()
+ var r MyT
+ var i MyI = r
+ issue12664_4 = i.(MyT)
+ <-c
+}
diff --git a/src/runtime/race/testdata/issue13264_test.go b/src/runtime/race/testdata/issue13264_test.go
new file mode 100644
index 0000000..d42290d
--- /dev/null
+++ b/src/runtime/race/testdata/issue13264_test.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+// golang.org/issue/13264
+// The test is that this compiles at all.
+
+func issue13264() {
+ for ; ; []map[int]int{}[0][0] = 0 {
+ }
+}
diff --git a/src/runtime/race/testdata/map_test.go b/src/runtime/race/testdata/map_test.go
new file mode 100644
index 0000000..88e735e
--- /dev/null
+++ b/src/runtime/race/testdata/map_test.go
@@ -0,0 +1,335 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "testing"
+)
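+
+// These tests check that map operations (lookup, assign, delete, len,
+// range) are instrumented, including operations that read only part of
+// a large key or value.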
+
+func TestRaceMapRW(t *testing.T) {
+ m := make(map[int]int)
+ ch := make(chan bool, 1)
+ go func() {
+ _ = m[1]
+ ch <- true
+ }()
+ m[1] = 1
+ <-ch
+}
+
+func TestRaceMapRW2(t *testing.T) {
+ m := make(map[int]int)
+ ch := make(chan bool, 1)
+ go func() {
+ _, _ = m[1]
+ ch <- true
+ }()
+ m[1] = 1
+ <-ch
+}
+
+func TestRaceMapRWArray(t *testing.T) {
+ // Check instrumentation of unaddressable arrays (issue 4578).
+ m := make(map[int][2]int)
+ ch := make(chan bool, 1)
+ go func() {
+ _ = m[1][1]
+ ch <- true
+ }()
+ m[2] = [2]int{1, 2}
+ <-ch
+}
+
+func TestNoRaceMapRR(t *testing.T) {
+ m := make(map[int]int)
+ ch := make(chan bool, 1)
+ go func() {
+ _, _ = m[1]
+ ch <- true
+ }()
+ _ = m[1]
+ <-ch
+}
+
+func TestRaceMapRange(t *testing.T) {
+ m := make(map[int]int)
+ ch := make(chan bool, 1)
+ go func() {
+ for range m {
+ }
+ ch <- true
+ }()
+ m[1] = 1
+ <-ch
+}
+
+func TestRaceMapRange2(t *testing.T) {
+ m := make(map[int]int)
+ ch := make(chan bool, 1)
+ go func() {
+ for range m {
+ }
+ ch <- true
+ }()
+ m[1] = 1
+ <-ch
+}
+
+func TestNoRaceMapRangeRange(t *testing.T) {
+ m := make(map[int]int)
+	// Now the map is not empty, so the range actually triggers a read.
+	// The test should work even without this (as in the other tests),
+	// so it is suspicious if this test passes while the others fail.
+ m[0] = 0
+ ch := make(chan bool, 1)
+ go func() {
+ for range m {
+ }
+ ch <- true
+ }()
+ for range m {
+ }
+ <-ch
+}
+
+func TestRaceMapLen(t *testing.T) {
+ m := make(map[string]bool)
+ ch := make(chan bool, 1)
+ go func() {
+ _ = len(m)
+ ch <- true
+ }()
+ m[""] = true
+ <-ch
+}
+
+func TestRaceMapDelete(t *testing.T) {
+ m := make(map[string]bool)
+ ch := make(chan bool, 1)
+ go func() {
+ delete(m, "")
+ ch <- true
+ }()
+ m[""] = true
+ <-ch
+}
+
+func TestRaceMapLenDelete(t *testing.T) {
+ m := make(map[string]bool)
+ ch := make(chan bool, 1)
+ go func() {
+ delete(m, "a")
+ ch <- true
+ }()
+ _ = len(m)
+ <-ch
+}
+
+func TestRaceMapVariable(t *testing.T) {
+ ch := make(chan bool, 1)
+ m := make(map[int]int)
+ _ = m
+ go func() {
+ m = make(map[int]int)
+ ch <- true
+ }()
+ m = make(map[int]int)
+ <-ch
+}
+
+func TestRaceMapVariable2(t *testing.T) {
+ ch := make(chan bool, 1)
+ m := make(map[int]int)
+ go func() {
+ m[1] = 1
+ ch <- true
+ }()
+ m = make(map[int]int)
+ <-ch
+}
+
+func TestRaceMapVariable3(t *testing.T) {
+ ch := make(chan bool, 1)
+ m := make(map[int]int)
+ go func() {
+ _ = m[1]
+ ch <- true
+ }()
+ m = make(map[int]int)
+ <-ch
+}
+
+type Big struct {
+ x [17]int32
+}
+
+func TestRaceMapLookupPartKey(t *testing.T) {
+ k := &Big{}
+ m := make(map[Big]bool)
+ ch := make(chan bool, 1)
+ go func() {
+ k.x[8] = 1
+ ch <- true
+ }()
+ _ = m[*k]
+ <-ch
+}
+
+func TestRaceMapLookupPartKey2(t *testing.T) {
+ k := &Big{}
+ m := make(map[Big]bool)
+ ch := make(chan bool, 1)
+ go func() {
+ k.x[8] = 1
+ ch <- true
+ }()
+ _, _ = m[*k]
+ <-ch
+}
+
+func TestRaceMapDeletePartKey(t *testing.T) {
+ k := &Big{}
+ m := make(map[Big]bool)
+ ch := make(chan bool, 1)
+ go func() {
+ k.x[8] = 1
+ ch <- true
+ }()
+ delete(m, *k)
+ <-ch
+}
+
+func TestRaceMapInsertPartKey(t *testing.T) {
+ k := &Big{}
+ m := make(map[Big]bool)
+ ch := make(chan bool, 1)
+ go func() {
+ k.x[8] = 1
+ ch <- true
+ }()
+ m[*k] = true
+ <-ch
+}
+
+func TestRaceMapInsertPartVal(t *testing.T) {
+ v := &Big{}
+ m := make(map[int]Big)
+ ch := make(chan bool, 1)
+ go func() {
+ v.x[8] = 1
+ ch <- true
+ }()
+ m[1] = *v
+ <-ch
+}
+
+// Test for issue 7561.
+func TestRaceMapAssignMultipleReturn(t *testing.T) {
+ connect := func() (int, error) { return 42, nil }
+ conns := make(map[int][]int)
+ conns[1] = []int{0}
+ ch := make(chan bool, 1)
+ var err error
+ _ = err
+ go func() {
+ conns[1][0], err = connect()
+ ch <- true
+ }()
+ x := conns[1][0]
+ _ = x
+ <-ch
+}
+
+// BigKey and BigVal must be larger than 256 bytes,
+// so that the compiler sets KindGCProg for them.
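+// (KindGCProg marks types whose GC metadata is encoded as a GC program
+// rather than a plain pointer bitmap, which exercises different
+// copying paths in the runtime.)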
+type BigKey [1000]*int
+
+type BigVal struct {
+ x int
+ y [1000]*int
+}
+
+func TestRaceMapBigKeyAccess1(t *testing.T) {
+ m := make(map[BigKey]int)
+ var k BigKey
+ ch := make(chan bool, 1)
+ go func() {
+ _ = m[k]
+ ch <- true
+ }()
+ k[30] = new(int)
+ <-ch
+}
+
+func TestRaceMapBigKeyAccess2(t *testing.T) {
+ m := make(map[BigKey]int)
+ var k BigKey
+ ch := make(chan bool, 1)
+ go func() {
+ _, _ = m[k]
+ ch <- true
+ }()
+ k[30] = new(int)
+ <-ch
+}
+
+func TestRaceMapBigKeyInsert(t *testing.T) {
+ m := make(map[BigKey]int)
+ var k BigKey
+ ch := make(chan bool, 1)
+ go func() {
+ m[k] = 1
+ ch <- true
+ }()
+ k[30] = new(int)
+ <-ch
+}
+
+func TestRaceMapBigKeyDelete(t *testing.T) {
+ m := make(map[BigKey]int)
+ var k BigKey
+ ch := make(chan bool, 1)
+ go func() {
+ delete(m, k)
+ ch <- true
+ }()
+ k[30] = new(int)
+ <-ch
+}
+
+func TestRaceMapBigValInsert(t *testing.T) {
+ m := make(map[int]BigVal)
+ var v BigVal
+ ch := make(chan bool, 1)
+ go func() {
+ m[1] = v
+ ch <- true
+ }()
+ v.y[30] = new(int)
+ <-ch
+}
+
+func TestRaceMapBigValAccess1(t *testing.T) {
+ m := make(map[int]BigVal)
+ var v BigVal
+ ch := make(chan bool, 1)
+ go func() {
+ v = m[1]
+ ch <- true
+ }()
+ v.y[30] = new(int)
+ <-ch
+}
+
+func TestRaceMapBigValAccess2(t *testing.T) {
+ m := make(map[int]BigVal)
+ var v BigVal
+ ch := make(chan bool, 1)
+ go func() {
+ v, _ = m[1]
+ ch <- true
+ }()
+ v.y[30] = new(int)
+ <-ch
+}
diff --git a/src/runtime/race/testdata/mop_test.go b/src/runtime/race/testdata/mop_test.go
new file mode 100644
index 0000000..4a9ce26
--- /dev/null
+++ b/src/runtime/race/testdata/mop_test.go
@@ -0,0 +1,2131 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "runtime"
+ "sync"
+ "testing"
+ "time"
+ "unsafe"
+)
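+
+// This file tests instrumentation of plain memory operations: reads and
+// writes of variables of many types in a wide range of syntactic
+// contexts (operators, switches, loops, composite literals, and so on).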
+
+type Point struct {
+ x, y int
+}
+
+type NamedPoint struct {
+ name string
+ p Point
+}
+
+type DummyWriter struct {
+ state int
+}
+type Writer interface {
+ Write(p []byte) (n int)
+}
+
+func (d DummyWriter) Write(p []byte) (n int) {
+ return 0
+}
+
+var GlobalX, GlobalY int = 0, 0
+var GlobalCh chan int = make(chan int, 2)
+
+func GlobalFunc1() {
+ GlobalY = GlobalX
+ GlobalCh <- 1
+}
+
+func GlobalFunc2() {
+ GlobalX = 1
+ GlobalCh <- 1
+}
+
+func TestRaceIntRWGlobalFuncs(t *testing.T) {
+ go GlobalFunc1()
+ go GlobalFunc2()
+ <-GlobalCh
+ <-GlobalCh
+}
+
+func TestRaceIntRWClosures(t *testing.T) {
+ var x, y int
+ _ = y
+ ch := make(chan int, 2)
+
+ go func() {
+ y = x
+ ch <- 1
+ }()
+ go func() {
+ x = 1
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestNoRaceIntRWClosures(t *testing.T) {
+ var x, y int
+ _ = y
+ ch := make(chan int, 1)
+
+ go func() {
+ y = x
+ ch <- 1
+ }()
+ <-ch
+ go func() {
+ x = 1
+ ch <- 1
+ }()
+ <-ch
+
+}
+
+func TestRaceInt32RWClosures(t *testing.T) {
+ var x, y int32
+ _ = y
+ ch := make(chan bool, 2)
+
+ go func() {
+ y = x
+ ch <- true
+ }()
+ go func() {
+ x = 1
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
+func TestNoRaceCase(t *testing.T) {
+ var y int
+ for x := -1; x <= 1; x++ {
+ switch {
+ case x < 0:
+ y = -1
+ case x == 0:
+ y = 0
+ case x > 0:
+ y = 1
+ }
+ }
+ y++
+}
+
+func TestRaceCaseCondition(t *testing.T) {
+ var x int = 0
+ ch := make(chan int, 2)
+
+ go func() {
+ x = 2
+ ch <- 1
+ }()
+ go func() {
+ switch x < 2 {
+ case true:
+ x = 1
+ //case false:
+ // x = 5
+ }
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceCaseCondition2(t *testing.T) {
+	// The switch body is rearranged by the compiler, so the test
+	// passes even if we don't instrument '<'.
+ var x int = 0
+ ch := make(chan int, 2)
+
+ go func() {
+ x = 2
+ ch <- 1
+ }()
+ go func() {
+ switch x < 2 {
+ case true:
+ x = 1
+ case false:
+ x = 5
+ }
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceCaseBody(t *testing.T) {
+ var x, y int
+ _ = y
+ ch := make(chan int, 2)
+
+ go func() {
+ y = x
+ ch <- 1
+ }()
+ go func() {
+ switch {
+ default:
+ x = 1
+ case x == 100:
+ x = -x
+ }
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestNoRaceCaseFallthrough(t *testing.T) {
+ var x, y, z int
+ _ = y
+ ch := make(chan int, 2)
+ z = 1
+
+ go func() {
+ y = x
+ ch <- 1
+ }()
+ go func() {
+ switch {
+ case z == 1:
+ case z == 2:
+ x = 2
+ }
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceCaseFallthrough(t *testing.T) {
+ var x, y, z int
+ _ = y
+ ch := make(chan int, 2)
+ z = 1
+
+ go func() {
+ y = x
+ ch <- 1
+ }()
+ go func() {
+ switch {
+ case z == 1:
+ fallthrough
+ case z == 2:
+ x = 2
+ }
+ ch <- 1
+ }()
+
+ <-ch
+ <-ch
+}
+
+func TestRaceCaseIssue6418(t *testing.T) {
+ m := map[string]map[string]string{
+ "a": {
+ "b": "c",
+ },
+ }
+ ch := make(chan int)
+ go func() {
+ m["a"]["x"] = "y"
+ ch <- 1
+ }()
+ switch m["a"]["b"] {
+ }
+ <-ch
+}
+
+func TestRaceCaseType(t *testing.T) {
+ var x, y int
+ var i any = x
+ c := make(chan int, 1)
+ go func() {
+ switch i.(type) {
+ case nil:
+ case int:
+ }
+ c <- 1
+ }()
+ i = y
+ <-c
+}
+
+func TestRaceCaseTypeBody(t *testing.T) {
+ var x, y int
+ var i any = &x
+ c := make(chan int, 1)
+ go func() {
+ switch i := i.(type) {
+ case nil:
+ case *int:
+ *i = y
+ }
+ c <- 1
+ }()
+ x = y
+ <-c
+}
+
+func TestRaceCaseTypeIssue5890(t *testing.T) {
+	// Issue 5890: spurious extra instrumentation of the initial
+	// interface value.
+ var x, y int
+ m := make(map[int]map[int]any)
+ m[0] = make(map[int]any)
+ c := make(chan int, 1)
+ go func() {
+ switch i := m[0][1].(type) {
+ case nil:
+ case *int:
+ *i = x
+ }
+ c <- 1
+ }()
+ m[0][1] = y
+ <-c
+}
+
+func TestNoRaceRange(t *testing.T) {
+ ch := make(chan int, 3)
+ a := [...]int{1, 2, 3}
+ for _, v := range a {
+ ch <- v
+ }
+ close(ch)
+}
+
+func TestNoRaceRangeIssue5446(t *testing.T) {
+ ch := make(chan int, 3)
+ a := []int{1, 2, 3}
+ b := []int{4}
+	// The compiler used to insert spurious instrumentation of a[i]
+	// and crash.
+ i := 1
+ for i, a[i] = range b {
+ ch <- i
+ }
+ close(ch)
+}
+
+func TestRaceRange(t *testing.T) {
+ const N = 2
+ var a [N]int
+ var x, y int
+ _ = x + y
+ done := make(chan bool, N)
+ for i, v := range a {
+ go func(i int) {
+ // we don't want a write-vs-write race
+ // so there is no array b here
+ if i == 0 {
+ x = v
+ } else {
+ y = v
+ }
+ done <- true
+ }(i)
+ // Ensure the goroutine runs before we continue the loop.
+ runtime.Gosched()
+ }
+ for i := 0; i < N; i++ {
+ <-done
+ }
+}
+
+func TestRaceForInit(t *testing.T) {
+ c := make(chan int)
+ x := 0
+ go func() {
+ c <- x
+ }()
+ for x = 42; false; {
+ }
+ <-c
+}
+
+func TestNoRaceForInit(t *testing.T) {
+ done := make(chan bool)
+ c := make(chan bool)
+ x := 0
+ go func() {
+ for {
+ _, ok := <-c
+ if !ok {
+ done <- true
+ return
+ }
+ x++
+ }
+ }()
+ i := 0
+ for x = 42; i < 10; i++ {
+ c <- true
+ }
+ close(c)
+ <-done
+}
+
+func TestRaceForTest(t *testing.T) {
+ done := make(chan bool)
+ c := make(chan bool)
+ stop := false
+ go func() {
+ for {
+ _, ok := <-c
+ if !ok {
+ done <- true
+ return
+ }
+ stop = true
+ }
+ }()
+ for !stop {
+ c <- true
+ }
+ close(c)
+ <-done
+}
+
+func TestRaceForIncr(t *testing.T) {
+ done := make(chan bool)
+ c := make(chan bool)
+ x := 0
+ go func() {
+ for {
+ _, ok := <-c
+ if !ok {
+ done <- true
+ return
+ }
+ x++
+ }
+ }()
+ for i := 0; i < 10; x++ {
+ i++
+ c <- true
+ }
+ close(c)
+ <-done
+}
+
+func TestNoRaceForIncr(t *testing.T) {
+ done := make(chan bool)
+ x := 0
+ go func() {
+ x++
+ done <- true
+ }()
+ for i := 0; i < 0; x++ {
+ }
+ <-done
+}
+
+func TestRacePlus(t *testing.T) {
+ var x, y, z int
+ _ = y
+ ch := make(chan int, 2)
+
+ go func() {
+ y = x + z
+ ch <- 1
+ }()
+ go func() {
+ y = x + z + z
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRacePlus2(t *testing.T) {
+ var x, y, z int
+ _ = y
+ ch := make(chan int, 2)
+
+ go func() {
+ x = 1
+ ch <- 1
+ }()
+ go func() {
+ y = +x + z
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestNoRacePlus(t *testing.T) {
+ var x, y, z, f int
+ _ = x + y + f
+ ch := make(chan int, 2)
+
+ go func() {
+ y = x + z
+ ch <- 1
+ }()
+ go func() {
+ f = z + x
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceComplement(t *testing.T) {
+ var x, y, z int
+ _ = x
+ ch := make(chan int, 2)
+
+ go func() {
+ x = ^y
+ ch <- 1
+ }()
+ go func() {
+ y = ^z
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceDiv(t *testing.T) {
+ var x, y, z int
+ _ = x
+ ch := make(chan int, 2)
+
+ go func() {
+ x = y / (z + 1)
+ ch <- 1
+ }()
+ go func() {
+ y = z
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceDivConst(t *testing.T) {
+ var x, y, z uint32
+ _ = x
+ ch := make(chan int, 2)
+
+ go func() {
+ x = y / 3 // involves only a HMUL node
+ ch <- 1
+ }()
+ go func() {
+ y = z
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceMod(t *testing.T) {
+ var x, y, z int
+ _ = x
+ ch := make(chan int, 2)
+
+ go func() {
+ x = y % (z + 1)
+ ch <- 1
+ }()
+ go func() {
+ y = z
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceModConst(t *testing.T) {
+ var x, y, z int
+ _ = x
+ ch := make(chan int, 2)
+
+ go func() {
+ x = y % 3
+ ch <- 1
+ }()
+ go func() {
+ y = z
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceRotate(t *testing.T) {
+ var x, y, z uint32
+ _ = x
+ ch := make(chan int, 2)
+
+ go func() {
+ x = y<<12 | y>>20
+ ch <- 1
+ }()
+ go func() {
+ y = z
+ ch <- 1
+ }()
+ <-ch
+ <-ch
+}
+
+// May crash if the instrumentation is reckless.
+func TestNoRaceEnoughRegisters(t *testing.T) {
+ // from erf.go
+ const (
+ sa1 = 1
+ sa2 = 2
+ sa3 = 3
+ sa4 = 4
+ sa5 = 5
+ sa6 = 6
+ sa7 = 7
+ sa8 = 8
+ )
+ var s, S float64
+ s = 3.1415
+ S = 1 + s*(sa1+s*(sa2+s*(sa3+s*(sa4+s*(sa5+s*(sa6+s*(sa7+s*sa8)))))))
+ s = S
+}
+
+// emptyFunc should not be inlined.
+func emptyFunc(x int) {
+ if false {
+ fmt.Println(x)
+ }
+}
+
+func TestRaceFuncArgument(t *testing.T) {
+ var x int
+ ch := make(chan bool, 1)
+ go func() {
+ emptyFunc(x)
+ ch <- true
+ }()
+ x = 1
+ <-ch
+}
+
+func TestRaceFuncArgument2(t *testing.T) {
+ var x int
+ ch := make(chan bool, 2)
+ go func() {
+ x = 42
+ ch <- true
+ }()
+ go func(y int) {
+ ch <- true
+ }(x)
+ <-ch
+ <-ch
+}
+
+func TestRaceSprint(t *testing.T) {
+ var x int
+ ch := make(chan bool, 1)
+ go func() {
+ fmt.Sprint(x)
+ ch <- true
+ }()
+ x = 1
+ <-ch
+}
+
+func TestRaceArrayCopy(t *testing.T) {
+ ch := make(chan bool, 1)
+ var a [5]int
+ go func() {
+ a[3] = 1
+ ch <- true
+ }()
+ a = [5]int{1, 2, 3, 4, 5}
+ <-ch
+}
+
+// Blows up a naive compiler.
+func TestRaceNestedArrayCopy(t *testing.T) {
+ ch := make(chan bool, 1)
+ type (
+ Point32 [2][2][2][2][2]Point
+ Point1024 [2][2][2][2][2]Point32
+ Point32k [2][2][2][2][2]Point1024
+ Point1M [2][2][2][2][2]Point32k
+ )
+ var a, b Point1M
+ go func() {
+ a[0][1][0][1][0][1][0][1][0][1][0][1][0][1][0][1][0][1][0][1].y = 1
+ ch <- true
+ }()
+ a = b
+ <-ch
+}
+
+func TestRaceStructRW(t *testing.T) {
+ p := Point{0, 0}
+ ch := make(chan bool, 1)
+ go func() {
+ p = Point{1, 1}
+ ch <- true
+ }()
+ q := p
+ <-ch
+ p = q
+}
+
+func TestRaceStructFieldRW1(t *testing.T) {
+ p := Point{0, 0}
+ ch := make(chan bool, 1)
+ go func() {
+ p.x = 1
+ ch <- true
+ }()
+ _ = p.x
+ <-ch
+}
+
+func TestNoRaceStructFieldRW1(t *testing.T) {
+	// Same struct, different fields, no pointers.
+	// The layout is known at compile time, so there is
+	// no read on p, only writes on x and y.
+ p := Point{0, 0}
+ ch := make(chan bool, 1)
+ go func() {
+ p.x = 1
+ ch <- true
+ }()
+ p.y = 1
+ <-ch
+ _ = p
+}
+
+func TestNoRaceStructFieldRW2(t *testing.T) {
+ // Same as NoRaceStructFieldRW1
+ // but p is a pointer, so there is a read on p
+ p := Point{0, 0}
+ ch := make(chan bool, 1)
+ go func() {
+ p.x = 1
+ ch <- true
+ }()
+ p.y = 1
+ <-ch
+ _ = p
+}
+
+func TestRaceStructFieldRW2(t *testing.T) {
+ p := &Point{0, 0}
+ ch := make(chan bool, 1)
+ go func() {
+ p.x = 1
+ ch <- true
+ }()
+ _ = p.x
+ <-ch
+}
+
+func TestRaceStructFieldRW3(t *testing.T) {
+ p := NamedPoint{name: "a", p: Point{0, 0}}
+ ch := make(chan bool, 1)
+ go func() {
+ p.p.x = 1
+ ch <- true
+ }()
+ _ = p.p.x
+ <-ch
+}
+
+func TestRaceEfaceWW(t *testing.T) {
+ var a, b any
+ ch := make(chan bool, 1)
+ go func() {
+ a = 1
+ ch <- true
+ }()
+ a = 2
+ <-ch
+ _, _ = a, b
+}
+
+func TestRaceIfaceWW(t *testing.T) {
+ var a, b Writer
+ ch := make(chan bool, 1)
+ go func() {
+ a = DummyWriter{1}
+ ch <- true
+ }()
+ a = DummyWriter{2}
+ <-ch
+ b = a
+ a = b
+}
+
+func TestRaceIfaceCmp(t *testing.T) {
+ var a, b Writer
+ a = DummyWriter{1}
+ ch := make(chan bool, 1)
+ go func() {
+ a = DummyWriter{1}
+ ch <- true
+ }()
+ _ = a == b
+ <-ch
+}
+
+func TestRaceIfaceCmpNil(t *testing.T) {
+ var a Writer
+ a = DummyWriter{1}
+ ch := make(chan bool, 1)
+ go func() {
+ a = DummyWriter{1}
+ ch <- true
+ }()
+ _ = a == nil
+ <-ch
+}
+
+func TestRaceEfaceConv(t *testing.T) {
+ c := make(chan bool)
+ v := 0
+ go func() {
+ go func(x any) {
+ }(v)
+ c <- true
+ }()
+ v = 42
+ <-c
+}
+
+type OsFile struct{}
+
+func (*OsFile) Read() {
+}
+
+type IoReader interface {
+ Read()
+}
+
+func TestRaceIfaceConv(t *testing.T) {
+ c := make(chan bool)
+ f := &OsFile{}
+ go func() {
+ go func(x IoReader) {
+ }(f)
+ c <- true
+ }()
+ f = &OsFile{}
+ <-c
+}
+
+func TestRaceError(t *testing.T) {
+ ch := make(chan bool, 1)
+ var err error
+ go func() {
+ err = nil
+ ch <- true
+ }()
+ _ = err
+ <-ch
+}
+
+func TestRaceIntptrRW(t *testing.T) {
+ var x, y int
+ var p *int = &x
+ ch := make(chan bool, 1)
+ go func() {
+ *p = 5
+ ch <- true
+ }()
+ y = *p
+ x = y
+ <-ch
+}
+
+func TestRaceStringRW(t *testing.T) {
+ ch := make(chan bool, 1)
+ s := ""
+ go func() {
+ s = "abacaba"
+ ch <- true
+ }()
+ _ = s
+ <-ch
+}
+
+func TestRaceStringPtrRW(t *testing.T) {
+ ch := make(chan bool, 1)
+ var x string
+ p := &x
+ go func() {
+ *p = "a"
+ ch <- true
+ }()
+ _ = *p
+ <-ch
+}
+
+func TestRaceFloat64WW(t *testing.T) {
+ var x, y float64
+ ch := make(chan bool, 1)
+ go func() {
+ x = 1.0
+ ch <- true
+ }()
+ x = 2.0
+ <-ch
+
+ y = x
+ x = y
+}
+
+func TestRaceComplex128WW(t *testing.T) {
+ var x, y complex128
+ ch := make(chan bool, 1)
+ go func() {
+ x = 2 + 2i
+ ch <- true
+ }()
+ x = 4 + 4i
+ <-ch
+
+ y = x
+ x = y
+}
+
+func TestRaceUnsafePtrRW(t *testing.T) {
+ var x, y, z int
+ x, y, z = 1, 2, 3
+ var p unsafe.Pointer = unsafe.Pointer(&x)
+ ch := make(chan bool, 1)
+ go func() {
+ p = (unsafe.Pointer)(&z)
+ ch <- true
+ }()
+ y = *(*int)(p)
+ x = y
+ <-ch
+}
+
+func TestRaceFuncVariableRW(t *testing.T) {
+ var f func(x int) int
+ f = func(x int) int {
+ return x * x
+ }
+ ch := make(chan bool, 1)
+ go func() {
+ f = func(x int) int {
+ return x
+ }
+ ch <- true
+ }()
+ y := f(1)
+ <-ch
+ x := y
+ y = x
+}
+
+func TestRaceFuncVariableWW(t *testing.T) {
+ var f func(x int) int
+ _ = f
+ ch := make(chan bool, 1)
+ go func() {
+ f = func(x int) int {
+ return x
+ }
+ ch <- true
+ }()
+ f = func(x int) int {
+ return x * x
+ }
+ <-ch
+}
+
+// This one does not really belong in mop_test.
+func TestRacePanic(t *testing.T) {
+ var x int
+ _ = x
+ var zero int = 0
+ ch := make(chan bool, 2)
+ go func() {
+ defer func() {
+ err := recover()
+ if err == nil {
+ panic("should be panicking")
+ }
+ x = 1
+ ch <- true
+ }()
+ var y int = 1 / zero
+ zero = y
+ }()
+ go func() {
+ defer func() {
+ err := recover()
+ if err == nil {
+ panic("should be panicking")
+ }
+ x = 2
+ ch <- true
+ }()
+ var y int = 1 / zero
+ zero = y
+ }()
+
+ <-ch
+ <-ch
+ if zero != 0 {
+ panic("zero has changed")
+ }
+}
+
+func TestNoRaceBlank(t *testing.T) {
+ var a [5]int
+ ch := make(chan bool, 1)
+ go func() {
+ _, _ = a[0], a[1]
+ ch <- true
+ }()
+ _, _ = a[2], a[3]
+ <-ch
+ a[1] = a[0]
+}
+
+func TestRaceAppendRW(t *testing.T) {
+ a := make([]int, 10)
+ ch := make(chan bool)
+ go func() {
+ _ = append(a, 1)
+ ch <- true
+ }()
+ a[0] = 1
+ <-ch
+}
+
+func TestRaceAppendLenRW(t *testing.T) {
+ a := make([]int, 0)
+ ch := make(chan bool)
+ go func() {
+ a = append(a, 1)
+ ch <- true
+ }()
+ _ = len(a)
+ <-ch
+}
+
+func TestRaceAppendCapRW(t *testing.T) {
+ a := make([]int, 0)
+ ch := make(chan string)
+ go func() {
+ a = append(a, 1)
+ ch <- ""
+ }()
+ _ = cap(a)
+ <-ch
+}
+
+func TestNoRaceFuncArgsRW(t *testing.T) {
+ ch := make(chan byte, 1)
+ var x byte
+ go func(y byte) {
+ _ = y
+ ch <- 0
+ }(x)
+ x = 1
+ <-ch
+}
+
+func TestRaceFuncArgsRW(t *testing.T) {
+ ch := make(chan byte, 1)
+ var x byte
+ go func(y *byte) {
+ _ = *y
+ ch <- 0
+ }(&x)
+ x = 1
+ <-ch
+}
+
+// From the mailing list, slightly modified:
+// unprotected concurrent access to seen[].
+func TestRaceCrawl(t *testing.T) {
+ url := "dummyurl"
+ depth := 3
+ seen := make(map[string]bool)
+ ch := make(chan int, 100)
+ var wg sync.WaitGroup
+ var crawl func(string, int)
+ crawl = func(u string, d int) {
+ nurl := 0
+ defer func() {
+ ch <- nurl
+ }()
+ seen[u] = true
+ if d <= 0 {
+ wg.Done()
+ return
+ }
+ urls := [...]string{"a", "b", "c"}
+ for _, uu := range urls {
+ if _, ok := seen[uu]; !ok {
+ wg.Add(1)
+ go crawl(uu, d-1)
+ nurl++
+ }
+ }
+ wg.Done()
+ }
+ wg.Add(1)
+ go crawl(url, depth)
+ wg.Wait()
+}
+
+func TestRaceIndirection(t *testing.T) {
+ ch := make(chan struct{}, 1)
+ var y int
+ var x *int = &y
+ go func() {
+ *x = 1
+ ch <- struct{}{}
+ }()
+ *x = 2
+ <-ch
+ _ = *x
+}
+
+func TestRaceRune(t *testing.T) {
+ c := make(chan bool)
+ var x rune
+ go func() {
+ x = 1
+ c <- true
+ }()
+ _ = x
+ <-c
+}
+
+func TestRaceEmptyInterface1(t *testing.T) {
+ c := make(chan bool)
+ var x any
+ go func() {
+ x = nil
+ c <- true
+ }()
+ _ = x
+ <-c
+}
+
+func TestRaceEmptyInterface2(t *testing.T) {
+ c := make(chan bool)
+ var x any
+ go func() {
+ x = &Point{}
+ c <- true
+ }()
+ _ = x
+ <-c
+}
+
+func TestRaceTLS(t *testing.T) {
+ comm := make(chan *int)
+ done := make(chan bool, 2)
+ go func() {
+ var x int
+ comm <- &x
+ x = 1
+ x = *(<-comm)
+ done <- true
+ }()
+ go func() {
+ p := <-comm
+ *p = 2
+ comm <- p
+ done <- true
+ }()
+ <-done
+ <-done
+}
+
+func TestNoRaceHeapReallocation(t *testing.T) {
+ // It is possible that a future implementation
+ // of memory allocation will ruin this test.
+ // Increasing n might help in this case, so
+ // this test is a bit more generic than most of the
+ // others.
+ const n = 2
+ done := make(chan bool, n)
+ empty := func(p *int) {}
+ for i := 0; i < n; i++ {
+ ms := i
+ go func() {
+ <-time.After(time.Duration(ms) * time.Millisecond)
+ runtime.GC()
+ var x int
+ empty(&x) // x goes to the heap
+ done <- true
+ }()
+ }
+ for i := 0; i < n; i++ {
+ <-done
+ }
+}
+
+func TestRaceAnd(t *testing.T) {
+ c := make(chan bool)
+ x, y := 0, 0
+ go func() {
+ x = 1
+ c <- true
+ }()
+ if x == 1 && y == 1 {
+ }
+ <-c
+}
+
+func TestRaceAnd2(t *testing.T) {
+ c := make(chan bool)
+ x, y := 0, 0
+ go func() {
+ x = 1
+ c <- true
+ }()
+ if y == 0 && x == 1 {
+ }
+ <-c
+}
+
+func TestNoRaceAnd(t *testing.T) {
+ c := make(chan bool)
+ x, y := 0, 0
+ go func() {
+ x = 1
+ c <- true
+ }()
+ if y == 1 && x == 1 {
+ }
+ <-c
+}
+
+func TestRaceOr(t *testing.T) {
+ c := make(chan bool)
+ x, y := 0, 0
+ go func() {
+ x = 1
+ c <- true
+ }()
+ if x == 1 || y == 1 {
+ }
+ <-c
+}
+
+func TestRaceOr2(t *testing.T) {
+ c := make(chan bool)
+ x, y := 0, 0
+ go func() {
+ x = 1
+ c <- true
+ }()
+ if y == 1 || x == 1 {
+ }
+ <-c
+}
+
+func TestNoRaceOr(t *testing.T) {
+ c := make(chan bool)
+ x, y := 0, 0
+ go func() {
+ x = 1
+ c <- true
+ }()
+ if y == 0 || x == 1 {
+ }
+ <-c
+}
+
+func TestNoRaceShortCalc(t *testing.T) {
+ c := make(chan bool)
+ x, y := 0, 0
+ go func() {
+ y = 1
+ c <- true
+ }()
+ if x == 0 || y == 0 {
+ }
+ <-c
+}
+
+func TestNoRaceShortCalc2(t *testing.T) {
+ c := make(chan bool)
+ x, y := 0, 0
+ go func() {
+ y = 1
+ c <- true
+ }()
+ if x == 1 && y == 0 {
+ }
+ <-c
+}
+
+func TestRaceFuncItself(t *testing.T) {
+ c := make(chan bool)
+ f := func() {}
+ go func() {
+ f()
+ c <- true
+ }()
+ f = func() {}
+ <-c
+}
+
+func TestNoRaceFuncUnlock(t *testing.T) {
+ ch := make(chan bool, 1)
+ var mu sync.Mutex
+ x := 0
+ _ = x
+ go func() {
+ mu.Lock()
+ x = 42
+ mu.Unlock()
+ ch <- true
+ }()
+ x = func(mu *sync.Mutex) int {
+ mu.Lock()
+ return 43
+ }(&mu)
+ mu.Unlock()
+ <-ch
+}
+
+func TestRaceStructInit(t *testing.T) {
+ type X struct {
+ x, y int
+ }
+ c := make(chan bool, 1)
+ y := 0
+ go func() {
+ y = 42
+ c <- true
+ }()
+ x := X{x: y}
+ _ = x
+ <-c
+}
+
+func TestRaceArrayInit(t *testing.T) {
+ c := make(chan bool, 1)
+ y := 0
+ go func() {
+ y = 42
+ c <- true
+ }()
+ x := []int{0, y, 42}
+ _ = x
+ <-c
+}
+
+func TestRaceMapInit(t *testing.T) {
+ c := make(chan bool, 1)
+ y := 0
+ go func() {
+ y = 42
+ c <- true
+ }()
+ x := map[int]int{0: 42, y: 42}
+ _ = x
+ <-c
+}
+
+func TestRaceMapInit2(t *testing.T) {
+ c := make(chan bool, 1)
+ y := 0
+ go func() {
+ y = 42
+ c <- true
+ }()
+ x := map[int]int{0: 42, 42: y}
+ _ = x
+ <-c
+}
+
+type Inter interface {
+ Foo(x int)
+}
+type InterImpl struct {
+ x, y int
+}
+
+//go:noinline
+func (p InterImpl) Foo(x int) {
+}
+
+type InterImpl2 InterImpl
+
+func (p *InterImpl2) Foo(x int) {
+ if p == nil {
+ InterImpl{}.Foo(x)
+ }
+ InterImpl(*p).Foo(x)
+}
+
+func TestRaceInterCall(t *testing.T) {
+ c := make(chan bool, 1)
+ p := InterImpl{}
+ var x Inter = p
+ go func() {
+ p2 := InterImpl{}
+ x = p2
+ c <- true
+ }()
+ x.Foo(0)
+ <-c
+}
+
+func TestRaceInterCall2(t *testing.T) {
+ c := make(chan bool, 1)
+ p := InterImpl{}
+ var x Inter = p
+ z := 0
+ go func() {
+ z = 42
+ c <- true
+ }()
+ x.Foo(z)
+ <-c
+}
+
+func TestRaceFuncCall(t *testing.T) {
+ c := make(chan bool, 1)
+ f := func(x, y int) {}
+ x, y := 0, 0
+ go func() {
+ y = 42
+ c <- true
+ }()
+ f(x, y)
+ <-c
+}
+
+func TestRaceMethodCall(t *testing.T) {
+ c := make(chan bool, 1)
+ i := InterImpl{}
+ x := 0
+ go func() {
+ x = 42
+ c <- true
+ }()
+ i.Foo(x)
+ <-c
+}
+
+func TestRaceMethodCall2(t *testing.T) {
+ c := make(chan bool, 1)
+ i := &InterImpl{}
+ go func() {
+ i = &InterImpl{}
+ c <- true
+ }()
+ i.Foo(0)
+ <-c
+}
+
+// Method value with concrete value receiver.
+func TestRaceMethodValue(t *testing.T) {
+ c := make(chan bool, 1)
+ i := InterImpl{}
+ go func() {
+ i = InterImpl{}
+ c <- true
+ }()
+ _ = i.Foo
+ <-c
+}
+
+// Method value with interface receiver.
+func TestRaceMethodValue2(t *testing.T) {
+ c := make(chan bool, 1)
+ var i Inter = InterImpl{}
+ go func() {
+ i = InterImpl{}
+ c <- true
+ }()
+ _ = i.Foo
+ <-c
+}
+
+// Method value with implicit dereference.
+func TestRaceMethodValue3(t *testing.T) {
+ c := make(chan bool, 1)
+ i := &InterImpl{}
+ go func() {
+ *i = InterImpl{}
+ c <- true
+ }()
+ _ = i.Foo // dereferences i.
+ <-c
+}
+
+// Method value implicitly taking receiver address.
+func TestNoRaceMethodValue(t *testing.T) {
+ c := make(chan bool, 1)
+ i := InterImpl2{}
+ go func() {
+ i = InterImpl2{}
+ c <- true
+ }()
+ _ = i.Foo // takes the address of i only.
+ <-c
+}
+
+func TestRacePanicArg(t *testing.T) {
+ c := make(chan bool, 1)
+ err := errors.New("err")
+ go func() {
+ err = errors.New("err2")
+ c <- true
+ }()
+ defer func() {
+ recover()
+ <-c
+ }()
+ panic(err)
+}
+
+func TestRaceDeferArg(t *testing.T) {
+ c := make(chan bool, 1)
+ x := 0
+ go func() {
+ x = 42
+ c <- true
+ }()
+ func() {
+ defer func(x int) {
+ }(x)
+ }()
+ <-c
+}
+
+type DeferT int
+
+func (d DeferT) Foo() {
+}
+
+func TestRaceDeferArg2(t *testing.T) {
+ c := make(chan bool, 1)
+ var x DeferT
+ go func() {
+ var y DeferT
+ x = y
+ c <- true
+ }()
+ func() {
+ defer x.Foo()
+ }()
+ <-c
+}
+
+func TestNoRaceAddrExpr(t *testing.T) {
+ c := make(chan bool, 1)
+ x := 0
+ go func() {
+ x = 42
+ c <- true
+ }()
+ _ = &x
+ <-c
+}
+
+type AddrT struct {
+ _ [256]byte
+ x int
+}
+
+type AddrT2 struct {
+ _ [512]byte
+ p *AddrT
+}
+
+func TestRaceAddrExpr(t *testing.T) {
+ c := make(chan bool, 1)
+ a := AddrT2{p: &AddrT{x: 42}}
+ go func() {
+ a.p = &AddrT{x: 43}
+ c <- true
+ }()
+ _ = &a.p.x
+ <-c
+}
+
+func TestRaceTypeAssert(t *testing.T) {
+ c := make(chan bool, 1)
+ x := 0
+ var i any = x
+ go func() {
+ y := 0
+ i = y
+ c <- true
+ }()
+ _ = i.(int)
+ <-c
+}
+
+func TestRaceBlockAs(t *testing.T) {
+ c := make(chan bool, 1)
+ var x, y int
+ go func() {
+ x = 42
+ c <- true
+ }()
+ x, y = y, x
+ <-c
+}
+
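+// The BlockCall tests below exercise tuple assignment from a multi-value
+// call: an unsynchronized read of x or y races with the x, y = f()
+// assignment performed in the goroutine.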
+func TestRaceBlockCall1(t *testing.T) {
+ done := make(chan bool)
+ x, y := 0, 0
+ go func() {
+ f := func() (int, int) {
+ return 42, 43
+ }
+ x, y = f()
+ done <- true
+ }()
+ _ = x
+ <-done
+ if x != 42 || y != 43 {
+ panic("corrupted data")
+ }
+}
+func TestRaceBlockCall2(t *testing.T) {
+ done := make(chan bool)
+ x, y := 0, 0
+ go func() {
+ f := func() (int, int) {
+ return 42, 43
+ }
+ x, y = f()
+ done <- true
+ }()
+ _ = y
+ <-done
+ if x != 42 || y != 43 {
+ panic("corrupted data")
+ }
+}
+func TestRaceBlockCall3(t *testing.T) {
+ done := make(chan bool)
+ var x *int
+ y := 0
+ go func() {
+ f := func() (*int, int) {
+ i := 42
+ return &i, 43
+ }
+ x, y = f()
+ done <- true
+ }()
+ _ = x
+ <-done
+ if *x != 42 || y != 43 {
+ panic("corrupted data")
+ }
+}
+func TestRaceBlockCall4(t *testing.T) {
+ done := make(chan bool)
+ x := 0
+ var y *int
+ go func() {
+ f := func() (int, *int) {
+ i := 43
+ return 42, &i
+ }
+ x, y = f()
+ done <- true
+ }()
+ _ = y
+ <-done
+ if x != 42 || *y != 43 {
+ panic("corrupted data")
+ }
+}
+func TestRaceBlockCall5(t *testing.T) {
+ done := make(chan bool)
+ var x *int
+ y := 0
+ go func() {
+ f := func() (*int, int) {
+ i := 42
+ return &i, 43
+ }
+ x, y = f()
+ done <- true
+ }()
+ _ = y
+ <-done
+ if *x != 42 || y != 43 {
+ panic("corrupted data")
+ }
+}
+func TestRaceBlockCall6(t *testing.T) {
+ done := make(chan bool)
+ x := 0
+ var y *int
+ go func() {
+ f := func() (int, *int) {
+ i := 43
+ return 42, &i
+ }
+ x, y = f()
+ done <- true
+ }()
+ _ = x
+ <-done
+ if x != 42 || *y != 43 {
+ panic("corrupted data")
+ }
+}
+func TestRaceSliceSlice(t *testing.T) {
+ c := make(chan bool, 1)
+ x := make([]int, 10)
+ go func() {
+ x = make([]int, 20)
+ c <- true
+ }()
+ _ = x[2:3]
+ <-c
+}
+
+func TestRaceSliceSlice2(t *testing.T) {
+ c := make(chan bool, 1)
+ x := make([]int, 10)
+ i := 2
+ go func() {
+ i = 3
+ c <- true
+ }()
+ _ = x[i:4]
+ <-c
+}
+
+func TestRaceSliceString(t *testing.T) {
+ c := make(chan bool, 1)
+ x := "hello"
+ go func() {
+ x = "world"
+ c <- true
+ }()
+ _ = x[2:3]
+ <-c
+}
+
+func TestRaceSliceStruct(t *testing.T) {
+ type X struct {
+ x, y int
+ }
+ c := make(chan bool, 1)
+ x := make([]X, 10)
+ go func() {
+ y := make([]X, 10)
+ copy(y, x)
+ c <- true
+ }()
+ x[1].y = 42
+ <-c
+}
+
+func TestRaceAppendSliceStruct(t *testing.T) {
+ type X struct {
+ x, y int
+ }
+ c := make(chan bool, 1)
+ x := make([]X, 10)
+ go func() {
+ y := make([]X, 0, 10)
+ y = append(y, x...)
+ c <- true
+ }()
+ x[1].y = 42
+ <-c
+}
+
+func TestRaceStructInd(t *testing.T) {
+ c := make(chan bool, 1)
+ type Item struct {
+ x, y int
+ }
+ i := Item{}
+ go func(p *Item) {
+ *p = Item{}
+ c <- true
+ }(&i)
+ i.y = 42
+ <-c
+}
+
+func TestRaceAsFunc1(t *testing.T) {
+ var s []byte
+ c := make(chan bool, 1)
+ go func() {
+ var err error
+ s, err = func() ([]byte, error) {
+ t := []byte("hello world")
+ return t, nil
+ }()
+ c <- true
+ _ = err
+ }()
+ _ = string(s)
+ <-c
+}
+
+func TestRaceAsFunc2(t *testing.T) {
+ c := make(chan bool, 1)
+ x := 0
+ go func() {
+ func(x int) {
+ }(x)
+ c <- true
+ }()
+ x = 42
+ <-c
+}
+
+func TestRaceAsFunc3(t *testing.T) {
+ c := make(chan bool, 1)
+ var mu sync.Mutex
+ x := 0
+ go func() {
+ func(x int) {
+ mu.Lock()
+ }(x) // Read of x must be outside of the mutex.
+ mu.Unlock()
+ c <- true
+ }()
+ mu.Lock()
+ x = 42
+ mu.Unlock()
+ <-c
+}
+
+func TestNoRaceAsFunc4(t *testing.T) {
+ c := make(chan bool, 1)
+ var mu sync.Mutex
+ x := 0
+ _ = x
+ go func() {
+ x = func() int { // Write of x must be under the mutex.
+ mu.Lock()
+ return 42
+ }()
+ mu.Unlock()
+ c <- true
+ }()
+ mu.Lock()
+ x = 42
+ mu.Unlock()
+ <-c
+}
+
+func TestRaceHeapParam(t *testing.T) {
+ done := make(chan bool)
+ x := func() (x int) {
+ go func() {
+ x = 42
+ done <- true
+ }()
+ return
+ }()
+ _ = x
+ <-done
+}
+
+func TestNoRaceEmptyStruct(t *testing.T) {
+ type Empty struct{}
+ type X struct {
+ y int64
+ Empty
+ }
+ type Y struct {
+ x X
+ y int64
+ }
+ c := make(chan X)
+ var y Y
+ go func() {
+ x := y.x
+ c <- x
+ }()
+ y.y = 42
+ <-c
+}
+
+func TestRaceNestedStruct(t *testing.T) {
+ type X struct {
+ x, y int
+ }
+ type Y struct {
+ x X
+ }
+ c := make(chan Y)
+ var y Y
+ go func() {
+ c <- y
+ }()
+ y.x.y = 42
+ <-c
+}
+
+func TestRaceIssue5567(t *testing.T) {
+ testRaceRead(t, false)
+}
+
+func TestRaceIssue51618(t *testing.T) {
+ testRaceRead(t, true)
+}
+
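+// The reader goroutine reuses b for every Read/ReadAt while the consumer
+// may still be hashing the chunk it received over the channel, so the
+// refill of b races with those reads.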
+func testRaceRead(t *testing.T, pread bool) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ in := make(chan []byte)
+ res := make(chan error)
+ go func() {
+ var err error
+ defer func() {
+ close(in)
+ res <- err
+ }()
+ path := "mop_test.go"
+ f, err := os.Open(path)
+ if err != nil {
+ return
+ }
+ defer f.Close()
+ var n, total int
+	b := make([]byte, 17) // the race is on the b buffer
+ for err == nil {
+ if pread {
+ n, err = f.ReadAt(b, int64(total))
+ } else {
+ n, err = f.Read(b)
+ }
+ total += n
+ if n > 0 {
+ in <- b[:n]
+ }
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ }()
+ h := crc32.New(crc32.MakeTable(0x12345678))
+ for b := range in {
+ h.Write(b)
+ }
+ _ = h.Sum(nil)
+ err := <-res
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestRaceIssue5654(t *testing.T) {
+ text := `Friends, Romans, countrymen, lend me your ears;
+I come to bury Caesar, not to praise him.
+The evil that men do lives after them;
+The good is oft interred with their bones;
+So let it be with Caesar. The noble Brutus
+Hath told you Caesar was ambitious:
+If it were so, it was a grievous fault,
+And grievously hath Caesar answer'd it.
+Here, under leave of Brutus and the rest -
+For Brutus is an honourable man;
+So are they all, all honourable men -
+Come I to speak in Caesar's funeral.
+He was my friend, faithful and just to me:
+But Brutus says he was ambitious;
+And Brutus is an honourable man.`
+
+ data := bytes.NewBufferString(text)
+ in := make(chan []byte)
+
+ go func() {
+ buf := make([]byte, 16)
+ var n int
+ var err error
+ for ; err == nil; n, err = data.Read(buf) {
+ in <- buf[:n]
+ }
+ close(in)
+ }()
+ res := ""
+ for s := range in {
+ res += string(s)
+ }
+ _ = res
+}
+
+type Base int
+
+func (b *Base) Foo() int {
+ return 42
+}
+
+func (b Base) Bar() int {
+ return int(b)
+}
+
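+// The MethodThunk tests exercise compiler-generated wrappers for promoted
+// methods of embedded fields: the wrapper's implicit read (or mere
+// address-taking) of the embedded field must be instrumented correctly.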
+func TestNoRaceMethodThunk(t *testing.T) {
+ type Derived struct {
+ pad int
+ Base
+ }
+ var d Derived
+ done := make(chan bool)
+ go func() {
+ _ = d.Foo()
+ done <- true
+ }()
+ d = Derived{}
+ <-done
+}
+
+func TestRaceMethodThunk(t *testing.T) {
+ type Derived struct {
+ pad int
+ *Base
+ }
+ var d Derived
+ done := make(chan bool)
+ go func() {
+ _ = d.Foo()
+ done <- true
+ }()
+ d = Derived{}
+ <-done
+}
+
+func TestRaceMethodThunk2(t *testing.T) {
+ type Derived struct {
+ pad int
+ Base
+ }
+ var d Derived
+ done := make(chan bool)
+ go func() {
+ _ = d.Bar()
+ done <- true
+ }()
+ d = Derived{}
+ <-done
+}
+
+func TestRaceMethodThunk3(t *testing.T) {
+ type Derived struct {
+ pad int
+ *Base
+ }
+ var d Derived
+ d.Base = new(Base)
+ done := make(chan bool)
+ go func() {
+ _ = d.Bar()
+ done <- true
+ }()
+ d.Base = new(Base)
+ <-done
+}
+
+func TestRaceMethodThunk4(t *testing.T) {
+ type Derived struct {
+ pad int
+ *Base
+ }
+ var d Derived
+ d.Base = new(Base)
+ done := make(chan bool)
+ go func() {
+ _ = d.Bar()
+ done <- true
+ }()
+ *(*int)(d.Base) = 42
+ <-done
+}
+
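+// Distinct tiny allocations may share a single memory block; the race
+// detector must not report accesses to neighboring tiny objects as racing.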
+func TestNoRaceTinyAlloc(t *testing.T) {
+ const P = 4
+ const N = 1e6
+ var tinySink *byte
+ _ = tinySink
+ done := make(chan bool)
+ for p := 0; p < P; p++ {
+ go func() {
+ for i := 0; i < N; i++ {
+ var b byte
+ if b != 0 {
+ tinySink = &b // make it heap allocated
+ }
+ b = 42
+ }
+ done <- true
+ }()
+ }
+ for p := 0; p < P; p++ {
+ <-done
+ }
+}
+
+func TestNoRaceIssue60934(t *testing.T) {
+ // Test that runtime.RaceDisable state doesn't accidentally get applied to
+ // new goroutines.
+
+ // Create several goroutines that end after calling runtime.RaceDisable.
+ var wg sync.WaitGroup
+ ready := make(chan struct{})
+ wg.Add(32)
+ for i := 0; i < 32; i++ {
+ go func() {
+ <-ready // ensure we have multiple goroutines running at the same time
+ runtime.RaceDisable()
+ wg.Done()
+ }()
+ }
+ close(ready)
+ wg.Wait()
+
+ // Make sure race detector still works. If the runtime.RaceDisable state
+ // leaks, the happens-before edges here will be ignored and a race on x will
+ // be reported.
+ var x int
+ ch := make(chan struct{}, 0)
+ wg.Add(2)
+ go func() {
+ x = 1
+ ch <- struct{}{}
+ wg.Done()
+ }()
+ go func() {
+ <-ch
+ _ = x
+ wg.Done()
+ }()
+ wg.Wait()
+}
diff --git a/src/runtime/race/testdata/mutex_test.go b/src/runtime/race/testdata/mutex_test.go
new file mode 100644
index 0000000..9dbed9a
--- /dev/null
+++ b/src/runtime/race/testdata/mutex_test.go
@@ -0,0 +1,150 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestNoRaceMutex(t *testing.T) {
+ var mu sync.Mutex
+ var x int16 = 0
+ _ = x
+ ch := make(chan bool, 2)
+ go func() {
+ mu.Lock()
+ defer mu.Unlock()
+ x = 1
+ ch <- true
+ }()
+ go func() {
+ mu.Lock()
+ x = 2
+ mu.Unlock()
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceMutex(t *testing.T) {
+ var mu sync.Mutex
+ var x int16 = 0
+ _ = x
+ ch := make(chan bool, 2)
+ go func() {
+ x = 1
+ mu.Lock()
+ defer mu.Unlock()
+ ch <- true
+ }()
+ go func() {
+ x = 2
+ mu.Lock()
+ mu.Unlock()
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceMutex2(t *testing.T) {
+ var mu1 sync.Mutex
+ var mu2 sync.Mutex
+ var x int8 = 0
+ _ = x
+ ch := make(chan bool, 2)
+ go func() {
+ mu1.Lock()
+ defer mu1.Unlock()
+ x = 1
+ ch <- true
+ }()
+ go func() {
+ mu2.Lock()
+ x = 2
+ mu2.Unlock()
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
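+// x is never accessed while the mutex is held; the two writes to x are
+// ordered purely by the happens-before chain of Lock/Unlock pairs that
+// publish and observe the written flag.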
+func TestNoRaceMutexPureHappensBefore(t *testing.T) {
+ var mu sync.Mutex
+ var x int16 = 0
+ _ = x
+ written := false
+ ch := make(chan bool, 2)
+ go func() {
+ x = 1
+ mu.Lock()
+ written = true
+ mu.Unlock()
+ ch <- true
+ }()
+ go func() {
+ time.Sleep(100 * time.Microsecond)
+ mu.Lock()
+ for !written {
+ mu.Unlock()
+ time.Sleep(100 * time.Microsecond)
+ mu.Lock()
+ }
+ mu.Unlock()
+ x = 1
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
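+// A Mutex can be used as a semaphore: the Unlock in one goroutine releases
+// the Lock taken by another, creating a happens-before edge between them.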
+func TestNoRaceMutexSemaphore(t *testing.T) {
+ var mu sync.Mutex
+ ch := make(chan bool, 2)
+ x := 0
+ _ = x
+ mu.Lock()
+ go func() {
+ x = 1
+ mu.Unlock()
+ ch <- true
+ }()
+ go func() {
+ mu.Lock()
+ x = 2
+ mu.Unlock()
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
+// from doc/go_mem.html
+func TestNoRaceMutexExampleFromHtml(t *testing.T) {
+ var l sync.Mutex
+ a := ""
+
+ l.Lock()
+ go func() {
+ a = "hello, world"
+ l.Unlock()
+ }()
+ l.Lock()
+ _ = a
+}
+
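+// Overwriting the Mutex itself while another goroutine locks it is a race
+// on the mutex's own memory.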
+func TestRaceMutexOverwrite(t *testing.T) {
+ c := make(chan bool, 1)
+ var mu sync.Mutex
+ go func() {
+ mu = sync.Mutex{}
+ c <- true
+ }()
+ mu.Lock()
+ <-c
+}
diff --git a/src/runtime/race/testdata/pool_test.go b/src/runtime/race/testdata/pool_test.go
new file mode 100644
index 0000000..a96913e
--- /dev/null
+++ b/src/runtime/race/testdata/pool_test.go
@@ -0,0 +1,47 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestRacePool(t *testing.T) {
+ // Pool randomly drops the argument on the floor during Put.
+ // Repeat so that at least one iteration gets reuse.
+ for i := 0; i < 10; i++ {
+ c := make(chan int)
+ p := &sync.Pool{New: func() any { return make([]byte, 10) }}
+ x := p.Get().([]byte)
+ x[0] = 1
+ p.Put(x)
+ go func() {
+ y := p.Get().([]byte)
+ y[0] = 2
+ c <- 1
+ }()
+ x[0] = 3
+ <-c
+ }
+}
+
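+// Put followed by a later Get of the same object synchronizes through the
+// pool, so handing x off via Put/Get is race-free.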
+func TestNoRacePool(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ p := &sync.Pool{New: func() any { return make([]byte, 10) }}
+ x := p.Get().([]byte)
+ x[0] = 1
+ p.Put(x)
+ go func() {
+ y := p.Get().([]byte)
+ y[0] = 2
+ p.Put(y)
+ }()
+ time.Sleep(100 * time.Millisecond)
+ x = p.Get().([]byte)
+ x[0] = 3
+ }
+}
diff --git a/src/runtime/race/testdata/reflect_test.go b/src/runtime/race/testdata/reflect_test.go
new file mode 100644
index 0000000..b567400
--- /dev/null
+++ b/src/runtime/race/testdata/reflect_test.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestRaceReflectRW(t *testing.T) {
+ ch := make(chan bool, 1)
+ i := 0
+ v := reflect.ValueOf(&i)
+ go func() {
+ v.Elem().Set(reflect.ValueOf(1))
+ ch <- true
+ }()
+ _ = v.Elem().Int()
+ <-ch
+}
+
+func TestRaceReflectWW(t *testing.T) {
+ ch := make(chan bool, 1)
+ i := 0
+ v := reflect.ValueOf(&i)
+ go func() {
+ v.Elem().Set(reflect.ValueOf(1))
+ ch <- true
+ }()
+ v.Elem().Set(reflect.ValueOf(2))
+ <-ch
+}
+
+func TestRaceReflectCopyWW(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]byte, 2)
+ v := reflect.ValueOf(a)
+ go func() {
+ reflect.Copy(v, v)
+ ch <- true
+ }()
+ reflect.Copy(v, v)
+ <-ch
+}
diff --git a/src/runtime/race/testdata/regression_test.go b/src/runtime/race/testdata/regression_test.go
new file mode 100644
index 0000000..6a7802f
--- /dev/null
+++ b/src/runtime/race/testdata/regression_test.go
@@ -0,0 +1,189 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code patterns that caused problems in the past.
+
+package race_test
+
+import (
+ "testing"
+)
+
+type LogImpl struct {
+ x int
+}
+
+func NewLog() (l LogImpl) {
+ c := make(chan bool)
+ go func() {
+ _ = l
+ c <- true
+ }()
+ l = LogImpl{}
+ <-c
+ return
+}
+
+var _ LogImpl = NewLog()
+
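+// Several helpers below are never executed by a test; they exist only to
+// check that these patterns compile under race instrumentation.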
+func MakeMap() map[int]int {
+ return make(map[int]int)
+}
+
+func InstrumentMapLen() {
+ _ = len(MakeMap())
+}
+
+func InstrumentMapLen2() {
+ m := make(map[int]map[int]int)
+ _ = len(m[0])
+}
+
+func InstrumentMapLen3() {
+ m := make(map[int]*map[int]int)
+ _ = len(*m[0])
+}
+
+func TestRaceUnaddressableMapLen(t *testing.T) {
+ m := make(map[int]map[int]int)
+ ch := make(chan int, 1)
+ m[0] = make(map[int]int)
+ go func() {
+ _ = len(m[0])
+ ch <- 0
+ }()
+ m[0][0] = 1
+ <-ch
+}
+
+type Rect struct {
+ x, y int
+}
+
+type Image struct {
+ min, max Rect
+}
+
+//go:noinline
+func NewImage() Image {
+ return Image{}
+}
+
+func AddrOfTemp() {
+ _ = NewImage().min
+}
+
+type TypeID int
+
+func (t *TypeID) encodeType(x int) (tt TypeID, err error) {
+ switch x {
+ case 0:
+ return t.encodeType(x * x)
+ }
+ return 0, nil
+}
+
+type stack []int
+
+func (s *stack) push(x int) {
+ *s = append(*s, x)
+}
+
+func (s *stack) pop() int {
+ i := len(*s)
+ n := (*s)[i-1]
+ *s = (*s)[:i-1]
+ return n
+}
+
+func TestNoRaceStackPushPop(t *testing.T) {
+ var s stack
+ go func(s *stack) {}(&s)
+ s.push(1)
+ x := s.pop()
+ _ = x
+}
+
+type RpcChan struct {
+ c chan bool
+}
+
+var makeChanCalls int
+
+//go:noinline
+func makeChan() *RpcChan {
+ makeChanCalls++
+ c := &RpcChan{make(chan bool, 1)}
+ c.c <- true
+ return c
+}
+
+func call() bool {
+ x := <-makeChan().c
+ return x
+}
+
+func TestNoRaceRpcChan(t *testing.T) {
+ makeChanCalls = 0
+ _ = call()
+ if makeChanCalls != 1 {
+ t.Fatalf("makeChanCalls %d, expected 1\n", makeChanCalls)
+ }
+}
+
+func divInSlice() {
+ v := make([]int64, 10)
+ i := 1
+ _ = v[(i*4)/3]
+}
+
+func TestNoRaceReturn(t *testing.T) {
+ c := make(chan int)
+ noRaceReturn(c)
+ <-c
+}
+
+// Return used to do an implicit a = a, causing a read/write race
+// with the goroutine. The compiler now has an optimization that avoids it.
+// See issue 4014.
+func noRaceReturn(c chan int) (a, b int) {
+ a = 42
+ go func() {
+ _ = a
+ c <- 1
+ }()
+ return a, 10
+}
+
+func issue5431() {
+ var p **inltype
+ if inlinetest(p).x && inlinetest(p).y {
+ } else if inlinetest(p).x || inlinetest(p).y {
+ }
+}
+
+type inltype struct {
+ x, y bool
+}
+
+func inlinetest(p **inltype) *inltype {
+ return *p
+}
+
+type iface interface {
+ Foo() *struct{ b bool }
+}
+
+type Int int
+
+func (i Int) Foo() *struct{ b bool } {
+ return &struct{ b bool }{false}
+}
+
+func TestNoRaceForInfiniteLoop(t *testing.T) {
+ var x Int
+	// interface conversion causes nodes to be put on the init list
+ for iface(x).Foo().b {
+ }
+}
diff --git a/src/runtime/race/testdata/rwmutex_test.go b/src/runtime/race/testdata/rwmutex_test.go
new file mode 100644
index 0000000..39219e5
--- /dev/null
+++ b/src/runtime/race/testdata/rwmutex_test.go
@@ -0,0 +1,154 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestRaceMutexRWMutex(t *testing.T) {
+ var mu1 sync.Mutex
+ var mu2 sync.RWMutex
+ var x int16 = 0
+ _ = x
+ ch := make(chan bool, 2)
+ go func() {
+ mu1.Lock()
+ defer mu1.Unlock()
+ x = 1
+ ch <- true
+ }()
+ go func() {
+ mu2.Lock()
+ x = 2
+ mu2.Unlock()
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
+func TestNoRaceRWMutex(t *testing.T) {
+ var mu sync.RWMutex
+ var x, y int64 = 0, 1
+ _ = y
+ ch := make(chan bool, 2)
+ go func() {
+ mu.Lock()
+ defer mu.Unlock()
+ x = 2
+ ch <- true
+ }()
+ go func() {
+ mu.RLock()
+ y = x
+ mu.RUnlock()
+ ch <- true
+ }()
+ <-ch
+ <-ch
+}
+
+func TestRaceRWMutexMultipleReaders(t *testing.T) {
+ var mu sync.RWMutex
+ var x, y int64 = 0, 1
+ ch := make(chan bool, 4)
+ go func() {
+ mu.Lock()
+ defer mu.Unlock()
+ x = 2
+ ch <- true
+ }()
+ // Use three readers so that no matter what order they're
+ // scheduled in, two will be on the same side of the write
+ // lock above.
+ go func() {
+ mu.RLock()
+ y = x + 1
+ mu.RUnlock()
+ ch <- true
+ }()
+ go func() {
+ mu.RLock()
+ y = x + 2
+ mu.RUnlock()
+ ch <- true
+ }()
+ go func() {
+ mu.RLock()
+ y = x + 3
+ mu.RUnlock()
+ ch <- true
+ }()
+ <-ch
+ <-ch
+ <-ch
+ <-ch
+ _ = y
+}
+
+func TestNoRaceRWMutexMultipleReaders(t *testing.T) {
+ var mu sync.RWMutex
+ x := int64(0)
+ ch := make(chan bool, 4)
+ go func() {
+ mu.Lock()
+ defer mu.Unlock()
+ x = 2
+ ch <- true
+ }()
+ go func() {
+ mu.RLock()
+ y := x + 1
+ _ = y
+ mu.RUnlock()
+ ch <- true
+ }()
+ go func() {
+ mu.RLock()
+ y := x + 2
+ _ = y
+ mu.RUnlock()
+ ch <- true
+ }()
+ go func() {
+ mu.RLock()
+ y := x + 3
+ _ = y
+ mu.RUnlock()
+ ch <- true
+ }()
+ <-ch
+ <-ch
+ <-ch
+ <-ch
+}
+
+func TestNoRaceRWMutexTransitive(t *testing.T) {
+ var mu sync.RWMutex
+ x := int64(0)
+ ch := make(chan bool, 2)
+ go func() {
+ mu.RLock()
+ _ = x
+ mu.RUnlock()
+ ch <- true
+ }()
+ go func() {
+ time.Sleep(1e7)
+ mu.RLock()
+ _ = x
+ mu.RUnlock()
+ ch <- true
+ }()
+ time.Sleep(2e7)
+ mu.Lock()
+ x = 42
+ mu.Unlock()
+ <-ch
+ <-ch
+}
diff --git a/src/runtime/race/testdata/select_test.go b/src/runtime/race/testdata/select_test.go
new file mode 100644
index 0000000..9a43f9b
--- /dev/null
+++ b/src/runtime/race/testdata/select_test.go
@@ -0,0 +1,293 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "runtime"
+ "testing"
+)
+
+func TestNoRaceSelect1(t *testing.T) {
+ var x int
+ _ = x
+ compl := make(chan bool)
+ c := make(chan bool)
+ c1 := make(chan bool)
+
+ go func() {
+ x = 1
+ // At least two channels are needed because
+ // otherwise the compiler optimizes select out.
+ // See comment in runtime/select.go:^func selectgo.
+ select {
+ case c <- true:
+ case c1 <- true:
+ }
+ compl <- true
+ }()
+ select {
+ case <-c:
+ case c1 <- true:
+ }
+ x = 2
+ <-compl
+}
+
+func TestNoRaceSelect2(t *testing.T) {
+ var x int
+ _ = x
+ compl := make(chan bool)
+ c := make(chan bool)
+ c1 := make(chan bool)
+ go func() {
+ select {
+ case <-c:
+ case <-c1:
+ }
+ x = 1
+ compl <- true
+ }()
+ x = 2
+ close(c)
+ runtime.Gosched()
+ <-compl
+}
+
+func TestNoRaceSelect3(t *testing.T) {
+ var x int
+ _ = x
+ compl := make(chan bool)
+ c := make(chan bool, 10)
+ c1 := make(chan bool)
+ go func() {
+ x = 1
+ select {
+ case c <- true:
+ case <-c1:
+ }
+ compl <- true
+ }()
+ <-c
+ x = 2
+ <-compl
+}
+
+func TestNoRaceSelect4(t *testing.T) {
+ type Task struct {
+ f func()
+ done chan bool
+ }
+
+ queue := make(chan Task)
+ dummy := make(chan bool)
+
+ go func() {
+ for {
+ select {
+ case t := <-queue:
+ t.f()
+ t.done <- true
+ }
+ }
+ }()
+
+ doit := func(f func()) {
+ done := make(chan bool, 1)
+ select {
+ case queue <- Task{f, done}:
+ case <-dummy:
+ }
+ select {
+ case <-done:
+ case <-dummy:
+ }
+ }
+
+ var x int
+ doit(func() {
+ x = 1
+ })
+ _ = x
+}
+
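+// Exercise all four combinations of {select vs. plain channel op} and
+// {with vs. without an initial Gosched} for the same synchronization.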
+func TestNoRaceSelect5(t *testing.T) {
+ test := func(sel, needSched bool) {
+ var x int
+ _ = x
+ ch := make(chan bool)
+ c1 := make(chan bool)
+
+ done := make(chan bool, 2)
+ go func() {
+ if needSched {
+ runtime.Gosched()
+ }
+ // println(1)
+ x = 1
+ if sel {
+ select {
+ case ch <- true:
+ case <-c1:
+ }
+ } else {
+ ch <- true
+ }
+ done <- true
+ }()
+
+ go func() {
+ // println(2)
+ if sel {
+ select {
+ case <-ch:
+ case <-c1:
+ }
+ } else {
+ <-ch
+ }
+ x = 1
+ done <- true
+ }()
+ <-done
+ <-done
+ }
+
+ test(true, true)
+ test(true, false)
+ test(false, true)
+ test(false, false)
+}
+
+func TestRaceSelect1(t *testing.T) {
+ var x int
+ _ = x
+ compl := make(chan bool, 2)
+ c := make(chan bool)
+ c1 := make(chan bool)
+
+ go func() {
+ <-c
+ <-c
+ }()
+ f := func() {
+ select {
+ case c <- true:
+ case c1 <- true:
+ }
+ x = 1
+ compl <- true
+ }
+ go f()
+ go f()
+ <-compl
+ <-compl
+}
+
+func TestRaceSelect2(t *testing.T) {
+ var x int
+ _ = x
+ compl := make(chan bool)
+ c := make(chan bool)
+ c1 := make(chan bool)
+ go func() {
+ x = 1
+ select {
+ case <-c:
+ case <-c1:
+ }
+ compl <- true
+ }()
+ close(c)
+ x = 2
+ <-compl
+}
+
+func TestRaceSelect3(t *testing.T) {
+ var x int
+ _ = x
+ compl := make(chan bool)
+ c := make(chan bool)
+ c1 := make(chan bool)
+ go func() {
+ x = 1
+ select {
+ case c <- true:
+ case c1 <- true:
+ }
+ compl <- true
+ }()
+ x = 2
+ select {
+ case <-c:
+ }
+ <-compl
+}
+
+func TestRaceSelect4(t *testing.T) {
+ done := make(chan bool, 1)
+ var x int
+ go func() {
+ select {
+ default:
+ x = 2
+ }
+ done <- true
+ }()
+ _ = x
+ <-done
+}
+
+// The idea behind this test:
+// there are two variables; access to one
+// of them is synchronized, access to the other
+// is not.
+// Select must (unconditionally) choose the non-synchronized variable,
+// thus causing exactly one race.
+// Currently this test does not appear to accomplish that goal.
+func TestRaceSelect5(t *testing.T) {
+ done := make(chan bool, 1)
+ c1 := make(chan bool, 1)
+ c2 := make(chan bool)
+ var x, y int
+ go func() {
+ select {
+ case c1 <- true:
+ x = 1
+ case c2 <- true:
+ y = 1
+ }
+ done <- true
+ }()
+ _ = x
+ _ = y
+ <-done
+}
+
+// Select statements may introduce
+// flakiness: whether this test contains
+// a race depends on the scheduling.
+// (Some may argue that the code contains
+// this race by definition.)
+/*
+func TestFlakyDefault(t *testing.T) {
+ var x int
+ c := make(chan bool, 1)
+ done := make(chan bool, 1)
+ go func() {
+ select {
+ case <-c:
+ x = 2
+ default:
+ x = 3
+ }
+ done <- true
+ }()
+ x = 1
+ c <- true
+ _ = x
+ <-done
+}
+*/
diff --git a/src/runtime/race/testdata/slice_test.go b/src/runtime/race/testdata/slice_test.go
new file mode 100644
index 0000000..9009a9a
--- /dev/null
+++ b/src/runtime/race/testdata/slice_test.go
@@ -0,0 +1,608 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "sync"
+ "testing"
+)
+
+func TestRaceSliceRW(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]int, 2)
+ go func() {
+ a[1] = 1
+ ch <- true
+ }()
+ _ = a[1]
+ <-ch
+}
+
+func TestNoRaceSliceRW(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]int, 2)
+ go func() {
+ a[0] = 1
+ ch <- true
+ }()
+ _ = a[1]
+ <-ch
+}
+
+func TestRaceSliceWW(t *testing.T) {
+ a := make([]int, 10)
+ ch := make(chan bool, 1)
+ go func() {
+ a[1] = 1
+ ch <- true
+ }()
+ a[1] = 2
+ <-ch
+}
+
+func TestNoRaceArrayWW(t *testing.T) {
+ var a [5]int
+ ch := make(chan bool, 1)
+ go func() {
+ a[0] = 1
+ ch <- true
+ }()
+ a[1] = 2
+ <-ch
+}
+
+func TestRaceArrayWW(t *testing.T) {
+ var a [5]int
+ ch := make(chan bool, 1)
+ go func() {
+ a[1] = 1
+ ch <- true
+ }()
+ a[1] = 2
+ <-ch
+}
+
+func TestNoRaceSliceWriteLen(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]bool, 1)
+ go func() {
+ a[0] = true
+ ch <- true
+ }()
+ _ = len(a)
+ <-ch
+}
+
+func TestNoRaceSliceWriteCap(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]uint64, 100)
+ go func() {
+ a[50] = 123
+ ch <- true
+ }()
+ _ = cap(a)
+ <-ch
+}
+
+func TestRaceSliceCopyRead(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]int, 10)
+ b := make([]int, 10)
+ go func() {
+ _ = a[5]
+ ch <- true
+ }()
+ copy(a, b)
+ <-ch
+}
+
+func TestNoRaceSliceWriteCopy(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]int, 10)
+ b := make([]int, 10)
+ go func() {
+ a[5] = 1
+ ch <- true
+ }()
+ copy(a[:5], b[:5])
+ <-ch
+}
+
+func TestRaceSliceCopyWrite2(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]int, 10)
+ b := make([]int, 10)
+ go func() {
+ b[5] = 1
+ ch <- true
+ }()
+ copy(a, b)
+ <-ch
+}
+
+func TestRaceSliceCopyWrite3(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]byte, 10)
+ go func() {
+ a[7] = 1
+ ch <- true
+ }()
+ copy(a, "qwertyqwerty")
+ <-ch
+}
+
+func TestNoRaceSliceCopyRead(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]int, 10)
+ b := make([]int, 10)
+ go func() {
+ _ = b[5]
+ ch <- true
+ }()
+ copy(a, b)
+ <-ch
+}
+
+func TestRacePointerSliceCopyRead(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]*int, 10)
+ b := make([]*int, 10)
+ go func() {
+ _ = a[5]
+ ch <- true
+ }()
+ copy(a, b)
+ <-ch
+}
+
+func TestNoRacePointerSliceWriteCopy(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]*int, 10)
+ b := make([]*int, 10)
+ go func() {
+ a[5] = new(int)
+ ch <- true
+ }()
+ copy(a[:5], b[:5])
+ <-ch
+}
+
+func TestRacePointerSliceCopyWrite2(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]*int, 10)
+ b := make([]*int, 10)
+ go func() {
+ b[5] = new(int)
+ ch <- true
+ }()
+ copy(a, b)
+ <-ch
+}
+
+func TestNoRacePointerSliceCopyRead(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]*int, 10)
+ b := make([]*int, 10)
+ go func() {
+ _ = b[5]
+ ch <- true
+ }()
+ copy(a, b)
+ <-ch
+}
+
+func TestNoRaceSliceWriteSlice2(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]float64, 10)
+ go func() {
+ a[2] = 1.0
+ ch <- true
+ }()
+ _ = a[0:5]
+ <-ch
+}
+
+func TestRaceSliceWriteSlice(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]float64, 10)
+ go func() {
+ a[2] = 1.0
+ ch <- true
+ }()
+ a = a[5:10]
+ <-ch
+}
+
+func TestNoRaceSliceWriteSlice(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]float64, 10)
+ go func() {
+ a[2] = 1.0
+ ch <- true
+ }()
+ _ = a[5:10]
+ <-ch
+}
+
+func TestNoRaceSliceLenCap(t *testing.T) {
+ ch := make(chan bool, 1)
+ a := make([]struct{}, 10)
+ go func() {
+ _ = len(a)
+ ch <- true
+ }()
+ _ = cap(a)
+ <-ch
+}
+
+func TestNoRaceStructSlicesRangeWrite(t *testing.T) {
+ type Str struct {
+ a []int
+ b []int
+ }
+ ch := make(chan bool, 1)
+ var s Str
+ s.a = make([]int, 10)
+ s.b = make([]int, 10)
+ go func() {
+ for range s.a {
+ }
+ ch <- true
+ }()
+ s.b[5] = 5
+ <-ch
+}
+
+func TestRaceSliceDifferent(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ s2 := s
+ go func() {
+ s[3] = 3
+ c <- true
+ }()
+ // false negative because s2 is PAUTO w/o PHEAP
+ // so we do not instrument it
+ s2[3] = 3
+ <-c
+}
+
+func TestRaceSliceRangeWrite(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ s[3] = 3
+ c <- true
+ }()
+ for _, v := range s {
+ _ = v
+ }
+ <-c
+}
+
+func TestNoRaceSliceRangeWrite(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ s[3] = 3
+ c <- true
+ }()
+ for range s {
+ }
+ <-c
+}
+
+func TestRaceSliceRangeAppend(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ s = append(s, 3)
+ c <- true
+ }()
+ for range s {
+ }
+ <-c
+}
+
+func TestNoRaceSliceRangeAppend(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ _ = append(s, 3)
+ c <- true
+ }()
+ for range s {
+ }
+ <-c
+}
+
+func TestRaceSliceVarWrite(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ s[3] = 3
+ c <- true
+ }()
+ s = make([]int, 20)
+ <-c
+}
+
+func TestRaceSliceVarRead(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ _ = s[3]
+ c <- true
+ }()
+ s = make([]int, 20)
+ <-c
+}
+
+func TestRaceSliceVarRange(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ for range s {
+ }
+ c <- true
+ }()
+ s = make([]int, 20)
+ <-c
+}
+
+func TestRaceSliceVarAppend(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ _ = append(s, 10)
+ c <- true
+ }()
+ s = make([]int, 20)
+ <-c
+}
+
+func TestRaceSliceVarCopy(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ s2 := make([]int, 10)
+ copy(s, s2)
+ c <- true
+ }()
+ s = make([]int, 20)
+ <-c
+}
+
+func TestRaceSliceVarCopy2(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ s2 := make([]int, 10)
+ copy(s2, s)
+ c <- true
+ }()
+ s = make([]int, 20)
+ <-c
+}
+
+func TestRaceSliceAppend(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10, 20)
+ go func() {
+ _ = append(s, 1)
+ c <- true
+ }()
+ _ = append(s, 2)
+ <-c
+}
+
+func TestRaceSliceAppendWrite(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ _ = append(s, 1)
+ c <- true
+ }()
+ s[0] = 42
+ <-c
+}
+
+func TestRaceSliceAppendSlice(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ go func() {
+ s2 := make([]int, 10)
+ _ = append(s, s2...)
+ c <- true
+ }()
+ s[0] = 42
+ <-c
+}
+
+func TestRaceSliceAppendSlice2(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ s2foobar := make([]int, 10)
+ go func() {
+ _ = append(s, s2foobar...)
+ c <- true
+ }()
+ s2foobar[5] = 42
+ <-c
+}
+
+func TestRaceSliceAppendString(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]byte, 10)
+ go func() {
+ _ = append(s, "qwerty"...)
+ c <- true
+ }()
+ s[0] = 42
+ <-c
+}
+
+func TestRacePointerSliceAppend(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]*int, 10, 20)
+ go func() {
+ _ = append(s, new(int))
+ c <- true
+ }()
+ _ = append(s, new(int))
+ <-c
+}
+
+func TestRacePointerSliceAppendWrite(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]*int, 10)
+ go func() {
+ _ = append(s, new(int))
+ c <- true
+ }()
+ s[0] = new(int)
+ <-c
+}
+
+func TestRacePointerSliceAppendSlice(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]*int, 10)
+ go func() {
+ s2 := make([]*int, 10)
+ _ = append(s, s2...)
+ c <- true
+ }()
+ s[0] = new(int)
+ <-c
+}
+
+func TestRacePointerSliceAppendSlice2(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]*int, 10)
+ s2foobar := make([]*int, 10)
+ go func() {
+ _ = append(s, s2foobar...)
+ c <- true
+ }()
+ println("WRITE:", &s2foobar[5])
+ s2foobar[5] = nil
+ <-c
+}
+
+func TestNoRaceSliceIndexAccess(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ v := 0
+ go func() {
+ _ = v
+ c <- true
+ }()
+ s[v] = 1
+ <-c
+}
+
+func TestNoRaceSliceIndexAccess2(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ v := 0
+ go func() {
+ _ = v
+ c <- true
+ }()
+ _ = s[v]
+ <-c
+}
+
+func TestRaceSliceIndexAccess(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ v := 0
+ go func() {
+ v = 1
+ c <- true
+ }()
+ s[v] = 1
+ <-c
+}
+
+func TestRaceSliceIndexAccess2(t *testing.T) {
+ c := make(chan bool, 1)
+ s := make([]int, 10)
+ v := 0
+ go func() {
+ v = 1
+ c <- true
+ }()
+ _ = s[v]
+ <-c
+}
+
+func TestRaceSliceByteToString(t *testing.T) {
+ c := make(chan string)
+ s := make([]byte, 10)
+ go func() {
+ c <- string(s)
+ }()
+ s[0] = 42
+ <-c
+}
+
+func TestRaceSliceRuneToString(t *testing.T) {
+ c := make(chan string)
+ s := make([]rune, 10)
+ go func() {
+ c <- string(s)
+ }()
+ s[9] = 42
+ <-c
+}
+
+func TestRaceConcatString(t *testing.T) {
+ s := "hello"
+ c := make(chan string, 1)
+ go func() {
+ c <- s + " world"
+ }()
+ s = "world"
+ <-c
+}
+
+func TestRaceCompareString(t *testing.T) {
+ s1 := "hello"
+ s2 := "world"
+ c := make(chan bool, 1)
+ go func() {
+ c <- s1 == s2
+ }()
+ s1 = s2
+ <-c
+}
+
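+// A three-index slice expression x[:1:i] reads its cap operand i.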
+func TestRaceSlice3(t *testing.T) {
+ done := make(chan bool)
+ x := make([]int, 10)
+ i := 2
+ go func() {
+ i = 3
+ done <- true
+ }()
+ _ = x[:1:i]
+ <-done
+}
+
+var saved string
+
+func TestRaceSlice4(t *testing.T) {
+ // See issue 36794.
+ data := []byte("hello there")
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ _ = string(data)
+ wg.Done()
+ }()
+ copy(data, data[2:])
+ wg.Wait()
+}
diff --git a/src/runtime/race/testdata/sync_test.go b/src/runtime/race/testdata/sync_test.go
new file mode 100644
index 0000000..b5fcd6c
--- /dev/null
+++ b/src/runtime/race/testdata/sync_test.go
@@ -0,0 +1,202 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestNoRaceCond(t *testing.T) {
+ x := 0
+ _ = x
+ condition := 0
+ var mu sync.Mutex
+ cond := sync.NewCond(&mu)
+ go func() {
+ x = 1
+ mu.Lock()
+ condition = 1
+ cond.Signal()
+ mu.Unlock()
+ }()
+ mu.Lock()
+ for condition != 1 {
+ cond.Wait()
+ }
+ mu.Unlock()
+ x = 2
+}
+
+func TestRaceCond(t *testing.T) {
+ done := make(chan bool)
+ var mu sync.Mutex
+ cond := sync.NewCond(&mu)
+ x := 0
+ _ = x
+ condition := 0
+ go func() {
+ time.Sleep(10 * time.Millisecond) // Enter cond.Wait loop
+ x = 1
+ mu.Lock()
+ condition = 1
+ cond.Signal()
+ mu.Unlock()
+ time.Sleep(10 * time.Millisecond) // Exit cond.Wait loop
+ mu.Lock()
+ x = 3
+ mu.Unlock()
+ done <- true
+ }()
+ mu.Lock()
+ for condition != 1 {
+ cond.Wait()
+ }
+ mu.Unlock()
+ x = 2
+ <-done
+}
+
+// We do not currently parse this test
+// automatically. The goroutine creation stack
+// is intended to be inspected manually and
+// verified to contain no off-by-one errors.
+func TestRaceAnnounceThreads(t *testing.T) {
+ const N = 7
+ allDone := make(chan bool, N)
+
+ var x int
+ _ = x
+
+ var f, g, h func()
+ f = func() {
+ x = 1
+ go g()
+ go func() {
+ x = 1
+ allDone <- true
+ }()
+ x = 2
+ allDone <- true
+ }
+
+ g = func() {
+ for i := 0; i < 2; i++ {
+ go func() {
+ x = 1
+ allDone <- true
+ }()
+ allDone <- true
+ }
+ }
+
+ h = func() {
+ x = 1
+ x = 2
+ go f()
+ allDone <- true
+ }
+
+ go h()
+
+ for i := 0; i < N; i++ {
+ <-allDone
+ }
+}
+
+func TestNoRaceAfterFunc1(t *testing.T) {
+ i := 2
+ c := make(chan bool)
+ var f func()
+ f = func() {
+ i--
+ if i >= 0 {
+ time.AfterFunc(0, f)
+ } else {
+ c <- true
+ }
+ }
+
+ time.AfterFunc(0, f)
+ <-c
+}
+
+func TestNoRaceAfterFunc2(t *testing.T) {
+ var x int
+ _ = x
+ timer := time.AfterFunc(10, func() {
+ x = 1
+ })
+ defer timer.Stop()
+}
+
+func TestNoRaceAfterFunc3(t *testing.T) {
+ c := make(chan bool, 1)
+ x := 0
+ _ = x
+ time.AfterFunc(1e7, func() {
+ x = 1
+ c <- true
+ })
+ <-c
+}
+
+func TestRaceAfterFunc3(t *testing.T) {
+ c := make(chan bool, 2)
+ x := 0
+ _ = x
+ time.AfterFunc(1e7, func() {
+ x = 1
+ c <- true
+ })
+ time.AfterFunc(2e7, func() {
+ x = 2
+ c <- true
+ })
+ <-c
+ <-c
+}
+
+// This test's output is intended to be
+// observed manually. One should check
+// that the goroutine creation stack is
+// comprehensible.
+func TestRaceGoroutineCreationStack(t *testing.T) {
+ var x int
+ _ = x
+ var ch = make(chan bool, 1)
+
+ f1 := func() {
+ x = 1
+ ch <- true
+ }
+ f2 := func() { go f1() }
+ f3 := func() { go f2() }
+ f4 := func() { go f3() }
+
+ go f4()
+ x = 2
+ <-ch
+}
+
+// A nil pointer in a mutex method call should not
+// corrupt the race detector state.
+// Used to hang indefinitely.
+func TestNoRaceNilMutexCrash(t *testing.T) {
+ var mutex sync.Mutex
+ panics := 0
+ defer func() {
+ if x := recover(); x != nil {
+ mutex.Lock()
+ panics++
+ mutex.Unlock()
+ } else {
+ panic("no panic")
+ }
+ }()
+ var othermutex *sync.RWMutex
+ othermutex.RLock()
+}
diff --git a/src/runtime/race/testdata/waitgroup_test.go b/src/runtime/race/testdata/waitgroup_test.go
new file mode 100644
index 0000000..1693373
--- /dev/null
+++ b/src/runtime/race/testdata/waitgroup_test.go
@@ -0,0 +1,360 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "runtime"
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestNoRaceWaitGroup(t *testing.T) {
+ var x int
+ _ = x
+ var wg sync.WaitGroup
+ n := 1
+ for i := 0; i < n; i++ {
+ wg.Add(1)
+ j := i
+ go func() {
+ x = j
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func TestRaceWaitGroup(t *testing.T) {
+ var x int
+ _ = x
+ var wg sync.WaitGroup
+ n := 2
+ for i := 0; i < n; i++ {
+ wg.Add(1)
+ j := i
+ go func() {
+ x = j
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func TestNoRaceWaitGroup2(t *testing.T) {
+ var x int
+ _ = x
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ x = 1
+ wg.Done()
+ }()
+ wg.Wait()
+ x = 2
+}
+
+// Incrementing the counter in Add and locking wg's mutex.
+func TestRaceWaitGroupAsMutex(t *testing.T) {
+ var x int
+ _ = x
+ var wg sync.WaitGroup
+ c := make(chan bool, 2)
+ go func() {
+ wg.Wait()
+ time.Sleep(100 * time.Millisecond)
+ wg.Add(+1)
+ x = 1
+ wg.Add(-1)
+ c <- true
+ }()
+ go func() {
+ wg.Wait()
+ time.Sleep(100 * time.Millisecond)
+ wg.Add(+1)
+ x = 2
+ wg.Add(-1)
+ c <- true
+ }()
+ <-c
+ <-c
+}
+
+// Incorrect usage: Add is too late.
+func TestRaceWaitGroupWrongWait(t *testing.T) {
+ c := make(chan bool, 2)
+ var x int
+ _ = x
+ var wg sync.WaitGroup
+ go func() {
+ wg.Add(1)
+ runtime.Gosched()
+ x = 1
+ wg.Done()
+ c <- true
+ }()
+ go func() {
+ wg.Add(1)
+ runtime.Gosched()
+ x = 2
+ wg.Done()
+ c <- true
+ }()
+ wg.Wait()
+ <-c
+ <-c
+}
+
+func TestRaceWaitGroupWrongAdd(t *testing.T) {
+ c := make(chan bool, 2)
+ var wg sync.WaitGroup
+ go func() {
+ wg.Add(1)
+ time.Sleep(100 * time.Millisecond)
+ wg.Done()
+ c <- true
+ }()
+ go func() {
+ wg.Add(1)
+ time.Sleep(100 * time.Millisecond)
+ wg.Done()
+ c <- true
+ }()
+ time.Sleep(50 * time.Millisecond)
+ wg.Wait()
+ <-c
+ <-c
+}
+
+func TestNoRaceWaitGroupMultipleWait(t *testing.T) {
+ c := make(chan bool, 2)
+ var wg sync.WaitGroup
+ go func() {
+ wg.Wait()
+ c <- true
+ }()
+ go func() {
+ wg.Wait()
+ c <- true
+ }()
+ wg.Wait()
+ <-c
+ <-c
+}
+
+func TestNoRaceWaitGroupMultipleWait2(t *testing.T) {
+ c := make(chan bool, 2)
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ wg.Done()
+ wg.Wait()
+ c <- true
+ }()
+ go func() {
+ wg.Done()
+ wg.Wait()
+ c <- true
+ }()
+ wg.Wait()
+ <-c
+ <-c
+}
+
+func TestNoRaceWaitGroupMultipleWait3(t *testing.T) {
+ const P = 3
+ var data [P]int
+ done := make(chan bool, P)
+ var wg sync.WaitGroup
+ wg.Add(P)
+ for p := 0; p < P; p++ {
+ go func(p int) {
+ data[p] = 42
+ wg.Done()
+ }(p)
+ }
+ for p := 0; p < P; p++ {
+ go func() {
+ wg.Wait()
+ for p1 := 0; p1 < P; p1++ {
+ _ = data[p1]
+ }
+ done <- true
+ }()
+ }
+ for p := 0; p < P; p++ {
+ <-done
+ }
+}
+
+// Correct usage but still a race
+func TestRaceWaitGroup2(t *testing.T) {
+ var x int
+ _ = x
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ x = 1
+ wg.Done()
+ }()
+ go func() {
+ x = 2
+ wg.Done()
+ }()
+ wg.Wait()
+}
+
+func TestNoRaceWaitGroupPanicRecover(t *testing.T) {
+ var x int
+ _ = x
+ var wg sync.WaitGroup
+ defer func() {
+ err := recover()
+ if err != "sync: negative WaitGroup counter" {
+ t.Fatalf("Unexpected panic: %#v", err)
+ }
+ x = 2
+ }()
+ x = 1
+ wg.Add(-1)
+}
+
+// TODO: this is actually a panic-synchronization test, not a
+// WaitGroup test. Move it to another *_test file.
+// Is it possible to get a race by synchronization via panic?
+func TestNoRaceWaitGroupPanicRecover2(t *testing.T) {
+ var x int
+ _ = x
+ var wg sync.WaitGroup
+ ch := make(chan bool, 1)
+ var f func() = func() {
+ x = 2
+ ch <- true
+ }
+ go func() {
+ defer func() {
+ err := recover()
+ if err != "sync: negative WaitGroup counter" {
+ }
+ go f()
+ }()
+ x = 1
+ wg.Add(-1)
+ }()
+
+ <-ch
+}
+
+func TestNoRaceWaitGroupTransitive(t *testing.T) {
+ x, y := 0, 0
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ x = 42
+ wg.Done()
+ }()
+ go func() {
+ time.Sleep(1e7)
+ y = 42
+ wg.Done()
+ }()
+ wg.Wait()
+ _ = x
+ _ = y
+}
+
+func TestNoRaceWaitGroupReuse(t *testing.T) {
+ const P = 3
+ var data [P]int
+ var wg sync.WaitGroup
+ for try := 0; try < 3; try++ {
+ wg.Add(P)
+ for p := 0; p < P; p++ {
+ go func(p int) {
+ data[p]++
+ wg.Done()
+ }(p)
+ }
+ wg.Wait()
+ for p := 0; p < P; p++ {
+ data[p]++
+ }
+ }
+}
+
+func TestNoRaceWaitGroupReuse2(t *testing.T) {
+ const P = 3
+ var data [P]int
+ var wg sync.WaitGroup
+ for try := 0; try < 3; try++ {
+ wg.Add(P)
+ for p := 0; p < P; p++ {
+ go func(p int) {
+ data[p]++
+ wg.Done()
+ }(p)
+ }
+ done := make(chan bool)
+ go func() {
+ wg.Wait()
+ for p := 0; p < P; p++ {
+ data[p]++
+ }
+ done <- true
+ }()
+ wg.Wait()
+ <-done
+ for p := 0; p < P; p++ {
+ data[p]++
+ }
+ }
+}
+
+func TestRaceWaitGroupReuse(t *testing.T) {
+ const P = 3
+ const T = 3
+ done := make(chan bool, T)
+ var wg sync.WaitGroup
+ for try := 0; try < T; try++ {
+ var data [P]int
+ wg.Add(P)
+ for p := 0; p < P; p++ {
+ go func(p int) {
+ time.Sleep(50 * time.Millisecond)
+ data[p]++
+ wg.Done()
+ }(p)
+ }
+ go func() {
+ wg.Wait()
+ for p := 0; p < P; p++ {
+ data[p]++
+ }
+ done <- true
+ }()
+ time.Sleep(100 * time.Millisecond)
+ wg.Wait()
+ }
+ for try := 0; try < T; try++ {
+ <-done
+ }
+}
+
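+// Concurrent Add and Done calls from multiple goroutines must not be
+// reported as racing on the WaitGroup's internal state.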
+func TestNoRaceWaitGroupConcurrentAdd(t *testing.T) {
+ const P = 4
+ waiting := make(chan bool, P)
+ var wg sync.WaitGroup
+ for p := 0; p < P; p++ {
+ go func() {
+ wg.Add(1)
+ waiting <- true
+ wg.Done()
+ }()
+ }
+ for p := 0; p < P; p++ {
+ <-waiting
+ }
+ wg.Wait()
+}
diff --git a/src/runtime/race/timer_test.go b/src/runtime/race/timer_test.go
new file mode 100644
index 0000000..dd59005
--- /dev/null
+++ b/src/runtime/race/timer_test.go
@@ -0,0 +1,33 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package race_test
+
+import (
+ "sync"
+ "testing"
+ "time"
+)
+
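+// Stress test: several goroutines each drive a fast ticker while taking a
+// shared mutex, exercising the race detector's handling of timer goroutines.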
+func TestTimers(t *testing.T) {
+ const goroutines = 8
+ var wg sync.WaitGroup
+ wg.Add(goroutines)
+ var mu sync.Mutex
+ for i := 0; i < goroutines; i++ {
+ go func() {
+ defer wg.Done()
+ ticker := time.NewTicker(1)
+ defer ticker.Stop()
+ for c := 0; c < 1000; c++ {
+ <-ticker.C
+ mu.Lock()
+ mu.Unlock()
+ }
+ }()
+ }
+ wg.Wait()
+}
diff --git a/src/runtime/race0.go b/src/runtime/race0.go
new file mode 100644
index 0000000..f36d438
--- /dev/null
+++ b/src/runtime/race0.go
@@ -0,0 +1,44 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+
+// Dummy race detection API, used when not built with -race.
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+const raceenabled = false
+
+// Because raceenabled is false, none of these functions should be called.
+
+func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func raceinit() (uintptr, uintptr) { throw("race"); return 0, 0 }
+func racefini() { throw("race") }
+func raceproccreate() uintptr { throw("race"); return 0 }
+func raceprocdestroy(ctx uintptr) { throw("race") }
+func racemapshadow(addr unsafe.Pointer, size uintptr) { throw("race") }
+func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { throw("race") }
+func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { throw("race") }
+func raceacquire(addr unsafe.Pointer) { throw("race") }
+func raceacquireg(gp *g, addr unsafe.Pointer) { throw("race") }
+func raceacquirectx(racectx uintptr, addr unsafe.Pointer) { throw("race") }
+func racerelease(addr unsafe.Pointer) { throw("race") }
+func racereleaseg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racereleaseacquire(addr unsafe.Pointer) { throw("race") }
+func racereleaseacquireg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racereleasemerge(addr unsafe.Pointer) { throw("race") }
+func racereleasemergeg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racefingo() { throw("race") }
+func racemalloc(p unsafe.Pointer, sz uintptr) { throw("race") }
+func racefree(p unsafe.Pointer, sz uintptr) { throw("race") }
+func racegostart(pc uintptr) uintptr { throw("race"); return 0 }
+func racegoend() { throw("race") }
+func racectxend(racectx uintptr) { throw("race") }
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
new file mode 100644
index 0000000..34ec200
--- /dev/null
+++ b/src/runtime/race_amd64.s
@@ -0,0 +1,457 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "funcdata.h"
+#include "textflag.h"
+#include "cgo/abi_amd64.h"
+
+// The following thunks allow calling the gcc-compiled race runtime directly
+// from Go code without going all the way through cgo.
+// First, it's much faster (up to 50% speedup for real Go programs).
+// Second, it eliminates race-related special cases from cgocall and the scheduler.
+// Third, in the long term it will allow removing the cyclic runtime/race dependency on cmd/go.
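+//
+// In effect, each thunk below turns a Go call such as runtime·raceread(addr)
+// into the C call __tsan_read(g.racectx, addr, caller_pc) without a full
+// cgo transition (a rough sketch; see the thunk bodies for the exact
+// register moves).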
+
+// A brief recap of the amd64 calling convention.
+// Arguments are passed in DI, SI, DX, CX, R8, R9, the rest is on stack.
+// Callee-saved registers are: BX, BP, R12-R15.
+// SP must be 16-byte aligned.
+// On Windows:
+// Arguments are passed in CX, DX, R8, R9, the rest is on stack.
+// Callee-saved registers are: BX, BP, DI, SI, R12-R15.
+// SP must be 16-byte aligned. Windows also requires "stack-backing" for the 4 register arguments:
+// https://msdn.microsoft.com/en-us/library/ms235286.aspx
+// We do not do this, because it seems to be intended for vararg/unprototyped functions.
+// Gcc-compiled race runtime does not try to use that space.
+
+#ifdef GOOS_windows
+#define RARG0 CX
+#define RARG1 DX
+#define RARG2 R8
+#define RARG3 R9
+#else
+#define RARG0 DI
+#define RARG1 SI
+#define RARG2 DX
+#define RARG3 CX
+#endif
+
+// func runtime·raceread(addr uintptr)
+// Called from instrumented code.
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would render runtime.getcallerpc ineffective.
+TEXT runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
+ MOVQ AX, RARG1
+ MOVQ (SP), RARG2
+ // void __tsan_read(ThreadState *thr, void *addr, void *pc);
+ MOVQ $__tsan_read(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceRead(addr uintptr)
+TEXT runtime·RaceRead(SB), NOSPLIT, $0-8
+ // This needs to be a tail call, because raceread reads caller pc.
+ JMP runtime·raceread(SB)
+
+// void runtime·racereadpc(void *addr, void *callpc, void *pc)
+TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
+ MOVQ addr+0(FP), RARG1
+ MOVQ callpc+8(FP), RARG2
+ MOVQ pc+16(FP), RARG3
+ ADDQ $1, RARG3 // pc is function start, tsan wants return address
+ // void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
+ MOVQ $__tsan_read_pc(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·racewrite(addr uintptr)
+// Called from instrumented code.
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would render runtime.getcallerpc ineffective.
+TEXT runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
+ MOVQ AX, RARG1
+ MOVQ (SP), RARG2
+ // void __tsan_write(ThreadState *thr, void *addr, void *pc);
+ MOVQ $__tsan_write(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceWrite(addr uintptr)
+TEXT runtime·RaceWrite(SB), NOSPLIT, $0-8
+ // This needs to be a tail call, because racewrite reads caller pc.
+ JMP runtime·racewrite(SB)
+
+// void runtime·racewritepc(void *addr, void *callpc, void *pc)
+TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
+ MOVQ addr+0(FP), RARG1
+ MOVQ callpc+8(FP), RARG2
+ MOVQ pc+16(FP), RARG3
+ ADDQ $1, RARG3 // pc is function start, tsan wants return address
+ // void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
+ MOVQ $__tsan_write_pc(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·racereadrange(addr, size uintptr)
+// Called from instrumented code.
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would render runtime.getcallerpc ineffective.
+TEXT runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
+ MOVQ AX, RARG1
+ MOVQ BX, RARG2
+ MOVQ (SP), RARG3
+ // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVQ $__tsan_read_range(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceReadRange(addr, size uintptr)
+TEXT runtime·RaceReadRange(SB), NOSPLIT, $0-16
+ // This needs to be a tail call, because racereadrange reads caller pc.
+ JMP runtime·racereadrange(SB)
+
+// void runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
+TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
+ MOVQ addr+0(FP), RARG1
+ MOVQ size+8(FP), RARG2
+ MOVQ pc+16(FP), RARG3
+ ADDQ $1, RARG3 // pc is function start, tsan wants return address
+ // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVQ $__tsan_read_range(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·racewriterange(addr, size uintptr)
+// Called from instrumented code.
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would render runtime.getcallerpc ineffective.
+TEXT runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
+ MOVQ AX, RARG1
+ MOVQ BX, RARG2
+ MOVQ (SP), RARG3
+ // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVQ $__tsan_write_range(SB), AX
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceWriteRange(addr, size uintptr)
+TEXT runtime·RaceWriteRange(SB), NOSPLIT, $0-16
+ // This needs to be a tail call, because racewriterange reads caller pc.
+ JMP runtime·racewriterange(SB)
+
+// void runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
+TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
+ MOVQ addr+0(FP), RARG1
+ MOVQ size+8(FP), RARG2
+ MOVQ pc+16(FP), RARG3
+ ADDQ $1, RARG3 // pc is function start, tsan wants return address
+ // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVQ $__tsan_write_range(SB), AX
+ JMP racecalladdr<>(SB)
+
+// If addr (RARG1) is out of range, do nothing.
+// Otherwise, set up the goroutine context and invoke racecall. Other arguments are already set.
+TEXT racecalladdr<>(SB), NOSPLIT, $0-0
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
+ CMPQ RARG1, runtime·racearenastart(SB)
+ JB data
+ CMPQ RARG1, runtime·racearenaend(SB)
+ JB call
+data:
+ CMPQ RARG1, runtime·racedatastart(SB)
+ JB ret
+ CMPQ RARG1, runtime·racedataend(SB)
+ JAE ret
+call:
+ MOVQ AX, AX // without this, the assembler (6a) miscompiles this function
+ JMP racecall<>(SB)
+ret:
+ RET
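+
+// The check above, as a Go-like sketch (using the runtime variables the
+// CMPQ instructions compare against):
+//
+//	if (addr >= racearenastart && addr < racearenaend) ||
+//		(addr >= racedatastart && addr < racedataend) {
+//		// invoke the tsan function in AX
+//	}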
+
+// func runtime·racefuncenter(pc uintptr)
+// Called from instrumented code.
+TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
+ MOVQ callpc+0(FP), R11
+ JMP racefuncenter<>(SB)
+
+// Common code for racefuncenter
+// R11 = caller's return address
+TEXT racefuncenter<>(SB), NOSPLIT, $0-0
+ MOVQ DX, BX // save function entry context (for closures)
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ MOVQ R11, RARG1
+ // void __tsan_func_enter(ThreadState *thr, void *pc);
+ MOVQ $__tsan_func_enter(SB), AX
+ // racecall<> preserves BX
+ CALL racecall<>(SB)
+ MOVQ BX, DX // restore function entry context
+ RET
+
+// func runtime·racefuncexit()
+// Called from instrumented code.
+TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ // void __tsan_func_exit(ThreadState *thr);
+ MOVQ $__tsan_func_exit(SB), AX
+ JMP racecall<>(SB)
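+
+// racefuncenter/racefuncexit let tsan maintain a shadow of the Go call
+// stack. A hedged sketch of what instrumentation does to a function body:
+//
+//	func f() {
+//		racefuncenter(getcallerpc()) // emitted by the compiler at entry
+//		// ... instrumented body ...
+//		racefuncexit() // emitted before each return
+//	}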
+
+// Atomic operations for sync/atomic package.
+
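+// Under -race, these TEXT symbols provide the sync/atomic entry points
+// (the usual compiler intrinsics are not used). Every tsan helper has the
+// same shape, e.g. (as spelled out in the ppc64le file below):
+//
+//	void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+//
+// where a points at the Go argument block; tsan reads the operands and
+// writes the result back through that block.
+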
+// Load
+TEXT sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ MOVQ $__tsan_go_atomic32_load(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ MOVQ $__tsan_go_atomic64_load(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ JMP sync∕atomic·LoadInt32(SB)
+
+TEXT sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·LoadInt64(SB)
+
+TEXT sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·LoadInt64(SB)
+
+TEXT sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·LoadInt64(SB)
+
+// Store
+TEXT sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ MOVQ $__tsan_go_atomic32_store(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ MOVQ $__tsan_go_atomic64_store(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ JMP sync∕atomic·StoreInt32(SB)
+
+TEXT sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·StoreInt64(SB)
+
+TEXT sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·StoreInt64(SB)
+
+// Swap
+TEXT sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVQ $__tsan_go_atomic32_exchange(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVQ $__tsan_go_atomic64_exchange(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·SwapInt32(SB)
+
+TEXT sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·SwapInt64(SB)
+
+TEXT sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·SwapInt64(SB)
+
+// Add
+TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVQ $__tsan_go_atomic32_fetch_add(SB), AX
+ CALL racecallatomic<>(SB)
+ MOVL add+8(FP), AX // convert fetch_add to add_fetch
+ ADDL AX, ret+16(FP)
+ RET
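+
+// The fix-up is needed because Go's atomic Add returns the new value:
+//
+//	n := int32(40)
+//	v := atomic.AddInt32(&n, 2) // v == 42 (add_fetch)
+//
+// while tsan's fetch_add leaves the old value (40) in the result slot.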
+
+TEXT sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVQ $__tsan_go_atomic64_fetch_add(SB), AX
+ CALL racecallatomic<>(SB)
+ MOVQ add+8(FP), AX // convert fetch_add to add_fetch
+ ADDQ AX, ret+16(FP)
+ RET
+
+TEXT sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·AddInt32(SB)
+
+TEXT sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AddInt64(SB)
+
+TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AddInt64(SB)
+
+// CompareAndSwap
+TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
+ GO_ARGS
+ MOVQ $__tsan_go_atomic32_compare_exchange(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
+ GO_ARGS
+ MOVQ $__tsan_go_atomic64_compare_exchange(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
+ GO_ARGS
+ JMP sync∕atomic·CompareAndSwapInt32(SB)
+
+TEXT sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
+ GO_ARGS
+ JMP sync∕atomic·CompareAndSwapInt64(SB)
+
+TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
+ GO_ARGS
+ JMP sync∕atomic·CompareAndSwapInt64(SB)
+
+// Generic atomic operation implementation.
+// AX already contains target function.
+TEXT racecallatomic<>(SB), NOSPLIT, $0-0
+ // Trigger SIGSEGV early.
+ MOVQ 16(SP), R12
+ MOVBLZX (R12), R13
+ // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
+ CMPQ R12, runtime·racearenastart(SB)
+ JB racecallatomic_data
+ CMPQ R12, runtime·racearenaend(SB)
+ JB racecallatomic_ok
+racecallatomic_data:
+ CMPQ R12, runtime·racedatastart(SB)
+ JB racecallatomic_ignore
+ CMPQ R12, runtime·racedataend(SB)
+ JAE racecallatomic_ignore
+racecallatomic_ok:
+ // Addr is within the good range, call the atomic function.
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ MOVQ 8(SP), RARG1 // caller pc
+ MOVQ (SP), RARG2 // pc
+ LEAQ 16(SP), RARG3 // arguments
+ JMP racecall<>(SB) // does not return
+racecallatomic_ignore:
+ // Addr is outside the good range.
+ // Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
+ // An attempt to synchronize on the address would cause a crash.
+ MOVQ AX, BX // remember the original function
+ MOVQ $__tsan_go_ignore_sync_begin(SB), AX
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ CALL racecall<>(SB)
+ MOVQ BX, AX // restore the original function
+ // Call the atomic function.
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ MOVQ 8(SP), RARG1 // caller pc
+ MOVQ (SP), RARG2 // pc
+ LEAQ 16(SP), RARG3 // arguments
+ CALL racecall<>(SB)
+ // Call __tsan_go_ignore_sync_end.
+ MOVQ $__tsan_go_ignore_sync_end(SB), AX
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ JMP racecall<>(SB)
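+
+// In effect, the slow path above is (pseudocode sketch):
+//
+//	__tsan_go_ignore_sync_begin(thr)
+//	atomicfn(thr, cpc, pc, args)
+//	__tsan_go_ignore_sync_end(thr)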
+
+// void runtime·racecall(void(*f)(...), ...)
+// Calls C function f from race runtime and passes up to 4 arguments to it.
+// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments.
+TEXT runtime·racecall(SB), NOSPLIT, $0-0
+ MOVQ fn+0(FP), AX
+ MOVQ arg0+8(FP), RARG0
+ MOVQ arg1+16(FP), RARG1
+ MOVQ arg2+24(FP), RARG2
+ MOVQ arg3+32(FP), RARG3
+ JMP racecall<>(SB)
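+
+// A hedged sketch of the matching Go-side declaration (see race.go):
+//
+//	//go:noescape
+//	func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)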
+
+// Switches SP to g0 stack and calls (AX). Arguments already set.
+TEXT racecall<>(SB), NOSPLIT, $0-0
+ MOVQ g_m(R14), R13
+ // Switch to g0 stack.
+ MOVQ SP, R12 // callee-saved, preserved across the CALL
+ MOVQ m_g0(R13), R10
+ CMPQ R10, R14
+ JE call // already on g0
+ MOVQ (g_sched+gobuf_sp)(R10), SP
+call:
+ ANDQ $~15, SP // alignment for gcc ABI
+ CALL AX
+ MOVQ R12, SP
+ // Back to Go world, set special registers.
+ // The g register (R14) is preserved in C.
+ XORPS X15, X15
+ RET
+
+// C->Go callback thunk that allows calling runtime·racesymbolize from C code.
+// A direct Go->C race call only switches SP; finish the g->g0 switch by setting the correct g.
+// The overall effect of the Go->C->Go call chain is similar to that of mcall.
+// RARG0 contains command code. RARG1 contains command-specific context.
+// See racecallback for command codes.
+TEXT runtime·racecallbackthunk(SB), NOSPLIT, $0-0
+ // Handle command raceGetProcCmd (0) here.
+ // First, the code below assumes that we are on curg, while raceGetProcCmd
+ // can be executed on g0. Second, it is called frequently, so it will
+ // benefit from this fast path.
+ CMPQ RARG0, $0
+ JNE rest
+ get_tls(RARG0)
+ MOVQ g(RARG0), RARG0
+ MOVQ g_m(RARG0), RARG0
+ MOVQ m_p(RARG0), RARG0
+ MOVQ p_raceprocctx(RARG0), RARG0
+ MOVQ RARG0, (RARG1)
+ RET
+
+rest:
+ // Transition from C ABI to Go ABI.
+ PUSH_REGS_HOST_TO_ABI0()
+ // Set g = g0.
+ get_tls(R12)
+ MOVQ g(R12), R14
+ MOVQ g_m(R14), R13
+ MOVQ m_g0(R13), R15
+ CMPQ R13, R15
+ JEQ noswitch // branch if already on g0
+ MOVQ R15, g(R12) // g = m->g0
+ MOVQ R15, R14 // set g register
+ PUSHQ RARG1 // func arg
+ PUSHQ RARG0 // func arg
+ CALL runtime·racecallback(SB)
+ POPQ R12
+ POPQ R12
+ // All registers are smashed after Go code, reload.
+ get_tls(R12)
+ MOVQ g(R12), R13
+ MOVQ g_m(R13), R13
+ MOVQ m_curg(R13), R14
+ MOVQ R14, g(R12) // g = m->curg
+ret:
+ POP_REGS_HOST_TO_ABI0()
+ RET
+
+noswitch:
+ // already on g0
+ PUSHQ RARG1 // func arg
+ PUSHQ RARG0 // func arg
+ CALL runtime·racecallback(SB)
+ POPQ R12
+ POPQ R12
+ JMP ret
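+
+// On the Go side, racecallback dispatches on the command code. A hedged
+// sketch of its shape (see racecallback in race.go for the real handling):
+//
+//	//go:nosplit
+//	func racecallback(cmd uintptr, ctx unsafe.Pointer) {
+//		switch cmd {
+//		case raceGetProcCmd:
+//			// normally short-circuited by the fast path above
+//		default:
+//			// symbolization and other commands
+//		}
+//	}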
diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s
new file mode 100644
index 0000000..c818345
--- /dev/null
+++ b/src/runtime/race_arm64.s
@@ -0,0 +1,498 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+#include "go_asm.h"
+#include "funcdata.h"
+#include "textflag.h"
+#include "tls_arm64.h"
+#include "cgo/abi_arm64.h"
+
+// The following thunks allow calling the gcc-compiled race runtime directly
+// from Go code without going all the way through cgo.
+// First, it's much faster (up to 50% speedup for real Go programs).
+// Second, it eliminates race-related special cases from cgocall and scheduler.
+// Third, in the long term it will allow removing the cyclic runtime/race dependency on cmd/go.
+
+// A brief recap of the arm64 calling convention.
+// Arguments are passed in R0...R7, the rest is on stack.
+// Callee-saved registers are: R19...R28.
+// Temporary registers are: R9...R15
+// SP must be 16-byte aligned.
+
+// When calling racecalladdr, R9 is the call target address.
+
+// The race ctx, ThreadState *thr below, is passed in R0 and loaded in racecalladdr.
+
+// Darwin may return unaligned thread pointer. Align it. (See tls_arm64.s)
+// No-op on other OSes.
+#ifdef TLS_darwin
+#define TP_ALIGN AND $~7, R0
+#else
+#define TP_ALIGN
+#endif
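+
+// In Go terms, TP_ALIGN is tp &^= 7: it clears the low three bits of the
+// thread pointer.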
+
+// Load g from TLS. (See tls_arm64.s)
+#define load_g \
+ MRS_TPIDR_R0 \
+ TP_ALIGN \
+ MOVD runtime·tls_g(SB), R11 \
+ MOVD (R0)(R11), g
+
+// func runtime·raceread(addr uintptr)
+// Called from instrumented code.
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make the caller's PC ineffective.
+TEXT runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
+ MOVD R0, R1 // addr
+ MOVD LR, R2
+ // void __tsan_read(ThreadState *thr, void *addr, void *pc);
+ MOVD $__tsan_read(SB), R9
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceRead(addr uintptr)
+TEXT runtime·RaceRead(SB), NOSPLIT, $0-8
+ // This needs to be a tail call, because raceread reads caller pc.
+ JMP runtime·raceread(SB)
+
+// func runtime·racereadpc(void *addr, void *callpc, void *pc)
+TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
+ MOVD addr+0(FP), R1
+ MOVD callpc+8(FP), R2
+ MOVD pc+16(FP), R3
+ // void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
+ MOVD $__tsan_read_pc(SB), R9
+ JMP racecalladdr<>(SB)
+
+// func runtime·racewrite(addr uintptr)
+// Called from instrumented code.
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make the caller's PC ineffective.
+TEXT runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
+ MOVD R0, R1 // addr
+ MOVD LR, R2
+ // void __tsan_write(ThreadState *thr, void *addr, void *pc);
+ MOVD $__tsan_write(SB), R9
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceWrite(addr uintptr)
+TEXT runtime·RaceWrite(SB), NOSPLIT, $0-8
+ // This needs to be a tail call, because racewrite reads caller pc.
+ JMP runtime·racewrite(SB)
+
+// func runtime·racewritepc(void *addr, void *callpc, void *pc)
+TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
+ MOVD addr+0(FP), R1
+ MOVD callpc+8(FP), R2
+ MOVD pc+16(FP), R3
+ // void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
+ MOVD $__tsan_write_pc(SB), R9
+ JMP racecalladdr<>(SB)
+
+// func runtime·racereadrange(addr, size uintptr)
+// Called from instrumented code.
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make the caller's PC ineffective.
+TEXT runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
+ MOVD R1, R2 // size
+ MOVD R0, R1 // addr
+ MOVD LR, R3
+ // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_read_range(SB), R9
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceReadRange(addr, size uintptr)
+TEXT runtime·RaceReadRange(SB), NOSPLIT, $0-16
+ // This needs to be a tail call, because racereadrange reads caller pc.
+ JMP runtime·racereadrange(SB)
+
+// func runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
+TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
+ MOVD addr+0(FP), R1
+ MOVD size+8(FP), R2
+ MOVD pc+16(FP), R3
+ ADD $4, R3 // pc is function start, tsan wants return address.
+ // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_read_range(SB), R9
+ JMP racecalladdr<>(SB)
+
+// func runtime·racewriterange(addr, size uintptr)
+// Called from instrumented code.
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make the caller's PC ineffective.
+TEXT runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
+ MOVD R1, R2 // size
+ MOVD R0, R1 // addr
+ MOVD LR, R3
+ // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_write_range(SB), R9
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceWriteRange(addr, size uintptr)
+TEXT runtime·RaceWriteRange(SB), NOSPLIT, $0-16
+ // This needs to be a tail call, because racewriterange reads caller pc.
+ JMP runtime·racewriterange(SB)
+
+// func runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
+TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
+ MOVD addr+0(FP), R1
+ MOVD size+8(FP), R2
+ MOVD pc+16(FP), R3
+ ADD $4, R3 // pc is function start, tsan wants return address.
+ // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_write_range(SB), R9
+ JMP racecalladdr<>(SB)
+
+// If addr (R1) is out of range, do nothing.
+// Otherwise, set up the goroutine context and invoke racecall. Other arguments are already set.
+TEXT racecalladdr<>(SB), NOSPLIT, $0-0
+ load_g
+ MOVD g_racectx(g), R0
+ // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
+ MOVD runtime·racearenastart(SB), R10
+ CMP R10, R1
+ BLT data
+ MOVD runtime·racearenaend(SB), R10
+ CMP R10, R1
+ BLT call
+data:
+ MOVD runtime·racedatastart(SB), R10
+ CMP R10, R1
+ BLT ret
+ MOVD runtime·racedataend(SB), R10
+ CMP R10, R1
+ BGT ret
+call:
+ JMP racecall<>(SB)
+ret:
+ RET
+
+// func runtime·racefuncenter(pc uintptr)
+// Called from instrumented code.
+TEXT runtime·racefuncenter<ABIInternal>(SB), NOSPLIT, $0-8
+ MOVD R0, R9 // callpc
+ JMP racefuncenter<>(SB)
+
+// Common code for racefuncenter
+// R9 = caller's return address
+TEXT racefuncenter<>(SB), NOSPLIT, $0-0
+ load_g
+ MOVD g_racectx(g), R0 // goroutine racectx
+ MOVD R9, R1
+ // void __tsan_func_enter(ThreadState *thr, void *pc);
+ MOVD $__tsan_func_enter(SB), R9
+ BL racecall<>(SB)
+ RET
+
+// func runtime·racefuncexit()
+// Called from instrumented code.
+TEXT runtime·racefuncexit<ABIInternal>(SB), NOSPLIT, $0-0
+ load_g
+ MOVD g_racectx(g), R0 // race context
+ // void __tsan_func_exit(ThreadState *thr);
+ MOVD $__tsan_func_exit(SB), R9
+ JMP racecall<>(SB)
+
+// Atomic operations for sync/atomic package.
+// R3 = addr of arguments passed to this function; it can
+// be fetched at 40(RSP) in racecallatomic after the two BLs.
+// R0, R1, R2 are set in racecallatomic.
+
+// Load
+TEXT sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_load(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_load(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ JMP sync∕atomic·LoadInt32(SB)
+
+TEXT sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·LoadInt64(SB)
+
+TEXT sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·LoadInt64(SB)
+
+TEXT sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·LoadInt64(SB)
+
+// Store
+TEXT sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_store(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_store(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ JMP sync∕atomic·StoreInt32(SB)
+
+TEXT sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·StoreInt64(SB)
+
+TEXT sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·StoreInt64(SB)
+
+// Swap
+TEXT sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_exchange(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_exchange(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·SwapInt32(SB)
+
+TEXT sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·SwapInt64(SB)
+
+TEXT sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·SwapInt64(SB)
+
+// Add
+TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_add(SB), R9
+ BL racecallatomic<>(SB)
+ MOVW add+8(FP), R0 // convert fetch_add to add_fetch
+ MOVW ret+16(FP), R1
+ ADD R0, R1, R0
+ MOVW R0, ret+16(FP)
+ RET
+
+TEXT sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_add(SB), R9
+ BL racecallatomic<>(SB)
+ MOVD add+8(FP), R0 // convert fetch_add to add_fetch
+ MOVD ret+16(FP), R1
+ ADD R0, R1, R0
+ MOVD R0, ret+16(FP)
+ RET
+
+TEXT sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·AddInt32(SB)
+
+TEXT sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AddInt64(SB)
+
+TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AddInt64(SB)
+
+// CompareAndSwap
+TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_compare_exchange(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_compare_exchange(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
+ GO_ARGS
+ JMP sync∕atomic·CompareAndSwapInt32(SB)
+
+TEXT sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
+ GO_ARGS
+ JMP sync∕atomic·CompareAndSwapInt64(SB)
+
+TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
+ GO_ARGS
+ JMP sync∕atomic·CompareAndSwapInt64(SB)
+
+// Generic atomic operation implementation.
+// R9 = addr of target function
+TEXT racecallatomic<>(SB), NOSPLIT, $0
+ // Set up these registers
+ // R0 = *ThreadState
+ // R1 = caller pc
+ // R2 = pc
+ // R3 = addr of incoming arg list
+
+ // Trigger SIGSEGV early.
+ MOVD 40(RSP), R3 // 1st arg is addr; after the two BLs it is at 40(RSP)
+ MOVB (R3), R13 // segv here if addr is bad
+ // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
+ MOVD runtime·racearenastart(SB), R10
+ CMP R10, R3
+ BLT racecallatomic_data
+ MOVD runtime·racearenaend(SB), R10
+ CMP R10, R3
+ BLT racecallatomic_ok
+racecallatomic_data:
+ MOVD runtime·racedatastart(SB), R10
+ CMP R10, R3
+ BLT racecallatomic_ignore
+ MOVD runtime·racedataend(SB), R10
+ CMP R10, R3
+ BGE racecallatomic_ignore
+racecallatomic_ok:
+ // Addr is within the good range, call the atomic function.
+ load_g
+ MOVD g_racectx(g), R0 // goroutine context
+ MOVD 16(RSP), R1 // caller pc
+ MOVD R9, R2 // pc
+ ADD $40, RSP, R3
+ JMP racecall<>(SB) // does not return
+racecallatomic_ignore:
+ // Addr is outside the good range.
+ // Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
+ // An attempt to synchronize on the address would cause a crash.
+ MOVD R9, R21 // remember the original function
+ MOVD $__tsan_go_ignore_sync_begin(SB), R9
+ load_g
+ MOVD g_racectx(g), R0 // goroutine context
+ BL racecall<>(SB)
+ MOVD R21, R9 // restore the original function
+ // Call the atomic function.
+ // racecall will call LLVM race code which might clobber R28 (g)
+ load_g
+ MOVD g_racectx(g), R0 // goroutine context
+ MOVD 16(RSP), R1 // caller pc
+ MOVD R9, R2 // pc
+ ADD $40, RSP, R3 // arguments
+ BL racecall<>(SB)
+ // Call __tsan_go_ignore_sync_end.
+ MOVD $__tsan_go_ignore_sync_end(SB), R9
+ MOVD g_racectx(g), R0 // goroutine context
+ BL racecall<>(SB)
+ RET
+
+// func runtime·racecall(void(*f)(...), ...)
+// Calls C function f from race runtime and passes up to 4 arguments to it.
+// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments.
+TEXT runtime·racecall(SB), NOSPLIT, $0-0
+ MOVD fn+0(FP), R9
+ MOVD arg0+8(FP), R0
+ MOVD arg1+16(FP), R1
+ MOVD arg2+24(FP), R2
+ MOVD arg3+32(FP), R3
+ JMP racecall<>(SB)
+
+// Switches SP to g0 stack and calls (R9). Arguments already set.
+// Clobbers R19, R20.
+TEXT racecall<>(SB), NOSPLIT|NOFRAME, $0-0
+ MOVD g_m(g), R10
+ // Switch to g0 stack.
+ MOVD RSP, R19 // callee-saved, preserved across the CALL
+ MOVD R30, R20 // callee-saved, preserved across the CALL
+ MOVD m_g0(R10), R11
+ CMP R11, g
+ BEQ call // already on g0
+ MOVD (g_sched+gobuf_sp)(R11), R12
+ MOVD R12, RSP
+call:
+ BL R9
+ MOVD R19, RSP
+ JMP (R20)
+
+// C->Go callback thunk that allows calling runtime·racesymbolize from C code.
+// A direct Go->C race call only switches SP; finish the g->g0 switch by setting the correct g.
+// The overall effect of the Go->C->Go call chain is similar to that of mcall.
+// R0 contains command code. R1 contains command-specific context.
+// See racecallback for command codes.
+TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0
+ // Handle command raceGetProcCmd (0) here.
+ // First, the code below assumes that we are on curg, while raceGetProcCmd
+ // can be executed on g0. Second, it is called frequently, so it will
+ // benefit from this fast path.
+ CBNZ R0, rest
+ MOVD g, R13
+#ifdef TLS_darwin
+ MOVD R27, R12 // save R27 a.k.a. REGTMP (callee-save in C). load_g clobbers it
+#endif
+ load_g
+#ifdef TLS_darwin
+ MOVD R12, R27
+#endif
+ MOVD g_m(g), R0
+ MOVD m_p(R0), R0
+ MOVD p_raceprocctx(R0), R0
+ MOVD R0, (R1)
+ MOVD R13, g
+ JMP (LR)
+rest:
+ // Save callee-saved registers (Go code will not preserve them).
+ // 8(RSP) and 16(RSP) are for args passed through racecallback
+ SUB $176, RSP
+ MOVD LR, 0(RSP)
+
+ SAVE_R19_TO_R28(8*3)
+ SAVE_F8_TO_F15(8*13)
+ MOVD R29, (8*21)(RSP)
+ // Set g = g0.
+ // load_g will clobber R0; save R0
+ MOVD R0, R13
+ load_g
+ // restore R0
+ MOVD R13, R0
+ MOVD g_m(g), R13
+ MOVD m_g0(R13), R14
+ CMP R14, g
+ BEQ noswitch // branch if already on g0
+ MOVD R14, g
+
+ MOVD R0, 8(RSP) // func arg
+ MOVD R1, 16(RSP) // func arg
+ BL runtime·racecallback(SB)
+
+ // All registers are smashed after Go code, reload.
+ MOVD g_m(g), R13
+ MOVD m_curg(R13), g // g = m->curg
+ret:
+ // Restore callee-saved registers.
+ MOVD 0(RSP), LR
+ MOVD (8*21)(RSP), R29
+ RESTORE_F8_TO_F15(8*13)
+ RESTORE_R19_TO_R28(8*3)
+ ADD $176, RSP
+ JMP (LR)
+
+noswitch:
+ // already on g0
+ MOVD R0, 8(RSP) // func arg
+ MOVD R1, 16(RSP) // func arg
+ BL runtime·racecallback(SB)
+ JMP ret
+
+#ifndef TLSG_IS_VARIABLE
+// tls_g, g value for each thread in TLS
+GLOBL runtime·tls_g+0(SB), TLSBSS+DUPOK, $8
+#endif
diff --git a/src/runtime/race_ppc64le.s b/src/runtime/race_ppc64le.s
new file mode 100644
index 0000000..2826501
--- /dev/null
+++ b/src/runtime/race_ppc64le.s
@@ -0,0 +1,601 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "funcdata.h"
+#include "textflag.h"
+#include "asm_ppc64x.h"
+
+// The following functions allow calling the clang-compiled race runtime directly
+// from Go code without going all the way through cgo.
+// First, it's much faster (up to 50% speedup for real Go programs).
+// Second, it eliminates race-related special cases from cgocall and scheduler.
+// Third, in the long term it will allow removing the cyclic runtime/race dependency on cmd/go.
+
+// A brief recap of the ppc64le calling convention.
+// Arguments are passed in R3, R4, R5 ...
+// SP must be 16-byte aligned.
+
+// Note that for ppc64x, LLVM follows the standard ABI and
+// expects arguments in registers, so these functions move
+// the arguments from storage to the registers expected
+// by the ABI.
+
+// When calling from Go to Clang tsan code:
+// R3 is the 1st argument and is usually the ThreadState*
+// R4-? are the 2nd, 3rd, 4th, etc. arguments
+
+// When calling racecalladdr:
+// R8 is the call target address
+
+// The race ctx is passed in R3 and loaded in
+// racecalladdr.
+//
+// The sequence used to get the race ctx:
+// MOVD runtime·tls_g(SB), R10 // Address of TLS variable
+// MOVD 0(R10), g // g = R30
+// MOVD g_racectx(g), R3 // racectx == ThreadState
+
+// func runtime·raceread(addr uintptr)
+// Called from instrumented Go code
+TEXT runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
+ MOVD R3, R4 // addr
+ MOVD LR, R5 // caller's pc (return address)
+ // void __tsan_read(ThreadState *thr, void *addr, void *pc);
+ MOVD $__tsan_read(SB), R8
+ BR racecalladdr<>(SB)
+
+TEXT runtime·RaceRead(SB), NOSPLIT, $0-8
+ BR runtime·raceread(SB)
+
+// void runtime·racereadpc(void *addr, void *callpc, void *pc)
+TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
+ MOVD addr+0(FP), R4
+ MOVD callpc+8(FP), R5
+ MOVD pc+16(FP), R6
+ // void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
+ MOVD $__tsan_read_pc(SB), R8
+ BR racecalladdr<>(SB)
+
+// func runtime·racewrite(addr uintptr)
+// Called from instrumented Go code
+TEXT runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
+ MOVD R3, R4 // addr
+ MOVD LR, R5 // caller has set LR via BL inst
+ // void __tsan_write(ThreadState *thr, void *addr, void *pc);
+ MOVD $__tsan_write(SB), R8
+ BR racecalladdr<>(SB)
+
+TEXT runtime·RaceWrite(SB), NOSPLIT, $0-8
+ JMP runtime·racewrite(SB)
+
+// void runtime·racewritepc(void *addr, void *callpc, void *pc)
+TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
+ MOVD addr+0(FP), R4
+ MOVD callpc+8(FP), R5
+ MOVD pc+16(FP), R6
+ // void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
+ MOVD $__tsan_write_pc(SB), R8
+ BR racecalladdr<>(SB)
+
+// func runtime·racereadrange(addr, size uintptr)
+// Called from instrumented Go code.
+TEXT runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
+ MOVD R4, R5 // size
+ MOVD R3, R4 // addr
+ MOVD LR, R6
+ // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_read_range(SB), R8
+ BR racecalladdr<>(SB)
+
+// void runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
+TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
+ MOVD addr+0(FP), R4
+ MOVD size+8(FP), R5
+ MOVD pc+16(FP), R6
+ ADD $4, R6 // tsan wants return addr
+ // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_read_range(SB), R8
+ BR racecalladdr<>(SB)
+
+TEXT runtime·RaceReadRange(SB), NOSPLIT, $0-16
+ BR runtime·racereadrange(SB)
+
+// func runtime·racewriterange(addr, size uintptr)
+// Called from instrumented Go code.
+TEXT runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
+ MOVD R4, R5 // size
+ MOVD R3, R4 // addr
+ MOVD LR, R6
+ // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_write_range(SB), R8
+ BR racecalladdr<>(SB)
+
+TEXT runtime·RaceWriteRange(SB), NOSPLIT, $0-16
+ BR runtime·racewriterange(SB)
+
+// void runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
+// Called from instrumented Go code
+TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
+ MOVD addr+0(FP), R4
+ MOVD size+8(FP), R5
+ MOVD pc+16(FP), R6
+ ADD $4, R6 // pc is function start, tsan wants a return address
+ // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_write_range(SB), R8
+ BR racecalladdr<>(SB)
+
+// Call a __tsan function from Go code.
+// R8 = tsan function address
+// R3 = *ThreadState a.k.a. g_racectx from g
+// R4 = addr passed to __tsan function
+//
+// If R4 (addr) is out of range, do nothing.
+// Otherwise, set up the goroutine context and invoke racecall. Other arguments are already set.
+TEXT racecalladdr<>(SB), NOSPLIT, $0-0
+ MOVD runtime·tls_g(SB), R10
+ MOVD 0(R10), g
+ MOVD g_racectx(g), R3 // goroutine context
+ // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
+ MOVD runtime·racearenastart(SB), R9
+ CMP R4, R9
+ BLT data
+ MOVD runtime·racearenaend(SB), R9
+ CMP R4, R9
+ BLT call
+data:
+ MOVD runtime·racedatastart(SB), R9
+ CMP R4, R9
+ BLT ret
+ MOVD runtime·racedataend(SB), R9
+ CMP R4, R9
+ BGT ret
+call:
+ // Careful: racecall will save LR on its
+ // stack, which is OK as long as racecalladdr
+ // does not change in a way that requires a stack frame.
+ // racecall should return to the caller of
+ // racecalladdr.
+ BR racecall<>(SB)
+ret:
+ RET
+
+// func runtime·racefuncenter(pc uintptr)
+// Called from instrumented Go code.
+TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
+ MOVD callpc+0(FP), R8
+ BR racefuncenter<>(SB)
+
+// Common code for racefuncenter
+// R8 = caller's return address
+TEXT racefuncenter<>(SB), NOSPLIT, $0-0
+ MOVD runtime·tls_g(SB), R10
+ MOVD 0(R10), g
+ MOVD g_racectx(g), R3 // goroutine racectx aka *ThreadState
+ MOVD R8, R4 // caller pc set by caller in R8
+ // void __tsan_func_enter(ThreadState *thr, void *pc);
+ MOVD $__tsan_func_enter(SB), R8
+ BR racecall<>(SB)
+ RET
+
+// func runtime·racefuncexit()
+// Called from Go instrumented code.
+TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
+ MOVD runtime·tls_g(SB), R10
+ MOVD 0(R10), g
+ MOVD g_racectx(g), R3 // goroutine racectx aka *ThreadState
+ // void __tsan_func_exit(ThreadState *thr);
+ MOVD $__tsan_func_exit(SB), R8
+ BR racecall<>(SB)
+
+// Atomic operations for sync/atomic package.
+// Some use the __tsan versions instead
+// R6 = addr of arguments passed to this function
+// R3, R4, R5 set in racecallatomic
+
+// Load atomic in tsan
+TEXT sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ // void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+ MOVD $__tsan_go_atomic32_load(SB), R8
+ ADD $32, R1, R6 // addr of caller's 1st arg
+ BR racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ // void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+ MOVD $__tsan_go_atomic64_load(SB), R8
+ ADD $32, R1, R6 // addr of caller's 1st arg
+ BR racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ BR sync∕atomic·LoadInt32(SB)
+
+TEXT sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ BR sync∕atomic·LoadInt64(SB)
+
+TEXT sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
+ GO_ARGS
+ BR sync∕atomic·LoadInt64(SB)
+
+TEXT sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
+ GO_ARGS
+ BR sync∕atomic·LoadInt64(SB)
+
+// Store atomic in tsan
+TEXT sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ // void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+ MOVD $__tsan_go_atomic32_store(SB), R8
+ ADD $32, R1, R6 // addr of caller's 1st arg
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ // void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+ MOVD $__tsan_go_atomic64_store(SB), R8
+ ADD $32, R1, R6 // addr of caller's 1st arg
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ BR sync∕atomic·StoreInt32(SB)
+
+TEXT sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ BR sync∕atomic·StoreInt64(SB)
+
+TEXT sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
+ GO_ARGS
+ BR sync∕atomic·StoreInt64(SB)
+
+// Swap in tsan
+TEXT sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ // void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+ MOVD $__tsan_go_atomic32_exchange(SB), R8
+ ADD $32, R1, R6 // addr of caller's 1st arg
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ // void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a)
+ MOVD $__tsan_go_atomic64_exchange(SB), R8
+ ADD $32, R1, R6 // addr of caller's 1st arg
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ BR sync∕atomic·SwapInt32(SB)
+
+TEXT sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·SwapInt64(SB)
+
+TEXT sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·SwapInt64(SB)
+
+// Add atomic in tsan
+TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ // void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+ MOVD $__tsan_go_atomic32_fetch_add(SB), R8
+ ADD $64, R1, R6 // addr of caller's 1st arg
+ BL racecallatomic<>(SB)
+ // The tsan fetch_add result is not as expected by Go,
+ // so the 'add' must be added to the result.
+ MOVW add+8(FP), R3 // The tsan fetch_add does not return the
+ MOVW ret+16(FP), R4 // result expected by Go, so fix it.
+ ADD R3, R4, R3
+ MOVW R3, ret+16(FP)
+ RET
+
+TEXT sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ // void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+ MOVD $__tsan_go_atomic64_fetch_add(SB), R8
+ ADD $64, R1, R6 // addr of caller's 1st arg
+ BL racecallatomic<>(SB)
+ // The tsan fetch_add result is not as expected by Go,
+ // so the 'add' must be added to the result.
+ MOVD add+8(FP), R3
+ MOVD ret+16(FP), R4
+ ADD R3, R4, R3
+ MOVD R3, ret+16(FP)
+ RET
+
+TEXT sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ BR sync∕atomic·AddInt32(SB)
+
+TEXT sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·AddInt64(SB)
+
+TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·AddInt64(SB)
+
+// CompareAndSwap in tsan
+TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
+ GO_ARGS
+ // void __tsan_go_atomic32_compare_exchange(
+ // ThreadState *thr, uptr cpc, uptr pc, u8 *a)
+ MOVD $__tsan_go_atomic32_compare_exchange(SB), R8
+ ADD $32, R1, R6 // addr of caller's 1st arg
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
+ GO_ARGS
+ // void __tsan_go_atomic64_compare_exchange(
+ // ThreadState *thr, uptr cpc, uptr pc, u8 *a)
+ MOVD $__tsan_go_atomic64_compare_exchange(SB), R8
+ ADD $32, R1, R6 // addr of caller's 1st arg
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
+ GO_ARGS
+ BR sync∕atomic·CompareAndSwapInt32(SB)
+
+TEXT sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
+ GO_ARGS
+ BR sync∕atomic·CompareAndSwapInt64(SB)
+
+TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
+ GO_ARGS
+ BR sync∕atomic·CompareAndSwapInt64(SB)
+
+// Common function used to call tsan's atomic functions
+// R3 = *ThreadState
+// R4 = caller pc
+// R5 = pc
+// R6 = addr of incoming arg list
+// R8 contains addr of target function.
+TEXT racecallatomic<>(SB), NOSPLIT, $0-0
+ // Trigger SIGSEGV early if address passed to atomic function is bad.
+ MOVD (R6), R7 // 1st arg is addr
+ MOVB (R7), R9 // segv here if addr is bad
+ // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
+ MOVD runtime·racearenastart(SB), R9
+ CMP R7, R9
+ BLT racecallatomic_data
+ MOVD runtime·racearenaend(SB), R9
+ CMP R7, R9
+ BLT racecallatomic_ok
+racecallatomic_data:
+ MOVD runtime·racedatastart(SB), R9
+ CMP R7, R9
+ BLT racecallatomic_ignore
+ MOVD runtime·racedataend(SB), R9
+ CMP R7, R9
+ BGE racecallatomic_ignore
+racecallatomic_ok:
+ // Addr is within the good range, call the atomic function.
+ MOVD runtime·tls_g(SB), R10
+ MOVD 0(R10), g
+ MOVD g_racectx(g), R3 // goroutine racectx aka *ThreadState
+ MOVD R8, R5 // pc is the function called
+ MOVD (R1), R4 // caller pc from stack
+ BL racecall<>(SB) // BL needed to maintain stack consistency
+ RET //
+racecallatomic_ignore:
+ // Addr is outside the good range.
+ // Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
+ // An attempt to synchronize on the address would cause a crash.
+ MOVD R8, R15 // save the original function
+ MOVD R6, R17 // save the original arg list addr
+ MOVD $__tsan_go_ignore_sync_begin(SB), R8 // func addr to call
+ MOVD runtime·tls_g(SB), R10
+ MOVD 0(R10), g
+ MOVD g_racectx(g), R3 // goroutine context
+ BL racecall<>(SB)
+ MOVD R15, R8 // restore the original function
+ MOVD R17, R6 // restore arg list addr
+ // Call the atomic function.
+ // racecall will call LLVM race code which might clobber r30 (g)
+ MOVD runtime·tls_g(SB), R10
+ MOVD 0(R10), g
+
+ MOVD g_racectx(g), R3
+ MOVD R8, R4 // pc of the called function
+ MOVD (R1), R5 // caller pc from latest LR
+ BL racecall<>(SB)
+ // Call __tsan_go_ignore_sync_end.
+ MOVD $__tsan_go_ignore_sync_end(SB), R8
+ MOVD g_racectx(g), R3 // goroutine context; g should still be valid
+ BL racecall<>(SB)
+ RET
+
+// void runtime·racecall(void(*f)(...), ...)
+// Calls C function f from race runtime and passes up to 4 arguments to it.
+// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments.
+TEXT runtime·racecall(SB), NOSPLIT, $0-0
+ MOVD fn+0(FP), R8
+ MOVD arg0+8(FP), R3
+ MOVD arg1+16(FP), R4
+ MOVD arg2+24(FP), R5
+ MOVD arg3+32(FP), R6
+ JMP racecall<>(SB)
+
+// Finds g0 and switches to its stack.
+// Arguments were already loaded for the call from Go to C.
+TEXT racecall<>(SB), NOSPLIT, $0-0
+ // Set the LR slot for the ppc64 ABI
+ MOVD LR, R10
+ MOVD R10, 0(R1) // Go expectation
+ MOVD R10, 16(R1) // C ABI
+ // Get info from the current goroutine
+ MOVD runtime·tls_g(SB), R10 // g offset in TLS
+ MOVD 0(R10), g
+ MOVD g_m(g), R7 // m for g
+ MOVD R1, R16 // callee-saved, preserved across C call
+ MOVD m_g0(R7), R10 // g0 for m
+ CMP R10, g // same g0?
+ BEQ call // already on g0
+ MOVD (g_sched+gobuf_sp)(R10), R1 // switch R1
+call:
+ // prepare frame for C ABI
+ SUB $32, R1 // create frame for the callee to save LR, CR, R2, etc.
+ RLDCR $0, R1, $~15, R1 // align SP to 16 bytes
+ MOVD R8, CTR // R8 = address of the function to call
+ MOVD R8, R12 // expected by PPC64 ABI
+ BL (CTR)
+ XOR R0, R0 // clear R0 on return from Clang
+ MOVD R16, R1 // restore R1; R16 is nonvolatile in Clang
+ MOVD runtime·tls_g(SB), R10 // find correct g
+ MOVD 0(R10), g
+ MOVD 16(R1), R10 // LR was saved away, restore for return
+ MOVD R10, LR
+ RET
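+
+// Note: copying the target into R12 as well matters because the ELFv2 ABI
+// lets the callee compute its TOC pointer from R12 at its global entry
+// point.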
+
+// C->Go callback thunk that allows calling runtime·racesymbolize from C code.
+// A direct Go->C race call only switches SP; finish the g->g0 switch by setting the correct g.
+// The overall effect of the Go->C->Go call chain is similar to that of mcall.
+// R3 contains the command code, R4 the command-specific context.
+// See racecallback for command codes.
+TEXT runtime·racecallbackthunk(SB), NOSPLIT, $-8
+ // Handle command raceGetProcCmd (0) here.
+ // First, the code below assumes that we are on curg, while raceGetProcCmd
+ // can be executed on g0. Second, it is called frequently, so it will
+ // benefit from this fast path.
+ XOR R0, R0 // clear R0 since we came from C code
+ CMP R3, $0
+ BNE rest
+ // g0 TODO: Don't modify g here since R30 is nonvolatile
+ MOVD g, R9
+ MOVD runtime·tls_g(SB), R10
+ MOVD 0(R10), g
+ MOVD g_m(g), R3
+ MOVD m_p(R3), R3
+ MOVD p_raceprocctx(R3), R3
+ MOVD R3, (R4)
+ MOVD R9, g // restore g (R30)
+ RET
+
+ // This is all similar to what cgo does
+ // Save registers according to the ppc64 ABI
+rest:
+ MOVD LR, R10 // save link register
+ MOVD R10, 16(R1)
+ MOVW CR, R10
+ MOVW R10, 8(R1)
+ MOVDU R1, -336(R1) // Allocate frame needed for outargs and register save area
+
+ MOVD R14, 328(R1)
+ MOVD R15, 48(R1)
+ MOVD R16, 56(R1)
+ MOVD R17, 64(R1)
+ MOVD R18, 72(R1)
+ MOVD R19, 80(R1)
+ MOVD R20, 88(R1)
+ MOVD R21, 96(R1)
+ MOVD R22, 104(R1)
+ MOVD R23, 112(R1)
+ MOVD R24, 120(R1)
+ MOVD R25, 128(R1)
+ MOVD R26, 136(R1)
+ MOVD R27, 144(R1)
+ MOVD R28, 152(R1)
+ MOVD R29, 160(R1)
+ MOVD g, 168(R1) // R30
+ MOVD R31, 176(R1)
+ FMOVD F14, 184(R1)
+ FMOVD F15, 192(R1)
+ FMOVD F16, 200(R1)
+ FMOVD F17, 208(R1)
+ FMOVD F18, 216(R1)
+ FMOVD F19, 224(R1)
+ FMOVD F20, 232(R1)
+ FMOVD F21, 240(R1)
+ FMOVD F22, 248(R1)
+ FMOVD F23, 256(R1)
+ FMOVD F24, 264(R1)
+ FMOVD F25, 272(R1)
+ FMOVD F26, 280(R1)
+ FMOVD F27, 288(R1)
+ FMOVD F28, 296(R1)
+ FMOVD F29, 304(R1)
+ FMOVD F30, 312(R1)
+ FMOVD F31, 320(R1)
+
+ MOVD R3, FIXED_FRAME+0(R1)
+ MOVD R4, FIXED_FRAME+8(R1)
+
+ MOVD runtime·tls_g(SB), R10
+ MOVD 0(R10), g
+
+ MOVD g_m(g), R7
+ MOVD m_g0(R7), R8
+ CMP g, R8
+ BEQ noswitch
+
+ MOVD R8, g // set g = m-> g0
+
+ BL runtime·racecallback(SB)
+
+ // All registers are clobbered after Go code, reload.
+ MOVD runtime·tls_g(SB), R10
+ MOVD 0(R10), g
+
+ MOVD g_m(g), R7
+ MOVD m_curg(R7), g // restore g = m->curg
+
+ret:
+ MOVD 328(R1), R14
+ MOVD 48(R1), R15
+ MOVD 56(R1), R16
+ MOVD 64(R1), R17
+ MOVD 72(R1), R18
+ MOVD 80(R1), R19
+ MOVD 88(R1), R20
+ MOVD 96(R1), R21
+ MOVD 104(R1), R22
+ MOVD 112(R1), R23
+ MOVD 120(R1), R24
+ MOVD 128(R1), R25
+ MOVD 136(R1), R26
+ MOVD 144(R1), R27
+ MOVD 152(R1), R28
+ MOVD 160(R1), R29
+ MOVD 168(R1), g // R30
+ MOVD 176(R1), R31
+ FMOVD 184(R1), F14
+ FMOVD 192(R1), F15
+ FMOVD 200(R1), F16
+ FMOVD 208(R1), F17
+ FMOVD 216(R1), F18
+ FMOVD 224(R1), F19
+ FMOVD 232(R1), F20
+ FMOVD 240(R1), F21
+ FMOVD 248(R1), F22
+ FMOVD 256(R1), F23
+ FMOVD 264(R1), F24
+ FMOVD 272(R1), F25
+ FMOVD 280(R1), F26
+ FMOVD 288(R1), F27
+ FMOVD 296(R1), F28
+ FMOVD 304(R1), F29
+ FMOVD 312(R1), F30
+ FMOVD 320(R1), F31
+
+ ADD $336, R1
+ MOVD 8(R1), R10
+ MOVFL R10, $0xff // Restore CR
+ MOVD 16(R1), R10 // Restore saved LR for return
+ MOVD R10, LR
+ RET
+
+noswitch:
+ BL runtime·racecallback(SB)
+ JMP ret
+
+// tls_g, g value for each thread in TLS
+GLOBL runtime·tls_g+0(SB), TLSBSS+DUPOK, $8
diff --git a/src/runtime/race_s390x.s b/src/runtime/race_s390x.s
new file mode 100644
index 0000000..beb7f83
--- /dev/null
+++ b/src/runtime/race_s390x.s
@@ -0,0 +1,391 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+// +build race
+
+#include "go_asm.h"
+#include "funcdata.h"
+#include "textflag.h"
+
+// The following thunks allow calling the gcc-compiled race runtime directly
+// from Go code without going all the way through cgo.
+// First, it's much faster (up to 50% speedup for real Go programs).
+// Second, it eliminates race-related special cases from cgocall and scheduler.
+// Third, in the long term it will allow removing the cyclic runtime/race dependency on cmd/go.
+
+// A brief recap of the s390x C calling convention.
+// Arguments are passed in R2...R6, the rest is on stack.
+// Callee-saved registers are: R6...R13, R15.
+// Temporary registers are: R0...R5, R14.
+
+// When calling racecalladdr, R1 is the call target address.
+
+// The race ctx, ThreadState *thr below, is passed in R2 and loaded in racecalladdr.
+
+// func runtime·raceread(addr uintptr)
+// Called from instrumented code.
+TEXT runtime·raceread(SB), NOSPLIT, $0-8
+ // void __tsan_read(ThreadState *thr, void *addr, void *pc);
+ MOVD $__tsan_read(SB), R1
+ MOVD addr+0(FP), R3
+ MOVD R14, R4
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceRead(addr uintptr)
+TEXT runtime·RaceRead(SB), NOSPLIT, $0-8
+ // This needs to be a tail call, because raceread reads caller pc.
+ JMP runtime·raceread(SB)
+
+// func runtime·racereadpc(void *addr, void *callpc, void *pc)
+TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
+ // void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
+ MOVD $__tsan_read_pc(SB), R1
+ LMG addr+0(FP), R3, R5
+ JMP racecalladdr<>(SB)
+
+// func runtime·racewrite(addr uintptr)
+// Called from instrumented code.
+TEXT runtime·racewrite(SB), NOSPLIT, $0-8
+ // void __tsan_write(ThreadState *thr, void *addr, void *pc);
+ MOVD $__tsan_write(SB), R1
+ MOVD addr+0(FP), R3
+ MOVD R14, R4
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceWrite(addr uintptr)
+TEXT runtime·RaceWrite(SB), NOSPLIT, $0-8
+ // This needs to be a tail call, because racewrite reads caller pc.
+ JMP runtime·racewrite(SB)
+
+// func runtime·racewritepc(void *addr, void *callpc, void *pc)
+TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
+ // void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
+ MOVD $__tsan_write_pc(SB), R1
+ LMG addr+0(FP), R3, R5
+ JMP racecalladdr<>(SB)
+
+// func runtime·racereadrange(addr, size uintptr)
+// Called from instrumented code.
+TEXT runtime·racereadrange(SB), NOSPLIT, $0-16
+ // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_read_range(SB), R1
+ LMG addr+0(FP), R3, R4
+ MOVD R14, R5
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceReadRange(addr, size uintptr)
+TEXT runtime·RaceReadRange(SB), NOSPLIT, $0-16
+ // This needs to be a tail call, because racereadrange reads caller pc.
+ JMP runtime·racereadrange(SB)
+
+// func runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
+TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
+ // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_read_range(SB), R1
+ LMG addr+0(FP), R3, R5
+ // pc is an interceptor address, but TSan expects it to point to the
+ // middle of an interceptor (see LLVM's SCOPED_INTERCEPTOR_RAW).
+ ADD $2, R5
+ JMP racecalladdr<>(SB)
+
+// func runtime·racewriterange(addr, size uintptr)
+// Called from instrumented code.
+TEXT runtime·racewriterange(SB), NOSPLIT, $0-16
+ // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_write_range(SB), R1
+ LMG addr+0(FP), R3, R4
+ MOVD R14, R5
+ JMP racecalladdr<>(SB)
+
+// func runtime·RaceWriteRange(addr, size uintptr)
+TEXT runtime·RaceWriteRange(SB), NOSPLIT, $0-16
+ // This needs to be a tail call, because racewriterange reads caller pc.
+ JMP runtime·racewriterange(SB)
+
+// func runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
+TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
+ // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
+ MOVD $__tsan_write_range(SB), R1
+ LMG addr+0(FP), R3, R5
+ // pc is an interceptor address, but TSan expects it to point to the
+ // middle of an interceptor (see LLVM's SCOPED_INTERCEPTOR_RAW).
+ ADD $2, R5
+ JMP racecalladdr<>(SB)
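+
+// The bump differs per architecture (+1 on amd64, +4 on arm64, +2 here)
+// because it only has to move pc past the function start; presumably the
+// minimum instruction length is used on each.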
+
+// If R3 is out of range, do nothing. Otherwise, set up the goroutine context and
+// invoke racecall. Other arguments are already set.
+TEXT racecalladdr<>(SB), NOSPLIT, $0-0
+ MOVD runtime·racearenastart(SB), R0
+ CMPUBLT R3, R0, data // Before racearena start?
+ MOVD runtime·racearenaend(SB), R0
+ CMPUBLT R3, R0, call // Before racearena end?
+data:
+ MOVD runtime·racedatastart(SB), R0
+ CMPUBLT R3, R0, ret // Before racedata start?
+ MOVD runtime·racedataend(SB), R0
+ CMPUBGE R3, R0, ret // At or after racedata end?
+call:
+ MOVD g_racectx(g), R2
+ JMP racecall<>(SB)
+ret:
+ RET
+
+// func runtime·racefuncenter(pc uintptr)
+// Called from instrumented code.
+TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
+ MOVD callpc+0(FP), R3
+ JMP racefuncenter<>(SB)
+
+// Common code for racefuncenter
+// R3 = caller's return address
+TEXT racefuncenter<>(SB), NOSPLIT, $0-0
+ // void __tsan_func_enter(ThreadState *thr, void *pc);
+ MOVD $__tsan_func_enter(SB), R1
+ MOVD g_racectx(g), R2
+ BL racecall<>(SB)
+ RET
+
+// func runtime·racefuncexit()
+// Called from instrumented code.
+TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
+ // void __tsan_func_exit(ThreadState *thr);
+ MOVD $__tsan_func_exit(SB), R1
+ MOVD g_racectx(g), R2
+ JMP racecall<>(SB)
+
+// Atomic operations for sync/atomic package.
+
+// Load
+
+TEXT sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_load(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_load(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ JMP sync∕atomic·LoadInt32(SB)
+
+TEXT sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·LoadInt64(SB)
+
+TEXT sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·LoadInt64(SB)
+
+TEXT sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·LoadInt64(SB)
+
+// Store
+
+TEXT sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_store(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_store(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
+ GO_ARGS
+ JMP sync∕atomic·StoreInt32(SB)
+
+TEXT sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·StoreInt64(SB)
+
+TEXT sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
+ GO_ARGS
+ JMP sync∕atomic·StoreInt64(SB)
+
+// Swap
+
+TEXT sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_exchange(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_exchange(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·SwapInt32(SB)
+
+TEXT sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·SwapInt64(SB)
+
+TEXT sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·SwapInt64(SB)
+
+// Add
+
+TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_add(SB), R1
+ BL racecallatomic<>(SB)
+ // TSan performed fetch_add, but Go needs add_fetch.
+ MOVW add+8(FP), R0
+ MOVW ret+16(FP), R1
+ ADD R0, R1, R0
+ MOVW R0, ret+16(FP)
+ RET
+
+TEXT sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_add(SB), R1
+ BL racecallatomic<>(SB)
+ // TSan performed fetch_add, but Go needs add_fetch.
+ MOVD add+8(FP), R0
+ MOVD ret+16(FP), R1
+ ADD R0, R1, R0
+ MOVD R0, ret+16(FP)
+ RET
+
+TEXT sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·AddInt32(SB)
+
+TEXT sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AddInt64(SB)
+
+TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AddInt64(SB)
+
+// CompareAndSwap
+
+TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_compare_exchange(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_compare_exchange(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
+ GO_ARGS
+ JMP sync∕atomic·CompareAndSwapInt32(SB)
+
+TEXT sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
+ GO_ARGS
+ JMP sync∕atomic·CompareAndSwapInt64(SB)
+
+TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
+ GO_ARGS
+ JMP sync∕atomic·CompareAndSwapInt64(SB)
+
+// Common code for atomic operations. Calls R1.
+TEXT racecallatomic<>(SB), NOSPLIT, $0
+ MOVD 24(R15), R5 // Address (arg1, after 2xBL).
+ // If we pass an invalid pointer to the TSan runtime, it will cause a
+ // "fatal error: unknown caller pc". So trigger a SEGV here instead.
+ MOVB (R5), R0
+ MOVD runtime·racearenastart(SB), R0
+ CMPUBLT R5, R0, racecallatomic_data // Before racearena start?
+ MOVD runtime·racearenaend(SB), R0
+ CMPUBLT R5, R0, racecallatomic_ok // Before racearena end?
+racecallatomic_data:
+ MOVD runtime·racedatastart(SB), R0
+ CMPUBLT R5, R0, racecallatomic_ignore // Before racedata start?
+ MOVD runtime·racedataend(SB), R0
+ CMPUBGE R5, R0, racecallatomic_ignore // At or after racedata end?
+racecallatomic_ok:
+ MOVD g_racectx(g), R2 // ThreadState *.
+ MOVD 8(R15), R3 // Caller PC.
+ MOVD R14, R4 // PC.
+ ADD $24, R15, R5 // Arguments.
+ // A tail call would fail to restore R15, so use a normal call.
+ BL racecall<>(SB)
+ RET
+racecallatomic_ignore:
+ // Call __tsan_go_ignore_sync_begin to ignore synchronization during
+ // the atomic op. An attempt to synchronize on the address would cause
+ // a crash.
+ MOVD R1, R6 // Save target function.
+ MOVD R14, R7 // Save PC.
+ MOVD $__tsan_go_ignore_sync_begin(SB), R1
+ MOVD g_racectx(g), R2 // ThreadState *.
+ BL racecall<>(SB)
+ MOVD R6, R1 // Restore target function.
+ MOVD g_racectx(g), R2 // ThreadState *.
+ MOVD 8(R15), R3 // Caller PC.
+ MOVD R7, R4 // PC.
+ ADD $24, R15, R5 // Arguments.
+ BL racecall<>(SB)
+ MOVD $__tsan_go_ignore_sync_end(SB), R1
+ MOVD g_racectx(g), R2 // ThreadState *.
+ BL racecall<>(SB)
+ RET
+
+// func runtime·racecall(void(*f)(...), ...)
+// Calls C function f from race runtime and passes up to 4 arguments to it.
+// The arguments are never heap-object-preserving pointers, so we pretend there
+// are no arguments.
+TEXT runtime·racecall(SB), NOSPLIT, $0-0
+ MOVD fn+0(FP), R1
+ MOVD arg0+8(FP), R2
+ MOVD arg1+16(FP), R3
+ MOVD arg2+24(FP), R4
+ MOVD arg3+32(FP), R5
+ JMP racecall<>(SB)
+
+// Switches SP to g0 stack and calls R1. Arguments are already set.
+TEXT racecall<>(SB), NOSPLIT, $0-0
+ BL runtime·save_g(SB) // Save g for callbacks.
+ MOVD R15, R7 // Save SP.
+ MOVD g_m(g), R8 // R8 = thread.
+ MOVD m_g0(R8), R8 // R8 = g0.
+ CMPBEQ R8, g, call // Already on g0?
+ MOVD (g_sched+gobuf_sp)(R8), R15 // Switch SP to g0.
+call: SUB $160, R15 // Allocate C frame.
+ BL R1 // Call C code.
+ MOVD R7, R15 // Restore SP.
+ RET // Return to Go.
+
+// C->Go callback thunk that allows calling runtime·racesymbolize from C
+// code. racecall has only switched SP; finish the g->g0 switch by setting
+// the correct g. R2 contains the command code, R3 the command-specific
+// context. See racecallback for command codes.
+TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0
+ STMG R6, R15, 48(R15) // Save non-volatile regs.
+ BL runtime·load_g(SB) // Saved by racecall.
+ CMPBNE R2, $0, rest // raceGetProcCmd?
+ MOVD g_m(g), R2 // R2 = thread.
+ MOVD m_p(R2), R2 // R2 = processor.
+ MVC $8, p_raceprocctx(R2), (R3) // *R3 = p->raceprocctx.
+ LMG 48(R15), R6, R15 // Restore non-volatile regs.
+ BR R14 // Return to C.
+rest: MOVD g_m(g), R4 // R4 = current thread.
+ MOVD m_g0(R4), g // Switch to g0.
+ SUB $24, R15 // Allocate Go argument slots.
+ STMG R2, R3, 8(R15) // Fill Go frame.
+ BL runtime·racecallback(SB) // Call Go code.
+ LMG 72(R15), R6, R15 // Restore non-volatile regs.
+ BR R14 // Return to C.