Diffstat (limited to 'src/reflect')
-rw-r--r--  src/reflect/abi.go                        |  510
-rw-r--r--  src/reflect/abi_test.go                   |  989
-rw-r--r--  src/reflect/all_test.go                   | 8362
-rw-r--r--  src/reflect/arena.go                      |   18
-rw-r--r--  src/reflect/asm_386.s                     |   38
-rw-r--r--  src/reflect/asm_amd64.s                   |   79
-rw-r--r--  src/reflect/asm_arm.s                     |   42
-rw-r--r--  src/reflect/asm_arm64.s                   |   79
-rw-r--r--  src/reflect/asm_loong64.s                 |   40
-rw-r--r--  src/reflect/asm_mips64x.s                 |   42
-rw-r--r--  src/reflect/asm_mipsx.s                   |   42
-rw-r--r--  src/reflect/asm_ppc64x.s                  |   83
-rw-r--r--  src/reflect/asm_riscv64.s                 |   76
-rw-r--r--  src/reflect/asm_s390x.s                   |   38
-rw-r--r--  src/reflect/asm_wasm.s                    |   52
-rw-r--r--  src/reflect/benchmark_test.go             |  397
-rw-r--r--  src/reflect/deepequal.go                  |  238
-rw-r--r--  src/reflect/example_test.go               |  209
-rw-r--r--  src/reflect/export_test.go                |  165
-rw-r--r--  src/reflect/float32reg_generic.go         |   23
-rw-r--r--  src/reflect/float32reg_ppc64x.s           |   30
-rw-r--r--  src/reflect/float32reg_riscv64.s          |   27
-rw-r--r--  src/reflect/internal/example1/example.go  |   10
-rw-r--r--  src/reflect/internal/example2/example.go  |   10
-rw-r--r--  src/reflect/makefunc.go                   |  176
-rw-r--r--  src/reflect/nih_test.go                   |   38
-rw-r--r--  src/reflect/set_test.go                   |  227
-rw-r--r--  src/reflect/stubs_ppc64x.go               |   10
-rw-r--r--  src/reflect/stubs_riscv64.go              |    8
-rw-r--r--  src/reflect/swapper.go                    |   78
-rw-r--r--  src/reflect/tostring_test.go              |   95
-rw-r--r--  src/reflect/type.go                       | 3186
-rw-r--r--  src/reflect/value.go                      | 3860
-rw-r--r--  src/reflect/visiblefields.go              |  105
-rw-r--r--  src/reflect/visiblefields_test.go         |  349
35 files changed, 19731 insertions, 0 deletions
diff --git a/src/reflect/abi.go b/src/reflect/abi.go
new file mode 100644
index 0000000..32cb314
--- /dev/null
+++ b/src/reflect/abi.go
@@ -0,0 +1,510 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "unsafe"
+)
+
+// These variables are used by the register assignment
+// algorithm in this file.
+//
+// They should be modified with care (no other reflect code
+// may be executing) and are generally only modified
+// when testing this package.
+//
+// They should never be set higher than their internal/abi
+// constant counterparts, because the system relies on a
+// structure that is at least large enough to hold the
+// registers the system supports.
+//
+// Below they are initialized to their internal/abi constant
+// counterparts, which enables the register ABI for calls
+// made through this package. Tests may temporarily lower
+// them (even to zero) via SetArgRegs to force stack-based
+// argument passing, and must restore the previous values
+// when they finish.
+var (
+ intArgRegs = abi.IntArgRegs
+ floatArgRegs = abi.FloatArgRegs
+ floatRegSize = uintptr(abi.EffectiveFloatRegSize)
+)
+
+// abiStep represents an ABI "instruction." Each instruction
+// describes one part of how to translate between a Go value
+// in memory and a call frame.
+type abiStep struct {
+ kind abiStepKind
+
+ // offset and size together describe a part of a Go value
+ // in memory.
+ offset uintptr
+ size uintptr // size in bytes of the part
+
+ // These fields describe the ABI side of the translation.
+ stkOff uintptr // stack offset, used if kind == abiStepStack
+ ireg int // integer register index, used if kind == abiStepIntReg or kind == abiStepPointer
+ freg int // FP register index, used if kind == abiStepFloatReg
+}
+
+// abiStepKind is the "op-code" for an abiStep instruction.
+type abiStepKind int
+
+const (
+ abiStepBad abiStepKind = iota
+ abiStepStack // copy to/from stack
+ abiStepIntReg // copy to/from integer register
+ abiStepPointer // copy pointer to/from integer register
+ abiStepFloatReg // copy to/from FP register
+)
+
+// abiSeq represents a sequence of ABI instructions for copying
+// from a series of reflect.Values to a call frame (for call arguments)
+// or vice-versa (for call results).
+//
+// An abiSeq should be populated by calling its addArg method.
+type abiSeq struct {
+ // steps is the set of instructions.
+ //
+ // The instructions are grouped together by whole arguments,
+ // with the starting index for the instructions
+ // of the i'th Go value available in valueStart.
+ //
+ // For instance, if this abiSeq represents 3 arguments
+ // passed to a function, then the 2nd argument's steps
+ // begin at steps[valueStart[1]].
+ //
+ // Because reflect accepts Go arguments in distinct
+ // Values and each Value is stored separately, each abiStep
+ // that begins a new argument will have its offset
+ // field == 0.
+ steps []abiStep
+ valueStart []int
+
+ stackBytes uintptr // stack space used
+ iregs, fregs int // registers used
+}
+
+func (a *abiSeq) dump() {
+ for i, p := range a.steps {
+ println("part", i, p.kind, p.offset, p.size, p.stkOff, p.ireg, p.freg)
+ }
+ print("values ")
+ for _, i := range a.valueStart {
+ print(i, " ")
+ }
+ println()
+ println("stack", a.stackBytes)
+ println("iregs", a.iregs)
+ println("fregs", a.fregs)
+}
+
+// stepsForValue returns the ABI instructions for translating
+// the i'th Go argument or return value represented by this
+// abiSeq to the Go ABI.
+func (a *abiSeq) stepsForValue(i int) []abiStep {
+ s := a.valueStart[i]
+ var e int
+ if i == len(a.valueStart)-1 {
+ e = len(a.steps)
+ } else {
+ e = a.valueStart[i+1]
+ }
+ return a.steps[s:e]
+}
+
+// addArg extends the abiSeq with a new Go value of type t.
+//
+// If the value was stack-assigned, returns the single
+// abiStep describing that translation, and nil otherwise.
+func (a *abiSeq) addArg(t *rtype) *abiStep {
+ // We'll always be adding a new value, so do that first.
+ pStart := len(a.steps)
+ a.valueStart = append(a.valueStart, pStart)
+ if t.size == 0 {
+ // If the size of the argument type is zero, then
+ // in order to degrade gracefully into ABI0, we need
+ // to stack-assign this type. The reason is that
+ // although zero-sized types take up no space on the
+ // stack, they do cause the next argument to be aligned.
+ // So just do that here, but don't bother actually
+ // generating a new ABI step for it (there's nothing to
+ // actually copy).
+ //
+ // We cannot handle this in the recursive case of
+ // regAssign because zero-sized *fields* of a
+ // non-zero-sized struct do not cause it to be
+ // stack-assigned. So we need a special case here
+ // at the top.
+ a.stackBytes = align(a.stackBytes, uintptr(t.align))
+ return nil
+ }
+ // Hold a copy of "a" so that we can roll back if
+ // register assignment fails.
+ aOld := *a
+ if !a.regAssign(t, 0) {
+ // Register assignment failed. Roll back any changes
+ // and stack-assign.
+ *a = aOld
+ a.stackAssign(t.size, uintptr(t.align))
+ return &a.steps[len(a.steps)-1]
+ }
+ return nil
+}
+
+// addRcvr extends the abiSeq with a new method call
+// receiver according to the interface calling convention.
+//
+// If the receiver was stack-assigned, returns the single
+// abiStep describing that translation, and nil otherwise.
+// Returns true if the receiver is a pointer.
+func (a *abiSeq) addRcvr(rcvr *rtype) (*abiStep, bool) {
+ // The receiver is always one word.
+ a.valueStart = append(a.valueStart, len(a.steps))
+ var ok, ptr bool
+ if ifaceIndir(rcvr) || rcvr.pointers() {
+ ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
+ ptr = true
+ } else {
+ // TODO(mknyszek): Is this case even possible?
+ // The interface data word never contains a non-pointer
+ // value. This case was copied over from older code
+ // in the reflect package which only conditionally added
+ // a pointer bit to the reflect.(Value).Call stack frame's
+ // GC bitmap.
+ ok = a.assignIntN(0, goarch.PtrSize, 1, 0b0)
+ ptr = false
+ }
+ if !ok {
+ a.stackAssign(goarch.PtrSize, goarch.PtrSize)
+ return &a.steps[len(a.steps)-1], ptr
+ }
+ return nil, ptr
+}
+
+// regAssign attempts to reserve argument registers for a value of
+// type t, stored at some offset.
+//
+// It returns whether or not the assignment succeeded, but
+// leaves any changes it made to a.steps behind, so the caller
+// must undo that work by adjusting a.steps if it fails.
+//
+// This method along with the assign* methods represent the
+// complete register-assignment algorithm for the Go ABI.
+func (a *abiSeq) regAssign(t *rtype, offset uintptr) bool {
+ switch t.Kind() {
+ case UnsafePointer, Pointer, Chan, Map, Func:
+ return a.assignIntN(offset, t.size, 1, 0b1)
+ case Bool, Int, Uint, Int8, Uint8, Int16, Uint16, Int32, Uint32, Uintptr:
+ return a.assignIntN(offset, t.size, 1, 0b0)
+ case Int64, Uint64:
+ switch goarch.PtrSize {
+ case 4:
+ return a.assignIntN(offset, 4, 2, 0b0)
+ case 8:
+ return a.assignIntN(offset, 8, 1, 0b0)
+ }
+ case Float32, Float64:
+ return a.assignFloatN(offset, t.size, 1)
+ case Complex64:
+ return a.assignFloatN(offset, 4, 2)
+ case Complex128:
+ return a.assignFloatN(offset, 8, 2)
+ case String:
+ return a.assignIntN(offset, goarch.PtrSize, 2, 0b01)
+ case Interface:
+ return a.assignIntN(offset, goarch.PtrSize, 2, 0b10)
+ case Slice:
+ return a.assignIntN(offset, goarch.PtrSize, 3, 0b001)
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ switch tt.len {
+ case 0:
+ // There's nothing to assign, so don't modify
+ // a.steps but succeed so the caller doesn't
+ // try to stack-assign this value.
+ return true
+ case 1:
+ return a.regAssign(tt.elem, offset)
+ default:
+ return false
+ }
+ case Struct:
+ st := (*structType)(unsafe.Pointer(t))
+ for i := range st.fields {
+ f := &st.fields[i]
+ if !a.regAssign(f.typ, offset+f.offset) {
+ return false
+ }
+ }
+ return true
+ default:
+ print("t.Kind == ", t.Kind(), "\n")
+ panic("unknown type kind")
+ }
+ panic("unhandled register assignment path")
+}
+
+// assignIntN assigns n values to registers, each "size" bytes large,
+// from the data at [offset, offset+n*size) in memory. Each value at
+// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
+// next n integer registers.
+//
+// Bit i in ptrMap indicates whether the i'th value is a pointer.
+// n must be <= 8.
+//
+// Returns whether assignment succeeded.
+func (a *abiSeq) assignIntN(offset, size uintptr, n int, ptrMap uint8) bool {
+ if n > 8 || n < 0 {
+ panic("invalid n")
+ }
+ if ptrMap != 0 && size != goarch.PtrSize {
+ panic("non-empty pointer map passed for non-pointer-size values")
+ }
+ if a.iregs+n > intArgRegs {
+ return false
+ }
+ for i := 0; i < n; i++ {
+ kind := abiStepIntReg
+ if ptrMap&(uint8(1)<<i) != 0 {
+ kind = abiStepPointer
+ }
+ a.steps = append(a.steps, abiStep{
+ kind: kind,
+ offset: offset + uintptr(i)*size,
+ size: size,
+ ireg: a.iregs,
+ })
+ a.iregs++
+ }
+ return true
+}
+
+// assignFloatN assigns n values to registers, each "size" bytes large,
+// from the data at [offset, offset+n*size) in memory. Each value at
+// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
+// next n floating-point registers.
+//
+// Returns whether assignment succeeded.
+func (a *abiSeq) assignFloatN(offset, size uintptr, n int) bool {
+ if n < 0 {
+ panic("invalid n")
+ }
+ if a.fregs+n > floatArgRegs || floatRegSize < size {
+ return false
+ }
+ for i := 0; i < n; i++ {
+ a.steps = append(a.steps, abiStep{
+ kind: abiStepFloatReg,
+ offset: offset + uintptr(i)*size,
+ size: size,
+ freg: a.fregs,
+ })
+ a.fregs++
+ }
+ return true
+}
+
+// stackAssign reserves space for one value that is "size" bytes
+// large with alignment "alignment" to the stack.
+//
+// Should not be called directly; use addArg instead.
+func (a *abiSeq) stackAssign(size, alignment uintptr) {
+ a.stackBytes = align(a.stackBytes, alignment)
+ a.steps = append(a.steps, abiStep{
+ kind: abiStepStack,
+ offset: 0, // Only used for whole arguments, so the memory offset is 0.
+ size: size,
+ stkOff: a.stackBytes,
+ })
+ a.stackBytes += size
+}
+
+// abiDesc describes the ABI for a function or method.
+type abiDesc struct {
+ // call and ret represent the translation steps for
+ // the call and return paths of a Go function.
+ call, ret abiSeq
+
+ // These fields describe the stack space allocated
+ // for the call. stackCallArgsSize is the amount of space
+ // reserved for arguments but not return values. retOffset
+ // is the offset at which return values begin, and
+ // spill is the size in bytes of additional space reserved
+ // to spill argument registers into in case of preemption in
+ // reflectcall's stack frame.
+ stackCallArgsSize, retOffset, spill uintptr
+
+ // stackPtrs is a bitmap that indicates whether
+ // each word in the ABI stack space (stack-assigned
+ // args + return values) is a pointer. Used
+ // as the heap pointer bitmap for stack space
+ // passed to reflectcall.
+ stackPtrs *bitVector
+
+ // inRegPtrs is a bitmap whose i'th bit indicates
+ // whether the i'th integer argument register contains
+ // a pointer. Used by makeFuncStub and methodValueCall
+ // to make argument pointers visible to the GC.
+ //
+ // outRegPtrs is the same, but for result values.
+ // Used by reflectcall to make result pointers visible
+ // to the GC.
+ inRegPtrs, outRegPtrs abi.IntArgRegBitmap
+}
+
+func (a *abiDesc) dump() {
+ println("ABI")
+ println("call")
+ a.call.dump()
+ println("ret")
+ a.ret.dump()
+ println("stackCallArgsSize", a.stackCallArgsSize)
+ println("retOffset", a.retOffset)
+ println("spill", a.spill)
+ print("inRegPtrs:")
+ dumpPtrBitMap(a.inRegPtrs)
+ println()
+ print("outRegPtrs:")
+ dumpPtrBitMap(a.outRegPtrs)
+ println()
+}
+
+func dumpPtrBitMap(b abi.IntArgRegBitmap) {
+ for i := 0; i < intArgRegs; i++ {
+ x := 0
+ if b.Get(i) {
+ x = 1
+ }
+ print(" ", x)
+ }
+}
+
+func newAbiDesc(t *funcType, rcvr *rtype) abiDesc {
+ // We need to add space to the frame so that
+ // register-assigned arguments can be spilled into it.
+ //
+ // The size of this space is just the sum of the sizes
+ // of each register-allocated type.
+ //
+ // TODO(mknyszek): Remove this when we no longer have
+ // caller reserved spill space.
+ spill := uintptr(0)
+
+ // Compute gc program & stack bitmap for stack arguments
+ stackPtrs := new(bitVector)
+
+ // Compute the stack frame pointer bitmap and register
+ // pointer bitmap for arguments.
+ inRegPtrs := abi.IntArgRegBitmap{}
+
+ // Compute abiSeq for input parameters.
+ var in abiSeq
+ if rcvr != nil {
+ stkStep, isPtr := in.addRcvr(rcvr)
+ if stkStep != nil {
+ if isPtr {
+ stackPtrs.append(1)
+ } else {
+ stackPtrs.append(0)
+ }
+ } else {
+ spill += goarch.PtrSize
+ }
+ }
+ for i, arg := range t.in() {
+ stkStep := in.addArg(arg)
+ if stkStep != nil {
+ addTypeBits(stackPtrs, stkStep.stkOff, arg)
+ } else {
+ spill = align(spill, uintptr(arg.align))
+ spill += arg.size
+ for _, st := range in.stepsForValue(i) {
+ if st.kind == abiStepPointer {
+ inRegPtrs.Set(st.ireg)
+ }
+ }
+ }
+ }
+ spill = align(spill, goarch.PtrSize)
+
+ // From the input parameters alone, we now know
+ // the stackCallArgsSize and retOffset.
+ stackCallArgsSize := in.stackBytes
+ retOffset := align(in.stackBytes, goarch.PtrSize)
+
+ // Compute the stack frame pointer bitmap and register
+ // pointer bitmap for return values.
+ outRegPtrs := abi.IntArgRegBitmap{}
+
+ // Compute abiSeq for output parameters.
+ var out abiSeq
+ // Stack-assigned return values do not share
+ // space with arguments like they do with registers,
+ // so we need to inject a stack offset here.
+ // Fake it by artificially extending stackBytes by
+ // the return offset.
+ out.stackBytes = retOffset
+ for i, res := range t.out() {
+ stkStep := out.addArg(res)
+ if stkStep != nil {
+ addTypeBits(stackPtrs, stkStep.stkOff, res)
+ } else {
+ for _, st := range out.stepsForValue(i) {
+ if st.kind == abiStepPointer {
+ outRegPtrs.Set(st.ireg)
+ }
+ }
+ }
+ }
+ // Undo the faking from earlier so that stackBytes
+ // is accurate.
+ out.stackBytes -= retOffset
+ return abiDesc{in, out, stackCallArgsSize, retOffset, spill, stackPtrs, inRegPtrs, outRegPtrs}
+}
+
+// intFromReg loads an argSize-sized integer from reg and places it at to.
+//
+// argSize must be non-zero, fit in a register, and be a power of two.
+func intFromReg(r *abi.RegArgs, reg int, argSize uintptr, to unsafe.Pointer) {
+ memmove(to, r.IntRegArgAddr(reg, argSize), argSize)
+}
+
+// intToReg loads an argSize-sized integer and stores it into reg.
+//
+// argSize must be non-zero, fit in a register, and be a power of two.
+func intToReg(r *abi.RegArgs, reg int, argSize uintptr, from unsafe.Pointer) {
+ memmove(r.IntRegArgAddr(reg, argSize), from, argSize)
+}
+
+// floatFromReg loads a float value from its register representation in r.
+//
+// argSize must be 4 or 8.
+func floatFromReg(r *abi.RegArgs, reg int, argSize uintptr, to unsafe.Pointer) {
+ switch argSize {
+ case 4:
+ *(*float32)(to) = archFloat32FromReg(r.Floats[reg])
+ case 8:
+ *(*float64)(to) = *(*float64)(unsafe.Pointer(&r.Floats[reg]))
+ default:
+ panic("bad argSize")
+ }
+}
+
+// floatToReg stores a float value in its register representation in r.
+//
+// argSize must be either 4 or 8.
+func floatToReg(r *abi.RegArgs, reg int, argSize uintptr, from unsafe.Pointer) {
+ switch argSize {
+ case 4:
+ r.Floats[reg] = archFloat32ToReg(*(*float32)(from))
+ case 8:
+ r.Floats[reg] = *(*uint64)(from)
+ default:
+ panic("bad argSize")
+ }
+}
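
Note on the register-assignment algorithm above: what follows is a small standalone sketch, separate from the diff, that mirrors the all-or-nothing counting performed by abiSeq.regAssign, assignIntN, and assignFloatN. The register counts and per-argument register needs below are illustrative assumptions only (chosen to resemble an amd64-like 9 integer / 15 floating-point layout); the real values come from internal/abi and vary by architecture.

package main

import "fmt"

const (
	intArgRegs   = 9  // assumed integer argument register count, for illustration
	floatArgRegs = 15 // assumed floating-point argument register count
)

// tryRegs reports whether a value needing the given numbers of integer and
// floating-point registers still fits. This mirrors the all-or-nothing rule:
// if any part of an argument fails to get a register, the whole argument
// falls back to the stack.
func tryRegs(usedInt, usedFloat, needInt, needFloat int) bool {
	return usedInt+needInt <= intArgRegs && usedFloat+needFloat <= floatArgRegs
}

func main() {
	usedInt, usedFloat := 0, 0
	// Arguments described by how many integer and float registers they need.
	args := []struct {
		name               string
		needInt, needFloat int
	}{
		{"int", 1, 0},
		{"string", 2, 0},      // data pointer + length
		{"complex128", 0, 2},  // real + imaginary parts
		{"big struct", 12, 0}, // too many words: spills to the stack
	}
	for _, a := range args {
		if tryRegs(usedInt, usedFloat, a.needInt, a.needFloat) {
			usedInt += a.needInt
			usedFloat += a.needFloat
			fmt.Printf("%-10s -> registers (int=%d, float=%d used)\n", a.name, usedInt, usedFloat)
		} else {
			fmt.Printf("%-10s -> stack\n", a.name)
		}
	}
}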
diff --git a/src/reflect/abi_test.go b/src/reflect/abi_test.go
new file mode 100644
index 0000000..9d93472
--- /dev/null
+++ b/src/reflect/abi_test.go
@@ -0,0 +1,989 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.regabiargs
+
+package reflect_test
+
+import (
+ "internal/abi"
+ "math"
+ "math/rand"
+ "reflect"
+ "runtime"
+ "testing"
+ "testing/quick"
+)
+
+// As of early May 2021 this is no longer necessary for amd64,
+// but it remains in case this is needed for the next register abi port.
+// TODO (1.18) If enabling register ABI on additional architectures turns out not to need this, remove it.
+type MagicLastTypeNameForTestingRegisterABI struct{}
+
+func TestMethodValueCallABI(t *testing.T) {
+ // Enable register-based reflect.Call and ensure we don't
+ // use potentially incorrect cached versions by clearing
+ // the cache before we start and after we're done.
+ defer reflect.SetArgRegs(reflect.SetArgRegs(abi.IntArgRegs, abi.FloatArgRegs, abi.EffectiveFloatRegSize))
+
+ // This test is simple. Calling a method value involves
+ // pretty much just plumbing whatever arguments in whichever
+ // location through to reflectcall. They're already set up
+ // for us, so there isn't a whole lot to do. Let's just
+ // make sure that we can pass register and stack arguments
+ // through. The exact combination is not super important.
+ makeMethodValue := func(method string) (*StructWithMethods, any) {
+ s := new(StructWithMethods)
+ v := reflect.ValueOf(s).MethodByName(method)
+ return s, v.Interface()
+ }
+
+ a0 := StructFewRegs{
+ 10, 11, 12, 13,
+ 20.0, 21.0, 22.0, 23.0,
+ }
+ a1 := [4]uint64{100, 101, 102, 103}
+ a2 := StructFillRegs{
+ 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
+ }
+
+ s, i := makeMethodValue("AllRegsCall")
+ f0 := i.(func(StructFewRegs, MagicLastTypeNameForTestingRegisterABI) StructFewRegs)
+ r0 := f0(a0, MagicLastTypeNameForTestingRegisterABI{})
+ if r0 != a0 {
+ t.Errorf("bad method value call: got %#v, want %#v", r0, a0)
+ }
+ if s.Value != 1 {
+ t.Errorf("bad method value call: failed to set s.Value: got %d, want %d", s.Value, 1)
+ }
+
+ s, i = makeMethodValue("RegsAndStackCall")
+ f1 := i.(func(StructFewRegs, [4]uint64, MagicLastTypeNameForTestingRegisterABI) (StructFewRegs, [4]uint64))
+ r0, r1 := f1(a0, a1, MagicLastTypeNameForTestingRegisterABI{})
+ if r0 != a0 {
+ t.Errorf("bad method value call: got %#v, want %#v", r0, a0)
+ }
+ if r1 != a1 {
+ t.Errorf("bad method value call: got %#v, want %#v", r1, a1)
+ }
+ if s.Value != 2 {
+ t.Errorf("bad method value call: failed to set s.Value: got %d, want %d", s.Value, 2)
+ }
+
+ s, i = makeMethodValue("SpillStructCall")
+ f2 := i.(func(StructFillRegs, MagicLastTypeNameForTestingRegisterABI) StructFillRegs)
+ r2 := f2(a2, MagicLastTypeNameForTestingRegisterABI{})
+ if r2 != a2 {
+ t.Errorf("bad method value call: got %#v, want %#v", r2, a2)
+ }
+ if s.Value != 3 {
+ t.Errorf("bad method value call: failed to set s.Value: got %d, want %d", s.Value, 3)
+ }
+
+ s, i = makeMethodValue("ValueRegMethodSpillInt")
+ f3 := i.(func(StructFillRegs, int, MagicLastTypeNameForTestingRegisterABI) (StructFillRegs, int))
+ r3a, r3b := f3(a2, 42, MagicLastTypeNameForTestingRegisterABI{})
+ if r3a != a2 {
+ t.Errorf("bad method value call: got %#v, want %#v", r3a, a2)
+ }
+ if r3b != 42 {
+ t.Errorf("bad method value call: got %#v, want %#v", r3b, 42)
+ }
+ if s.Value != 4 {
+ t.Errorf("bad method value call: failed to set s.Value: got %d, want %d", s.Value, 4)
+ }
+
+ s, i = makeMethodValue("ValueRegMethodSpillPtr")
+ f4 := i.(func(StructFillRegs, *byte, MagicLastTypeNameForTestingRegisterABI) (StructFillRegs, *byte))
+ vb := byte(10)
+ r4a, r4b := f4(a2, &vb, MagicLastTypeNameForTestingRegisterABI{})
+ if r4a != a2 {
+ t.Errorf("bad method value call: got %#v, want %#v", r4a, a2)
+ }
+ if r4b != &vb {
+ t.Errorf("bad method value call: got %#v, want %#v", r4b, &vb)
+ }
+ if s.Value != 5 {
+ t.Errorf("bad method value call: failed to set s.Value: got %d, want %d", s.Value, 5)
+ }
+}
+
+type StructWithMethods struct {
+ Value int
+}
+
+type StructFewRegs struct {
+ a0, a1, a2, a3 int
+ f0, f1, f2, f3 float64
+}
+
+type StructFillRegs struct {
+ a0, a1, a2, a3, a4, a5, a6, a7, a8 int
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14 float64
+}
+
+func (m *StructWithMethods) AllRegsCall(s StructFewRegs, _ MagicLastTypeNameForTestingRegisterABI) StructFewRegs {
+ m.Value = 1
+ return s
+}
+
+func (m *StructWithMethods) RegsAndStackCall(s StructFewRegs, a [4]uint64, _ MagicLastTypeNameForTestingRegisterABI) (StructFewRegs, [4]uint64) {
+ m.Value = 2
+ return s, a
+}
+
+func (m *StructWithMethods) SpillStructCall(s StructFillRegs, _ MagicLastTypeNameForTestingRegisterABI) StructFillRegs {
+ m.Value = 3
+ return s
+}
+
+// When called as a method value, i is passed on the stack.
+// When called as a method, i is passed in a register.
+func (m *StructWithMethods) ValueRegMethodSpillInt(s StructFillRegs, i int, _ MagicLastTypeNameForTestingRegisterABI) (StructFillRegs, int) {
+ m.Value = 4
+ return s, i
+}
+
+// When called as a method value, i is passed on the stack.
+// When called as a method, i is passed in a register.
+func (m *StructWithMethods) ValueRegMethodSpillPtr(s StructFillRegs, i *byte, _ MagicLastTypeNameForTestingRegisterABI) (StructFillRegs, *byte) {
+ m.Value = 5
+ return s, i
+}
+
+func TestReflectCallABI(t *testing.T) {
+ // Enable register-based reflect.Call and ensure we don't
+ // use potentially incorrect cached versions by clearing
+ // the cache before we start and after we're done.
+ defer reflect.SetArgRegs(reflect.SetArgRegs(abi.IntArgRegs, abi.FloatArgRegs, abi.EffectiveFloatRegSize))
+
+ // Execute the functions defined below which all have the
+ // same form and perform the same function: pass all arguments
+ // to return values. The purpose is to test the call boundary
+ // and make sure it works.
+ r := rand.New(rand.NewSource(genValueRandSeed))
+ for _, fn := range abiCallTestCases {
+ fn := reflect.ValueOf(fn)
+ t.Run(runtime.FuncForPC(fn.Pointer()).Name(), func(t *testing.T) {
+ typ := fn.Type()
+ if typ.Kind() != reflect.Func {
+ t.Fatalf("test case is not a function, has type: %s", typ.String())
+ }
+ if typ.NumIn() != typ.NumOut() {
+ t.Fatalf("test case has different number of inputs and outputs: %d in, %d out", typ.NumIn(), typ.NumOut())
+ }
+ var args []reflect.Value
+ for i := 0; i < typ.NumIn(); i++ {
+ args = append(args, genValue(t, typ.In(i), r))
+ }
+ results := fn.Call(args)
+ for i := range results {
+ x, y := args[i].Interface(), results[i].Interface()
+ if reflect.DeepEqual(x, y) {
+ continue
+ }
+ t.Errorf("arg and result %d differ: got %+v, want %+v", i, y, x)
+ }
+ })
+ }
+}
+
+func TestReflectMakeFuncCallABI(t *testing.T) {
+ // Enable register-based reflect.MakeFunc and ensure we don't
+ // use potentially incorrect cached versions by clearing
+ // the cache before we start and after we're done.
+ defer reflect.SetArgRegs(reflect.SetArgRegs(abi.IntArgRegs, abi.FloatArgRegs, abi.EffectiveFloatRegSize))
+
+ // Execute the functions defined below which all have the
+ // same form and perform the same function: pass all arguments
+ // to return values. The purpose is to test the call boundary
+ // and make sure it works.
+ r := rand.New(rand.NewSource(genValueRandSeed))
+ makeFuncHandler := func(args []reflect.Value) []reflect.Value {
+ if len(args) == 0 {
+ return []reflect.Value{}
+ }
+ return args[:len(args)-1] // The last Value is an empty magic value.
+ }
+ for _, callFn := range abiMakeFuncTestCases {
+ fnTyp := reflect.TypeOf(callFn).In(0)
+ fn := reflect.MakeFunc(fnTyp, makeFuncHandler)
+ callFn := reflect.ValueOf(callFn)
+ t.Run(runtime.FuncForPC(callFn.Pointer()).Name(), func(t *testing.T) {
+ args := []reflect.Value{fn}
+ for i := 0; i < fnTyp.NumIn()-1; /* last one is magic type */ i++ {
+ args = append(args, genValue(t, fnTyp.In(i), r))
+ }
+ results := callFn.Call(args)
+ for i := range results {
+ x, y := args[i+1].Interface(), results[i].Interface()
+ if reflect.DeepEqual(x, y) {
+ continue
+ }
+ t.Errorf("arg and result %d differ: got %+v, want %+v", i, y, x)
+ }
+ })
+ }
+ t.Run("OnlyPointerInRegisterGC", func(t *testing.T) {
+ // This test attempts to induce a failure wherein
+ // the last pointer to an object is passed via registers.
+ // If makeFuncStub doesn't successfully store the pointer
+ // to a location visible to the GC, the object should be
+ // freed and then the next GC should notice that an object
+ // was inexplicably revived.
+ var f func(b *uint64, _ MagicLastTypeNameForTestingRegisterABI) *uint64
+ mkfn := reflect.MakeFunc(reflect.TypeOf(f), func(args []reflect.Value) []reflect.Value {
+ *(args[0].Interface().(*uint64)) = 5
+ return args[:1]
+ })
+ fn := mkfn.Interface().(func(*uint64, MagicLastTypeNameForTestingRegisterABI) *uint64)
+
+ // Call the MakeFunc'd function while trying to pass the only pointer
+ // to a new heap-allocated uint64.
+ *reflect.CallGC = true
+ x := fn(new(uint64), MagicLastTypeNameForTestingRegisterABI{})
+ *reflect.CallGC = false
+
+ // Check for bad pointers (which should be x if things went wrong).
+ runtime.GC()
+
+ // Sanity check x.
+ if *x != 5 {
+ t.Fatalf("failed to set value in object")
+ }
+ })
+}
+
+var abiCallTestCases = []any{
+ passNone,
+ passInt,
+ passInt8,
+ passInt16,
+ passInt32,
+ passInt64,
+ passUint,
+ passUint8,
+ passUint16,
+ passUint32,
+ passUint64,
+ passFloat32,
+ passFloat64,
+ passComplex64,
+ passComplex128,
+ passManyInt,
+ passManyFloat64,
+ passArray1,
+ passArray,
+ passArray1Mix,
+ passString,
+ // TODO(mknyszek): Test passing interface values.
+ passSlice,
+ passPointer,
+ passStruct1,
+ passStruct2,
+ passStruct3,
+ passStruct4,
+ passStruct5,
+ passStruct6,
+ passStruct7,
+ passStruct8,
+ passStruct9,
+ passStruct10,
+ // TODO(mknyszek): Test passing unsafe.Pointer values.
+ // TODO(mknyszek): Test passing chan values.
+ passStruct11,
+ passStruct12,
+ passStruct13,
+ passStruct14,
+ passStruct15,
+ pass2Struct1,
+ passEmptyStruct,
+ passStruct10AndSmall,
+}
+
+// Functions for testing reflect function call functionality.
+
+//go:registerparams
+//go:noinline
+func passNone() {}
+
+//go:registerparams
+//go:noinline
+func passInt(a int) int {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passInt8(a int8) int8 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passInt16(a int16) int16 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passInt32(a int32) int32 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passInt64(a int64) int64 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passUint(a uint) uint {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passUint8(a uint8) uint8 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passUint16(a uint16) uint16 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passUint32(a uint32) uint32 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passUint64(a uint64) uint64 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passFloat32(a float32) float32 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passFloat64(a float64) float64 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passComplex64(a complex64) complex64 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passComplex128(a complex128) complex128 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passArray1(a [1]uint32) [1]uint32 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passArray(a [2]uintptr) [2]uintptr {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passArray1Mix(a int, b [1]uint32, c float64) (int, [1]uint32, float64) {
+ return a, b, c
+}
+
+//go:registerparams
+//go:noinline
+func passString(a string) string {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passSlice(a []byte) []byte {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passPointer(a *byte) *byte {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passManyInt(a, b, c, d, e, f, g, h, i, j int) (int, int, int, int, int, int, int, int, int, int) {
+ return a, b, c, d, e, f, g, h, i, j
+}
+
+//go:registerparams
+//go:noinline
+func passManyFloat64(a, b, c, d, e, f, g, h, i, j, l, m, n, o, p, q, r, s, t float64) (float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64) {
+ return a, b, c, d, e, f, g, h, i, j, l, m, n, o, p, q, r, s, t
+}
+
+//go:registerparams
+//go:noinline
+func passStruct1(a Struct1) Struct1 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct2(a Struct2) Struct2 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct3(a Struct3) Struct3 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct4(a Struct4) Struct4 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct5(a Struct5) Struct5 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct6(a Struct6) Struct6 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct7(a Struct7) Struct7 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct8(a Struct8) Struct8 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct9(a Struct9) Struct9 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct10(a Struct10) Struct10 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct11(a Struct11) Struct11 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct12(a Struct12) Struct12 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct13(a Struct13) Struct13 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct14(a Struct14) Struct14 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func passStruct15(a Struct15) Struct15 {
+ return a
+}
+
+//go:registerparams
+//go:noinline
+func pass2Struct1(a, b Struct1) (x, y Struct1) {
+ return a, b
+}
+
+//go:registerparams
+//go:noinline
+func passEmptyStruct(a int, b struct{}, c float64) (int, struct{}, float64) {
+ return a, b, c
+}
+
+// This test case forces a large argument to the stack followed by more
+// in-register arguments.
+//
+//go:registerparams
+//go:noinline
+func passStruct10AndSmall(a Struct10, b byte, c uint) (Struct10, byte, uint) {
+ return a, b, c
+}
+
+var abiMakeFuncTestCases = []any{
+ callArgsNone,
+ callArgsInt,
+ callArgsInt8,
+ callArgsInt16,
+ callArgsInt32,
+ callArgsInt64,
+ callArgsUint,
+ callArgsUint8,
+ callArgsUint16,
+ callArgsUint32,
+ callArgsUint64,
+ callArgsFloat32,
+ callArgsFloat64,
+ callArgsComplex64,
+ callArgsComplex128,
+ callArgsManyInt,
+ callArgsManyFloat64,
+ callArgsArray1,
+ callArgsArray,
+ callArgsArray1Mix,
+ callArgsString,
+ // TODO(mknyszek): Test passing interface values.
+ callArgsSlice,
+ callArgsPointer,
+ callArgsStruct1,
+ callArgsStruct2,
+ callArgsStruct3,
+ callArgsStruct4,
+ callArgsStruct5,
+ callArgsStruct6,
+ callArgsStruct7,
+ callArgsStruct8,
+ callArgsStruct9,
+ callArgsStruct10,
+ // TODO(mknyszek): Test passing unsafe.Pointer values.
+ // TODO(mknyszek): Test passing chan values.
+ callArgsStruct11,
+ callArgsStruct12,
+ callArgsStruct13,
+ callArgsStruct14,
+ callArgsStruct15,
+ callArgs2Struct1,
+ callArgsEmptyStruct,
+}
+
+//go:registerparams
+//go:noinline
+func callArgsNone(f func(MagicLastTypeNameForTestingRegisterABI)) {
+ f(MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsInt(f func(int, MagicLastTypeNameForTestingRegisterABI) int, a0 int) int {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsInt8(f func(int8, MagicLastTypeNameForTestingRegisterABI) int8, a0 int8) int8 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsInt16(f func(int16, MagicLastTypeNameForTestingRegisterABI) int16, a0 int16) int16 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsInt32(f func(int32, MagicLastTypeNameForTestingRegisterABI) int32, a0 int32) int32 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsInt64(f func(int64, MagicLastTypeNameForTestingRegisterABI) int64, a0 int64) int64 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsUint(f func(uint, MagicLastTypeNameForTestingRegisterABI) uint, a0 uint) uint {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsUint8(f func(uint8, MagicLastTypeNameForTestingRegisterABI) uint8, a0 uint8) uint8 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsUint16(f func(uint16, MagicLastTypeNameForTestingRegisterABI) uint16, a0 uint16) uint16 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsUint32(f func(uint32, MagicLastTypeNameForTestingRegisterABI) uint32, a0 uint32) uint32 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsUint64(f func(uint64, MagicLastTypeNameForTestingRegisterABI) uint64, a0 uint64) uint64 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsFloat32(f func(float32, MagicLastTypeNameForTestingRegisterABI) float32, a0 float32) float32 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsFloat64(f func(float64, MagicLastTypeNameForTestingRegisterABI) float64, a0 float64) float64 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsComplex64(f func(complex64, MagicLastTypeNameForTestingRegisterABI) complex64, a0 complex64) complex64 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsComplex128(f func(complex128, MagicLastTypeNameForTestingRegisterABI) complex128, a0 complex128) complex128 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsArray1(f func([1]uint32, MagicLastTypeNameForTestingRegisterABI) [1]uint32, a0 [1]uint32) [1]uint32 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsArray(f func([2]uintptr, MagicLastTypeNameForTestingRegisterABI) [2]uintptr, a0 [2]uintptr) [2]uintptr {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsArray1Mix(f func(int, [1]uint32, float64, MagicLastTypeNameForTestingRegisterABI) (int, [1]uint32, float64), a0 int, a1 [1]uint32, a2 float64) (int, [1]uint32, float64) {
+ return f(a0, a1, a2, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsString(f func(string, MagicLastTypeNameForTestingRegisterABI) string, a0 string) string {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsSlice(f func([]byte, MagicLastTypeNameForTestingRegisterABI) []byte, a0 []byte) []byte {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsPointer(f func(*byte, MagicLastTypeNameForTestingRegisterABI) *byte, a0 *byte) *byte {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsManyInt(f func(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9 int, x MagicLastTypeNameForTestingRegisterABI) (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9 int), a0, a1, a2, a3, a4, a5, a6, a7, a8, a9 int) (int, int, int, int, int, int, int, int, int, int) {
+ return f(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsManyFloat64(f func(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 float64, x MagicLastTypeNameForTestingRegisterABI) (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15, r16, r17, r18 float64), a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 float64) (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15, r16, r17, r18 float64) {
+ return f(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct1(f func(Struct1, MagicLastTypeNameForTestingRegisterABI) Struct1, a0 Struct1) Struct1 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct2(f func(Struct2, MagicLastTypeNameForTestingRegisterABI) Struct2, a0 Struct2) Struct2 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct3(f func(Struct3, MagicLastTypeNameForTestingRegisterABI) Struct3, a0 Struct3) Struct3 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct4(f func(Struct4, MagicLastTypeNameForTestingRegisterABI) Struct4, a0 Struct4) Struct4 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct5(f func(Struct5, MagicLastTypeNameForTestingRegisterABI) Struct5, a0 Struct5) Struct5 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct6(f func(Struct6, MagicLastTypeNameForTestingRegisterABI) Struct6, a0 Struct6) Struct6 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct7(f func(Struct7, MagicLastTypeNameForTestingRegisterABI) Struct7, a0 Struct7) Struct7 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct8(f func(Struct8, MagicLastTypeNameForTestingRegisterABI) Struct8, a0 Struct8) Struct8 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct9(f func(Struct9, MagicLastTypeNameForTestingRegisterABI) Struct9, a0 Struct9) Struct9 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct10(f func(Struct10, MagicLastTypeNameForTestingRegisterABI) Struct10, a0 Struct10) Struct10 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct11(f func(Struct11, MagicLastTypeNameForTestingRegisterABI) Struct11, a0 Struct11) Struct11 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct12(f func(Struct12, MagicLastTypeNameForTestingRegisterABI) Struct12, a0 Struct12) Struct12 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct13(f func(Struct13, MagicLastTypeNameForTestingRegisterABI) Struct13, a0 Struct13) Struct13 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct14(f func(Struct14, MagicLastTypeNameForTestingRegisterABI) Struct14, a0 Struct14) Struct14 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsStruct15(f func(Struct15, MagicLastTypeNameForTestingRegisterABI) Struct15, a0 Struct15) Struct15 {
+ return f(a0, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgs2Struct1(f func(Struct1, Struct1, MagicLastTypeNameForTestingRegisterABI) (Struct1, Struct1), a0, a1 Struct1) (r0, r1 Struct1) {
+ return f(a0, a1, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+//go:registerparams
+//go:noinline
+func callArgsEmptyStruct(f func(int, struct{}, float64, MagicLastTypeNameForTestingRegisterABI) (int, struct{}, float64), a0 int, a1 struct{}, a2 float64) (int, struct{}, float64) {
+ return f(a0, a1, a2, MagicLastTypeNameForTestingRegisterABI{})
+}
+
+// Struct1 is a simple integer-only aggregate struct.
+type Struct1 struct {
+ A, B, C uint
+}
+
+// Struct2 is Struct1 but with an array-typed field that will
+// force it to get passed on the stack.
+type Struct2 struct {
+ A, B, C uint
+ D [2]uint32
+}
+
+// Struct3 is Struct2 but with an anonymous array-typed field.
+// This should act identically to Struct2.
+type Struct3 struct {
+ A, B, C uint
+ D [2]uint32
+}
+
+// Struct4 has byte-length fields that should
+// each use up a whole register.
+type Struct4 struct {
+ A, B int8
+ C, D uint8
+ E bool
+}
+
+// Struct5 is a relatively large struct
+// with both integer and floating point values.
+type Struct5 struct {
+ A uint16
+ B int16
+ C, D uint32
+ E int32
+ F, G, H, I, J float32
+}
+
+// Struct6 has a nested struct.
+type Struct6 struct {
+ Struct1
+}
+
+// Struct7 is a struct with a nested array-typed field,
+// so it cannot be passed in registers.
+type Struct7 struct {
+ Struct1
+ Struct2
+}
+
+// Struct8 is a large aggregate struct type that may be
+// passed in registers.
+type Struct8 struct {
+ Struct5
+ Struct1
+}
+
+// Struct9 is a type that has an array type nested
+// 2 layers deep, and as a result needs to be passed
+// on the stack.
+type Struct9 struct {
+ Struct1
+ Struct7
+}
+
+// Struct10 is a struct type that is too large to be
+// passed in registers.
+type Struct10 struct {
+ Struct5
+ Struct8
+}
+
+// Struct11 is a struct type that has several reference
+// types in it.
+type Struct11 struct {
+ X map[string]int
+}
+
+// Struct12 has Struct11 embedded into it to test more
+// paths.
+type Struct12 struct {
+ A int
+ Struct11
+}
+
+// Struct13 tests an empty field.
+type Struct13 struct {
+ A int
+ X struct{}
+ B int
+}
+
+// Struct14 tests a non-zero-sized (and otherwise register-assignable)
+// struct with a field that is a non-zero length array with zero-sized members.
+type Struct14 struct {
+ A uintptr
+ X [3]struct{}
+ B float64
+}
+
+// Struct15 tests a non-zero-sized (and otherwise register-assignable)
+// struct with a struct field that is zero-sized but contains a
+// non-zero length array with zero-sized members.
+type Struct15 struct {
+ A uintptr
+ X struct {
+ Y [3]struct{}
+ }
+ B float64
+}
+
+const genValueRandSeed = 0
+
+// genValue generates a pseudorandom reflect.Value with type t.
+// The reflect.Value produced by this function is always the same
+// for the same type.
+func genValue(t *testing.T, typ reflect.Type, r *rand.Rand) reflect.Value {
+ // Re-seed and reset the PRNG because we want each value with the
+ // same type to be the same random value.
+ r.Seed(genValueRandSeed)
+ v, ok := quick.Value(typ, r)
+ if !ok {
+ t.Fatal("failed to generate value")
+ }
+ return v
+}
+
+func TestSignalingNaNArgument(t *testing.T) {
+ v := reflect.ValueOf(func(x float32) {
+ // make sure x is a signaling NaN.
+ u := math.Float32bits(x)
+ if u != snan {
+ t.Fatalf("signaling NaN not correct: %x\n", u)
+ }
+ })
+ v.Call([]reflect.Value{reflect.ValueOf(math.Float32frombits(snan))})
+}
+
+func TestSignalingNaNReturn(t *testing.T) {
+ v := reflect.ValueOf(func() float32 {
+ return math.Float32frombits(snan)
+ })
+ var x float32
+ reflect.ValueOf(&x).Elem().Set(v.Call(nil)[0])
+ // make sure x is a signaling NaN.
+ u := math.Float32bits(x)
+ if u != snan {
+ t.Fatalf("signaling NaN not correct: %x\n", u)
+ }
+}
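
The test functions above all follow the same pass-through pattern: reflect builds the argument Values, calls the function, and checks that every result equals the corresponding argument. As a quick orientation, here is a minimal standalone sketch of that pattern, separate from the diff; the function and values are invented for illustration and are not part of the test suite.

package main

import (
	"fmt"
	"reflect"
)

// passThrough returns its arguments unchanged, like the pass* helpers above.
func passThrough(a int, b string) (int, string) { return a, b }

func main() {
	fn := reflect.ValueOf(passThrough)
	args := []reflect.Value{reflect.ValueOf(42), reflect.ValueOf("hello")}

	results := fn.Call(args)
	for i, r := range results {
		// Each result should round-trip the matching argument exactly.
		if !reflect.DeepEqual(args[i].Interface(), r.Interface()) {
			fmt.Printf("arg %d was not passed through correctly\n", i)
		}
	}
	fmt.Println(results[0].Int(), results[1].String()) // 42 hello
}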
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
new file mode 100644
index 0000000..28a7640
--- /dev/null
+++ b/src/reflect/all_test.go
@@ -0,0 +1,8362 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect_test
+
+import (
+ "bytes"
+ "encoding/base64"
+ "flag"
+ "fmt"
+ "go/token"
+ "internal/goarch"
+ "internal/testenv"
+ "io"
+ "math"
+ "math/rand"
+ "net"
+ "os"
+ . "reflect"
+ "reflect/internal/example1"
+ "reflect/internal/example2"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+ "unsafe"
+)
+
+var sink any
+
+func TestBool(t *testing.T) {
+ v := ValueOf(true)
+ if v.Bool() != true {
+ t.Fatal("ValueOf(true).Bool() = false")
+ }
+}
+
+type integer int
+type T struct {
+ a int
+ b float64
+ c string
+ d *int
+}
+
+var _ = T{} == T{} // tests depend on T being comparable
+
+type pair struct {
+ i any
+ s string
+}
+
+func assert(t *testing.T, s, want string) {
+ if s != want {
+ t.Errorf("have %#q want %#q", s, want)
+ }
+}
+
+var typeTests = []pair{
+ {struct{ x int }{}, "int"},
+ {struct{ x int8 }{}, "int8"},
+ {struct{ x int16 }{}, "int16"},
+ {struct{ x int32 }{}, "int32"},
+ {struct{ x int64 }{}, "int64"},
+ {struct{ x uint }{}, "uint"},
+ {struct{ x uint8 }{}, "uint8"},
+ {struct{ x uint16 }{}, "uint16"},
+ {struct{ x uint32 }{}, "uint32"},
+ {struct{ x uint64 }{}, "uint64"},
+ {struct{ x float32 }{}, "float32"},
+ {struct{ x float64 }{}, "float64"},
+ {struct{ x int8 }{}, "int8"},
+ {struct{ x (**int8) }{}, "**int8"},
+ {struct{ x (**integer) }{}, "**reflect_test.integer"},
+ {struct{ x ([32]int32) }{}, "[32]int32"},
+ {struct{ x ([]int8) }{}, "[]int8"},
+ {struct{ x (map[string]int32) }{}, "map[string]int32"},
+ {struct{ x (chan<- string) }{}, "chan<- string"},
+ {struct{ x (chan<- chan string) }{}, "chan<- chan string"},
+ {struct{ x (chan<- <-chan string) }{}, "chan<- <-chan string"},
+ {struct{ x (<-chan <-chan string) }{}, "<-chan <-chan string"},
+ {struct{ x (chan (<-chan string)) }{}, "chan (<-chan string)"},
+ {struct {
+ x struct {
+ c chan *int32
+ d float32
+ }
+ }{},
+ "struct { c chan *int32; d float32 }",
+ },
+ {struct{ x (func(a int8, b int32)) }{}, "func(int8, int32)"},
+ {struct {
+ x struct {
+ c func(chan *integer, *int8)
+ }
+ }{},
+ "struct { c func(chan *reflect_test.integer, *int8) }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int32
+ }
+ }{},
+ "struct { a int8; b int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int32
+ }
+ }{},
+ "struct { a int8; b int8; c int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int8
+ d int32
+ }
+ }{},
+ "struct { a int8; b int8; c int8; d int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int8
+ d int8
+ e int32
+ }
+ }{},
+ "struct { a int8; b int8; c int8; d int8; e int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int8
+ d int8
+ e int8
+ f int32
+ }
+ }{},
+ "struct { a int8; b int8; c int8; d int8; e int8; f int32 }",
+ },
+ {struct {
+ x struct {
+ a int8 `reflect:"hi there"`
+ }
+ }{},
+ `struct { a int8 "reflect:\"hi there\"" }`,
+ },
+ {struct {
+ x struct {
+ a int8 `reflect:"hi \x00there\t\n\"\\"`
+ }
+ }{},
+ `struct { a int8 "reflect:\"hi \\x00there\\t\\n\\\"\\\\\"" }`,
+ },
+ {struct {
+ x struct {
+ f func(args ...int)
+ }
+ }{},
+ "struct { f func(...int) }",
+ },
+ {struct {
+ x (interface {
+ a(func(func(int) int) func(func(int)) int)
+ b()
+ })
+ }{},
+ "interface { reflect_test.a(func(func(int) int) func(func(int)) int); reflect_test.b() }",
+ },
+ {struct {
+ x struct {
+ int32
+ int64
+ }
+ }{},
+ "struct { int32; int64 }",
+ },
+}
+
+var valueTests = []pair{
+ {new(int), "132"},
+ {new(int8), "8"},
+ {new(int16), "16"},
+ {new(int32), "32"},
+ {new(int64), "64"},
+ {new(uint), "132"},
+ {new(uint8), "8"},
+ {new(uint16), "16"},
+ {new(uint32), "32"},
+ {new(uint64), "64"},
+ {new(float32), "256.25"},
+ {new(float64), "512.125"},
+ {new(complex64), "532.125+10i"},
+ {new(complex128), "564.25+1i"},
+ {new(string), "stringy cheese"},
+ {new(bool), "true"},
+ {new(*int8), "*int8(0)"},
+ {new(**int8), "**int8(0)"},
+ {new([5]int32), "[5]int32{0, 0, 0, 0, 0}"},
+ {new(**integer), "**reflect_test.integer(0)"},
+ {new(map[string]int32), "map[string]int32{<can't iterate on maps>}"},
+ {new(chan<- string), "chan<- string"},
+ {new(func(a int8, b int32)), "func(int8, int32)(0)"},
+ {new(struct {
+ c chan *int32
+ d float32
+ }),
+ "struct { c chan *int32; d float32 }{chan *int32, 0}",
+ },
+ {new(struct{ c func(chan *integer, *int8) }),
+ "struct { c func(chan *reflect_test.integer, *int8) }{func(chan *reflect_test.integer, *int8)(0)}",
+ },
+ {new(struct {
+ a int8
+ b int32
+ }),
+ "struct { a int8; b int32 }{0, 0}",
+ },
+ {new(struct {
+ a int8
+ b int8
+ c int32
+ }),
+ "struct { a int8; b int8; c int32 }{0, 0, 0}",
+ },
+}
+
+func testType(t *testing.T, i int, typ Type, want string) {
+ s := typ.String()
+ if s != want {
+ t.Errorf("#%d: have %#q, want %#q", i, s, want)
+ }
+}
+
+func TestTypes(t *testing.T) {
+ for i, tt := range typeTests {
+ testType(t, i, ValueOf(tt.i).Field(0).Type(), tt.s)
+ }
+}
+
+func TestSet(t *testing.T) {
+ for i, tt := range valueTests {
+ v := ValueOf(tt.i)
+ v = v.Elem()
+ switch v.Kind() {
+ case Int:
+ v.SetInt(132)
+ case Int8:
+ v.SetInt(8)
+ case Int16:
+ v.SetInt(16)
+ case Int32:
+ v.SetInt(32)
+ case Int64:
+ v.SetInt(64)
+ case Uint:
+ v.SetUint(132)
+ case Uint8:
+ v.SetUint(8)
+ case Uint16:
+ v.SetUint(16)
+ case Uint32:
+ v.SetUint(32)
+ case Uint64:
+ v.SetUint(64)
+ case Float32:
+ v.SetFloat(256.25)
+ case Float64:
+ v.SetFloat(512.125)
+ case Complex64:
+ v.SetComplex(532.125 + 10i)
+ case Complex128:
+ v.SetComplex(564.25 + 1i)
+ case String:
+ v.SetString("stringy cheese")
+ case Bool:
+ v.SetBool(true)
+ }
+ s := valueToString(v)
+ if s != tt.s {
+ t.Errorf("#%d: have %#q, want %#q", i, s, tt.s)
+ }
+ }
+}
+
+func TestSetValue(t *testing.T) {
+ for i, tt := range valueTests {
+ v := ValueOf(tt.i).Elem()
+ switch v.Kind() {
+ case Int:
+ v.Set(ValueOf(int(132)))
+ case Int8:
+ v.Set(ValueOf(int8(8)))
+ case Int16:
+ v.Set(ValueOf(int16(16)))
+ case Int32:
+ v.Set(ValueOf(int32(32)))
+ case Int64:
+ v.Set(ValueOf(int64(64)))
+ case Uint:
+ v.Set(ValueOf(uint(132)))
+ case Uint8:
+ v.Set(ValueOf(uint8(8)))
+ case Uint16:
+ v.Set(ValueOf(uint16(16)))
+ case Uint32:
+ v.Set(ValueOf(uint32(32)))
+ case Uint64:
+ v.Set(ValueOf(uint64(64)))
+ case Float32:
+ v.Set(ValueOf(float32(256.25)))
+ case Float64:
+ v.Set(ValueOf(512.125))
+ case Complex64:
+ v.Set(ValueOf(complex64(532.125 + 10i)))
+ case Complex128:
+ v.Set(ValueOf(complex128(564.25 + 1i)))
+ case String:
+ v.Set(ValueOf("stringy cheese"))
+ case Bool:
+ v.Set(ValueOf(true))
+ }
+ s := valueToString(v)
+ if s != tt.s {
+ t.Errorf("#%d: have %#q, want %#q", i, s, tt.s)
+ }
+ }
+}
+
+func TestMapIterSet(t *testing.T) {
+ m := make(map[string]any, len(valueTests))
+ for _, tt := range valueTests {
+ m[tt.s] = tt.i
+ }
+ v := ValueOf(m)
+
+ k := New(v.Type().Key()).Elem()
+ e := New(v.Type().Elem()).Elem()
+
+ iter := v.MapRange()
+ for iter.Next() {
+ k.SetIterKey(iter)
+ e.SetIterValue(iter)
+ want := m[k.String()]
+ got := e.Interface()
+ if got != want {
+ t.Errorf("%q: want (%T) %v, got (%T) %v", k.String(), want, want, got, got)
+ }
+ if setkey, key := valueToString(k), valueToString(iter.Key()); setkey != key {
+ t.Errorf("MapIter.Key() = %q, MapIter.SetKey() = %q", key, setkey)
+ }
+ if setval, val := valueToString(e), valueToString(iter.Value()); setval != val {
+ t.Errorf("MapIter.Value() = %q, MapIter.SetValue() = %q", val, setval)
+ }
+ }
+
+ if testenv.OptimizationOff() {
+ return // no inlining with the noopt builder
+ }
+
+ got := int(testing.AllocsPerRun(10, func() {
+ iter := v.MapRange()
+ for iter.Next() {
+ k.SetIterKey(iter)
+ e.SetIterValue(iter)
+ }
+ }))
+ // Calling MapRange should not allocate even though it returns a *MapIter.
+ // The function is inlineable, so if the local usage does not escape
+ // the *MapIter, it can remain stack allocated.
+ want := 0
+ if got != want {
+ t.Errorf("wanted %d alloc, got %d", want, got)
+ }
+}
+
+func TestCanIntUintFloatComplex(t *testing.T) {
+ type integer int
+ type uinteger uint
+ type float float64
+ type complex complex128
+
+ var ops = [...]string{"CanInt", "CanUint", "CanFloat", "CanComplex"}
+
+ var testCases = []struct {
+ i any
+ want [4]bool
+ }{
+ // signed integer
+ {132, [...]bool{true, false, false, false}},
+ {int8(8), [...]bool{true, false, false, false}},
+ {int16(16), [...]bool{true, false, false, false}},
+ {int32(32), [...]bool{true, false, false, false}},
+ {int64(64), [...]bool{true, false, false, false}},
+ // unsigned integer
+ {uint(132), [...]bool{false, true, false, false}},
+ {uint8(8), [...]bool{false, true, false, false}},
+ {uint16(16), [...]bool{false, true, false, false}},
+ {uint32(32), [...]bool{false, true, false, false}},
+ {uint64(64), [...]bool{false, true, false, false}},
+ {uintptr(0xABCD), [...]bool{false, true, false, false}},
+ // floating-point
+ {float32(256.25), [...]bool{false, false, true, false}},
+ {float64(512.125), [...]bool{false, false, true, false}},
+ // complex
+ {complex64(532.125 + 10i), [...]bool{false, false, false, true}},
+ {complex128(564.25 + 1i), [...]bool{false, false, false, true}},
+ // underlying
+ {integer(-132), [...]bool{true, false, false, false}},
+ {uinteger(132), [...]bool{false, true, false, false}},
+ {float(256.25), [...]bool{false, false, true, false}},
+ {complex(532.125 + 10i), [...]bool{false, false, false, true}},
+ // not-acceptable
+ {"hello world", [...]bool{false, false, false, false}},
+ {new(int), [...]bool{false, false, false, false}},
+ {new(uint), [...]bool{false, false, false, false}},
+ {new(float64), [...]bool{false, false, false, false}},
+ {new(complex64), [...]bool{false, false, false, false}},
+ {new([5]int), [...]bool{false, false, false, false}},
+ {new(integer), [...]bool{false, false, false, false}},
+ {new(map[int]int), [...]bool{false, false, false, false}},
+ {new(chan<- int), [...]bool{false, false, false, false}},
+ {new(func(a int8)), [...]bool{false, false, false, false}},
+ {new(struct{ i int }), [...]bool{false, false, false, false}},
+ }
+
+ for i, tc := range testCases {
+ v := ValueOf(tc.i)
+ got := [...]bool{v.CanInt(), v.CanUint(), v.CanFloat(), v.CanComplex()}
+
+ for j := range tc.want {
+ if got[j] != tc.want[j] {
+ t.Errorf(
+ "#%d: v.%s() returned %t for type %T, want %t",
+ i,
+ ops[j],
+ got[j],
+ tc.i,
+ tc.want[j],
+ )
+ }
+ }
+ }
+}
+
+func TestCanSetField(t *testing.T) {
+ type embed struct{ x, X int }
+ type Embed struct{ x, X int }
+ type S1 struct {
+ embed
+ x, X int
+ }
+ type S2 struct {
+ *embed
+ x, X int
+ }
+ type S3 struct {
+ Embed
+ x, X int
+ }
+ type S4 struct {
+ *Embed
+ x, X int
+ }
+
+ type testCase struct {
+ // -1 means Addr().Elem() of current value
+ index []int
+ canSet bool
+ }
+ tests := []struct {
+ val Value
+ cases []testCase
+ }{{
+ val: ValueOf(&S1{}),
+ cases: []testCase{
+ {[]int{0}, false},
+ {[]int{0, -1}, false},
+ {[]int{0, 0}, false},
+ {[]int{0, 0, -1}, false},
+ {[]int{0, -1, 0}, false},
+ {[]int{0, -1, 0, -1}, false},
+ {[]int{0, 1}, true},
+ {[]int{0, 1, -1}, true},
+ {[]int{0, -1, 1}, true},
+ {[]int{0, -1, 1, -1}, true},
+ {[]int{1}, false},
+ {[]int{1, -1}, false},
+ {[]int{2}, true},
+ {[]int{2, -1}, true},
+ },
+ }, {
+ val: ValueOf(&S2{embed: &embed{}}),
+ cases: []testCase{
+ {[]int{0}, false},
+ {[]int{0, -1}, false},
+ {[]int{0, 0}, false},
+ {[]int{0, 0, -1}, false},
+ {[]int{0, -1, 0}, false},
+ {[]int{0, -1, 0, -1}, false},
+ {[]int{0, 1}, true},
+ {[]int{0, 1, -1}, true},
+ {[]int{0, -1, 1}, true},
+ {[]int{0, -1, 1, -1}, true},
+ {[]int{1}, false},
+ {[]int{2}, true},
+ },
+ }, {
+ val: ValueOf(&S3{}),
+ cases: []testCase{
+ {[]int{0}, true},
+ {[]int{0, -1}, true},
+ {[]int{0, 0}, false},
+ {[]int{0, 0, -1}, false},
+ {[]int{0, -1, 0}, false},
+ {[]int{0, -1, 0, -1}, false},
+ {[]int{0, 1}, true},
+ {[]int{0, 1, -1}, true},
+ {[]int{0, -1, 1}, true},
+ {[]int{0, -1, 1, -1}, true},
+ {[]int{1}, false},
+ {[]int{2}, true},
+ },
+ }, {
+ val: ValueOf(&S4{Embed: &Embed{}}),
+ cases: []testCase{
+ {[]int{0}, true},
+ {[]int{0, -1}, true},
+ {[]int{0, 0}, false},
+ {[]int{0, 0, -1}, false},
+ {[]int{0, -1, 0}, false},
+ {[]int{0, -1, 0, -1}, false},
+ {[]int{0, 1}, true},
+ {[]int{0, 1, -1}, true},
+ {[]int{0, -1, 1}, true},
+ {[]int{0, -1, 1, -1}, true},
+ {[]int{1}, false},
+ {[]int{2}, true},
+ },
+ }}
+
+ for _, tt := range tests {
+ t.Run(tt.val.Type().Name(), func(t *testing.T) {
+ for _, tc := range tt.cases {
+ f := tt.val
+ for _, i := range tc.index {
+ if f.Kind() == Pointer {
+ f = f.Elem()
+ }
+ if i == -1 {
+ f = f.Addr().Elem()
+ } else {
+ f = f.Field(i)
+ }
+ }
+ if got := f.CanSet(); got != tc.canSet {
+ t.Errorf("CanSet() = %v, want %v", got, tc.canSet)
+ }
+ }
+ })
+ }
+}
+
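+// _i exists so tests below can take the address of a package-level int.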
+var _i = 7
+
+var valueToStringTests = []pair{
+ {123, "123"},
+ {123.5, "123.5"},
+ {byte(123), "123"},
+ {"abc", "abc"},
+ {T{123, 456.75, "hello", &_i}, "reflect_test.T{123, 456.75, hello, *int(&7)}"},
+ {new(chan *T), "*chan *reflect_test.T(&chan *reflect_test.T)"},
+ {[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}"},
+ {&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "*[10]int(&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})"},
+ {[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}"},
+ {&[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "*[]int(&[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})"},
+}
+
+func TestValueToString(t *testing.T) {
+ for i, test := range valueToStringTests {
+ s := valueToString(ValueOf(test.i))
+ if s != test.s {
+ t.Errorf("#%d: have %#q, want %#q", i, s, test.s)
+ }
+ }
+}
+
+func TestArrayElemSet(t *testing.T) {
+ v := ValueOf(&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}).Elem()
+ v.Index(4).SetInt(123)
+ s := valueToString(v)
+ const want = "[10]int{1, 2, 3, 4, 123, 6, 7, 8, 9, 10}"
+ if s != want {
+ t.Errorf("[10]int: have %#q want %#q", s, want)
+ }
+
+ v = ValueOf([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
+ v.Index(4).SetInt(123)
+ s = valueToString(v)
+ const want1 = "[]int{1, 2, 3, 4, 123, 6, 7, 8, 9, 10}"
+ if s != want1 {
+ t.Errorf("[]int: have %#q want %#q", s, want1)
+ }
+}
+
+func TestPtrPointTo(t *testing.T) {
+ var ip *int32
+ var i int32 = 1234
+ vip := ValueOf(&ip)
+ vi := ValueOf(&i).Elem()
+ vip.Elem().Set(vi.Addr())
+ if *ip != 1234 {
+ t.Errorf("got %d, want 1234", *ip)
+ }
+
+ ip = nil
+ vp := ValueOf(&ip).Elem()
+ vp.Set(Zero(vp.Type()))
+ if ip != nil {
+ t.Errorf("got non-nil (%p), want nil", ip)
+ }
+}
+
+func TestPtrSetNil(t *testing.T) {
+ var i int32 = 1234
+ ip := &i
+ vip := ValueOf(&ip)
+ vip.Elem().Set(Zero(vip.Elem().Type()))
+ if ip != nil {
+ t.Errorf("got non-nil (%d), want nil", *ip)
+ }
+}
+
+func TestMapSetNil(t *testing.T) {
+ m := make(map[string]int)
+ vm := ValueOf(&m)
+ vm.Elem().Set(Zero(vm.Elem().Type()))
+ if m != nil {
+ t.Errorf("got non-nil (%p), want nil", m)
+ }
+}
+
+func TestAll(t *testing.T) {
+ testType(t, 1, TypeOf((int8)(0)), "int8")
+ testType(t, 2, TypeOf((*int8)(nil)).Elem(), "int8")
+
+ typ := TypeOf((*struct {
+ c chan *int32
+ d float32
+ })(nil))
+ testType(t, 3, typ, "*struct { c chan *int32; d float32 }")
+ etyp := typ.Elem()
+ testType(t, 4, etyp, "struct { c chan *int32; d float32 }")
+ styp := etyp
+ f := styp.Field(0)
+ testType(t, 5, f.Type, "chan *int32")
+
+ f, present := styp.FieldByName("d")
+ if !present {
+ t.Errorf("FieldByName says present field is absent")
+ }
+ testType(t, 6, f.Type, "float32")
+
+ f, present = styp.FieldByName("absent")
+ if present {
+ t.Errorf("FieldByName says absent field is present")
+ }
+
+ typ = TypeOf([32]int32{})
+ testType(t, 7, typ, "[32]int32")
+ testType(t, 8, typ.Elem(), "int32")
+
+ typ = TypeOf((map[string]*int32)(nil))
+ testType(t, 9, typ, "map[string]*int32")
+ mtyp := typ
+ testType(t, 10, mtyp.Key(), "string")
+ testType(t, 11, mtyp.Elem(), "*int32")
+
+ typ = TypeOf((chan<- string)(nil))
+ testType(t, 12, typ, "chan<- string")
+ testType(t, 13, typ.Elem(), "string")
+
+ // make sure tag strings are not part of element type
+ typ = TypeOf(struct {
+ d []uint32 `reflect:"TAG"`
+ }{}).Field(0).Type
+ testType(t, 14, typ, "[]uint32")
+}
+
+func TestInterfaceGet(t *testing.T) {
+ var inter struct {
+ E any
+ }
+ inter.E = 123.456
+ v1 := ValueOf(&inter)
+ v2 := v1.Elem().Field(0)
+ assert(t, v2.Type().String(), "interface {}")
+ i2 := v2.Interface()
+ v3 := ValueOf(i2)
+ assert(t, v3.Type().String(), "float64")
+}
+
+func TestInterfaceValue(t *testing.T) {
+ var inter struct {
+ E any
+ }
+ inter.E = 123.456
+ v1 := ValueOf(&inter)
+ v2 := v1.Elem().Field(0)
+ assert(t, v2.Type().String(), "interface {}")
+ v3 := v2.Elem()
+ assert(t, v3.Type().String(), "float64")
+
+ i3 := v2.Interface()
+ if _, ok := i3.(float64); !ok {
+ t.Error("v2.Interface() did not return float64, got ", TypeOf(i3))
+ }
+}
+
+func TestFunctionValue(t *testing.T) {
+ var x any = func() {}
+ v := ValueOf(x)
+ if fmt.Sprint(v.Interface()) != fmt.Sprint(x) {
+ t.Fatalf("TestFunction returned wrong pointer")
+ }
+ assert(t, v.Type().String(), "func()")
+}
+
+func TestGrow(t *testing.T) {
+ v := ValueOf([]int(nil))
+ shouldPanic("reflect.Value.Grow using unaddressable value", func() { v.Grow(0) })
+ v = ValueOf(new([]int)).Elem()
+ v.Grow(0)
+ if !v.IsNil() {
+ t.Errorf("v.Grow(0) should still be nil")
+ }
+ v.Grow(1)
+ if v.Cap() == 0 {
+ t.Errorf("v.Cap = %v, want non-zero", v.Cap())
+ }
+ want := v.UnsafePointer()
+ v.Grow(1)
+ got := v.UnsafePointer()
+ if got != want {
+ t.Errorf("noop v.Grow should not change pointers")
+ }
+
+ t.Run("Append", func(t *testing.T) {
+ var got, want []T
+ v := ValueOf(&got).Elem()
+ appendValue := func(vt T) {
+ v.Grow(1)
+ v.SetLen(v.Len() + 1)
+ v.Index(v.Len() - 1).Set(ValueOf(vt))
+ }
+ for i := 0; i < 10; i++ {
+ vt := T{i, float64(i), strconv.Itoa(i), &i}
+ appendValue(vt)
+ want = append(want, vt)
+ }
+ if !DeepEqual(got, want) {
+ t.Errorf("value mismatch:\ngot %v\nwant %v", got, want)
+ }
+ })
+
+ t.Run("Rate", func(t *testing.T) {
+ var b []byte
+ v := ValueOf(new([]byte)).Elem()
+ for i := 0; i < 10; i++ {
+ b = append(b[:cap(b)], make([]byte, 1)...)
+ v.SetLen(v.Cap())
+ v.Grow(1)
+ if v.Cap() != cap(b) {
+ t.Errorf("v.Cap = %v, want %v", v.Cap(), cap(b))
+ }
+ }
+ })
+
+ t.Run("ZeroCapacity", func(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ v := ValueOf(new([]byte)).Elem()
+ v.Grow(61)
+ b := v.Bytes()
+ b = b[:cap(b)]
+ for i, c := range b {
+ if c != 0 {
+ t.Fatalf("Value.Bytes[%d] = 0x%02x, want 0x00", i, c)
+ }
+ b[i] = 0xff
+ }
+ runtime.GC()
+ }
+ })
+}
+
+var appendTests = []struct {
+ orig, extra []int
+}{
+ {nil, nil},
+ {[]int{}, nil},
+ {nil, []int{}},
+ {[]int{}, []int{}},
+ {nil, []int{22}},
+ {[]int{}, []int{22}},
+ {make([]int, 2, 4), nil},
+ {make([]int, 2, 4), []int{}},
+ {make([]int, 2, 4), []int{22}},
+ {make([]int, 2, 4), []int{22, 33, 44}},
+}
+
+func TestAppend(t *testing.T) {
+ for i, test := range appendTests {
+ origLen, extraLen := len(test.orig), len(test.extra)
+ want := append(test.orig, test.extra...)
+ // Convert extra from []int to []Value.
+ e0 := make([]Value, len(test.extra))
+ for j, e := range test.extra {
+ e0[j] = ValueOf(e)
+ }
+ // Convert extra from []int to *SliceValue.
+ e1 := ValueOf(test.extra)
+
+ // Test Append.
+ a0 := ValueOf(&test.orig).Elem()
+ have0 := Append(a0, e0...)
+ if have0.CanAddr() {
+ t.Errorf("Append #%d: have slice should not be addressable", i)
+ }
+ if !DeepEqual(have0.Interface(), want) {
+ t.Errorf("Append #%d: have %v, want %v (%p %p)", i, have0, want, test.orig, have0.Interface())
+ }
+ // Check that the orig and extra slices were not modified.
+ if a0.Len() != len(test.orig) {
+ t.Errorf("Append #%d: a0.Len: have %d, want %d", i, a0.Len(), origLen)
+ }
+ if len(test.orig) != origLen {
+ t.Errorf("Append #%d origLen: have %v, want %v", i, len(test.orig), origLen)
+ }
+ if len(test.extra) != extraLen {
+ t.Errorf("Append #%d extraLen: have %v, want %v", i, len(test.extra), extraLen)
+ }
+
+ // Test AppendSlice.
+ a1 := ValueOf(&test.orig).Elem()
+ have1 := AppendSlice(a1, e1)
+ if have1.CanAddr() {
+ t.Errorf("AppendSlice #%d: have slice should not be addressable", i)
+ }
+ if !DeepEqual(have1.Interface(), want) {
+ t.Errorf("AppendSlice #%d: have %v, want %v", i, have1, want)
+ }
+ // Check that the orig and extra slices were not modified.
+ if a1.Len() != len(test.orig) {
+ t.Errorf("AppendSlice #%d: a1.Len: have %d, want %d", i, a0.Len(), origLen)
+ }
+ if len(test.orig) != origLen {
+ t.Errorf("AppendSlice #%d origLen: have %v, want %v", i, len(test.orig), origLen)
+ }
+ if len(test.extra) != extraLen {
+ t.Errorf("AppendSlice #%d extraLen: have %v, want %v", i, len(test.extra), extraLen)
+ }
+
+ // Test Append and AppendSlice with unexported value.
+ ax := ValueOf(struct{ x []int }{test.orig}).Field(0)
+ shouldPanic("using unexported field", func() { Append(ax, e0...) })
+ shouldPanic("using unexported field", func() { AppendSlice(ax, e1) })
+ }
+}
+
+func TestCopy(t *testing.T) {
+ a := []int{1, 2, 3, 4, 10, 9, 8, 7}
+ b := []int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44}
+ c := []int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44}
+ for i := 0; i < len(b); i++ {
+ if b[i] != c[i] {
+ t.Fatalf("b != c before test")
+ }
+ }
+ a1 := a
+ b1 := b
+ aa := ValueOf(&a1).Elem()
+ ab := ValueOf(&b1).Elem()
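+ // Copy an increasing prefix of a into b and check that only that prefix of b changed.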
+ for tocopy := 1; tocopy <= 7; tocopy++ {
+ aa.SetLen(tocopy)
+ Copy(ab, aa)
+ aa.SetLen(8)
+ for i := 0; i < tocopy; i++ {
+ if a[i] != b[i] {
+ t.Errorf("(i) tocopy=%d a[%d]=%d, b[%d]=%d",
+ tocopy, i, a[i], i, b[i])
+ }
+ }
+ for i := tocopy; i < len(b); i++ {
+ if b[i] != c[i] {
+ if i < len(a) {
+ t.Errorf("(ii) tocopy=%d a[%d]=%d, b[%d]=%d, c[%d]=%d",
+ tocopy, i, a[i], i, b[i], i, c[i])
+ } else {
+ t.Errorf("(iii) tocopy=%d b[%d]=%d, c[%d]=%d",
+ tocopy, i, b[i], i, c[i])
+ }
+ } else {
+ t.Logf("tocopy=%d elem %d is okay\n", tocopy, i)
+ }
+ }
+ }
+}
+
+func TestCopyString(t *testing.T) {
+ t.Run("Slice", func(t *testing.T) {
+ s := bytes.Repeat([]byte{'_'}, 8)
+ val := ValueOf(s)
+
+ n := Copy(val, ValueOf(""))
+ if expecting := []byte("________"); n != 0 || !bytes.Equal(s, expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 0, s = %s", n, s, expecting)
+ }
+
+ n = Copy(val, ValueOf("hello"))
+ if expecting := []byte("hello___"); n != 5 || !bytes.Equal(s, expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 5, s = %s", n, s, expecting)
+ }
+
+ n = Copy(val, ValueOf("helloworld"))
+ if expecting := []byte("hellowor"); n != 8 || !bytes.Equal(s, expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 8, s = %s", n, s, expecting)
+ }
+ })
+ t.Run("Array", func(t *testing.T) {
+ s := [...]byte{'_', '_', '_', '_', '_', '_', '_', '_'}
+ val := ValueOf(&s).Elem()
+
+ n := Copy(val, ValueOf(""))
+ if expecting := []byte("________"); n != 0 || !bytes.Equal(s[:], expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 0, s = %s", n, s[:], expecting)
+ }
+
+ n = Copy(val, ValueOf("hello"))
+ if expecting := []byte("hello___"); n != 5 || !bytes.Equal(s[:], expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 5, s = %s", n, s[:], expecting)
+ }
+
+ n = Copy(val, ValueOf("helloworld"))
+ if expecting := []byte("hellowor"); n != 8 || !bytes.Equal(s[:], expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 8, s = %s", n, s[:], expecting)
+ }
+ })
+}
+
+func TestCopyArray(t *testing.T) {
+ a := [8]int{1, 2, 3, 4, 10, 9, 8, 7}
+ b := [11]int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44}
+ c := b
+ aa := ValueOf(&a).Elem()
+ ab := ValueOf(&b).Elem()
+ Copy(ab, aa)
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ t.Errorf("(i) a[%d]=%d, b[%d]=%d", i, a[i], i, b[i])
+ }
+ }
+ for i := len(a); i < len(b); i++ {
+ if b[i] != c[i] {
+ t.Errorf("(ii) b[%d]=%d, c[%d]=%d", i, b[i], i, c[i])
+ } else {
+ t.Logf("elem %d is okay\n", i)
+ }
+ }
+}
+
+func TestBigUnnamedStruct(t *testing.T) {
+ b := struct{ a, b, c, d int64 }{1, 2, 3, 4}
+ v := ValueOf(b)
+ b1 := v.Interface().(struct {
+ a, b, c, d int64
+ })
+ if b1.a != b.a || b1.b != b.b || b1.c != b.c || b1.d != b.d {
+ t.Errorf("ValueOf(%v).Interface().(*Big) = %v", b, b1)
+ }
+}
+
+type big struct {
+ a, b, c, d, e int64
+}
+
+func TestBigStruct(t *testing.T) {
+ b := big{1, 2, 3, 4, 5}
+ v := ValueOf(b)
+ b1 := v.Interface().(big)
+ if b1.a != b.a || b1.b != b.b || b1.c != b.c || b1.d != b.d || b1.e != b.e {
+ t.Errorf("ValueOf(%v).Interface().(big) = %v", b, b1)
+ }
+}
+
+type Basic struct {
+ x int
+ y float32
+}
+
+type NotBasic Basic
+
+type DeepEqualTest struct {
+ a, b any
+ eq bool
+}
+
+// Simple functions for DeepEqual tests.
+var (
+ fn1 func() // nil.
+ fn2 func() // nil.
+ fn3 = func() { fn1() } // Not nil.
+)
+
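+// self is a sentinel: when it appears as DeepEqualTest.b, TestDeepEqual compares a against itself.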
+type self struct{}
+
+type Loop *Loop
+type Loopy any
+
+var loop1, loop2 Loop
+var loopy1, loopy2 Loopy
+var cycleMap1, cycleMap2, cycleMap3 map[string]any
+
+type structWithSelfPtr struct {
+ p *structWithSelfPtr
+ s string
+}
+
+func init() {
+ loop1 = &loop2
+ loop2 = &loop1
+
+ loopy1 = &loopy2
+ loopy2 = &loopy1
+
+ cycleMap1 = map[string]any{}
+ cycleMap1["cycle"] = cycleMap1
+ cycleMap2 = map[string]any{}
+ cycleMap2["cycle"] = cycleMap2
+ cycleMap3 = map[string]any{}
+ cycleMap3["different"] = cycleMap3
+}
+
+var deepEqualTests = []DeepEqualTest{
+ // Equalities
+ {nil, nil, true},
+ {1, 1, true},
+ {int32(1), int32(1), true},
+ {0.5, 0.5, true},
+ {float32(0.5), float32(0.5), true},
+ {"hello", "hello", true},
+ {make([]int, 10), make([]int, 10), true},
+ {&[3]int{1, 2, 3}, &[3]int{1, 2, 3}, true},
+ {Basic{1, 0.5}, Basic{1, 0.5}, true},
+ {error(nil), error(nil), true},
+ {map[int]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, true},
+ {fn1, fn2, true},
+ {[]byte{1, 2, 3}, []byte{1, 2, 3}, true},
+ {[]MyByte{1, 2, 3}, []MyByte{1, 2, 3}, true},
+ {MyBytes{1, 2, 3}, MyBytes{1, 2, 3}, true},
+
+ // Inequalities
+ {1, 2, false},
+ {int32(1), int32(2), false},
+ {0.5, 0.6, false},
+ {float32(0.5), float32(0.6), false},
+ {"hello", "hey", false},
+ {make([]int, 10), make([]int, 11), false},
+ {&[3]int{1, 2, 3}, &[3]int{1, 2, 4}, false},
+ {Basic{1, 0.5}, Basic{1, 0.6}, false},
+ {Basic{1, 0}, Basic{2, 0}, false},
+ {map[int]string{1: "one", 3: "two"}, map[int]string{2: "two", 1: "one"}, false},
+ {map[int]string{1: "one", 2: "txo"}, map[int]string{2: "two", 1: "one"}, false},
+ {map[int]string{1: "one"}, map[int]string{2: "two", 1: "one"}, false},
+ {map[int]string{2: "two", 1: "one"}, map[int]string{1: "one"}, false},
+ {nil, 1, false},
+ {1, nil, false},
+ {fn1, fn3, false},
+ {fn3, fn3, false},
+ {[][]int{{1}}, [][]int{{2}}, false},
+ {&structWithSelfPtr{p: &structWithSelfPtr{s: "a"}}, &structWithSelfPtr{p: &structWithSelfPtr{s: "b"}}, false},
+
+ // Fun with floating point.
+ {math.NaN(), math.NaN(), false},
+ {&[1]float64{math.NaN()}, &[1]float64{math.NaN()}, false},
+ {&[1]float64{math.NaN()}, self{}, true},
+ {[]float64{math.NaN()}, []float64{math.NaN()}, false},
+ {[]float64{math.NaN()}, self{}, true},
+ {map[float64]float64{math.NaN(): 1}, map[float64]float64{1: 2}, false},
+ {map[float64]float64{math.NaN(): 1}, self{}, true},
+
+ // Nil vs empty: not the same.
+ {[]int{}, []int(nil), false},
+ {[]int{}, []int{}, true},
+ {[]int(nil), []int(nil), true},
+ {map[int]int{}, map[int]int(nil), false},
+ {map[int]int{}, map[int]int{}, true},
+ {map[int]int(nil), map[int]int(nil), true},
+
+ // Mismatched types
+ {1, 1.0, false},
+ {int32(1), int64(1), false},
+ {0.5, "hello", false},
+ {[]int{1, 2, 3}, [3]int{1, 2, 3}, false},
+ {&[3]any{1, 2, 4}, &[3]any{1, 2, "s"}, false},
+ {Basic{1, 0.5}, NotBasic{1, 0.5}, false},
+ {map[uint]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, false},
+ {[]byte{1, 2, 3}, []MyByte{1, 2, 3}, false},
+ {[]MyByte{1, 2, 3}, MyBytes{1, 2, 3}, false},
+ {[]byte{1, 2, 3}, MyBytes{1, 2, 3}, false},
+
+ // Possible loops.
+ {&loop1, &loop1, true},
+ {&loop1, &loop2, true},
+ {&loopy1, &loopy1, true},
+ {&loopy1, &loopy2, true},
+ {&cycleMap1, &cycleMap2, true},
+ {&cycleMap1, &cycleMap3, false},
+}
+
+func TestDeepEqual(t *testing.T) {
+ for _, test := range deepEqualTests {
+ if test.b == (self{}) {
+ test.b = test.a
+ }
+ if r := DeepEqual(test.a, test.b); r != test.eq {
+ t.Errorf("DeepEqual(%#v, %#v) = %v, want %v", test.a, test.b, r, test.eq)
+ }
+ }
+}
+
+func TestTypeOf(t *testing.T) {
+ // Special case for nil
+ if typ := TypeOf(nil); typ != nil {
+ t.Errorf("expected nil type for nil value; got %v", typ)
+ }
+ for _, test := range deepEqualTests {
+ v := ValueOf(test.a)
+ if !v.IsValid() {
+ continue
+ }
+ typ := TypeOf(test.a)
+ if typ != v.Type() {
+ t.Errorf("TypeOf(%v) = %v, but ValueOf(%v).Type() = %v", test.a, typ, test.a, v.Type())
+ }
+ }
+}
+
+type Recursive struct {
+ x int
+ r *Recursive
+}
+
+func TestDeepEqualRecursiveStruct(t *testing.T) {
+ a, b := new(Recursive), new(Recursive)
+ *a = Recursive{12, a}
+ *b = Recursive{12, b}
+ if !DeepEqual(a, b) {
+ t.Error("DeepEqual(recursive same) = false, want true")
+ }
+}
+
+type _Complex struct {
+ a int
+ b [3]*_Complex
+ c *string
+ d map[float64]float64
+}
+
+func TestDeepEqualComplexStruct(t *testing.T) {
+ m := make(map[float64]float64)
+ stra, strb := "hello", "hello"
+ a, b := new(_Complex), new(_Complex)
+ *a = _Complex{5, [3]*_Complex{a, b, a}, &stra, m}
+ *b = _Complex{5, [3]*_Complex{b, a, a}, &strb, m}
+ if !DeepEqual(a, b) {
+ t.Error("DeepEqual(complex same) = false, want true")
+ }
+}
+
+func TestDeepEqualComplexStructInequality(t *testing.T) {
+ m := make(map[float64]float64)
+ stra, strb := "hello", "helloo" // Difference is here
+ a, b := new(_Complex), new(_Complex)
+ *a = _Complex{5, [3]*_Complex{a, b, a}, &stra, m}
+ *b = _Complex{5, [3]*_Complex{b, a, a}, &strb, m}
+ if DeepEqual(a, b) {
+ t.Error("DeepEqual(complex different) = true, want false")
+ }
+}
+
+type UnexpT struct {
+ m map[int]int
+}
+
+func TestDeepEqualUnexportedMap(t *testing.T) {
+ // Check that DeepEqual can look at unexported fields.
+ x1 := UnexpT{map[int]int{1: 2}}
+ x2 := UnexpT{map[int]int{1: 2}}
+ if !DeepEqual(&x1, &x2) {
+ t.Error("DeepEqual(x1, x2) = false, want true")
+ }
+
+ y1 := UnexpT{map[int]int{2: 3}}
+ if DeepEqual(&x1, &y1) {
+ t.Error("DeepEqual(x1, y1) = true, want false")
+ }
+}
+
+var deepEqualPerfTests = []struct {
+ x, y any
+}{
+ {x: int8(99), y: int8(99)},
+ {x: []int8{99}, y: []int8{99}},
+ {x: int16(99), y: int16(99)},
+ {x: []int16{99}, y: []int16{99}},
+ {x: int32(99), y: int32(99)},
+ {x: []int32{99}, y: []int32{99}},
+ {x: int64(99), y: int64(99)},
+ {x: []int64{99}, y: []int64{99}},
+ {x: int(999999), y: int(999999)},
+ {x: []int{999999}, y: []int{999999}},
+
+ {x: uint8(99), y: uint8(99)},
+ {x: []uint8{99}, y: []uint8{99}},
+ {x: uint16(99), y: uint16(99)},
+ {x: []uint16{99}, y: []uint16{99}},
+ {x: uint32(99), y: uint32(99)},
+ {x: []uint32{99}, y: []uint32{99}},
+ {x: uint64(99), y: uint64(99)},
+ {x: []uint64{99}, y: []uint64{99}},
+ {x: uint(999999), y: uint(999999)},
+ {x: []uint{999999}, y: []uint{999999}},
+ {x: uintptr(999999), y: uintptr(999999)},
+ {x: []uintptr{999999}, y: []uintptr{999999}},
+
+ {x: float32(1.414), y: float32(1.414)},
+ {x: []float32{1.414}, y: []float32{1.414}},
+ {x: float64(1.414), y: float64(1.414)},
+ {x: []float64{1.414}, y: []float64{1.414}},
+
+ {x: complex64(1.414), y: complex64(1.414)},
+ {x: []complex64{1.414}, y: []complex64{1.414}},
+ {x: complex128(1.414), y: complex128(1.414)},
+ {x: []complex128{1.414}, y: []complex128{1.414}},
+
+ {x: true, y: true},
+ {x: []bool{true}, y: []bool{true}},
+
+ {x: "abcdef", y: "abcdef"},
+ {x: []string{"abcdef"}, y: []string{"abcdef"}},
+
+ {x: []byte("abcdef"), y: []byte("abcdef")},
+ {x: [][]byte{[]byte("abcdef")}, y: [][]byte{[]byte("abcdef")}},
+
+ {x: [6]byte{'a', 'b', 'c', 'a', 'b', 'c'}, y: [6]byte{'a', 'b', 'c', 'a', 'b', 'c'}},
+ {x: [][6]byte{[6]byte{'a', 'b', 'c', 'a', 'b', 'c'}}, y: [][6]byte{[6]byte{'a', 'b', 'c', 'a', 'b', 'c'}}},
+}
+
+func TestDeepEqualAllocs(t *testing.T) {
+ for _, tt := range deepEqualPerfTests {
+ t.Run(ValueOf(tt.x).Type().String(), func(t *testing.T) {
+ got := testing.AllocsPerRun(100, func() {
+ if !DeepEqual(tt.x, tt.y) {
+ t.Errorf("DeepEqual(%v, %v)=false", tt.x, tt.y)
+ }
+ })
+ if int(got) != 0 {
+ t.Errorf("DeepEqual(%v, %v) allocated %d times", tt.x, tt.y, int(got))
+ }
+ })
+ }
+}
+
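+// check2ndField verifies that reflect reports offs as the offset of x's second field.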
+func check2ndField(x any, offs uintptr, t *testing.T) {
+ s := ValueOf(x)
+ f := s.Type().Field(1)
+ if f.Offset != offs {
+ t.Error("mismatched offsets in structure alignment:", f.Offset, offs)
+ }
+}
+
+// Check that structure alignment & offsets viewed through reflect agree with those
+// from the compiler itself.
+func TestAlignment(t *testing.T) {
+ type T1inner struct {
+ a int
+ }
+ type T1 struct {
+ T1inner
+ f int
+ }
+ type T2inner struct {
+ a, b int
+ }
+ type T2 struct {
+ T2inner
+ f int
+ }
+
+ x := T1{T1inner{2}, 17}
+ check2ndField(x, uintptr(unsafe.Pointer(&x.f))-uintptr(unsafe.Pointer(&x)), t)
+
+ x1 := T2{T2inner{2, 3}, 17}
+ check2ndField(x1, uintptr(unsafe.Pointer(&x1.f))-uintptr(unsafe.Pointer(&x1)), t)
+}
+
+func Nil(a any, t *testing.T) {
+ n := ValueOf(a).Field(0)
+ if !n.IsNil() {
+ t.Errorf("%v should be nil", a)
+ }
+}
+
+func NotNil(a any, t *testing.T) {
+ n := ValueOf(a).Field(0)
+ if n.IsNil() {
+ t.Errorf("value of type %v should not be nil", ValueOf(a).Type().String())
+ }
+}
+
+func TestIsNil(t *testing.T) {
+ // These implement IsNil.
+ // Wrap in extra struct to hide interface type.
+ doNil := []any{
+ struct{ x *int }{},
+ struct{ x any }{},
+ struct{ x map[string]int }{},
+ struct{ x func() bool }{},
+ struct{ x chan int }{},
+ struct{ x []string }{},
+ struct{ x unsafe.Pointer }{},
+ }
+ for _, ts := range doNil {
+ ty := TypeOf(ts).Field(0).Type
+ v := Zero(ty)
+ v.IsNil() // panics if not okay to call
+ }
+
+ // Check the implementations
+ var pi struct {
+ x *int
+ }
+ Nil(pi, t)
+ pi.x = new(int)
+ NotNil(pi, t)
+
+ var si struct {
+ x []int
+ }
+ Nil(si, t)
+ si.x = make([]int, 10)
+ NotNil(si, t)
+
+ var ci struct {
+ x chan int
+ }
+ Nil(ci, t)
+ ci.x = make(chan int)
+ NotNil(ci, t)
+
+ var mi struct {
+ x map[int]int
+ }
+ Nil(mi, t)
+ mi.x = make(map[int]int)
+ NotNil(mi, t)
+
+ var ii struct {
+ x any
+ }
+ Nil(ii, t)
+ ii.x = 2
+ NotNil(ii, t)
+
+ var fi struct {
+ x func(t *testing.T)
+ }
+ Nil(fi, t)
+ fi.x = TestIsNil
+ NotNil(fi, t)
+}
+
+func TestIsZero(t *testing.T) {
+ for i, tt := range []struct {
+ x any
+ want bool
+ }{
+ // Booleans
+ {true, false},
+ {false, true},
+ // Numeric types
+ {int(0), true},
+ {int(1), false},
+ {int8(0), true},
+ {int8(1), false},
+ {int16(0), true},
+ {int16(1), false},
+ {int32(0), true},
+ {int32(1), false},
+ {int64(0), true},
+ {int64(1), false},
+ {uint(0), true},
+ {uint(1), false},
+ {uint8(0), true},
+ {uint8(1), false},
+ {uint16(0), true},
+ {uint16(1), false},
+ {uint32(0), true},
+ {uint32(1), false},
+ {uint64(0), true},
+ {uint64(1), false},
+ {float32(0), true},
+ {float32(1.2), false},
+ {float64(0), true},
+ {float64(1.2), false},
+ {math.Copysign(0, -1), false},
+ {complex64(0), true},
+ {complex64(1.2), false},
+ {complex128(0), true},
+ {complex128(1.2), false},
+ {complex(math.Copysign(0, -1), 0), false},
+ {complex(0, math.Copysign(0, -1)), false},
+ {complex(math.Copysign(0, -1), math.Copysign(0, -1)), false},
+ {uintptr(0), true},
+ {uintptr(128), false},
+ // Array
+ {Zero(TypeOf([5]string{})).Interface(), true},
+ {[5]string{}, true}, // comparable array
+ {[5]string{"", "", "", "a", ""}, false}, // comparable array
+ {[1]*int{}, true}, // direct pointer array
+ {[1]*int{new(int)}, false}, // direct pointer array
+ {[3][]int{}, true}, // incomparable array
+ {[3][]int{{1}}, false}, // incomparable array
+ {[1 << 12]byte{}, true},
+ {[1 << 12]byte{1}, false},
+ {[3]Value{}, true},
+ {[3]Value{{}, ValueOf(0), {}}, false},
+ // Chan
+ {(chan string)(nil), true},
+ {make(chan string), false},
+ {time.After(1), false},
+ // Func
+ {(func())(nil), true},
+ {New, false},
+ // Interface
+ {New(TypeOf(new(error)).Elem()).Elem(), true},
+ {(io.Reader)(strings.NewReader("")), false},
+ // Map
+ {(map[string]string)(nil), true},
+ {map[string]string{}, false},
+ {make(map[string]string), false},
+ // Pointer
+ {(*func())(nil), true},
+ {(*int)(nil), true},
+ {new(int), false},
+ // Slice
+ {[]string{}, false},
+ {([]string)(nil), true},
+ {make([]string, 0), false},
+ // Strings
+ {"", true},
+ {"not-zero", false},
+ // Structs
+ {T{}, true}, // comparable struct
+ {T{123, 456.75, "hello", &_i}, false}, // comparable struct
+ {struct{ p *int }{}, true}, // direct pointer struct
+ {struct{ p *int }{new(int)}, false}, // direct pointer struct
+ {struct{ s []int }{}, true}, // incomparable struct
+ {struct{ s []int }{[]int{1}}, false}, // incomparable struct
+ {struct{ Value }{}, true},
+ {struct{ Value }{ValueOf(0)}, false},
+ // UnsafePointer
+ {(unsafe.Pointer)(nil), true},
+ {(unsafe.Pointer)(new(int)), false},
+ } {
+ var x Value
+ if v, ok := tt.x.(Value); ok {
+ x = v
+ } else {
+ x = ValueOf(tt.x)
+ }
+
+ b := x.IsZero()
+ if b != tt.want {
+ t.Errorf("%d: IsZero((%s)(%+v)) = %t, want %t", i, x.Kind(), tt.x, b, tt.want)
+ }
+
+ if !Zero(TypeOf(tt.x)).IsZero() {
+ t.Errorf("%d: IsZero(Zero(TypeOf((%s)(%+v)))) is false", i, x.Kind(), tt.x)
+ }
+
+ p := New(x.Type()).Elem()
+ p.Set(x)
+ p.SetZero()
+ if !p.IsZero() {
+ t.Errorf("%d: IsZero((%s)(%+v)) is true after SetZero", i, p.Kind(), tt.x)
+ }
+ }
+
+ func() {
+ defer func() {
+ if r := recover(); r == nil {
+ t.Error("should panic for invalid value")
+ }
+ }()
+ (Value{}).IsZero()
+ }()
+}
+
+func TestInterfaceExtraction(t *testing.T) {
+ var s struct {
+ W io.Writer
+ }
+
+ s.W = os.Stdout
+ v := Indirect(ValueOf(&s)).Field(0).Interface()
+ if v != s.W.(any) {
+ t.Error("Interface() on interface: ", v, s.W)
+ }
+}
+
+func TestNilPtrValueSub(t *testing.T) {
+ var pi *int
+ if pv := ValueOf(pi); pv.Elem().IsValid() {
+ t.Error("ValueOf((*int)(nil)).Elem().IsValid()")
+ }
+}
+
+func TestMap(t *testing.T) {
+ m := map[string]int{"a": 1, "b": 2}
+ mv := ValueOf(m)
+ if n := mv.Len(); n != len(m) {
+ t.Errorf("Len = %d, want %d", n, len(m))
+ }
+ keys := mv.MapKeys()
+ newmap := MakeMap(mv.Type())
+ for k, v := range m {
+ // Check that returned Keys match keys in range.
+ // These aren't required to be in the same order.
+ seen := false
+ for _, kv := range keys {
+ if kv.String() == k {
+ seen = true
+ break
+ }
+ }
+ if !seen {
+ t.Errorf("Missing key %q", k)
+ }
+
+ // Check that value lookup is correct.
+ vv := mv.MapIndex(ValueOf(k))
+ if vi := vv.Int(); vi != int64(v) {
+ t.Errorf("Key %q: have value %d, want %d", k, vi, v)
+ }
+
+ // Copy into new map.
+ newmap.SetMapIndex(ValueOf(k), ValueOf(v))
+ }
+ vv := mv.MapIndex(ValueOf("not-present"))
+ if vv.IsValid() {
+ t.Errorf("Invalid key: got non-nil value %s", valueToString(vv))
+ }
+
+ newm := newmap.Interface().(map[string]int)
+ if len(newm) != len(m) {
+ t.Errorf("length after copy: newm=%d, m=%d", len(newm), len(m))
+ }
+
+ for k, v := range newm {
+ mv, ok := m[k]
+ if mv != v {
+ t.Errorf("newm[%q] = %d, but m[%q] = %d, %v", k, v, k, mv, ok)
+ }
+ }
+
+ newmap.SetMapIndex(ValueOf("a"), Value{})
+ v, ok := newm["a"]
+ if ok {
+ t.Errorf("newm[\"a\"] = %d after delete", v)
+ }
+
+ mv = ValueOf(&m).Elem()
+ mv.Set(Zero(mv.Type()))
+ if m != nil {
+ t.Errorf("mv.Set(nil) failed")
+ }
+
+ type S string
+ shouldPanic("not assignable", func() { mv.MapIndex(ValueOf(S("key"))) })
+ shouldPanic("not assignable", func() { mv.SetMapIndex(ValueOf(S("key")), ValueOf(0)) })
+}
+
+func TestNilMap(t *testing.T) {
+ var m map[string]int
+ mv := ValueOf(m)
+ keys := mv.MapKeys()
+ if len(keys) != 0 {
+ t.Errorf(">0 keys for nil map: %v", keys)
+ }
+
+ // Check that value for missing key is zero.
+ x := mv.MapIndex(ValueOf("hello"))
+ if x.Kind() != Invalid {
+ t.Errorf("m.MapIndex(\"hello\") for nil map = %v, want Invalid Value", x)
+ }
+
+ // Check big value too.
+ var mbig map[string][10 << 20]byte
+ x = ValueOf(mbig).MapIndex(ValueOf("hello"))
+ if x.Kind() != Invalid {
+ t.Errorf("mbig.MapIndex(\"hello\") for nil map = %v, want Invalid Value", x)
+ }
+
+ // Test that deletes from a nil map succeed.
+ mv.SetMapIndex(ValueOf("hi"), Value{})
+}
+
+func TestChan(t *testing.T) {
+ for loop := 0; loop < 2; loop++ {
+ var c chan int
+ var cv Value
+
+ // check both ways to allocate channels
+ switch loop {
+ case 1:
+ c = make(chan int, 1)
+ cv = ValueOf(c)
+ case 0:
+ cv = MakeChan(TypeOf(c), 1)
+ c = cv.Interface().(chan int)
+ }
+
+ // Send
+ cv.Send(ValueOf(2))
+ if i := <-c; i != 2 {
+ t.Errorf("reflect Send 2, native recv %d", i)
+ }
+
+ // Recv
+ c <- 3
+ if i, ok := cv.Recv(); i.Int() != 3 || !ok {
+ t.Errorf("native send 3, reflect Recv %d, %t", i.Int(), ok)
+ }
+
+ // TryRecv fail
+ val, ok := cv.TryRecv()
+ if val.IsValid() || ok {
+ t.Errorf("TryRecv on empty chan: %s, %t", valueToString(val), ok)
+ }
+
+ // TryRecv success
+ c <- 4
+ val, ok = cv.TryRecv()
+ if !val.IsValid() {
+ t.Errorf("TryRecv on ready chan got nil")
+ } else if i := val.Int(); i != 4 || !ok {
+ t.Errorf("native send 4, TryRecv %d, %t", i, ok)
+ }
+
+ // TrySend fail
+ c <- 100
+ ok = cv.TrySend(ValueOf(5))
+ i := <-c
+ if ok {
+ t.Errorf("TrySend on full chan succeeded: value %d", i)
+ }
+
+ // TrySend success
+ ok = cv.TrySend(ValueOf(6))
+ if !ok {
+ t.Errorf("TrySend on empty chan failed")
+ select {
+ case x := <-c:
+ t.Errorf("TrySend failed but it did send %d", x)
+ default:
+ }
+ } else {
+ if i = <-c; i != 6 {
+ t.Errorf("TrySend 6, recv %d", i)
+ }
+ }
+
+ // Close
+ c <- 123
+ cv.Close()
+ if i, ok := cv.Recv(); i.Int() != 123 || !ok {
+ t.Errorf("send 123 then close; Recv %d, %t", i.Int(), ok)
+ }
+ if i, ok := cv.Recv(); i.Int() != 0 || ok {
+ t.Errorf("after close Recv %d, %t", i.Int(), ok)
+ }
+ }
+
+ // check creation of unbuffered channel
+ var c chan int
+ cv := MakeChan(TypeOf(c), 0)
+ c = cv.Interface().(chan int)
+ if cv.TrySend(ValueOf(7)) {
+ t.Errorf("TrySend on sync chan succeeded")
+ }
+ if v, ok := cv.TryRecv(); v.IsValid() || ok {
+ t.Errorf("TryRecv on sync chan succeeded: isvalid=%v ok=%v", v.IsValid(), ok)
+ }
+
+ // len/cap
+ cv = MakeChan(TypeOf(c), 10)
+ c = cv.Interface().(chan int)
+ for i := 0; i < 3; i++ {
+ c <- i
+ }
+ if l, m := cv.Len(), cv.Cap(); l != len(c) || m != cap(c) {
+ t.Errorf("Len/Cap = %d/%d want %d/%d", l, m, len(c), cap(c))
+ }
+}
+
+// caseInfo describes a single case in a select test.
+type caseInfo struct {
+ desc string // human-readable description of the case
+ canSelect bool // this case can be chosen by the select
+ recv Value // value expected to be received, if any
+ closed bool // the channel has been closed, so a receive reports ok == false
+ helper func() // run in another goroutine to unblock this case, if needed
+ panic bool // selecting this case should panic (send on a closed channel)
+}
+
+var allselect = flag.Bool("allselect", false, "exhaustive select test")
+
+func TestSelect(t *testing.T) {
+ selectWatch.once.Do(func() { go selectWatcher() })
+
+ var x exhaustive
+ nch := 0
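+ // newop makes either a chan int or a chan string (varying with nch) with the given
+ // capacity, plus a matching value to send on it.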
+ newop := func(n int, cap int) (ch, val Value) {
+ nch++
+ if nch%101%2 == 1 {
+ c := make(chan int, cap)
+ ch = ValueOf(c)
+ val = ValueOf(n)
+ } else {
+ c := make(chan string, cap)
+ ch = ValueOf(c)
+ val = ValueOf(fmt.Sprint(n))
+ }
+ return
+ }
+
+ for n := 0; x.Next(); n++ {
+ if testing.Short() && n >= 1000 {
+ break
+ }
+ if n >= 100000 && !*allselect {
+ break
+ }
+ if n%100000 == 0 && testing.Verbose() {
+ println("TestSelect", n)
+ }
+ var cases []SelectCase
+ var info []caseInfo
+
+ // Ready send.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 1)
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ch,
+ Send: val,
+ })
+ info = append(info, caseInfo{desc: "ready send", canSelect: true})
+ }
+
+ // Ready recv.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 1)
+ ch.Send(val)
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ch,
+ })
+ info = append(info, caseInfo{desc: "ready recv", canSelect: true, recv: val})
+ }
+
+ // Blocking send.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 0)
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ch,
+ Send: val,
+ })
+ // Let it execute?
+ if x.Maybe() {
+ f := func() { ch.Recv() }
+ info = append(info, caseInfo{desc: "blocking send", helper: f})
+ } else {
+ info = append(info, caseInfo{desc: "blocking send"})
+ }
+ }
+
+ // Blocking recv.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 0)
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ch,
+ })
+ // Let it execute?
+ if x.Maybe() {
+ f := func() { ch.Send(val) }
+ info = append(info, caseInfo{desc: "blocking recv", recv: val, helper: f})
+ } else {
+ info = append(info, caseInfo{desc: "blocking recv"})
+ }
+ }
+
+ // Zero Chan send.
+ if x.Maybe() {
+ // Maybe include value to send.
+ var val Value
+ if x.Maybe() {
+ val = ValueOf(100)
+ }
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Send: val,
+ })
+ info = append(info, caseInfo{desc: "zero Chan send"})
+ }
+
+ // Zero Chan receive.
+ if x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ })
+ info = append(info, caseInfo{desc: "zero Chan recv"})
+ }
+
+ // nil Chan send.
+ if x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ValueOf((chan int)(nil)),
+ Send: ValueOf(101),
+ })
+ info = append(info, caseInfo{desc: "nil Chan send"})
+ }
+
+ // nil Chan recv.
+ if x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ValueOf((chan int)(nil)),
+ })
+ info = append(info, caseInfo{desc: "nil Chan recv"})
+ }
+
+ // closed Chan send.
+ if x.Maybe() {
+ ch := make(chan int)
+ close(ch)
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ValueOf(ch),
+ Send: ValueOf(101),
+ })
+ info = append(info, caseInfo{desc: "closed Chan send", canSelect: true, panic: true})
+ }
+
+ // closed Chan recv.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 0)
+ ch.Close()
+ val = Zero(val.Type())
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ch,
+ })
+ info = append(info, caseInfo{desc: "closed Chan recv", canSelect: true, closed: true, recv: val})
+ }
+
+ var helper func() // goroutine to help the select complete
+
+ // Add default? Must be last case here, but will permute.
+ // Add the default if the select would otherwise
+ // block forever, and maybe add it anyway.
+ numCanSelect := 0
+ canProceed := false
+ canBlock := true
+ canPanic := false
+ helpers := []int{}
+ for i, c := range info {
+ if c.canSelect {
+ canProceed = true
+ canBlock = false
+ numCanSelect++
+ if c.panic {
+ canPanic = true
+ }
+ } else if c.helper != nil {
+ canProceed = true
+ helpers = append(helpers, i)
+ }
+ }
+ if !canProceed || x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectDefault,
+ })
+ info = append(info, caseInfo{desc: "default", canSelect: canBlock})
+ numCanSelect++
+ } else if canBlock {
+ // Select needs to communicate with another goroutine.
+ cas := &info[helpers[x.Choose(len(helpers))]]
+ helper = cas.helper
+ cas.canSelect = true
+ numCanSelect++
+ }
+
+ // Permute cases and case info.
+ // Doing too much here makes the exhaustive loop
+ // too exhausting, so just do two swaps.
+ for loop := 0; loop < 2; loop++ {
+ i := x.Choose(len(cases))
+ j := x.Choose(len(cases))
+ cases[i], cases[j] = cases[j], cases[i]
+ info[i], info[j] = info[j], info[i]
+ }
+
+ if helper != nil {
+ // We wait before kicking off a goroutine to satisfy a blocked select.
+ // The pause needs to be big enough to let the select block before
+ // we run the helper, but if we lose that race once in a while it's okay: the
+ // select will just proceed immediately. Not a big deal.
+ // For short tests we can grow [sic] the timeout a bit without fear of taking too long
+ pause := 10 * time.Microsecond
+ if testing.Short() {
+ pause = 100 * time.Microsecond
+ }
+ time.AfterFunc(pause, helper)
+ }
+
+ // Run select.
+ i, recv, recvOK, panicErr := runSelect(cases, info)
+ if panicErr != nil && !canPanic {
+ t.Fatalf("%s\npanicked unexpectedly: %v", fmtSelect(info), panicErr)
+ }
+ if panicErr == nil && canPanic && numCanSelect == 1 {
+ t.Fatalf("%s\nselected #%d incorrectly (should panic)", fmtSelect(info), i)
+ }
+ if panicErr != nil {
+ continue
+ }
+
+ cas := info[i]
+ if !cas.canSelect {
+ recvStr := ""
+ if recv.IsValid() {
+ recvStr = fmt.Sprintf(", received %v, %v", recv.Interface(), recvOK)
+ }
+ t.Fatalf("%s\nselected #%d incorrectly%s", fmtSelect(info), i, recvStr)
+ }
+ if cas.panic {
+ t.Fatalf("%s\nselected #%d incorrectly (case should panic)", fmtSelect(info), i)
+ }
+
+ if cases[i].Dir == SelectRecv {
+ if !recv.IsValid() {
+ t.Fatalf("%s\nselected #%d but got %v, %v, want %v, %v", fmtSelect(info), i, recv, recvOK, cas.recv.Interface(), !cas.closed)
+ }
+ if !cas.recv.IsValid() {
+ t.Fatalf("%s\nselected #%d but internal error: missing recv value", fmtSelect(info), i)
+ }
+ if recv.Interface() != cas.recv.Interface() || recvOK != !cas.closed {
+ if recv.Interface() == cas.recv.Interface() && recvOK == !cas.closed {
+ t.Fatalf("%s\nselected #%d, got %#v, %v, and DeepEqual is broken on %T", fmtSelect(info), i, recv.Interface(), recvOK, recv.Interface())
+ }
+ t.Fatalf("%s\nselected #%d but got %#v, %v, want %#v, %v", fmtSelect(info), i, recv.Interface(), recvOK, cas.recv.Interface(), !cas.closed)
+ }
+ } else {
+ if recv.IsValid() || recvOK {
+ t.Fatalf("%s\nselected #%d but got %v, %v, want %v, %v", fmtSelect(info), i, recv, recvOK, Value{}, false)
+ }
+ }
+ }
+}
+
+func TestSelectMaxCases(t *testing.T) {
+ var sCases []SelectCase
+ channel := make(chan int)
+ close(channel)
+ for i := 0; i < 65536; i++ {
+ sCases = append(sCases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ValueOf(channel),
+ })
+ }
+ // Should not panic
+ _, _, _ = Select(sCases)
+ sCases = append(sCases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ValueOf(channel),
+ })
+ defer func() {
+ if err := recover(); err != nil {
+ if err.(string) != "reflect.Select: too many cases (max 65536)" {
+ t.Fatalf("unexpected error from select call with greater than max supported cases")
+ }
+ } else {
+ t.Fatalf("expected select call to panic with greater than max supported cases")
+ }
+ }()
+ // Should panic
+ _, _, _ = Select(sCases)
+}
+
+func TestSelectNop(t *testing.T) {
+ // "select { default: }" should always return the default case.
+ chosen, _, _ := Select([]SelectCase{{Dir: SelectDefault}})
+ if chosen != 0 {
+ t.Fatalf("expected Select to return 0, but got %#v", chosen)
+ }
+}
+
+// selectWatch and the selectWatcher are a watchdog mechanism for running Select.
+// If the selectWatcher notices that the select has been blocked for more than 10 seconds, it prints
+// an error describing the select and panics the entire test binary.
+var selectWatch struct {
+ sync.Mutex
+ once sync.Once
+ now time.Time
+ info []caseInfo
+}
+
+func selectWatcher() {
+ for {
+ time.Sleep(1 * time.Second)
+ selectWatch.Lock()
+ if selectWatch.info != nil && time.Since(selectWatch.now) > 10*time.Second {
+ fmt.Fprintf(os.Stderr, "TestSelect:\n%s blocked indefinitely\n", fmtSelect(selectWatch.info))
+ panic("select stuck")
+ }
+ selectWatch.Unlock()
+ }
+}
+
+// runSelect runs a single select test.
+// It returns the values returned by Select but also returns
+// a panic value if the Select panics.
+func runSelect(cases []SelectCase, info []caseInfo) (chosen int, recv Value, recvOK bool, panicErr any) {
+ defer func() {
+ panicErr = recover()
+
+ selectWatch.Lock()
+ selectWatch.info = nil
+ selectWatch.Unlock()
+ }()
+
+ selectWatch.Lock()
+ selectWatch.now = time.Now()
+ selectWatch.info = info
+ selectWatch.Unlock()
+
+ chosen, recv, recvOK = Select(cases)
+ return
+}
+
+// fmtSelect formats the information about a single select test.
+func fmtSelect(info []caseInfo) string {
+ var buf strings.Builder
+ fmt.Fprintf(&buf, "\nselect {\n")
+ for i, cas := range info {
+ fmt.Fprintf(&buf, "%d: %s", i, cas.desc)
+ if cas.recv.IsValid() {
+ fmt.Fprintf(&buf, " val=%#v", cas.recv.Interface())
+ }
+ if cas.canSelect {
+ fmt.Fprintf(&buf, " canselect")
+ }
+ if cas.panic {
+ fmt.Fprintf(&buf, " panic")
+ }
+ fmt.Fprintf(&buf, "\n")
+ }
+ fmt.Fprintf(&buf, "}")
+ return buf.String()
+}
+
+type two [2]uintptr
+
+// Difficult test for function call because of
+// implicit padding between arguments.
+func dummy(b byte, c int, d byte, e two, f byte, g float32, h byte) (i byte, j int, k byte, l two, m byte, n float32, o byte) {
+ return b, c, d, e, f, g, h
+}
+
+func TestFunc(t *testing.T) {
+ ret := ValueOf(dummy).Call([]Value{
+ ValueOf(byte(10)),
+ ValueOf(20),
+ ValueOf(byte(30)),
+ ValueOf(two{40, 50}),
+ ValueOf(byte(60)),
+ ValueOf(float32(70)),
+ ValueOf(byte(80)),
+ })
+ if len(ret) != 7 {
+ t.Fatalf("Call returned %d values, want 7", len(ret))
+ }
+
+ i := byte(ret[0].Uint())
+ j := int(ret[1].Int())
+ k := byte(ret[2].Uint())
+ l := ret[3].Interface().(two)
+ m := byte(ret[4].Uint())
+ n := float32(ret[5].Float())
+ o := byte(ret[6].Uint())
+
+ if i != 10 || j != 20 || k != 30 || l != (two{40, 50}) || m != 60 || n != 70 || o != 80 {
+ t.Errorf("Call returned %d, %d, %d, %v, %d, %g, %d; want 10, 20, 30, [40, 50], 60, 70, 80", i, j, k, l, m, n, o)
+ }
+
+ for i, v := range ret {
+ if v.CanAddr() {
+ t.Errorf("result %d is addressable", i)
+ }
+ }
+}
+
+func TestCallConvert(t *testing.T) {
+ v := ValueOf(new(io.ReadWriter)).Elem()
+ f := ValueOf(func(r io.Reader) io.Reader { return r })
+ out := f.Call([]Value{v})
+ if len(out) != 1 || out[0].Type() != TypeOf(new(io.Reader)).Elem() || !out[0].IsNil() {
+ t.Errorf("expected [nil], got %v", out)
+ }
+}
+
+type emptyStruct struct{}
+
+type nonEmptyStruct struct {
+ member int
+}
+
+func returnEmpty() emptyStruct {
+ return emptyStruct{}
+}
+
+func takesEmpty(e emptyStruct) {
+}
+
+func returnNonEmpty(i int) nonEmptyStruct {
+ return nonEmptyStruct{member: i}
+}
+
+func takesNonEmpty(n nonEmptyStruct) int {
+ return n.member
+}
+
+func TestCallWithStruct(t *testing.T) {
+ r := ValueOf(returnEmpty).Call(nil)
+ if len(r) != 1 || r[0].Type() != TypeOf(emptyStruct{}) {
+ t.Errorf("returning empty struct returned %#v instead", r)
+ }
+ r = ValueOf(takesEmpty).Call([]Value{ValueOf(emptyStruct{})})
+ if len(r) != 0 {
+ t.Errorf("takesEmpty returned values: %#v", r)
+ }
+ r = ValueOf(returnNonEmpty).Call([]Value{ValueOf(42)})
+ if len(r) != 1 || r[0].Type() != TypeOf(nonEmptyStruct{}) || r[0].Field(0).Int() != 42 {
+ t.Errorf("returnNonEmpty returned %#v", r)
+ }
+ r = ValueOf(takesNonEmpty).Call([]Value{ValueOf(nonEmptyStruct{member: 42})})
+ if len(r) != 1 || r[0].Type() != TypeOf(1) || r[0].Int() != 42 {
+ t.Errorf("takesNonEmpty returned %#v", r)
+ }
+}
+
+func TestCallReturnsEmpty(t *testing.T) {
+ // Issue 21717: past-the-end pointer write in Call with
+ // nonzero-sized frame and zero-sized return value.
+ runtime.GC()
+ var finalized uint32
+ f := func() (emptyStruct, *[2]int64) {
+ i := new([2]int64) // big enough to not be tinyalloc'd, so finalizer always runs when i dies
+ runtime.SetFinalizer(i, func(*[2]int64) { atomic.StoreUint32(&finalized, 1) })
+ return emptyStruct{}, i
+ }
+ v := ValueOf(f).Call(nil)[0] // out[0] should not alias out[1]'s memory, so the finalizer should run.
+ timeout := time.After(5 * time.Second)
+ for atomic.LoadUint32(&finalized) == 0 {
+ select {
+ case <-timeout:
+ t.Fatal("finalizer did not run")
+ default:
+ }
+ runtime.Gosched()
+ runtime.GC()
+ }
+ runtime.KeepAlive(v)
+}
+
+func TestMakeFunc(t *testing.T) {
+ f := dummy
+ fv := MakeFunc(TypeOf(f), func(in []Value) []Value { return in })
+ ValueOf(&f).Elem().Set(fv)
+
+ // Call g with small arguments so that there is
+ // something predictable (and different from the
+ // correct results) in those positions on the stack.
+ g := dummy
+ g(1, 2, 3, two{4, 5}, 6, 7, 8)
+
+ // Call constructed function f.
+ i, j, k, l, m, n, o := f(10, 20, 30, two{40, 50}, 60, 70, 80)
+ if i != 10 || j != 20 || k != 30 || l != (two{40, 50}) || m != 60 || n != 70 || o != 80 {
+ t.Errorf("Call returned %d, %d, %d, %v, %d, %g, %d; want 10, 20, 30, [40, 50], 60, 70, 80", i, j, k, l, m, n, o)
+ }
+}
+
+func TestMakeFuncInterface(t *testing.T) {
+ fn := func(i int) int { return i }
+ incr := func(in []Value) []Value {
+ return []Value{ValueOf(int(in[0].Int() + 1))}
+ }
+ fv := MakeFunc(TypeOf(fn), incr)
+ ValueOf(&fn).Elem().Set(fv)
+ if r := fn(2); r != 3 {
+ t.Errorf("Call returned %d, want 3", r)
+ }
+ if r := fv.Call([]Value{ValueOf(14)})[0].Int(); r != 15 {
+ t.Errorf("Call returned %d, want 15", r)
+ }
+ if r := fv.Interface().(func(int) int)(26); r != 27 {
+ t.Errorf("Call returned %d, want 27", r)
+ }
+}
+
+func TestMakeFuncVariadic(t *testing.T) {
+ // Test that variadic arguments are packed into a slice and passed as last arg
+ fn := func(_ int, is ...int) []int { return nil }
+ fv := MakeFunc(TypeOf(fn), func(in []Value) []Value { return in[1:2] })
+ ValueOf(&fn).Elem().Set(fv)
+
+ r := fn(1, 2, 3)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+
+ r = fn(1, []int{2, 3}...)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+
+ r = fv.Call([]Value{ValueOf(1), ValueOf(2), ValueOf(3)})[0].Interface().([]int)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+
+ r = fv.CallSlice([]Value{ValueOf(1), ValueOf([]int{2, 3})})[0].Interface().([]int)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+
+ f := fv.Interface().(func(int, ...int) []int)
+
+ r = f(1, 2, 3)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+ r = f(1, []int{2, 3}...)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+}
+
+// Dummy type that implements io.WriteCloser
+type WC struct {
+}
+
+func (w *WC) Write(p []byte) (n int, err error) {
+ return 0, nil
+}
+func (w *WC) Close() error {
+ return nil
+}
+
+func TestMakeFuncValidReturnAssignments(t *testing.T) {
+ // reflect.Values returned from the wrapped function should be assignment-converted
+ // to the result types of the function created by MakeFunc.
+
+ // Concrete types should be promotable to interfaces they implement.
+ var f func() error
+ f = MakeFunc(TypeOf(f), func([]Value) []Value {
+ return []Value{ValueOf(io.EOF)}
+ }).Interface().(func() error)
+ f()
+
+ // Super-interfaces should be promotable to simpler interfaces.
+ var g func() io.Writer
+ g = MakeFunc(TypeOf(g), func([]Value) []Value {
+ var w io.WriteCloser = &WC{}
+ return []Value{ValueOf(&w).Elem()}
+ }).Interface().(func() io.Writer)
+ g()
+
+ // Channels should be promotable to directional channels.
+ var h func() <-chan int
+ h = MakeFunc(TypeOf(h), func([]Value) []Value {
+ return []Value{ValueOf(make(chan int))}
+ }).Interface().(func() <-chan int)
+ h()
+
+ // Unnamed types should be promotable to named types.
+ type T struct{ a, b, c int }
+ var i func() T
+ i = MakeFunc(TypeOf(i), func([]Value) []Value {
+ return []Value{ValueOf(struct{ a, b, c int }{a: 1, b: 2, c: 3})}
+ }).Interface().(func() T)
+ i()
+}
+
+func TestMakeFuncInvalidReturnAssignments(t *testing.T) {
+ // Type doesn't implement the required interface.
+ shouldPanic("", func() {
+ var f func() error
+ f = MakeFunc(TypeOf(f), func([]Value) []Value {
+ return []Value{ValueOf(int(7))}
+ }).Interface().(func() error)
+ f()
+ })
+ // Assigning to an interface with additional methods.
+ shouldPanic("", func() {
+ var f func() io.ReadWriteCloser
+ f = MakeFunc(TypeOf(f), func([]Value) []Value {
+ var w io.WriteCloser = &WC{}
+ return []Value{ValueOf(&w).Elem()}
+ }).Interface().(func() io.ReadWriteCloser)
+ f()
+ })
+ // Directional channels can't be assigned to bidirectional ones.
+ shouldPanic("", func() {
+ var f func() chan int
+ f = MakeFunc(TypeOf(f), func([]Value) []Value {
+ var c <-chan int = make(chan int)
+ return []Value{ValueOf(c)}
+ }).Interface().(func() chan int)
+ f()
+ })
+ // Two named types which are otherwise identical.
+ shouldPanic("", func() {
+ type T struct{ a, b, c int }
+ type U struct{ a, b, c int }
+ var f func() T
+ f = MakeFunc(TypeOf(f), func([]Value) []Value {
+ return []Value{ValueOf(U{a: 1, b: 2, c: 3})}
+ }).Interface().(func() T)
+ f()
+ })
+}
+
+type Point struct {
+ x, y int
+}
+
+// This will be index 0.
+func (p Point) AnotherMethod(scale int) int {
+ return -1
+}
+
+// This will be index 1.
+func (p Point) Dist(scale int) int {
+ //println("Point.Dist", p.x, p.y, scale)
+ return p.x*p.x*scale + p.y*p.y*scale
+}
+
+// This will be index 2.
+func (p Point) GCMethod(k int) int {
+ runtime.GC()
+ return k + p.x
+}
+
+// This will be index 3.
+func (p Point) NoArgs() {
+ // Exercise no-argument/no-result paths.
+}
+
+// This will be index 4.
+func (p Point) TotalDist(points ...Point) int {
+ tot := 0
+ for _, q := range points {
+ dx := q.x - p.x
+ dy := q.y - p.y
+ tot += dx*dx + dy*dy // Should call Sqrt, but it's just a test.
+ }
+ return tot
+}
+
+// This will be index 5.
+func (p *Point) Int64Method(x int64) int64 {
+ return x
+}
+
+// This will be index 6.
+func (p *Point) Int32Method(x int32) int32 {
+ return x
+}
+
+func TestMethod(t *testing.T) {
+ // Non-curried method of type.
+ p := Point{3, 4}
+ i := TypeOf(p).Method(1).Func.Call([]Value{ValueOf(p), ValueOf(10)})[0].Int()
+ if i != 250 {
+ t.Errorf("Type Method returned %d; want 250", i)
+ }
+
+ m, ok := TypeOf(p).MethodByName("Dist")
+ if !ok {
+ t.Fatalf("method by name failed")
+ }
+ i = m.Func.Call([]Value{ValueOf(p), ValueOf(11)})[0].Int()
+ if i != 275 {
+ t.Errorf("Type MethodByName returned %d; want 275", i)
+ }
+
+ m, ok = TypeOf(p).MethodByName("NoArgs")
+ if !ok {
+ t.Fatalf("method by name failed")
+ }
+ n := len(m.Func.Call([]Value{ValueOf(p)}))
+ if n != 0 {
+ t.Errorf("NoArgs returned %d values; want 0", n)
+ }
+
+ i = TypeOf(&p).Method(1).Func.Call([]Value{ValueOf(&p), ValueOf(12)})[0].Int()
+ if i != 300 {
+ t.Errorf("Pointer Type Method returned %d; want 300", i)
+ }
+
+ m, ok = TypeOf(&p).MethodByName("Dist")
+ if !ok {
+ t.Fatalf("ptr method by name failed")
+ }
+ i = m.Func.Call([]Value{ValueOf(&p), ValueOf(13)})[0].Int()
+ if i != 325 {
+ t.Errorf("Pointer Type MethodByName returned %d; want 325", i)
+ }
+
+ m, ok = TypeOf(&p).MethodByName("NoArgs")
+ if !ok {
+ t.Fatalf("method by name failed")
+ }
+ n = len(m.Func.Call([]Value{ValueOf(&p)}))
+ if n != 0 {
+ t.Errorf("NoArgs returned %d values; want 0", n)
+ }
+
+ _, ok = TypeOf(&p).MethodByName("AA")
+ if ok {
+ t.Errorf(`MethodByName("AA") should have failed`)
+ }
+
+ _, ok = TypeOf(&p).MethodByName("ZZ")
+ if ok {
+ t.Errorf(`MethodByName("ZZ") should have failed`)
+ }
+
+ // Curried method of value.
+ tfunc := TypeOf((func(int) int)(nil))
+ v := ValueOf(p).Method(1)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Value Method Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(14)})[0].Int()
+ if i != 350 {
+ t.Errorf("Value Method returned %d; want 350", i)
+ }
+ v = ValueOf(p).MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Value MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(15)})[0].Int()
+ if i != 375 {
+ t.Errorf("Value MethodByName returned %d; want 375", i)
+ }
+ v = ValueOf(p).MethodByName("NoArgs")
+ v.Call(nil)
+
+ // Curried method of pointer.
+ v = ValueOf(&p).Method(1)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Value Method Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(16)})[0].Int()
+ if i != 400 {
+ t.Errorf("Pointer Value Method returned %d; want 400", i)
+ }
+ v = ValueOf(&p).MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Value MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(17)})[0].Int()
+ if i != 425 {
+ t.Errorf("Pointer Value MethodByName returned %d; want 425", i)
+ }
+ v = ValueOf(&p).MethodByName("NoArgs")
+ v.Call(nil)
+
+ // Curried method of interface value.
+ // Have to wrap interface value in a struct to get at it.
+ // Passing it to ValueOf directly would
+ // access the underlying Point, not the interface.
+ var x interface {
+ Dist(int) int
+ } = p
+ pv := ValueOf(&x).Elem()
+ v = pv.Method(0)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Interface Method Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(18)})[0].Int()
+ if i != 450 {
+ t.Errorf("Interface Method returned %d; want 450", i)
+ }
+ v = pv.MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Interface MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(19)})[0].Int()
+ if i != 475 {
+ t.Errorf("Interface MethodByName returned %d; want 475", i)
+ }
+}
+
+func TestMethodValue(t *testing.T) {
+ p := Point{3, 4}
+ var i int64
+
+ // Check that method values have the same underlying code pointers.
+ if p1, p2 := ValueOf(Point{1, 1}).Method(1), ValueOf(Point{2, 2}).Method(1); p1.Pointer() != p2.Pointer() {
+ t.Errorf("methodValueCall mismatched: %v - %v", p1, p2)
+ }
+
+ // Curried method of value.
+ tfunc := TypeOf((func(int) int)(nil))
+ v := ValueOf(p).Method(1)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Value Method Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(10)})[0].Int()
+ if i != 250 {
+ t.Errorf("Value Method returned %d; want 250", i)
+ }
+ v = ValueOf(p).MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Value MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(11)})[0].Int()
+ if i != 275 {
+ t.Errorf("Value MethodByName returned %d; want 275", i)
+ }
+ v = ValueOf(p).MethodByName("NoArgs")
+ ValueOf(v.Interface()).Call(nil)
+ v.Interface().(func())()
+
+ // Curried method of pointer.
+ v = ValueOf(&p).Method(1)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Value Method Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(12)})[0].Int()
+ if i != 300 {
+ t.Errorf("Pointer Value Method returned %d; want 300", i)
+ }
+ v = ValueOf(&p).MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Value MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(13)})[0].Int()
+ if i != 325 {
+ t.Errorf("Pointer Value MethodByName returned %d; want 325", i)
+ }
+ v = ValueOf(&p).MethodByName("NoArgs")
+ ValueOf(v.Interface()).Call(nil)
+ v.Interface().(func())()
+
+ // Curried method of pointer to pointer.
+ pp := &p
+ v = ValueOf(&pp).Elem().Method(1)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Pointer Value Method Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(14)})[0].Int()
+ if i != 350 {
+ t.Errorf("Pointer Pointer Value Method returned %d; want 350", i)
+ }
+ v = ValueOf(&pp).Elem().MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Pointer Value MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(15)})[0].Int()
+ if i != 375 {
+ t.Errorf("Pointer Pointer Value MethodByName returned %d; want 375", i)
+ }
+
+ // Curried method of interface value.
+ // Have to wrap interface value in a struct to get at it.
+ // Passing it to ValueOf directly would
+ // access the underlying Point, not the interface.
+ var s = struct {
+ X interface {
+ Dist(int) int
+ }
+ }{p}
+ pv := ValueOf(s).Field(0)
+ v = pv.Method(0)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Interface Method Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(16)})[0].Int()
+ if i != 400 {
+ t.Errorf("Interface Method returned %d; want 400", i)
+ }
+ v = pv.MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Interface MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(17)})[0].Int()
+ if i != 425 {
+ t.Errorf("Interface MethodByName returned %d; want 425", i)
+ }
+
+ // For issue #33628: method args are not stored at the right offset
+ // on amd64p32.
+ m64 := ValueOf(&p).MethodByName("Int64Method").Interface().(func(int64) int64)
+ if x := m64(123); x != 123 {
+ t.Errorf("Int64Method returned %d; want 123", x)
+ }
+ m32 := ValueOf(&p).MethodByName("Int32Method").Interface().(func(int32) int32)
+ if x := m32(456); x != 456 {
+ t.Errorf("Int32Method returned %d; want 456", x)
+ }
+}
+
+func TestVariadicMethodValue(t *testing.T) {
+ p := Point{3, 4}
+ points := []Point{{20, 21}, {22, 23}, {24, 25}}
+ want := int64(p.TotalDist(points[0], points[1], points[2]))
+
+ // Variadic method of type.
+ tfunc := TypeOf((func(Point, ...Point) int)(nil))
+ if tt := TypeOf(p).Method(4).Type; tt != tfunc {
+ t.Errorf("Variadic Method Type from TypeOf is %s; want %s", tt, tfunc)
+ }
+
+ // Curried method of value.
+ tfunc = TypeOf((func(...Point) int)(nil))
+ v := ValueOf(p).Method(4)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Variadic Method Type is %s; want %s", tt, tfunc)
+ }
+ i := ValueOf(v.Interface()).Call([]Value{ValueOf(points[0]), ValueOf(points[1]), ValueOf(points[2])})[0].Int()
+ if i != want {
+ t.Errorf("Variadic Method returned %d; want %d", i, want)
+ }
+ i = ValueOf(v.Interface()).CallSlice([]Value{ValueOf(points)})[0].Int()
+ if i != want {
+ t.Errorf("Variadic Method CallSlice returned %d; want %d", i, want)
+ }
+
+ f := v.Interface().(func(...Point) int)
+ i = int64(f(points[0], points[1], points[2]))
+ if i != want {
+ t.Errorf("Variadic Method Interface returned %d; want %d", i, want)
+ }
+ i = int64(f(points...))
+ if i != want {
+ t.Errorf("Variadic Method Interface Slice returned %d; want %d", i, want)
+ }
+}
+
+type DirectIfaceT struct {
+ p *int
+}
+
+func (d DirectIfaceT) M() int { return *d.p }
+
+func TestDirectIfaceMethod(t *testing.T) {
+ x := 42
+ v := DirectIfaceT{&x}
+ typ := TypeOf(v)
+ m, ok := typ.MethodByName("M")
+ if !ok {
+ t.Fatalf("cannot find method M")
+ }
+ in := []Value{ValueOf(v)}
+ out := m.Func.Call(in)
+ if got := out[0].Int(); got != 42 {
+ t.Errorf("Call with value receiver got %d, want 42", got)
+ }
+
+ pv := &v
+ typ = TypeOf(pv)
+ m, ok = typ.MethodByName("M")
+ if !ok {
+ t.Fatalf("cannot find method M")
+ }
+ in = []Value{ValueOf(pv)}
+ out = m.Func.Call(in)
+ if got := out[0].Int(); got != 42 {
+ t.Errorf("Call with pointer receiver got %d, want 42", got)
+ }
+}
+
+// Reflect version of $GOROOT/test/method5.go
+
+// Concrete types implementing M method.
+// Smaller than a word, word-sized, larger than a word.
+// Value and pointer receivers.
+
+type Tinter interface {
+ M(int, byte) (byte, int)
+}
+
+type Tsmallv byte
+
+func (v Tsmallv) M(x int, b byte) (byte, int) { return b, x + int(v) }
+
+type Tsmallp byte
+
+func (p *Tsmallp) M(x int, b byte) (byte, int) { return b, x + int(*p) }
+
+type Twordv uintptr
+
+func (v Twordv) M(x int, b byte) (byte, int) { return b, x + int(v) }
+
+type Twordp uintptr
+
+func (p *Twordp) M(x int, b byte) (byte, int) { return b, x + int(*p) }
+
+type Tbigv [2]uintptr
+
+func (v Tbigv) M(x int, b byte) (byte, int) { return b, x + int(v[0]) + int(v[1]) }
+
+type Tbigp [2]uintptr
+
+func (p *Tbigp) M(x int, b byte) (byte, int) { return b, x + int(p[0]) + int(p[1]) }
+
+type tinter interface {
+ m(int, byte) (byte, int)
+}
+
+// Embedding via pointer.
+
+type Tm1 struct {
+ Tm2
+}
+
+type Tm2 struct {
+ *Tm3
+}
+
+type Tm3 struct {
+ *Tm4
+}
+
+type Tm4 struct {
+}
+
+func (t4 Tm4) M(x int, b byte) (byte, int) { return b, x + 40 }
+
+func TestMethod5(t *testing.T) {
+ CheckF := func(name string, f func(int, byte) (byte, int), inc int) {
+ b, x := f(1000, 99)
+ if b != 99 || x != 1000+inc {
+ t.Errorf("%s(1000, 99) = %v, %v, want 99, %v", name, b, x, 1000+inc)
+ }
+ }
+
+ CheckV := func(name string, i Value, inc int) {
+ bx := i.Method(0).Call([]Value{ValueOf(1000), ValueOf(byte(99))})
+ b := bx[0].Interface()
+ x := bx[1].Interface()
+ if b != byte(99) || x != 1000+inc {
+ t.Errorf("direct %s.M(1000, 99) = %v, %v, want 99, %v", name, b, x, 1000+inc)
+ }
+
+ CheckF(name+".M", i.Method(0).Interface().(func(int, byte) (byte, int)), inc)
+ }
+
+ var TinterType = TypeOf(new(Tinter)).Elem()
+
+ CheckI := func(name string, i any, inc int) {
+ v := ValueOf(i)
+ CheckV(name, v, inc)
+ CheckV("(i="+name+")", v.Convert(TinterType), inc)
+ }
+
+ sv := Tsmallv(1)
+ CheckI("sv", sv, 1)
+ CheckI("&sv", &sv, 1)
+
+ sp := Tsmallp(2)
+ CheckI("&sp", &sp, 2)
+
+ wv := Twordv(3)
+ CheckI("wv", wv, 3)
+ CheckI("&wv", &wv, 3)
+
+ wp := Twordp(4)
+ CheckI("&wp", &wp, 4)
+
+ bv := Tbigv([2]uintptr{5, 6})
+ CheckI("bv", bv, 11)
+ CheckI("&bv", &bv, 11)
+
+ bp := Tbigp([2]uintptr{7, 8})
+ CheckI("&bp", &bp, 15)
+
+ t4 := Tm4{}
+ t3 := Tm3{&t4}
+ t2 := Tm2{&t3}
+ t1 := Tm1{t2}
+ CheckI("t4", t4, 40)
+ CheckI("&t4", &t4, 40)
+ CheckI("t3", t3, 40)
+ CheckI("&t3", &t3, 40)
+ CheckI("t2", t2, 40)
+ CheckI("&t2", &t2, 40)
+ CheckI("t1", t1, 40)
+ CheckI("&t1", &t1, 40)
+
+ var tnil Tinter
+ vnil := ValueOf(&tnil).Elem()
+ shouldPanic("Method", func() { vnil.Method(0) })
+}
+
+func TestInterfaceSet(t *testing.T) {
+ p := &Point{3, 4}
+
+ var s struct {
+ I any
+ P interface {
+ Dist(int) int
+ }
+ }
+ sv := ValueOf(&s).Elem()
+ sv.Field(0).Set(ValueOf(p))
+ if q := s.I.(*Point); q != p {
+ t.Errorf("i: have %p want %p", q, p)
+ }
+
+ pv := sv.Field(1)
+ pv.Set(ValueOf(p))
+ if q := s.P.(*Point); q != p {
+ t.Errorf("i: have %p want %p", q, p)
+ }
+
+ i := pv.Method(0).Call([]Value{ValueOf(10)})[0].Int()
+ if i != 250 {
+ t.Errorf("Interface Method returned %d; want 250", i)
+ }
+}
+
+type T1 struct {
+ a string
+ int
+}
+
+func TestAnonymousFields(t *testing.T) {
+ var field StructField
+ var ok bool
+ var t1 T1
+ type1 := TypeOf(t1)
+ if field, ok = type1.FieldByName("int"); !ok {
+ t.Fatal("no field 'int'")
+ }
+ if field.Index[0] != 1 {
+ t.Error("field index should be 1; is", field.Index)
+ }
+}
+
+type FTest struct {
+ s any
+ name string
+ index []int
+ value int
+}
+
+type D1 struct {
+ d int
+}
+type D2 struct {
+ d int
+}
+
+type S0 struct {
+ A, B, C int
+ D1
+ D2
+}
+
+type S1 struct {
+ B int
+ S0
+}
+
+type S2 struct {
+ A int
+ *S1
+}
+
+type S1x struct {
+ S1
+}
+
+type S1y struct {
+ S1
+}
+
+type S3 struct {
+ S1x
+ S2
+ D, E int
+ *S1y
+}
+
+type S4 struct {
+ *S4
+ A int
+}
+
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+ S6
+ S7
+ S8
+}
+
+type S6 struct {
+ X int
+}
+
+type S7 S6
+
+type S8 struct {
+ S9
+}
+
+type S9 struct {
+ X int
+ Y int
+}
+
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+ S11
+ S12
+ S13
+}
+
+type S11 struct {
+ S6
+}
+
+type S12 struct {
+ S6
+}
+
+type S13 struct {
+ S8
+}
+
+// The X in S15.S11.S6 and S16.S11.S6 annihilate.
+type S14 struct {
+ S15
+ S16
+}
+
+type S15 struct {
+ S11
+}
+
+type S16 struct {
+ S11
+}
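+
+// Illustrative sketch (not part of the original tests): when two embedded
+// fields promote a field with the same name at the same depth, the name is
+// ambiguous and FieldByName reports it as not found; a deeper X (such as the
+// one in S8.S9) is blocked by the shallower conflict. The function name is
+// hypothetical.
+func exampleAnnihilatedField() bool {
+ _, ok := TypeOf(S5{}).FieldByName("X")
+ return ok // false: S6.X and S7.X conflict at depth 1
+}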
+
+var fieldTests = []FTest{
+ {struct{}{}, "", nil, 0},
+ {struct{}{}, "Foo", nil, 0},
+ {S0{A: 'a'}, "A", []int{0}, 'a'},
+ {S0{}, "D", nil, 0},
+ {S1{S0: S0{A: 'a'}}, "A", []int{1, 0}, 'a'},
+ {S1{B: 'b'}, "B", []int{0}, 'b'},
+ {S1{}, "S0", []int{1}, 0},
+ {S1{S0: S0{C: 'c'}}, "C", []int{1, 2}, 'c'},
+ {S2{A: 'a'}, "A", []int{0}, 'a'},
+ {S2{}, "S1", []int{1}, 0},
+ {S2{S1: &S1{B: 'b'}}, "B", []int{1, 0}, 'b'},
+ {S2{S1: &S1{S0: S0{C: 'c'}}}, "C", []int{1, 1, 2}, 'c'},
+ {S2{}, "D", nil, 0},
+ {S3{}, "S1", nil, 0},
+ {S3{S2: S2{A: 'a'}}, "A", []int{1, 0}, 'a'},
+ {S3{}, "B", nil, 0},
+ {S3{D: 'd'}, "D", []int{2}, 0},
+ {S3{E: 'e'}, "E", []int{3}, 'e'},
+ {S4{A: 'a'}, "A", []int{1}, 'a'},
+ {S4{}, "B", nil, 0},
+ {S5{}, "X", nil, 0},
+ {S5{}, "Y", []int{2, 0, 1}, 0},
+ {S10{}, "X", nil, 0},
+ {S10{}, "Y", []int{2, 0, 0, 1}, 0},
+ {S14{}, "X", nil, 0},
+}
+
+func TestFieldByIndex(t *testing.T) {
+ for _, test := range fieldTests {
+ s := TypeOf(test.s)
+ f := s.FieldByIndex(test.index)
+ if f.Name != "" {
+ if test.index != nil {
+ if f.Name != test.name {
+ t.Errorf("%s.%s found; want %s", s.Name(), f.Name, test.name)
+ }
+ } else {
+ t.Errorf("%s.%s found", s.Name(), f.Name)
+ }
+ } else if len(test.index) > 0 {
+ t.Errorf("%s.%s not found", s.Name(), test.name)
+ }
+
+ if test.value != 0 {
+ v := ValueOf(test.s).FieldByIndex(test.index)
+ if v.IsValid() {
+ if x, ok := v.Interface().(int); ok {
+ if x != test.value {
+ t.Errorf("%s%v is %d; want %d", s.Name(), test.index, x, test.value)
+ }
+ } else {
+ t.Errorf("%s%v value not an int", s.Name(), test.index)
+ }
+ } else {
+ t.Errorf("%s%v value not found", s.Name(), test.index)
+ }
+ }
+ }
+}
+
+func TestFieldByName(t *testing.T) {
+ for _, test := range fieldTests {
+ s := TypeOf(test.s)
+ f, found := s.FieldByName(test.name)
+ if found {
+ if test.index != nil {
+ // Verify field depth and index.
+ if len(f.Index) != len(test.index) {
+ t.Errorf("%s.%s depth %d; want %d: %v vs %v", s.Name(), test.name, len(f.Index), len(test.index), f.Index, test.index)
+ } else {
+ for i, x := range f.Index {
+ if x != test.index[i] {
+ t.Errorf("%s.%s.Index[%d] is %d; want %d", s.Name(), test.name, i, x, test.index[i])
+ }
+ }
+ }
+ } else {
+ t.Errorf("%s.%s found", s.Name(), f.Name)
+ }
+ } else if len(test.index) > 0 {
+ t.Errorf("%s.%s not found", s.Name(), test.name)
+ }
+
+ if test.value != 0 {
+ v := ValueOf(test.s).FieldByName(test.name)
+ if v.IsValid() {
+ if x, ok := v.Interface().(int); ok {
+ if x != test.value {
+ t.Errorf("%s.%s is %d; want %d", s.Name(), test.name, x, test.value)
+ }
+ } else {
+ t.Errorf("%s.%s value not an int", s.Name(), test.name)
+ }
+ } else {
+ t.Errorf("%s.%s value not found", s.Name(), test.name)
+ }
+ }
+ }
+}
+
+func TestImportPath(t *testing.T) {
+ tests := []struct {
+ t Type
+ path string
+ }{
+ {TypeOf(&base64.Encoding{}).Elem(), "encoding/base64"},
+ {TypeOf(int(0)), ""},
+ {TypeOf(int8(0)), ""},
+ {TypeOf(int16(0)), ""},
+ {TypeOf(int32(0)), ""},
+ {TypeOf(int64(0)), ""},
+ {TypeOf(uint(0)), ""},
+ {TypeOf(uint8(0)), ""},
+ {TypeOf(uint16(0)), ""},
+ {TypeOf(uint32(0)), ""},
+ {TypeOf(uint64(0)), ""},
+ {TypeOf(uintptr(0)), ""},
+ {TypeOf(float32(0)), ""},
+ {TypeOf(float64(0)), ""},
+ {TypeOf(complex64(0)), ""},
+ {TypeOf(complex128(0)), ""},
+ {TypeOf(byte(0)), ""},
+ {TypeOf(rune(0)), ""},
+ {TypeOf([]byte(nil)), ""},
+ {TypeOf([]rune(nil)), ""},
+ {TypeOf(string("")), ""},
+ {TypeOf((*any)(nil)).Elem(), ""},
+ {TypeOf((*byte)(nil)), ""},
+ {TypeOf((*rune)(nil)), ""},
+ {TypeOf((*int64)(nil)), ""},
+ {TypeOf(map[string]int{}), ""},
+ {TypeOf((*error)(nil)).Elem(), ""},
+ {TypeOf((*Point)(nil)), ""},
+ {TypeOf((*Point)(nil)).Elem(), "reflect_test"},
+ }
+ for _, test := range tests {
+ if path := test.t.PkgPath(); path != test.path {
+ t.Errorf("%v.PkgPath() = %q, want %q", test.t, path, test.path)
+ }
+ }
+}
+
+func TestFieldPkgPath(t *testing.T) {
+ type x int
+ typ := TypeOf(struct {
+ Exported string
+ unexported string
+ OtherPkgFields
+ int // issue 21702
+ *x // issue 21122
+ }{})
+
+ type pkgpathTest struct {
+ index []int
+ pkgPath string
+ embedded bool
+ exported bool
+ }
+
+ checkPkgPath := func(name string, s []pkgpathTest) {
+ for _, test := range s {
+ f := typ.FieldByIndex(test.index)
+ if got, want := f.PkgPath, test.pkgPath; got != want {
+ t.Errorf("%s: Field(%d).PkgPath = %q, want %q", name, test.index, got, want)
+ }
+ if got, want := f.Anonymous, test.embedded; got != want {
+ t.Errorf("%s: Field(%d).Anonymous = %v, want %v", name, test.index, got, want)
+ }
+ if got, want := f.IsExported(), test.exported; got != want {
+ t.Errorf("%s: Field(%d).IsExported = %v, want %v", name, test.index, got, want)
+ }
+ }
+ }
+
+ checkPkgPath("testStruct", []pkgpathTest{
+ {[]int{0}, "", false, true}, // Exported
+ {[]int{1}, "reflect_test", false, false}, // unexported
+ {[]int{2}, "", true, true}, // OtherPkgFields
+ {[]int{2, 0}, "", false, true}, // OtherExported
+ {[]int{2, 1}, "reflect", false, false}, // otherUnexported
+ {[]int{3}, "reflect_test", true, false}, // int
+ {[]int{4}, "reflect_test", true, false}, // *x
+ })
+
+ type localOtherPkgFields OtherPkgFields
+ typ = TypeOf(localOtherPkgFields{})
+ checkPkgPath("localOtherPkgFields", []pkgpathTest{
+ {[]int{0}, "", false, true}, // OtherExported
+ {[]int{1}, "reflect", false, false}, // otherUnexported
+ })
+}
+
+func TestMethodPkgPath(t *testing.T) {
+ type I interface {
+ x()
+ X()
+ }
+ typ := TypeOf((*interface {
+ I
+ y()
+ Y()
+ })(nil)).Elem()
+
+ tests := []struct {
+ name string
+ pkgPath string
+ exported bool
+ }{
+ {"X", "", true},
+ {"Y", "", true},
+ {"x", "reflect_test", false},
+ {"y", "reflect_test", false},
+ }
+
+ for _, test := range tests {
+ m, _ := typ.MethodByName(test.name)
+ if got, want := m.PkgPath, test.pkgPath; got != want {
+ t.Errorf("MethodByName(%q).PkgPath = %q, want %q", test.name, got, want)
+ }
+ if got, want := m.IsExported(), test.exported; got != want {
+ t.Errorf("MethodByName(%q).IsExported = %v, want %v", test.name, got, want)
+ }
+ }
+}
+
+func TestVariadicType(t *testing.T) {
+ // Test example from Type documentation.
+ var f func(x int, y ...float64)
+ typ := TypeOf(f)
+ if typ.NumIn() == 2 && typ.In(0) == TypeOf(int(0)) {
+ sl := typ.In(1)
+ if sl.Kind() == Slice {
+ if sl.Elem() == TypeOf(0.0) {
+ // ok
+ return
+ }
+ }
+ }
+
+ // Failed
+ t.Errorf("want NumIn() = 2, In(0) = int, In(1) = []float64")
+ s := fmt.Sprintf("have NumIn() = %d", typ.NumIn())
+ for i := 0; i < typ.NumIn(); i++ {
+ s += fmt.Sprintf(", In(%d) = %s", i, typ.In(i))
+ }
+ t.Error(s)
+}
+
+type inner struct {
+ x int
+}
+
+type outer struct {
+ y int
+ inner
+}
+
+func (*inner) M() {}
+func (*outer) M() {}
+
+func TestNestedMethods(t *testing.T) {
+ typ := TypeOf((*outer)(nil))
+ if typ.NumMethod() != 1 || typ.Method(0).Func.UnsafePointer() != ValueOf((*outer).M).UnsafePointer() {
+ t.Errorf("Wrong method table for outer: (M=%p)", (*outer).M)
+ for i := 0; i < typ.NumMethod(); i++ {
+ m := typ.Method(i)
+ t.Errorf("\t%d: %s %p\n", i, m.Name, m.Func.UnsafePointer())
+ }
+ }
+}
+
+type unexp struct{}
+
+func (*unexp) f() (int32, int8) { return 7, 7 }
+func (*unexp) g() (int64, int8) { return 8, 8 }
+
+type unexpI interface {
+ f() (int32, int8)
+}
+
+var unexpi unexpI = new(unexp)
+
+func TestUnexportedMethods(t *testing.T) {
+ typ := TypeOf(unexpi)
+
+ if got := typ.NumMethod(); got != 0 {
+ t.Errorf("NumMethod=%d, want 0 satisfied methods", got)
+ }
+}
+
+type InnerInt struct {
+ X int
+}
+
+type OuterInt struct {
+ Y int
+ InnerInt
+}
+
+func (i *InnerInt) M() int {
+ return i.X
+}
+
+func TestEmbeddedMethods(t *testing.T) {
+ typ := TypeOf((*OuterInt)(nil))
+ if typ.NumMethod() != 1 || typ.Method(0).Func.UnsafePointer() != ValueOf((*OuterInt).M).UnsafePointer() {
+ t.Errorf("Wrong method table for OuterInt: (m=%p)", (*OuterInt).M)
+ for i := 0; i < typ.NumMethod(); i++ {
+ m := typ.Method(i)
+ t.Errorf("\t%d: %s %p\n", i, m.Name, m.Func.UnsafePointer())
+ }
+ }
+
+ i := &InnerInt{3}
+ if v := ValueOf(i).Method(0).Call(nil)[0].Int(); v != 3 {
+ t.Errorf("i.M() = %d, want 3", v)
+ }
+
+ o := &OuterInt{1, InnerInt{2}}
+ if v := ValueOf(o).Method(0).Call(nil)[0].Int(); v != 2 {
+ t.Errorf("i.M() = %d, want 2", v)
+ }
+
+ f := (*OuterInt).M
+ if v := f(o); v != 2 {
+ t.Errorf("f(o) = %d, want 2", v)
+ }
+}
+
+type FuncDDD func(...any) error
+
+func (f FuncDDD) M() {}
+
+func TestNumMethodOnDDD(t *testing.T) {
+ rv := ValueOf((FuncDDD)(nil))
+ if n := rv.NumMethod(); n != 1 {
+ t.Fatalf("NumMethod()=%d, want 1", n)
+ }
+}
+
+func TestPtrTo(t *testing.T) {
+ // This block of code means that the ptrToThis field of the
+ // reflect data for *unsafe.Pointer is non zero, see
+ // https://golang.org/issue/19003
+ var x unsafe.Pointer
+ var y = &x
+ var z = &y
+
+ var i int
+
+ typ := TypeOf(z)
+ for i = 0; i < 100; i++ {
+ typ = PointerTo(typ)
+ }
+ for i = 0; i < 100; i++ {
+ typ = typ.Elem()
+ }
+ if typ != TypeOf(z) {
+ t.Errorf("after 100 PointerTo and Elem, have %s, want %s", typ, TypeOf(z))
+ }
+}
+
+func TestPtrToGC(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ pt := PointerTo(tt)
+ const n = 100
+ var x []any
+ for i := 0; i < n; i++ {
+ v := New(pt)
+ p := new(*uintptr)
+ *p = new(uintptr)
+ **p = uintptr(i)
+ v.Elem().Set(ValueOf(p).Convert(pt))
+ x = append(x, v.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ k := ValueOf(xi).Elem().Elem().Elem().Interface().(uintptr)
+ if k != uintptr(i) {
+ t.Errorf("lost x[%d] = %d, want %d", i, k, i)
+ }
+ }
+}
+
+func TestAddr(t *testing.T) {
+ var p struct {
+ X, Y int
+ }
+
+ v := ValueOf(&p)
+ v = v.Elem()
+ v = v.Addr()
+ v = v.Elem()
+ v = v.Field(0)
+ v.SetInt(2)
+ if p.X != 2 {
+ t.Errorf("Addr.Elem.Set failed to set value")
+ }
+
+ // Again but take address of the ValueOf value.
+ // Exercises generation of PtrTypes not present in the binary.
+ q := &p
+ v = ValueOf(&q).Elem()
+ v = v.Addr()
+ v = v.Elem()
+ v = v.Elem()
+ v = v.Addr()
+ v = v.Elem()
+ v = v.Field(0)
+ v.SetInt(3)
+ if p.X != 3 {
+ t.Errorf("Addr.Elem.Set failed to set value")
+ }
+
+ // Starting without a pointer we should get the changed value
+ // in the interface.
+ qq := p
+ v = ValueOf(&qq).Elem()
+ v0 := v
+ v = v.Addr()
+ v = v.Elem()
+ v = v.Field(0)
+ v.SetInt(4)
+ if p.X != 3 { // should be unchanged from last time
+ t.Errorf("somehow value Set changed original p")
+ }
+ p = v0.Interface().(struct {
+ X, Y int
+ })
+ if p.X != 4 {
+ t.Errorf("Addr.Elem.Set valued to set value in top value")
+ }
+
+ // Verify that taking the address of a type gives us a pointer
+ // which we can convert back using the usual interface
+ // notation.
+ var s struct {
+ B *bool
+ }
+ ps := ValueOf(&s).Elem().Field(0).Addr().Interface()
+ *(ps.(**bool)) = new(bool)
+ if s.B == nil {
+ t.Errorf("Addr.Interface direct assignment failed")
+ }
+}
+
+func noAlloc(t *testing.T, n int, f func(int)) {
+ if testing.Short() {
+ t.Skip("skipping malloc count in short mode")
+ }
+ if runtime.GOMAXPROCS(0) > 1 {
+ t.Skip("skipping; GOMAXPROCS>1")
+ }
+ i := -1
+ allocs := testing.AllocsPerRun(n, func() {
+ f(i)
+ i++
+ })
+ if allocs > 0 {
+ t.Errorf("%d iterations: got %v mallocs, want 0", n, allocs)
+ }
+}
+
+func TestAllocations(t *testing.T) {
+ noAlloc(t, 100, func(j int) {
+ var i any
+ var v Value
+
+ // We can uncomment this when compiler escape analysis
+ // is good enough to see that the integer assigned to i
+ // does not escape and therefore need not be allocated.
+ //
+ // i = 42 + j
+ // v = ValueOf(i)
+ // if int(v.Int()) != 42+j {
+ // panic("wrong int")
+ // }
+
+ i = func(j int) int { return j }
+ v = ValueOf(i)
+ if v.Interface().(func(int) int)(j) != j {
+ panic("wrong result")
+ }
+ })
+}
+
+func TestSmallNegativeInt(t *testing.T) {
+ i := int16(-1)
+ v := ValueOf(i)
+ if v.Int() != -1 {
+ t.Errorf("int16(-1).Int() returned %v", v.Int())
+ }
+}
+
+func TestIndex(t *testing.T) {
+ xs := []byte{1, 2, 3, 4, 5, 6, 7, 8}
+ v := ValueOf(xs).Index(3).Interface().(byte)
+ if v != xs[3] {
+ t.Errorf("xs.Index(3) = %v; expected %v", v, xs[3])
+ }
+ xa := [8]byte{10, 20, 30, 40, 50, 60, 70, 80}
+ v = ValueOf(xa).Index(2).Interface().(byte)
+ if v != xa[2] {
+ t.Errorf("xa.Index(2) = %v; expected %v", v, xa[2])
+ }
+ s := "0123456789"
+ v = ValueOf(s).Index(3).Interface().(byte)
+ if v != s[3] {
+ t.Errorf("s.Index(3) = %v; expected %v", v, s[3])
+ }
+}
+
+func TestSlice(t *testing.T) {
+ xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
+ v := ValueOf(xs).Slice(3, 5).Interface().([]int)
+ if len(v) != 2 {
+ t.Errorf("len(xs.Slice(3, 5)) = %d", len(v))
+ }
+ if cap(v) != 5 {
+ t.Errorf("cap(xs.Slice(3, 5)) = %d", cap(v))
+ }
+ if !DeepEqual(v[0:5], xs[3:]) {
+ t.Errorf("xs.Slice(3, 5)[0:5] = %v", v[0:5])
+ }
+ xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80}
+ v = ValueOf(&xa).Elem().Slice(2, 5).Interface().([]int)
+ if len(v) != 3 {
+ t.Errorf("len(xa.Slice(2, 5)) = %d", len(v))
+ }
+ if cap(v) != 6 {
+ t.Errorf("cap(xa.Slice(2, 5)) = %d", cap(v))
+ }
+ if !DeepEqual(v[0:6], xa[2:]) {
+ t.Errorf("xs.Slice(2, 5)[0:6] = %v", v[0:6])
+ }
+ s := "0123456789"
+ vs := ValueOf(s).Slice(3, 5).Interface().(string)
+ if vs != s[3:5] {
+ t.Errorf("s.Slice(3, 5) = %q; expected %q", vs, s[3:5])
+ }
+
+ rv := ValueOf(&xs).Elem()
+ rv = rv.Slice(3, 4)
+ ptr2 := rv.UnsafePointer()
+ rv = rv.Slice(5, 5)
+ ptr3 := rv.UnsafePointer()
+ if ptr3 != ptr2 {
+ t.Errorf("xs.Slice(3,4).Slice3(5,5).UnsafePointer() = %p, want %p", ptr3, ptr2)
+ }
+}
+
+func TestSlice3(t *testing.T) {
+ xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
+ v := ValueOf(xs).Slice3(3, 5, 7).Interface().([]int)
+ if len(v) != 2 {
+ t.Errorf("len(xs.Slice3(3, 5, 7)) = %d", len(v))
+ }
+ if cap(v) != 4 {
+ t.Errorf("cap(xs.Slice3(3, 5, 7)) = %d", cap(v))
+ }
+ if !DeepEqual(v[0:4], xs[3:7:7]) {
+ t.Errorf("xs.Slice3(3, 5, 7)[0:4] = %v", v[0:4])
+ }
+ rv := ValueOf(&xs).Elem()
+ shouldPanic("Slice3", func() { rv.Slice3(1, 2, 1) })
+ shouldPanic("Slice3", func() { rv.Slice3(1, 1, 11) })
+ shouldPanic("Slice3", func() { rv.Slice3(2, 2, 1) })
+
+ xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80}
+ v = ValueOf(&xa).Elem().Slice3(2, 5, 6).Interface().([]int)
+ if len(v) != 3 {
+ t.Errorf("len(xa.Slice(2, 5, 6)) = %d", len(v))
+ }
+ if cap(v) != 4 {
+ t.Errorf("cap(xa.Slice(2, 5, 6)) = %d", cap(v))
+ }
+ if !DeepEqual(v[0:4], xa[2:6:6]) {
+ t.Errorf("xs.Slice(2, 5, 6)[0:4] = %v", v[0:4])
+ }
+ rv = ValueOf(&xa).Elem()
+ shouldPanic("Slice3", func() { rv.Slice3(1, 2, 1) })
+ shouldPanic("Slice3", func() { rv.Slice3(1, 1, 11) })
+ shouldPanic("Slice3", func() { rv.Slice3(2, 2, 1) })
+
+ s := "hello world"
+ rv = ValueOf(&s).Elem()
+ shouldPanic("Slice3", func() { rv.Slice3(1, 2, 3) })
+
+ rv = ValueOf(&xs).Elem()
+ rv = rv.Slice3(3, 5, 7)
+ ptr2 := rv.UnsafePointer()
+ rv = rv.Slice3(4, 4, 4)
+ ptr3 := rv.UnsafePointer()
+ if ptr3 != ptr2 {
+ t.Errorf("xs.Slice3(3,5,7).Slice3(4,4,4).UnsafePointer() = %p, want %p", ptr3, ptr2)
+ }
+}
+
+func TestSetLenCap(t *testing.T) {
+ xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
+ xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80}
+
+ vs := ValueOf(&xs).Elem()
+ shouldPanic("SetLen", func() { vs.SetLen(10) })
+ shouldPanic("SetCap", func() { vs.SetCap(10) })
+ shouldPanic("SetLen", func() { vs.SetLen(-1) })
+ shouldPanic("SetCap", func() { vs.SetCap(-1) })
+ shouldPanic("SetCap", func() { vs.SetCap(6) }) // smaller than len
+ vs.SetLen(5)
+ if len(xs) != 5 || cap(xs) != 8 {
+ t.Errorf("after SetLen(5), len, cap = %d, %d, want 5, 8", len(xs), cap(xs))
+ }
+ vs.SetCap(6)
+ if len(xs) != 5 || cap(xs) != 6 {
+ t.Errorf("after SetCap(6), len, cap = %d, %d, want 5, 6", len(xs), cap(xs))
+ }
+ vs.SetCap(5)
+ if len(xs) != 5 || cap(xs) != 5 {
+ t.Errorf("after SetCap(5), len, cap = %d, %d, want 5, 5", len(xs), cap(xs))
+ }
+ shouldPanic("SetCap", func() { vs.SetCap(4) }) // smaller than len
+ shouldPanic("SetLen", func() { vs.SetLen(6) }) // bigger than cap
+
+ va := ValueOf(&xa).Elem()
+ shouldPanic("SetLen", func() { va.SetLen(8) })
+ shouldPanic("SetCap", func() { va.SetCap(8) })
+}
+
+func TestVariadic(t *testing.T) {
+ var b strings.Builder
+ V := ValueOf
+
+ b.Reset()
+ V(fmt.Fprintf).Call([]Value{V(&b), V("%s, %d world"), V("hello"), V(42)})
+ if b.String() != "hello, 42 world" {
+ t.Errorf("after Fprintf Call: %q != %q", b.String(), "hello 42 world")
+ }
+
+ b.Reset()
+ V(fmt.Fprintf).CallSlice([]Value{V(&b), V("%s, %d world"), V([]any{"hello", 42})})
+ if b.String() != "hello, 42 world" {
+ t.Errorf("after Fprintf CallSlice: %q != %q", b.String(), "hello 42 world")
+ }
+}
+
+func TestFuncArg(t *testing.T) {
+ f1 := func(i int, f func(int) int) int { return f(i) }
+ f2 := func(i int) int { return i + 1 }
+ r := ValueOf(f1).Call([]Value{ValueOf(100), ValueOf(f2)})
+ if r[0].Int() != 101 {
+ t.Errorf("function returned %d, want 101", r[0].Int())
+ }
+}
+
+func TestStructArg(t *testing.T) {
+ type padded struct {
+ B string
+ C int32
+ }
+ var (
+ gotA padded
+ gotB uint32
+ wantA = padded{"3", 4}
+ wantB = uint32(5)
+ )
+ f := func(a padded, b uint32) {
+ gotA, gotB = a, b
+ }
+ ValueOf(f).Call([]Value{ValueOf(wantA), ValueOf(wantB)})
+ if gotA != wantA || gotB != wantB {
+ t.Errorf("function called with (%v, %v), want (%v, %v)", gotA, gotB, wantA, wantB)
+ }
+}
+
+var tagGetTests = []struct {
+ Tag StructTag
+ Key string
+ Value string
+}{
+ {`protobuf:"PB(1,2)"`, `protobuf`, `PB(1,2)`},
+ {`protobuf:"PB(1,2)"`, `foo`, ``},
+ {`protobuf:"PB(1,2)"`, `rotobuf`, ``},
+ {`protobuf:"PB(1,2)" json:"name"`, `json`, `name`},
+ {`protobuf:"PB(1,2)" json:"name"`, `protobuf`, `PB(1,2)`},
+ {`k0:"values contain spaces" k1:"and\ttabs"`, "k0", "values contain spaces"},
+ {`k0:"values contain spaces" k1:"and\ttabs"`, "k1", "and\ttabs"},
+}
+
+func TestTagGet(t *testing.T) {
+ for _, tt := range tagGetTests {
+ if v := tt.Tag.Get(tt.Key); v != tt.Value {
+ t.Errorf("StructTag(%#q).Get(%#q) = %#q, want %#q", tt.Tag, tt.Key, v, tt.Value)
+ }
+ }
+}
+
+func TestBytes(t *testing.T) {
+ shouldPanic("on int Value", func() { ValueOf(0).Bytes() })
+ shouldPanic("of non-byte slice", func() { ValueOf([]string{}).Bytes() })
+
+ type S []byte
+ x := S{1, 2, 3, 4}
+ y := ValueOf(x).Bytes()
+ if !bytes.Equal(x, y) {
+ t.Fatalf("ValueOf(%v).Bytes() = %v", x, y)
+ }
+ if &x[0] != &y[0] {
+ t.Errorf("ValueOf(%p).Bytes() = %p", &x[0], &y[0])
+ }
+
+ type A [4]byte
+ a := A{1, 2, 3, 4}
+ shouldPanic("unaddressable", func() { ValueOf(a).Bytes() })
+ shouldPanic("on ptr Value", func() { ValueOf(&a).Bytes() })
+ b := ValueOf(&a).Elem().Bytes()
+ if !bytes.Equal(a[:], b) {
+ t.Fatalf("ValueOf(%v).Bytes() = %v", a, b)
+ }
+ if &a[0] != &b[0] {
+ t.Errorf("ValueOf(%p).Bytes() = %p", &a[0], &b[0])
+ }
+
+ // Per issue #24746, it was decided that Bytes can be called on byte slices
+ // that cannot normally be converted to []byte under Go language semantics.
+ type B byte
+ type SB []B
+ type AB [4]B
+ ValueOf([]B{1, 2, 3, 4}).Bytes() // should not panic
+ ValueOf(new([4]B)).Elem().Bytes() // should not panic
+ ValueOf(SB{1, 2, 3, 4}).Bytes() // should not panic
+ ValueOf(new(AB)).Elem().Bytes() // should not panic
+}
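+
+// Illustrative sketch (not part of the original test): per issue #24746,
+// Bytes also accepts a slice whose element type merely has byte as its
+// underlying type, and the returned []byte shares the backing array with the
+// original slice, so writes through one are visible through the other. The
+// function name is hypothetical.
+func exampleBytesAliasing() bool {
+ type B byte
+ s := []B{1, 2, 3, 4}
+ b := ValueOf(s).Bytes() // []byte view of the same backing array
+ b[0] = 42
+ return s[0] == 42 // true
+}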
+
+func TestSetBytes(t *testing.T) {
+ type B []byte
+ var x B
+ y := []byte{1, 2, 3, 4}
+ ValueOf(&x).Elem().SetBytes(y)
+ if !bytes.Equal(x, y) {
+ t.Fatalf("ValueOf(%v).Bytes() = %v", x, y)
+ }
+ if &x[0] != &y[0] {
+ t.Errorf("ValueOf(%p).Bytes() = %p", &x[0], &y[0])
+ }
+}
+
+type Private struct {
+ x int
+ y **int
+ Z int
+}
+
+func (p *Private) m() {
+}
+
+type private struct {
+ Z int
+ z int
+ S string
+ A [1]Private
+ T []Private
+}
+
+func (p *private) P() {
+}
+
+type Public struct {
+ X int
+ Y **int
+ private
+}
+
+func (p *Public) M() {
+}
+
+func TestUnexported(t *testing.T) {
+ var pub Public
+ pub.S = "S"
+ pub.T = pub.A[:]
+ v := ValueOf(&pub)
+ isValid(v.Elem().Field(0))
+ isValid(v.Elem().Field(1))
+ isValid(v.Elem().Field(2))
+ isValid(v.Elem().FieldByName("X"))
+ isValid(v.Elem().FieldByName("Y"))
+ isValid(v.Elem().FieldByName("Z"))
+ isValid(v.Type().Method(0).Func)
+ m, _ := v.Type().MethodByName("M")
+ isValid(m.Func)
+ m, _ = v.Type().MethodByName("P")
+ isValid(m.Func)
+ isNonNil(v.Elem().Field(0).Interface())
+ isNonNil(v.Elem().Field(1).Interface())
+ isNonNil(v.Elem().Field(2).Field(2).Index(0))
+ isNonNil(v.Elem().FieldByName("X").Interface())
+ isNonNil(v.Elem().FieldByName("Y").Interface())
+ isNonNil(v.Elem().FieldByName("Z").Interface())
+ isNonNil(v.Elem().FieldByName("S").Index(0).Interface())
+ isNonNil(v.Type().Method(0).Func.Interface())
+ m, _ = v.Type().MethodByName("P")
+ isNonNil(m.Func.Interface())
+
+ var priv Private
+ v = ValueOf(&priv)
+ isValid(v.Elem().Field(0))
+ isValid(v.Elem().Field(1))
+ isValid(v.Elem().FieldByName("x"))
+ isValid(v.Elem().FieldByName("y"))
+ shouldPanic("Interface", func() { v.Elem().Field(0).Interface() })
+ shouldPanic("Interface", func() { v.Elem().Field(1).Interface() })
+ shouldPanic("Interface", func() { v.Elem().FieldByName("x").Interface() })
+ shouldPanic("Interface", func() { v.Elem().FieldByName("y").Interface() })
+ shouldPanic("Method", func() { v.Type().Method(0) })
+}
+
+func TestSetPanic(t *testing.T) {
+ ok := func(f func()) { f() }
+ bad := func(f func()) { shouldPanic("Set", f) }
+ clear := func(v Value) { v.Set(Zero(v.Type())) }
+
+ type t0 struct {
+ W int
+ }
+
+ type t1 struct {
+ Y int
+ t0
+ }
+
+ type T2 struct {
+ Z int
+ namedT0 t0
+ }
+
+ type T struct {
+ X int
+ t1
+ T2
+ NamedT1 t1
+ NamedT2 T2
+ namedT1 t1
+ namedT2 T2
+ }
+
+ // not addressable
+ v := ValueOf(T{})
+ bad(func() { clear(v.Field(0)) }) // .X
+ bad(func() { clear(v.Field(1)) }) // .t1
+ bad(func() { clear(v.Field(1).Field(0)) }) // .t1.Y
+ bad(func() { clear(v.Field(1).Field(1)) }) // .t1.t0
+ bad(func() { clear(v.Field(1).Field(1).Field(0)) }) // .t1.t0.W
+ bad(func() { clear(v.Field(2)) }) // .T2
+ bad(func() { clear(v.Field(2).Field(0)) }) // .T2.Z
+ bad(func() { clear(v.Field(2).Field(1)) }) // .T2.namedT0
+ bad(func() { clear(v.Field(2).Field(1).Field(0)) }) // .T2.namedT0.W
+ bad(func() { clear(v.Field(3)) }) // .NamedT1
+ bad(func() { clear(v.Field(3).Field(0)) }) // .NamedT1.Y
+ bad(func() { clear(v.Field(3).Field(1)) }) // .NamedT1.t0
+ bad(func() { clear(v.Field(3).Field(1).Field(0)) }) // .NamedT1.t0.W
+ bad(func() { clear(v.Field(4)) }) // .NamedT2
+ bad(func() { clear(v.Field(4).Field(0)) }) // .NamedT2.Z
+ bad(func() { clear(v.Field(4).Field(1)) }) // .NamedT2.namedT0
+ bad(func() { clear(v.Field(4).Field(1).Field(0)) }) // .NamedT2.namedT0.W
+ bad(func() { clear(v.Field(5)) }) // .namedT1
+ bad(func() { clear(v.Field(5).Field(0)) }) // .namedT1.Y
+ bad(func() { clear(v.Field(5).Field(1)) }) // .namedT1.t0
+ bad(func() { clear(v.Field(5).Field(1).Field(0)) }) // .namedT1.t0.W
+ bad(func() { clear(v.Field(6)) }) // .namedT2
+ bad(func() { clear(v.Field(6).Field(0)) }) // .namedT2.Z
+ bad(func() { clear(v.Field(6).Field(1)) }) // .namedT2.namedT0
+ bad(func() { clear(v.Field(6).Field(1).Field(0)) }) // .namedT2.namedT0.W
+
+ // addressable
+ v = ValueOf(&T{}).Elem()
+ ok(func() { clear(v.Field(0)) }) // .X
+ bad(func() { clear(v.Field(1)) }) // .t1
+ ok(func() { clear(v.Field(1).Field(0)) }) // .t1.Y
+ bad(func() { clear(v.Field(1).Field(1)) }) // .t1.t0
+ ok(func() { clear(v.Field(1).Field(1).Field(0)) }) // .t1.t0.W
+ ok(func() { clear(v.Field(2)) }) // .T2
+ ok(func() { clear(v.Field(2).Field(0)) }) // .T2.Z
+ bad(func() { clear(v.Field(2).Field(1)) }) // .T2.namedT0
+ bad(func() { clear(v.Field(2).Field(1).Field(0)) }) // .T2.namedT0.W
+ ok(func() { clear(v.Field(3)) }) // .NamedT1
+ ok(func() { clear(v.Field(3).Field(0)) }) // .NamedT1.Y
+ bad(func() { clear(v.Field(3).Field(1)) }) // .NamedT1.t0
+ ok(func() { clear(v.Field(3).Field(1).Field(0)) }) // .NamedT1.t0.W
+ ok(func() { clear(v.Field(4)) }) // .NamedT2
+ ok(func() { clear(v.Field(4).Field(0)) }) // .NamedT2.Z
+ bad(func() { clear(v.Field(4).Field(1)) }) // .NamedT2.namedT0
+ bad(func() { clear(v.Field(4).Field(1).Field(0)) }) // .NamedT2.namedT0.W
+ bad(func() { clear(v.Field(5)) }) // .namedT1
+ bad(func() { clear(v.Field(5).Field(0)) }) // .namedT1.Y
+ bad(func() { clear(v.Field(5).Field(1)) }) // .namedT1.t0
+ bad(func() { clear(v.Field(5).Field(1).Field(0)) }) // .namedT1.t0.W
+ bad(func() { clear(v.Field(6)) }) // .namedT2
+ bad(func() { clear(v.Field(6).Field(0)) }) // .namedT2.Z
+ bad(func() { clear(v.Field(6).Field(1)) }) // .namedT2.namedT0
+ bad(func() { clear(v.Field(6).Field(1).Field(0)) }) // .namedT2.namedT0.W
+}
+
+type timp int
+
+func (t timp) W() {}
+func (t timp) Y() {}
+func (t timp) w() {}
+func (t timp) y() {}
+
+func TestCallPanic(t *testing.T) {
+ type t0 interface {
+ W()
+ w()
+ }
+ type T1 interface {
+ Y()
+ y()
+ }
+ type T2 struct {
+ T1
+ t0
+ }
+ type T struct {
+ t0 // 0
+ T1 // 1
+
+ NamedT0 t0 // 2
+ NamedT1 T1 // 3
+ NamedT2 T2 // 4
+
+ namedT0 t0 // 5
+ namedT1 T1 // 6
+ namedT2 T2 // 7
+ }
+ ok := func(f func()) { f() }
+ badCall := func(f func()) { shouldPanic("Call", f) }
+ badMethod := func(f func()) { shouldPanic("Method", f) }
+ call := func(v Value) { v.Call(nil) }
+
+ i := timp(0)
+ v := ValueOf(T{i, i, i, i, T2{i, i}, i, i, T2{i, i}})
+ badCall(func() { call(v.Field(0).Method(0)) }) // .t0.W
+ badCall(func() { call(v.Field(0).Elem().Method(0)) }) // .t0.W
+ badCall(func() { call(v.Field(0).Method(1)) }) // .t0.w
+ badMethod(func() { call(v.Field(0).Elem().Method(2)) }) // .t0.w
+ ok(func() { call(v.Field(1).Method(0)) }) // .T1.Y
+ ok(func() { call(v.Field(1).Elem().Method(0)) }) // .T1.Y
+ badCall(func() { call(v.Field(1).Method(1)) }) // .T1.y
+ badMethod(func() { call(v.Field(1).Elem().Method(2)) }) // .T1.y
+
+ ok(func() { call(v.Field(2).Method(0)) }) // .NamedT0.W
+ ok(func() { call(v.Field(2).Elem().Method(0)) }) // .NamedT0.W
+ badCall(func() { call(v.Field(2).Method(1)) }) // .NamedT0.w
+ badMethod(func() { call(v.Field(2).Elem().Method(2)) }) // .NamedT0.w
+
+ ok(func() { call(v.Field(3).Method(0)) }) // .NamedT1.Y
+ ok(func() { call(v.Field(3).Elem().Method(0)) }) // .NamedT1.Y
+ badCall(func() { call(v.Field(3).Method(1)) }) // .NamedT1.y
+ badMethod(func() { call(v.Field(3).Elem().Method(3)) }) // .NamedT1.y
+
+ ok(func() { call(v.Field(4).Field(0).Method(0)) }) // .NamedT2.T1.Y
+ ok(func() { call(v.Field(4).Field(0).Elem().Method(0)) }) // .NamedT2.T1.W
+ badCall(func() { call(v.Field(4).Field(1).Method(0)) }) // .NamedT2.t0.W
+ badCall(func() { call(v.Field(4).Field(1).Elem().Method(0)) }) // .NamedT2.t0.W
+
+ badCall(func() { call(v.Field(5).Method(0)) }) // .namedT0.W
+ badCall(func() { call(v.Field(5).Elem().Method(0)) }) // .namedT0.W
+ badCall(func() { call(v.Field(5).Method(1)) }) // .namedT0.w
+ badMethod(func() { call(v.Field(5).Elem().Method(2)) }) // .namedT0.w
+
+ badCall(func() { call(v.Field(6).Method(0)) }) // .namedT1.Y
+ badCall(func() { call(v.Field(6).Elem().Method(0)) }) // .namedT1.Y
+ badCall(func() { call(v.Field(6).Method(0)) }) // .namedT1.y
+ badCall(func() { call(v.Field(6).Elem().Method(0)) }) // .namedT1.y
+
+ badCall(func() { call(v.Field(7).Field(0).Method(0)) }) // .namedT2.T1.Y
+ badCall(func() { call(v.Field(7).Field(0).Elem().Method(0)) }) // .namedT2.T1.W
+ badCall(func() { call(v.Field(7).Field(1).Method(0)) }) // .namedT2.t0.W
+ badCall(func() { call(v.Field(7).Field(1).Elem().Method(0)) }) // .namedT2.t0.W
+}
+
+func TestValuePanic(t *testing.T) {
+ vo := ValueOf
+ shouldPanic("reflect.Value.Addr of unaddressable value", func() { vo(0).Addr() })
+ shouldPanic("call of reflect.Value.Bool on float64 Value", func() { vo(0.0).Bool() })
+ shouldPanic("call of reflect.Value.Bytes on string Value", func() { vo("").Bytes() })
+ shouldPanic("call of reflect.Value.Call on bool Value", func() { vo(true).Call(nil) })
+ shouldPanic("call of reflect.Value.CallSlice on int Value", func() { vo(0).CallSlice(nil) })
+ shouldPanic("call of reflect.Value.Close on string Value", func() { vo("").Close() })
+ shouldPanic("call of reflect.Value.Complex on float64 Value", func() { vo(0.0).Complex() })
+ shouldPanic("call of reflect.Value.Elem on bool Value", func() { vo(false).Elem() })
+ shouldPanic("call of reflect.Value.Field on int Value", func() { vo(0).Field(0) })
+ shouldPanic("call of reflect.Value.Float on string Value", func() { vo("").Float() })
+ shouldPanic("call of reflect.Value.Index on float64 Value", func() { vo(0.0).Index(0) })
+ shouldPanic("call of reflect.Value.Int on bool Value", func() { vo(false).Int() })
+ shouldPanic("call of reflect.Value.IsNil on int Value", func() { vo(0).IsNil() })
+ shouldPanic("call of reflect.Value.Len on bool Value", func() { vo(false).Len() })
+ shouldPanic("call of reflect.Value.MapIndex on float64 Value", func() { vo(0.0).MapIndex(vo(0.0)) })
+ shouldPanic("call of reflect.Value.MapKeys on string Value", func() { vo("").MapKeys() })
+ shouldPanic("call of reflect.Value.MapRange on int Value", func() { vo(0).MapRange() })
+ shouldPanic("call of reflect.Value.Method on zero Value", func() { vo(nil).Method(0) })
+ shouldPanic("call of reflect.Value.NumField on string Value", func() { vo("").NumField() })
+ shouldPanic("call of reflect.Value.NumMethod on zero Value", func() { vo(nil).NumMethod() })
+ shouldPanic("call of reflect.Value.OverflowComplex on float64 Value", func() { vo(float64(0)).OverflowComplex(0) })
+ shouldPanic("call of reflect.Value.OverflowFloat on int64 Value", func() { vo(int64(0)).OverflowFloat(0) })
+ shouldPanic("call of reflect.Value.OverflowInt on uint64 Value", func() { vo(uint64(0)).OverflowInt(0) })
+ shouldPanic("call of reflect.Value.OverflowUint on complex64 Value", func() { vo(complex64(0)).OverflowUint(0) })
+ shouldPanic("call of reflect.Value.Recv on string Value", func() { vo("").Recv() })
+ shouldPanic("call of reflect.Value.Send on bool Value", func() { vo(true).Send(vo(true)) })
+ shouldPanic("value of type string is not assignable to type bool", func() { vo(new(bool)).Elem().Set(vo("")) })
+ shouldPanic("call of reflect.Value.SetBool on string Value", func() { vo(new(string)).Elem().SetBool(false) })
+ shouldPanic("reflect.Value.SetBytes using unaddressable value", func() { vo("").SetBytes(nil) })
+ shouldPanic("call of reflect.Value.SetCap on string Value", func() { vo(new(string)).Elem().SetCap(0) })
+ shouldPanic("call of reflect.Value.SetComplex on string Value", func() { vo(new(string)).Elem().SetComplex(0) })
+ shouldPanic("call of reflect.Value.SetFloat on string Value", func() { vo(new(string)).Elem().SetFloat(0) })
+ shouldPanic("call of reflect.Value.SetInt on string Value", func() { vo(new(string)).Elem().SetInt(0) })
+ shouldPanic("call of reflect.Value.SetLen on string Value", func() { vo(new(string)).Elem().SetLen(0) })
+ shouldPanic("call of reflect.Value.SetString on int Value", func() { vo(new(int)).Elem().SetString("") })
+ shouldPanic("reflect.Value.SetUint using unaddressable value", func() { vo(0.0).SetUint(0) })
+ shouldPanic("call of reflect.Value.Slice on bool Value", func() { vo(true).Slice(1, 2) })
+ shouldPanic("call of reflect.Value.Slice3 on int Value", func() { vo(0).Slice3(1, 2, 3) })
+ shouldPanic("call of reflect.Value.TryRecv on bool Value", func() { vo(true).TryRecv() })
+ shouldPanic("call of reflect.Value.TrySend on string Value", func() { vo("").TrySend(vo("")) })
+ shouldPanic("call of reflect.Value.Uint on float64 Value", func() { vo(0.0).Uint() })
+}
+
+func shouldPanic(expect string, f func()) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ panic("did not panic")
+ }
+ if expect != "" {
+ var s string
+ switch r := r.(type) {
+ case string:
+ s = r
+ case *ValueError:
+ s = r.Error()
+ default:
+ panic(fmt.Sprintf("panicked with unexpected type %T", r))
+ }
+ if !strings.HasPrefix(s, "reflect") {
+ panic(`panic string does not start with "reflect": ` + s)
+ }
+ if !strings.Contains(s, expect) {
+ panic(`panic string does not contain "` + expect + `": ` + s)
+ }
+ }
+ }()
+ f()
+}
+
+func isNonNil(x any) {
+ if x == nil {
+ panic("nil interface")
+ }
+}
+
+func isValid(v Value) {
+ if !v.IsValid() {
+ panic("zero Value")
+ }
+}
+
+func TestAlias(t *testing.T) {
+ x := string("hello")
+ v := ValueOf(&x).Elem()
+ oldvalue := v.Interface()
+ v.SetString("world")
+ newvalue := v.Interface()
+
+ if oldvalue != "hello" || newvalue != "world" {
+ t.Errorf("aliasing: old=%q new=%q, want hello, world", oldvalue, newvalue)
+ }
+}
+
+var V = ValueOf
+
+func EmptyInterfaceV(x any) Value {
+ return ValueOf(&x).Elem()
+}
+
+func ReaderV(x io.Reader) Value {
+ return ValueOf(&x).Elem()
+}
+
+func ReadWriterV(x io.ReadWriter) Value {
+ return ValueOf(&x).Elem()
+}
+
+type Empty struct{}
+type MyStruct struct {
+ x int `some:"tag"`
+}
+type MyStruct1 struct {
+ x struct {
+ int `some:"bar"`
+ }
+}
+type MyStruct2 struct {
+ x struct {
+ int `some:"foo"`
+ }
+}
+type MyString string
+type MyBytes []byte
+type MyBytesArrayPtr0 *[0]byte
+type MyBytesArrayPtr *[4]byte
+type MyBytesArray0 [0]byte
+type MyBytesArray [4]byte
+type MyRunes []int32
+type MyFunc func()
+type MyByte byte
+
+type IntChan chan int
+type IntChanRecv <-chan int
+type IntChanSend chan<- int
+type BytesChan chan []byte
+type BytesChanRecv <-chan []byte
+type BytesChanSend chan<- []byte
+
+var convertTests = []struct {
+ in Value
+ out Value
+}{
+ // numbers
+ /*
+ Edit .+1,/\*\//-1>cat >/tmp/x.go && go run /tmp/x.go
+
+ package main
+
+ import "fmt"
+
+ var numbers = []string{
+ "int8", "uint8", "int16", "uint16",
+ "int32", "uint32", "int64", "uint64",
+ "int", "uint", "uintptr",
+ "float32", "float64",
+ }
+
+ func main() {
+ // all pairs but in an unusual order,
+ // to emit all the int8, uint8 cases
+ // before n grows too big.
+ n := 1
+ for i, f := range numbers {
+ for _, g := range numbers[i:] {
+ fmt.Printf("\t{V(%s(%d)), V(%s(%d))},\n", f, n, g, n)
+ n++
+ if f != g {
+ fmt.Printf("\t{V(%s(%d)), V(%s(%d))},\n", g, n, f, n)
+ n++
+ }
+ }
+ }
+ }
+ */
+ {V(int8(1)), V(int8(1))},
+ {V(int8(2)), V(uint8(2))},
+ {V(uint8(3)), V(int8(3))},
+ {V(int8(4)), V(int16(4))},
+ {V(int16(5)), V(int8(5))},
+ {V(int8(6)), V(uint16(6))},
+ {V(uint16(7)), V(int8(7))},
+ {V(int8(8)), V(int32(8))},
+ {V(int32(9)), V(int8(9))},
+ {V(int8(10)), V(uint32(10))},
+ {V(uint32(11)), V(int8(11))},
+ {V(int8(12)), V(int64(12))},
+ {V(int64(13)), V(int8(13))},
+ {V(int8(14)), V(uint64(14))},
+ {V(uint64(15)), V(int8(15))},
+ {V(int8(16)), V(int(16))},
+ {V(int(17)), V(int8(17))},
+ {V(int8(18)), V(uint(18))},
+ {V(uint(19)), V(int8(19))},
+ {V(int8(20)), V(uintptr(20))},
+ {V(uintptr(21)), V(int8(21))},
+ {V(int8(22)), V(float32(22))},
+ {V(float32(23)), V(int8(23))},
+ {V(int8(24)), V(float64(24))},
+ {V(float64(25)), V(int8(25))},
+ {V(uint8(26)), V(uint8(26))},
+ {V(uint8(27)), V(int16(27))},
+ {V(int16(28)), V(uint8(28))},
+ {V(uint8(29)), V(uint16(29))},
+ {V(uint16(30)), V(uint8(30))},
+ {V(uint8(31)), V(int32(31))},
+ {V(int32(32)), V(uint8(32))},
+ {V(uint8(33)), V(uint32(33))},
+ {V(uint32(34)), V(uint8(34))},
+ {V(uint8(35)), V(int64(35))},
+ {V(int64(36)), V(uint8(36))},
+ {V(uint8(37)), V(uint64(37))},
+ {V(uint64(38)), V(uint8(38))},
+ {V(uint8(39)), V(int(39))},
+ {V(int(40)), V(uint8(40))},
+ {V(uint8(41)), V(uint(41))},
+ {V(uint(42)), V(uint8(42))},
+ {V(uint8(43)), V(uintptr(43))},
+ {V(uintptr(44)), V(uint8(44))},
+ {V(uint8(45)), V(float32(45))},
+ {V(float32(46)), V(uint8(46))},
+ {V(uint8(47)), V(float64(47))},
+ {V(float64(48)), V(uint8(48))},
+ {V(int16(49)), V(int16(49))},
+ {V(int16(50)), V(uint16(50))},
+ {V(uint16(51)), V(int16(51))},
+ {V(int16(52)), V(int32(52))},
+ {V(int32(53)), V(int16(53))},
+ {V(int16(54)), V(uint32(54))},
+ {V(uint32(55)), V(int16(55))},
+ {V(int16(56)), V(int64(56))},
+ {V(int64(57)), V(int16(57))},
+ {V(int16(58)), V(uint64(58))},
+ {V(uint64(59)), V(int16(59))},
+ {V(int16(60)), V(int(60))},
+ {V(int(61)), V(int16(61))},
+ {V(int16(62)), V(uint(62))},
+ {V(uint(63)), V(int16(63))},
+ {V(int16(64)), V(uintptr(64))},
+ {V(uintptr(65)), V(int16(65))},
+ {V(int16(66)), V(float32(66))},
+ {V(float32(67)), V(int16(67))},
+ {V(int16(68)), V(float64(68))},
+ {V(float64(69)), V(int16(69))},
+ {V(uint16(70)), V(uint16(70))},
+ {V(uint16(71)), V(int32(71))},
+ {V(int32(72)), V(uint16(72))},
+ {V(uint16(73)), V(uint32(73))},
+ {V(uint32(74)), V(uint16(74))},
+ {V(uint16(75)), V(int64(75))},
+ {V(int64(76)), V(uint16(76))},
+ {V(uint16(77)), V(uint64(77))},
+ {V(uint64(78)), V(uint16(78))},
+ {V(uint16(79)), V(int(79))},
+ {V(int(80)), V(uint16(80))},
+ {V(uint16(81)), V(uint(81))},
+ {V(uint(82)), V(uint16(82))},
+ {V(uint16(83)), V(uintptr(83))},
+ {V(uintptr(84)), V(uint16(84))},
+ {V(uint16(85)), V(float32(85))},
+ {V(float32(86)), V(uint16(86))},
+ {V(uint16(87)), V(float64(87))},
+ {V(float64(88)), V(uint16(88))},
+ {V(int32(89)), V(int32(89))},
+ {V(int32(90)), V(uint32(90))},
+ {V(uint32(91)), V(int32(91))},
+ {V(int32(92)), V(int64(92))},
+ {V(int64(93)), V(int32(93))},
+ {V(int32(94)), V(uint64(94))},
+ {V(uint64(95)), V(int32(95))},
+ {V(int32(96)), V(int(96))},
+ {V(int(97)), V(int32(97))},
+ {V(int32(98)), V(uint(98))},
+ {V(uint(99)), V(int32(99))},
+ {V(int32(100)), V(uintptr(100))},
+ {V(uintptr(101)), V(int32(101))},
+ {V(int32(102)), V(float32(102))},
+ {V(float32(103)), V(int32(103))},
+ {V(int32(104)), V(float64(104))},
+ {V(float64(105)), V(int32(105))},
+ {V(uint32(106)), V(uint32(106))},
+ {V(uint32(107)), V(int64(107))},
+ {V(int64(108)), V(uint32(108))},
+ {V(uint32(109)), V(uint64(109))},
+ {V(uint64(110)), V(uint32(110))},
+ {V(uint32(111)), V(int(111))},
+ {V(int(112)), V(uint32(112))},
+ {V(uint32(113)), V(uint(113))},
+ {V(uint(114)), V(uint32(114))},
+ {V(uint32(115)), V(uintptr(115))},
+ {V(uintptr(116)), V(uint32(116))},
+ {V(uint32(117)), V(float32(117))},
+ {V(float32(118)), V(uint32(118))},
+ {V(uint32(119)), V(float64(119))},
+ {V(float64(120)), V(uint32(120))},
+ {V(int64(121)), V(int64(121))},
+ {V(int64(122)), V(uint64(122))},
+ {V(uint64(123)), V(int64(123))},
+ {V(int64(124)), V(int(124))},
+ {V(int(125)), V(int64(125))},
+ {V(int64(126)), V(uint(126))},
+ {V(uint(127)), V(int64(127))},
+ {V(int64(128)), V(uintptr(128))},
+ {V(uintptr(129)), V(int64(129))},
+ {V(int64(130)), V(float32(130))},
+ {V(float32(131)), V(int64(131))},
+ {V(int64(132)), V(float64(132))},
+ {V(float64(133)), V(int64(133))},
+ {V(uint64(134)), V(uint64(134))},
+ {V(uint64(135)), V(int(135))},
+ {V(int(136)), V(uint64(136))},
+ {V(uint64(137)), V(uint(137))},
+ {V(uint(138)), V(uint64(138))},
+ {V(uint64(139)), V(uintptr(139))},
+ {V(uintptr(140)), V(uint64(140))},
+ {V(uint64(141)), V(float32(141))},
+ {V(float32(142)), V(uint64(142))},
+ {V(uint64(143)), V(float64(143))},
+ {V(float64(144)), V(uint64(144))},
+ {V(int(145)), V(int(145))},
+ {V(int(146)), V(uint(146))},
+ {V(uint(147)), V(int(147))},
+ {V(int(148)), V(uintptr(148))},
+ {V(uintptr(149)), V(int(149))},
+ {V(int(150)), V(float32(150))},
+ {V(float32(151)), V(int(151))},
+ {V(int(152)), V(float64(152))},
+ {V(float64(153)), V(int(153))},
+ {V(uint(154)), V(uint(154))},
+ {V(uint(155)), V(uintptr(155))},
+ {V(uintptr(156)), V(uint(156))},
+ {V(uint(157)), V(float32(157))},
+ {V(float32(158)), V(uint(158))},
+ {V(uint(159)), V(float64(159))},
+ {V(float64(160)), V(uint(160))},
+ {V(uintptr(161)), V(uintptr(161))},
+ {V(uintptr(162)), V(float32(162))},
+ {V(float32(163)), V(uintptr(163))},
+ {V(uintptr(164)), V(float64(164))},
+ {V(float64(165)), V(uintptr(165))},
+ {V(float32(166)), V(float32(166))},
+ {V(float32(167)), V(float64(167))},
+ {V(float64(168)), V(float32(168))},
+ {V(float64(169)), V(float64(169))},
+
+ // truncation
+ {V(float64(1.5)), V(int(1))},
+
+ // complex
+ {V(complex64(1i)), V(complex64(1i))},
+ {V(complex64(2i)), V(complex128(2i))},
+ {V(complex128(3i)), V(complex64(3i))},
+ {V(complex128(4i)), V(complex128(4i))},
+
+ // string
+ {V(string("hello")), V(string("hello"))},
+ {V(string("bytes1")), V([]byte("bytes1"))},
+ {V([]byte("bytes2")), V(string("bytes2"))},
+ {V([]byte("bytes3")), V([]byte("bytes3"))},
+ {V(string("runes♝")), V([]rune("runes♝"))},
+ {V([]rune("runes♕")), V(string("runes♕"))},
+ {V([]rune("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
+ {V(int('a')), V(string("a"))},
+ {V(int8('a')), V(string("a"))},
+ {V(int16('a')), V(string("a"))},
+ {V(int32('a')), V(string("a"))},
+ {V(int64('a')), V(string("a"))},
+ {V(uint('a')), V(string("a"))},
+ {V(uint8('a')), V(string("a"))},
+ {V(uint16('a')), V(string("a"))},
+ {V(uint32('a')), V(string("a"))},
+ {V(uint64('a')), V(string("a"))},
+ {V(uintptr('a')), V(string("a"))},
+ {V(int(-1)), V(string("\uFFFD"))},
+ {V(int8(-2)), V(string("\uFFFD"))},
+ {V(int16(-3)), V(string("\uFFFD"))},
+ {V(int32(-4)), V(string("\uFFFD"))},
+ {V(int64(-5)), V(string("\uFFFD"))},
+ {V(int64(-1 << 32)), V(string("\uFFFD"))},
+ {V(int64(1 << 32)), V(string("\uFFFD"))},
+ {V(uint(0x110001)), V(string("\uFFFD"))},
+ {V(uint32(0x110002)), V(string("\uFFFD"))},
+ {V(uint64(0x110003)), V(string("\uFFFD"))},
+ {V(uint64(1 << 32)), V(string("\uFFFD"))},
+ {V(uintptr(0x110004)), V(string("\uFFFD"))},
+
+ // named string
+ {V(MyString("hello")), V(string("hello"))},
+ {V(string("hello")), V(MyString("hello"))},
+ {V(string("hello")), V(string("hello"))},
+ {V(MyString("hello")), V(MyString("hello"))},
+ {V(MyString("bytes1")), V([]byte("bytes1"))},
+ {V([]byte("bytes2")), V(MyString("bytes2"))},
+ {V([]byte("bytes3")), V([]byte("bytes3"))},
+ {V(MyString("runes♝")), V([]rune("runes♝"))},
+ {V([]rune("runes♕")), V(MyString("runes♕"))},
+ {V([]rune("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
+ {V([]rune("runes🙈🙉🙊")), V(MyRunes("runes🙈🙉🙊"))},
+ {V(MyRunes("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
+ {V(int('a')), V(MyString("a"))},
+ {V(int8('a')), V(MyString("a"))},
+ {V(int16('a')), V(MyString("a"))},
+ {V(int32('a')), V(MyString("a"))},
+ {V(int64('a')), V(MyString("a"))},
+ {V(uint('a')), V(MyString("a"))},
+ {V(uint8('a')), V(MyString("a"))},
+ {V(uint16('a')), V(MyString("a"))},
+ {V(uint32('a')), V(MyString("a"))},
+ {V(uint64('a')), V(MyString("a"))},
+ {V(uintptr('a')), V(MyString("a"))},
+ {V(int(-1)), V(MyString("\uFFFD"))},
+ {V(int8(-2)), V(MyString("\uFFFD"))},
+ {V(int16(-3)), V(MyString("\uFFFD"))},
+ {V(int32(-4)), V(MyString("\uFFFD"))},
+ {V(int64(-5)), V(MyString("\uFFFD"))},
+ {V(uint(0x110001)), V(MyString("\uFFFD"))},
+ {V(uint32(0x110002)), V(MyString("\uFFFD"))},
+ {V(uint64(0x110003)), V(MyString("\uFFFD"))},
+ {V(uintptr(0x110004)), V(MyString("\uFFFD"))},
+
+ // named []byte
+ {V(string("bytes1")), V(MyBytes("bytes1"))},
+ {V(MyBytes("bytes2")), V(string("bytes2"))},
+ {V(MyBytes("bytes3")), V(MyBytes("bytes3"))},
+ {V(MyString("bytes1")), V(MyBytes("bytes1"))},
+ {V(MyBytes("bytes2")), V(MyString("bytes2"))},
+
+ // named []rune
+ {V(string("runes♝")), V(MyRunes("runes♝"))},
+ {V(MyRunes("runes♕")), V(string("runes♕"))},
+ {V(MyRunes("runes🙈🙉🙊")), V(MyRunes("runes🙈🙉🙊"))},
+ {V(MyString("runes♝")), V(MyRunes("runes♝"))},
+ {V(MyRunes("runes♕")), V(MyString("runes♕"))},
+
+ // slice to array
+ {V([]byte(nil)), V([0]byte{})},
+ {V([]byte{}), V([0]byte{})},
+ {V([]byte{1}), V([1]byte{1})},
+ {V([]byte{1, 2}), V([2]byte{1, 2})},
+ {V([]byte{1, 2, 3}), V([3]byte{1, 2, 3})},
+ {V(MyBytes([]byte(nil))), V([0]byte{})},
+ {V(MyBytes{}), V([0]byte{})},
+ {V(MyBytes{1}), V([1]byte{1})},
+ {V(MyBytes{1, 2}), V([2]byte{1, 2})},
+ {V(MyBytes{1, 2, 3}), V([3]byte{1, 2, 3})},
+ {V([]byte(nil)), V(MyBytesArray0{})},
+ {V([]byte{}), V(MyBytesArray0([0]byte{}))},
+ {V([]byte{1, 2, 3, 4}), V(MyBytesArray([4]byte{1, 2, 3, 4}))},
+ {V(MyBytes{}), V(MyBytesArray0([0]byte{}))},
+ {V(MyBytes{5, 6, 7, 8}), V(MyBytesArray([4]byte{5, 6, 7, 8}))},
+ {V([]MyByte{}), V([0]MyByte{})},
+ {V([]MyByte{1, 2}), V([2]MyByte{1, 2})},
+
+ // slice to array pointer
+ {V([]byte(nil)), V((*[0]byte)(nil))},
+ {V([]byte{}), V(new([0]byte))},
+ {V([]byte{7}), V(&[1]byte{7})},
+ {V(MyBytes([]byte(nil))), V((*[0]byte)(nil))},
+ {V(MyBytes([]byte{})), V(new([0]byte))},
+ {V(MyBytes([]byte{9})), V(&[1]byte{9})},
+ {V([]byte(nil)), V(MyBytesArrayPtr0(nil))},
+ {V([]byte{}), V(MyBytesArrayPtr0(new([0]byte)))},
+ {V([]byte{1, 2, 3, 4}), V(MyBytesArrayPtr(&[4]byte{1, 2, 3, 4}))},
+ {V(MyBytes([]byte{})), V(MyBytesArrayPtr0(new([0]byte)))},
+ {V(MyBytes([]byte{5, 6, 7, 8})), V(MyBytesArrayPtr(&[4]byte{5, 6, 7, 8}))},
+
+ {V([]byte(nil)), V((*MyBytesArray0)(nil))},
+ {V([]byte{}), V((*MyBytesArray0)(new([0]byte)))},
+ {V([]byte{1, 2, 3, 4}), V(&MyBytesArray{1, 2, 3, 4})},
+ {V(MyBytes([]byte(nil))), V((*MyBytesArray0)(nil))},
+ {V(MyBytes([]byte{})), V((*MyBytesArray0)(new([0]byte)))},
+ {V(MyBytes([]byte{5, 6, 7, 8})), V(&MyBytesArray{5, 6, 7, 8})},
+ {V(new([0]byte)), V(new(MyBytesArray0))},
+ {V(new(MyBytesArray0)), V(new([0]byte))},
+ {V(MyBytesArrayPtr0(nil)), V((*[0]byte)(nil))},
+ {V((*[0]byte)(nil)), V(MyBytesArrayPtr0(nil))},
+
+ // named types and equal underlying types
+ {V(new(int)), V(new(integer))},
+ {V(new(integer)), V(new(int))},
+ {V(Empty{}), V(struct{}{})},
+ {V(new(Empty)), V(new(struct{}))},
+ {V(struct{}{}), V(Empty{})},
+ {V(new(struct{})), V(new(Empty))},
+ {V(Empty{}), V(Empty{})},
+ {V(MyBytes{}), V([]byte{})},
+ {V([]byte{}), V(MyBytes{})},
+ {V((func())(nil)), V(MyFunc(nil))},
+ {V((MyFunc)(nil)), V((func())(nil))},
+
+ // structs with different tags
+ {V(struct {
+ x int `some:"foo"`
+ }{}), V(struct {
+ x int `some:"bar"`
+ }{})},
+
+ {V(struct {
+ x int `some:"bar"`
+ }{}), V(struct {
+ x int `some:"foo"`
+ }{})},
+
+ {V(MyStruct{}), V(struct {
+ x int `some:"foo"`
+ }{})},
+
+ {V(struct {
+ x int `some:"foo"`
+ }{}), V(MyStruct{})},
+
+ {V(MyStruct{}), V(struct {
+ x int `some:"bar"`
+ }{})},
+
+ {V(struct {
+ x int `some:"bar"`
+ }{}), V(MyStruct{})},
+
+ {V(MyStruct1{}), V(MyStruct2{})},
+ {V(MyStruct2{}), V(MyStruct1{})},
+
+ // can convert *byte and *MyByte
+ {V((*byte)(nil)), V((*MyByte)(nil))},
+ {V((*MyByte)(nil)), V((*byte)(nil))},
+
+ // cannot convert mismatched array sizes
+ {V([2]byte{}), V([2]byte{})},
+ {V([3]byte{}), V([3]byte{})},
+ {V(MyBytesArray0{}), V([0]byte{})},
+ {V([0]byte{}), V(MyBytesArray0{})},
+
+ // cannot convert other instances
+ {V((**byte)(nil)), V((**byte)(nil))},
+ {V((**MyByte)(nil)), V((**MyByte)(nil))},
+ {V((chan byte)(nil)), V((chan byte)(nil))},
+ {V((chan MyByte)(nil)), V((chan MyByte)(nil))},
+ {V(([]byte)(nil)), V(([]byte)(nil))},
+ {V(([]MyByte)(nil)), V(([]MyByte)(nil))},
+ {V((map[int]byte)(nil)), V((map[int]byte)(nil))},
+ {V((map[int]MyByte)(nil)), V((map[int]MyByte)(nil))},
+ {V((map[byte]int)(nil)), V((map[byte]int)(nil))},
+ {V((map[MyByte]int)(nil)), V((map[MyByte]int)(nil))},
+ {V([2]byte{}), V([2]byte{})},
+ {V([2]MyByte{}), V([2]MyByte{})},
+
+ // other
+ {V((***int)(nil)), V((***int)(nil))},
+ {V((***byte)(nil)), V((***byte)(nil))},
+ {V((***int32)(nil)), V((***int32)(nil))},
+ {V((***int64)(nil)), V((***int64)(nil))},
+ {V((chan byte)(nil)), V((chan byte)(nil))},
+ {V((chan MyByte)(nil)), V((chan MyByte)(nil))},
+ {V((map[int]bool)(nil)), V((map[int]bool)(nil))},
+ {V((map[int]byte)(nil)), V((map[int]byte)(nil))},
+ {V((map[uint]bool)(nil)), V((map[uint]bool)(nil))},
+ {V([]uint(nil)), V([]uint(nil))},
+ {V([]int(nil)), V([]int(nil))},
+ {V(new(any)), V(new(any))},
+ {V(new(io.Reader)), V(new(io.Reader))},
+ {V(new(io.Writer)), V(new(io.Writer))},
+
+ // channels
+ {V(IntChan(nil)), V((chan<- int)(nil))},
+ {V(IntChan(nil)), V((<-chan int)(nil))},
+ {V((chan int)(nil)), V(IntChanRecv(nil))},
+ {V((chan int)(nil)), V(IntChanSend(nil))},
+ {V(IntChanRecv(nil)), V((<-chan int)(nil))},
+ {V((<-chan int)(nil)), V(IntChanRecv(nil))},
+ {V(IntChanSend(nil)), V((chan<- int)(nil))},
+ {V((chan<- int)(nil)), V(IntChanSend(nil))},
+ {V(IntChan(nil)), V((chan int)(nil))},
+ {V((chan int)(nil)), V(IntChan(nil))},
+ {V((chan int)(nil)), V((<-chan int)(nil))},
+ {V((chan int)(nil)), V((chan<- int)(nil))},
+ {V(BytesChan(nil)), V((chan<- []byte)(nil))},
+ {V(BytesChan(nil)), V((<-chan []byte)(nil))},
+ {V((chan []byte)(nil)), V(BytesChanRecv(nil))},
+ {V((chan []byte)(nil)), V(BytesChanSend(nil))},
+ {V(BytesChanRecv(nil)), V((<-chan []byte)(nil))},
+ {V((<-chan []byte)(nil)), V(BytesChanRecv(nil))},
+ {V(BytesChanSend(nil)), V((chan<- []byte)(nil))},
+ {V((chan<- []byte)(nil)), V(BytesChanSend(nil))},
+ {V(BytesChan(nil)), V((chan []byte)(nil))},
+ {V((chan []byte)(nil)), V(BytesChan(nil))},
+ {V((chan []byte)(nil)), V((<-chan []byte)(nil))},
+ {V((chan []byte)(nil)), V((chan<- []byte)(nil))},
+
+ // cannot convert other instances (channels)
+ {V(IntChan(nil)), V(IntChan(nil))},
+ {V(IntChanRecv(nil)), V(IntChanRecv(nil))},
+ {V(IntChanSend(nil)), V(IntChanSend(nil))},
+ {V(BytesChan(nil)), V(BytesChan(nil))},
+ {V(BytesChanRecv(nil)), V(BytesChanRecv(nil))},
+ {V(BytesChanSend(nil)), V(BytesChanSend(nil))},
+
+ // interfaces
+ {V(int(1)), EmptyInterfaceV(int(1))},
+ {V(string("hello")), EmptyInterfaceV(string("hello"))},
+ {V(new(bytes.Buffer)), ReaderV(new(bytes.Buffer))},
+ {ReadWriterV(new(bytes.Buffer)), ReaderV(new(bytes.Buffer))},
+ {V(new(bytes.Buffer)), ReadWriterV(new(bytes.Buffer))},
+}
+
+func TestConvert(t *testing.T) {
+ canConvert := map[[2]Type]bool{}
+ all := map[Type]bool{}
+
+ for _, tt := range convertTests {
+ t1 := tt.in.Type()
+ if !t1.ConvertibleTo(t1) {
+ t.Errorf("(%s).ConvertibleTo(%s) = false, want true", t1, t1)
+ continue
+ }
+
+ t2 := tt.out.Type()
+ if !t1.ConvertibleTo(t2) {
+ t.Errorf("(%s).ConvertibleTo(%s) = false, want true", t1, t2)
+ continue
+ }
+
+ all[t1] = true
+ all[t2] = true
+ canConvert[[2]Type{t1, t2}] = true
+
+ // vout1 represents the in value converted to the in type.
+ v1 := tt.in
+ if !v1.CanConvert(t1) {
+ t.Errorf("ValueOf(%T(%[1]v)).CanConvert(%s) = false, want true", tt.in.Interface(), t1)
+ }
+ vout1 := v1.Convert(t1)
+ out1 := vout1.Interface()
+ if vout1.Type() != tt.in.Type() || !DeepEqual(out1, tt.in.Interface()) {
+ t.Errorf("ValueOf(%T(%[1]v)).Convert(%s) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t1, out1, tt.in.Interface())
+ }
+
+ // vout2 represents the in value converted to the out type.
+ if !v1.CanConvert(t2) {
+ t.Errorf("ValueOf(%T(%[1]v)).CanConvert(%s) = false, want true", tt.in.Interface(), t2)
+ }
+ vout2 := v1.Convert(t2)
+ out2 := vout2.Interface()
+ if vout2.Type() != tt.out.Type() || !DeepEqual(out2, tt.out.Interface()) {
+ t.Errorf("ValueOf(%T(%[1]v)).Convert(%s) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t2, out2, tt.out.Interface())
+ }
+ if got, want := vout2.Kind(), vout2.Type().Kind(); got != want {
+ t.Errorf("ValueOf(%T(%[1]v)).Convert(%s) has internal kind %v want %v", tt.in.Interface(), t1, got, want)
+ }
+
+ // vout3 represents a new value of the out type, set to vout2. This makes
+ // sure the converted value vout2 is really usable as a regular value.
+ vout3 := New(t2).Elem()
+ vout3.Set(vout2)
+ out3 := vout3.Interface()
+ if vout3.Type() != tt.out.Type() || !DeepEqual(out3, tt.out.Interface()) {
+ t.Errorf("Set(ValueOf(%T(%[1]v)).Convert(%s)) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t2, out3, tt.out.Interface())
+ }
+
+ if IsRO(v1) {
+ t.Errorf("table entry %v is RO, should not be", v1)
+ }
+ if IsRO(vout1) {
+ t.Errorf("self-conversion output %v is RO, should not be", vout1)
+ }
+ if IsRO(vout2) {
+ t.Errorf("conversion output %v is RO, should not be", vout2)
+ }
+ if IsRO(vout3) {
+ t.Errorf("set(conversion output) %v is RO, should not be", vout3)
+ }
+ if !IsRO(MakeRO(v1).Convert(t1)) {
+ t.Errorf("RO self-conversion output %v is not RO, should be", v1)
+ }
+ if !IsRO(MakeRO(v1).Convert(t2)) {
+ t.Errorf("RO conversion output %v is not RO, should be", v1)
+ }
+ }
+
+ // Assume that of all the types we saw during the tests,
+ // if there wasn't an explicit entry for a conversion between
+ // a pair of types, then it's not to be allowed. This checks for
+ // things like 'int64' converting to '*int'.
+ for t1 := range all {
+ for t2 := range all {
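+ // A conversion is expected to succeed only for identical types,
+ // pairs listed explicitly in convertTests, or conversions to the
+ // empty interface, which every type satisfies.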
+ expectOK := t1 == t2 || canConvert[[2]Type{t1, t2}] || t2.Kind() == Interface && t2.NumMethod() == 0
+ if ok := t1.ConvertibleTo(t2); ok != expectOK {
+ t.Errorf("(%s).ConvertibleTo(%s) = %v, want %v", t1, t2, ok, expectOK)
+ }
+ }
+ }
+}
+
+func TestConvertPanic(t *testing.T) {
+ s := make([]byte, 4)
+ p := new([8]byte)
+ v := ValueOf(s)
+ pt := TypeOf(p)
+ if !v.Type().ConvertibleTo(pt) {
+ t.Errorf("[]byte should be convertible to *[8]byte")
+ }
+ if v.CanConvert(pt) {
+ t.Errorf("slice with length 4 should not be convertible to *[8]byte")
+ }
+ shouldPanic("reflect: cannot convert slice with length 4 to pointer to array with length 8", func() {
+ _ = v.Convert(pt)
+ })
+
+ if v.CanConvert(pt.Elem()) {
+ t.Errorf("slice with length 4 should not be convertible to [8]byte")
+ }
+ shouldPanic("reflect: cannot convert slice with length 4 to array with length 8", func() {
+ _ = v.Convert(pt.Elem())
+ })
+}
+
+func TestConvertSlice2Array(t *testing.T) {
+ s := make([]int, 4)
+ p := [4]int{}
+ pt := TypeOf(p)
+ ov := ValueOf(s)
+ v := ov.Convert(pt)
+ // Converting a slice to a non-empty array must return
+ // a non-addressable copy of the original memory.
+ if v.CanAddr() {
+ t.Fatalf("converting a slice to a non-empty array returned an addressable copy")
+ }
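+ // For reference, the equivalent non-reflect conversion [4]int(s)
+ // (available since Go 1.20) also copies the elements, so later
+ // writes to the slice are not visible in the converted array.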
+ for i := range s {
+ ov.Index(i).Set(ValueOf(i + 1))
+ }
+ for i := range s {
+ if v.Index(i).Int() != 0 {
+ t.Fatalf("slice (%v) mutation visible in converted result (%v)", ov, v)
+ }
+ }
+}
+
+var gFloat32 float32
+
+const snan uint32 = 0x7f800001
+
+func TestConvertNaNs(t *testing.T) {
+ // Test to see if a store followed by a load of a signaling NaN
+ // maintains the signaling bit. (This used to fail on the 387 port.)
+ gFloat32 = math.Float32frombits(snan)
+ runtime.Gosched() // make sure we don't optimize the store/load away
+ if got := math.Float32bits(gFloat32); got != snan {
+ t.Errorf("store/load of sNaN not faithful, got %x want %x", got, snan)
+ }
+ // Test reflect's conversion between float32s. See issue 36400.
+ type myFloat32 float32
+ x := V(myFloat32(math.Float32frombits(snan)))
+ y := x.Convert(TypeOf(float32(0)))
+ z := y.Interface().(float32)
+ if got := math.Float32bits(z); got != snan {
+ t.Errorf("signaling nan conversion got %x, want %x", got, snan)
+ }
+}
+
+type ComparableStruct struct {
+ X int
+}
+
+type NonComparableStruct struct {
+ X int
+ Y map[string]int
+}
+
+var comparableTests = []struct {
+ typ Type
+ ok bool
+}{
+ {TypeOf(1), true},
+ {TypeOf("hello"), true},
+ {TypeOf(new(byte)), true},
+ {TypeOf((func())(nil)), false},
+ {TypeOf([]byte{}), false},
+ {TypeOf(map[string]int{}), false},
+ {TypeOf(make(chan int)), true},
+ {TypeOf(1.5), true},
+ {TypeOf(false), true},
+ {TypeOf(1i), true},
+ {TypeOf(ComparableStruct{}), true},
+ {TypeOf(NonComparableStruct{}), false},
+ {TypeOf([10]map[string]int{}), false},
+ {TypeOf([10]string{}), true},
+ {TypeOf(new(any)).Elem(), true},
+}
+
+func TestComparable(t *testing.T) {
+ for _, tt := range comparableTests {
+ if ok := tt.typ.Comparable(); ok != tt.ok {
+ t.Errorf("TypeOf(%v).Comparable() = %v, want %v", tt.typ, ok, tt.ok)
+ }
+ }
+}
+
+func TestOverflow(t *testing.T) {
+ if ovf := V(float64(0)).OverflowFloat(1e300); ovf {
+ t.Errorf("%v wrongly overflows float64", 1e300)
+ }
+
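+ // Largest finite float32: a 24-bit significand of all ones
+ // scaled by 2^(127-23), i.e. (2^24-1) * 2^104.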
+ maxFloat32 := float64((1<<24 - 1) << (127 - 23))
+ if ovf := V(float32(0)).OverflowFloat(maxFloat32); ovf {
+ t.Errorf("%v wrongly overflows float32", maxFloat32)
+ }
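+ // The next float64 above MaxFloat32 (whose ULP there is 2^(127-52));
+ // OverflowFloat must report it as overflowing float32.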
+ ovfFloat32 := float64((1<<24-1)<<(127-23) + 1<<(127-52))
+ if ovf := V(float32(0)).OverflowFloat(ovfFloat32); !ovf {
+ t.Errorf("%v should overflow float32", ovfFloat32)
+ }
+ if ovf := V(float32(0)).OverflowFloat(-ovfFloat32); !ovf {
+ t.Errorf("%v should overflow float32", -ovfFloat32)
+ }
+
+ maxInt32 := int64(0x7fffffff)
+ if ovf := V(int32(0)).OverflowInt(maxInt32); ovf {
+ t.Errorf("%v wrongly overflows int32", maxInt32)
+ }
+ if ovf := V(int32(0)).OverflowInt(-1 << 31); ovf {
+ t.Errorf("%v wrongly overflows int32", -int64(1)<<31)
+ }
+ ovfInt32 := int64(1 << 31)
+ if ovf := V(int32(0)).OverflowInt(ovfInt32); !ovf {
+ t.Errorf("%v should overflow int32", ovfInt32)
+ }
+
+ maxUint32 := uint64(0xffffffff)
+ if ovf := V(uint32(0)).OverflowUint(maxUint32); ovf {
+ t.Errorf("%v wrongly overflows uint32", maxUint32)
+ }
+ ovfUint32 := uint64(1 << 32)
+ if ovf := V(uint32(0)).OverflowUint(ovfUint32); !ovf {
+ t.Errorf("%v should overflow uint32", ovfUint32)
+ }
+}
+
+func checkSameType(t *testing.T, x Type, y any) {
+ if x != TypeOf(y) || TypeOf(Zero(x).Interface()) != TypeOf(y) {
+ t.Errorf("did not find preexisting type for %s (vs %s)", TypeOf(x), TypeOf(y))
+ }
+}
+
+func TestArrayOf(t *testing.T) {
+ // check construction and use of type not in binary
+ tests := []struct {
+ n int
+ value func(i int) any
+ comparable bool
+ want string
+ }{
+ {
+ n: 0,
+ value: func(i int) any { type Tint int; return Tint(i) },
+ comparable: true,
+ want: "[]",
+ },
+ {
+ n: 10,
+ value: func(i int) any { type Tint int; return Tint(i) },
+ comparable: true,
+ want: "[0 1 2 3 4 5 6 7 8 9]",
+ },
+ {
+ n: 10,
+ value: func(i int) any { type Tfloat float64; return Tfloat(i) },
+ comparable: true,
+ want: "[0 1 2 3 4 5 6 7 8 9]",
+ },
+ {
+ n: 10,
+ value: func(i int) any { type Tstring string; return Tstring(strconv.Itoa(i)) },
+ comparable: true,
+ want: "[0 1 2 3 4 5 6 7 8 9]",
+ },
+ {
+ n: 10,
+ value: func(i int) any { type Tstruct struct{ V int }; return Tstruct{i} },
+ comparable: true,
+ want: "[{0} {1} {2} {3} {4} {5} {6} {7} {8} {9}]",
+ },
+ {
+ n: 10,
+ value: func(i int) any { type Tint int; return []Tint{Tint(i)} },
+ comparable: false,
+ want: "[[0] [1] [2] [3] [4] [5] [6] [7] [8] [9]]",
+ },
+ {
+ n: 10,
+ value: func(i int) any { type Tint int; return [1]Tint{Tint(i)} },
+ comparable: true,
+ want: "[[0] [1] [2] [3] [4] [5] [6] [7] [8] [9]]",
+ },
+ {
+ n: 10,
+ value: func(i int) any { type Tstruct struct{ V [1]int }; return Tstruct{[1]int{i}} },
+ comparable: true,
+ want: "[{[0]} {[1]} {[2]} {[3]} {[4]} {[5]} {[6]} {[7]} {[8]} {[9]}]",
+ },
+ {
+ n: 10,
+ value: func(i int) any { type Tstruct struct{ V []int }; return Tstruct{[]int{i}} },
+ comparable: false,
+ want: "[{[0]} {[1]} {[2]} {[3]} {[4]} {[5]} {[6]} {[7]} {[8]} {[9]}]",
+ },
+ {
+ n: 10,
+ value: func(i int) any { type TstructUV struct{ U, V int }; return TstructUV{i, i} },
+ comparable: true,
+ want: "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]",
+ },
+ {
+ n: 10,
+ value: func(i int) any {
+ type TstructUV struct {
+ U int
+ V float64
+ }
+ return TstructUV{i, float64(i)}
+ },
+ comparable: true,
+ want: "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]",
+ },
+ }
+
+ for _, table := range tests {
+ at := ArrayOf(table.n, TypeOf(table.value(0)))
+ v := New(at).Elem()
+ vok := New(at).Elem()
+ vnot := New(at).Elem()
+ for i := 0; i < v.Len(); i++ {
+ v.Index(i).Set(ValueOf(table.value(i)))
+ vok.Index(i).Set(ValueOf(table.value(i)))
+ j := i
+ if i+1 == v.Len() {
+ j = i + 1
+ }
+ vnot.Index(i).Set(ValueOf(table.value(j))) // make it differ only by last element
+ }
+ s := fmt.Sprint(v.Interface())
+ if s != table.want {
+ t.Errorf("constructed array = %s, want %s", s, table.want)
+ }
+
+ if table.comparable != at.Comparable() {
+ t.Errorf("constructed array (%#v) is comparable=%v, want=%v", v.Interface(), at.Comparable(), table.comparable)
+ }
+ if table.comparable {
+ if table.n > 0 {
+ if DeepEqual(vnot.Interface(), v.Interface()) {
+ t.Errorf(
+ "arrays (%#v) compare ok (but should not)",
+ v.Interface(),
+ )
+ }
+ }
+ if !DeepEqual(vok.Interface(), v.Interface()) {
+ t.Errorf(
+ "arrays (%#v) compare NOT-ok (but should)",
+ v.Interface(),
+ )
+ }
+ }
+ }
+
+ // check that type already in binary is found
+ type T int
+ checkSameType(t, ArrayOf(5, TypeOf(T(1))), [5]T{})
+}
+
+func TestArrayOfGC(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ const n = 100
+ var x []any
+ for i := 0; i < n; i++ {
+ v := New(ArrayOf(n, tt)).Elem()
+ for j := 0; j < v.Len(); j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.Index(j).Set(ValueOf(p).Convert(tt))
+ }
+ x = append(x, v.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi)
+ for j := 0; j < v.Len(); j++ {
+ k := v.Index(j).Elem().Interface()
+ if k != uintptr(i*n+j) {
+ t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestArrayOfAlg(t *testing.T) {
+ at := ArrayOf(6, TypeOf(byte(0)))
+ v1 := New(at).Elem()
+ v2 := New(at).Elem()
+ if v1.Interface() != v1.Interface() {
+ t.Errorf("constructed array %v not equal to itself", v1.Interface())
+ }
+ v1.Index(5).Set(ValueOf(byte(1)))
+ if i1, i2 := v1.Interface(), v2.Interface(); i1 == i2 {
+ t.Errorf("constructed arrays %v and %v should not be equal", i1, i2)
+ }
+
+ at = ArrayOf(6, TypeOf([]int(nil)))
+ v1 = New(at).Elem()
+ shouldPanic("", func() { _ = v1.Interface() == v1.Interface() })
+}
+
+func TestArrayOfGenericAlg(t *testing.T) {
+ at1 := ArrayOf(5, TypeOf(string("")))
+ at := ArrayOf(6, at1)
+ v1 := New(at).Elem()
+ v2 := New(at).Elem()
+ if v1.Interface() != v1.Interface() {
+ t.Errorf("constructed array %v not equal to itself", v1.Interface())
+ }
+
+ v1.Index(0).Index(0).Set(ValueOf("abc"))
+ v2.Index(0).Index(0).Set(ValueOf("efg"))
+ if i1, i2 := v1.Interface(), v2.Interface(); i1 == i2 {
+ t.Errorf("constructed arrays %v and %v should not be equal", i1, i2)
+ }
+
+ v1.Index(0).Index(0).Set(ValueOf("abc"))
+ v2.Index(0).Index(0).Set(ValueOf((v1.Index(0).Index(0).String() + " ")[:3]))
+ if i1, i2 := v1.Interface(), v2.Interface(); i1 != i2 {
+ t.Errorf("constructed arrays %v and %v should be equal", i1, i2)
+ }
+
+ // Test hash
+ m := MakeMap(MapOf(at, TypeOf(int(0))))
+ m.SetMapIndex(v1, ValueOf(1))
+ if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() {
+ t.Errorf("constructed arrays %v and %v have different hashes", i1, i2)
+ }
+}
+
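+ // A [1]*byte value is pointer-shaped, so it is stored directly in the
+ // interface data word and the zero value's data word is 0. A [0]*byte
+ // value is not pointer-shaped, so the interface holds a pointer to the
+ // (zero-sized) value instead and the data word is non-zero.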
+func TestArrayOfDirectIface(t *testing.T) {
+ {
+ type T [1]*byte
+ i1 := Zero(TypeOf(T{})).Interface()
+ v1 := ValueOf(&i1).Elem()
+ p1 := v1.InterfaceData()[1]
+
+ i2 := Zero(ArrayOf(1, PointerTo(TypeOf(int8(0))))).Interface()
+ v2 := ValueOf(&i2).Elem()
+ p2 := v2.InterfaceData()[1]
+
+ if p1 != 0 {
+ t.Errorf("got p1=%v. want=%v", p1, nil)
+ }
+
+ if p2 != 0 {
+ t.Errorf("got p2=%v. want=%v", p2, nil)
+ }
+ }
+ {
+ type T [0]*byte
+ i1 := Zero(TypeOf(T{})).Interface()
+ v1 := ValueOf(&i1).Elem()
+ p1 := v1.InterfaceData()[1]
+
+ i2 := Zero(ArrayOf(0, PointerTo(TypeOf(int8(0))))).Interface()
+ v2 := ValueOf(&i2).Elem()
+ p2 := v2.InterfaceData()[1]
+
+ if p1 == 0 {
+ t.Errorf("got p1=%v. want=not-%v", p1, nil)
+ }
+
+ if p2 == 0 {
+ t.Errorf("got p2=%v. want=not-%v", p2, nil)
+ }
+ }
+}
+
+// Ensure passing in negative lengths panics.
+// See https://golang.org/issue/43603
+func TestArrayOfPanicOnNegativeLength(t *testing.T) {
+ shouldPanic("reflect: negative length passed to ArrayOf", func() {
+ ArrayOf(-1, TypeOf(byte(0)))
+ })
+}
+
+func TestSliceOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type T int
+ st := SliceOf(TypeOf(T(1)))
+ if got, want := st.String(), "[]reflect_test.T"; got != want {
+ t.Errorf("SliceOf(T(1)).String()=%q, want %q", got, want)
+ }
+ v := MakeSlice(st, 10, 10)
+ runtime.GC()
+ for i := 0; i < v.Len(); i++ {
+ v.Index(i).Set(ValueOf(T(i)))
+ runtime.GC()
+ }
+ s := fmt.Sprint(v.Interface())
+ want := "[0 1 2 3 4 5 6 7 8 9]"
+ if s != want {
+ t.Errorf("constructed slice = %s, want %s", s, want)
+ }
+
+ // check that type already in binary is found
+ type T1 int
+ checkSameType(t, SliceOf(TypeOf(T1(1))), []T1{})
+}
+
+func TestSliceOverflow(t *testing.T) {
+ // check that MakeSlice panics when size of slice overflows uint
+ const S = 1e6
+ s := uint(S)
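+ // l is the smallest element count whose total byte size l*s exceeds
+ // the uint range (taken as the pointer width), so the multiplication
+ // below wraps around.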
+ l := (1<<(unsafe.Sizeof((*byte)(nil))*8)-1)/s + 1
+ if l*s >= s {
+ t.Fatal("slice size does not overflow")
+ }
+ var x [S]byte
+ st := SliceOf(TypeOf(x))
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Fatal("slice overflow does not panic")
+ }
+ }()
+ MakeSlice(st, int(l), int(l))
+}
+
+func TestSliceOfGC(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ st := SliceOf(tt)
+ const n = 100
+ var x []any
+ for i := 0; i < n; i++ {
+ v := MakeSlice(st, n, n)
+ for j := 0; j < v.Len(); j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.Index(j).Set(ValueOf(p).Convert(tt))
+ }
+ x = append(x, v.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi)
+ for j := 0; j < v.Len(); j++ {
+ k := v.Index(j).Elem().Interface()
+ if k != uintptr(i*n+j) {
+ t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestStructOfFieldName(t *testing.T) {
+ // invalid field name "1nvalid"
+ shouldPanic("has invalid name", func() {
+ StructOf([]StructField{
+ {Name: "Valid", Type: TypeOf("")},
+ {Name: "1nvalid", Type: TypeOf("")},
+ })
+ })
+
+ // invalid field name "+"
+ shouldPanic("has invalid name", func() {
+ StructOf([]StructField{
+ {Name: "Val1d", Type: TypeOf("")},
+ {Name: "+", Type: TypeOf("")},
+ })
+ })
+
+ // no field name
+ shouldPanic("has no name", func() {
+ StructOf([]StructField{
+ {Name: "", Type: TypeOf("")},
+ })
+ })
+
+ // verify creation of a struct with valid struct fields
+ validFields := []StructField{
+ {
+ Name: "φ",
+ Type: TypeOf(""),
+ },
+ {
+ Name: "ValidName",
+ Type: TypeOf(""),
+ },
+ {
+ Name: "Val1dNam5",
+ Type: TypeOf(""),
+ },
+ }
+
+ validStruct := StructOf(validFields)
+
+ const structStr = `struct { φ string; ValidName string; Val1dNam5 string }`
+ if got, want := validStruct.String(), structStr; got != want {
+ t.Errorf("StructOf(validFields).String()=%q, want %q", got, want)
+ }
+}
+
+func TestStructOf(t *testing.T) {
+ // check construction and use of type not in binary
+ fields := []StructField{
+ {
+ Name: "S",
+ Tag: "s",
+ Type: TypeOf(""),
+ },
+ {
+ Name: "X",
+ Tag: "x",
+ Type: TypeOf(byte(0)),
+ },
+ {
+ Name: "Y",
+ Type: TypeOf(uint64(0)),
+ },
+ {
+ Name: "Z",
+ Type: TypeOf([3]uint16{}),
+ },
+ }
+
+ st := StructOf(fields)
+ v := New(st).Elem()
+ runtime.GC()
+ v.FieldByName("X").Set(ValueOf(byte(2)))
+ v.FieldByIndex([]int{1}).Set(ValueOf(byte(1)))
+ runtime.GC()
+
+ s := fmt.Sprint(v.Interface())
+ want := `{ 1 0 [0 0 0]}`
+ if s != want {
+ t.Errorf("constructed struct = %s, want %s", s, want)
+ }
+ const stStr = `struct { S string "s"; X uint8 "x"; Y uint64; Z [3]uint16 }`
+ if got, want := st.String(), stStr; got != want {
+ t.Errorf("StructOf(fields).String()=%q, want %q", got, want)
+ }
+
+ // check the size, alignment and field offsets
+ stt := TypeOf(struct {
+ String string
+ X byte
+ Y uint64
+ Z [3]uint16
+ }{})
+ if st.Size() != stt.Size() {
+ t.Errorf("constructed struct size = %v, want %v", st.Size(), stt.Size())
+ }
+ if st.Align() != stt.Align() {
+ t.Errorf("constructed struct align = %v, want %v", st.Align(), stt.Align())
+ }
+ if st.FieldAlign() != stt.FieldAlign() {
+ t.Errorf("constructed struct field align = %v, want %v", st.FieldAlign(), stt.FieldAlign())
+ }
+ for i := 0; i < st.NumField(); i++ {
+ o1 := st.Field(i).Offset
+ o2 := stt.Field(i).Offset
+ if o1 != o2 {
+ t.Errorf("constructed struct field %v offset = %v, want %v", i, o1, o2)
+ }
+ }
+
+ // Check size and alignment with a trailing zero-sized field.
+ st = StructOf([]StructField{
+ {
+ Name: "F1",
+ Type: TypeOf(byte(0)),
+ },
+ {
+ Name: "F2",
+ Type: TypeOf([0]*byte{}),
+ },
+ })
+ stt = TypeOf(struct {
+ G1 byte
+ G2 [0]*byte
+ }{})
+ if st.Size() != stt.Size() {
+ t.Errorf("constructed zero-padded struct size = %v, want %v", st.Size(), stt.Size())
+ }
+ if st.Align() != stt.Align() {
+ t.Errorf("constructed zero-padded struct align = %v, want %v", st.Align(), stt.Align())
+ }
+ if st.FieldAlign() != stt.FieldAlign() {
+ t.Errorf("constructed zero-padded struct field align = %v, want %v", st.FieldAlign(), stt.FieldAlign())
+ }
+ for i := 0; i < st.NumField(); i++ {
+ o1 := st.Field(i).Offset
+ o2 := stt.Field(i).Offset
+ if o1 != o2 {
+ t.Errorf("constructed zero-padded struct field %v offset = %v, want %v", i, o1, o2)
+ }
+ }
+
+ // check duplicate names
+ shouldPanic("duplicate field", func() {
+ StructOf([]StructField{
+ {Name: "string", PkgPath: "p", Type: TypeOf("")},
+ {Name: "string", PkgPath: "p", Type: TypeOf("")},
+ })
+ })
+ shouldPanic("has no name", func() {
+ StructOf([]StructField{
+ {Type: TypeOf("")},
+ {Name: "string", PkgPath: "p", Type: TypeOf("")},
+ })
+ })
+ shouldPanic("has no name", func() {
+ StructOf([]StructField{
+ {Type: TypeOf("")},
+ {Type: TypeOf("")},
+ })
+ })
+ // check that type already in binary is found
+ checkSameType(t, StructOf(fields[2:3]), struct{ Y uint64 }{})
+
+ // gccgo used to fail this test.
+ type structFieldType any
+ checkSameType(t,
+ StructOf([]StructField{
+ {
+ Name: "F",
+ Type: TypeOf((*structFieldType)(nil)).Elem(),
+ },
+ }),
+ struct{ F structFieldType }{})
+}
+
+func TestStructOfExportRules(t *testing.T) {
+ type S1 struct{}
+ type s2 struct{}
+ type ΦType struct{}
+ type φType struct{}
+
+ testPanic := func(i int, mustPanic bool, f func()) {
+ defer func() {
+ err := recover()
+ if err == nil && mustPanic {
+ t.Errorf("test-%d did not panic", i)
+ }
+ if err != nil && !mustPanic {
+ t.Errorf("test-%d panicked: %v\n", i, err)
+ }
+ }()
+ f()
+ }
+
+ tests := []struct {
+ field StructField
+ mustPanic bool
+ exported bool
+ }{
+ {
+ field: StructField{Name: "S1", Anonymous: true, Type: TypeOf(S1{})},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "S1", Anonymous: true, Type: TypeOf((*S1)(nil))},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "s2", Anonymous: true, Type: TypeOf(s2{})},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s2", Anonymous: true, Type: TypeOf((*s2)(nil))},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "Name", Type: nil, PkgPath: ""},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "", Type: TypeOf(S1{}), PkgPath: ""},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "S1", Anonymous: true, Type: TypeOf(S1{}), PkgPath: "other/pkg"},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "S1", Anonymous: true, Type: TypeOf((*S1)(nil)), PkgPath: "other/pkg"},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s2", Anonymous: true, Type: TypeOf(s2{}), PkgPath: "other/pkg"},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s2", Anonymous: true, Type: TypeOf((*s2)(nil)), PkgPath: "other/pkg"},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s2", Type: TypeOf(int(0)), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "s2", Type: TypeOf(int(0)), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "S", Type: TypeOf(S1{})},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "S", Type: TypeOf((*S1)(nil))},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "S", Type: TypeOf(s2{})},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "S", Type: TypeOf((*s2)(nil))},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf(S1{})},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf((*S1)(nil))},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf(s2{})},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf((*s2)(nil))},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf(S1{}), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf((*S1)(nil)), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf(s2{}), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf((*s2)(nil)), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "", Type: TypeOf(ΦType{})},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "", Type: TypeOf(φType{})},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "Φ", Type: TypeOf(0)},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "φ", Type: TypeOf(0)},
+ exported: false,
+ },
+ }
+
+ for i, test := range tests {
+ testPanic(i, test.mustPanic, func() {
+ typ := StructOf([]StructField{test.field})
+ if typ == nil {
+ t.Errorf("test-%d: error creating struct type", i)
+ return
+ }
+ field := typ.Field(0)
+ n := field.Name
+ if n == "" {
+ panic("field.Name must not be empty")
+ }
+ exported := token.IsExported(n)
+ if exported != test.exported {
+ t.Errorf("test-%d: got exported=%v want exported=%v", i, exported, test.exported)
+ }
+ if field.PkgPath != test.field.PkgPath {
+ t.Errorf("test-%d: got PkgPath=%q want pkgPath=%q", i, field.PkgPath, test.field.PkgPath)
+ }
+ })
+ }
+}
+
+func TestStructOfGC(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ fields := []StructField{
+ {Name: "X", Type: tt},
+ {Name: "Y", Type: tt},
+ }
+ st := StructOf(fields)
+
+ const n = 10000
+ var x []any
+ for i := 0; i < n; i++ {
+ v := New(st).Elem()
+ for j := 0; j < v.NumField(); j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.Field(j).Set(ValueOf(p).Convert(tt))
+ }
+ x = append(x, v.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi)
+ for j := 0; j < v.NumField(); j++ {
+ k := v.Field(j).Elem().Interface()
+ if k != uintptr(i*n+j) {
+ t.Errorf("lost x[%d].%c = %d, want %d", i, "XY"[j], k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestStructOfAlg(t *testing.T) {
+ st := StructOf([]StructField{{Name: "X", Tag: "x", Type: TypeOf(int(0))}})
+ v1 := New(st).Elem()
+ v2 := New(st).Elem()
+ if !DeepEqual(v1.Interface(), v1.Interface()) {
+ t.Errorf("constructed struct %v not equal to itself", v1.Interface())
+ }
+ v1.FieldByName("X").Set(ValueOf(int(1)))
+ if i1, i2 := v1.Interface(), v2.Interface(); DeepEqual(i1, i2) {
+ t.Errorf("constructed structs %v and %v should not be equal", i1, i2)
+ }
+
+ st = StructOf([]StructField{{Name: "X", Tag: "x", Type: TypeOf([]int(nil))}})
+ v1 = New(st).Elem()
+ shouldPanic("", func() { _ = v1.Interface() == v1.Interface() })
+}
+
+func TestStructOfGenericAlg(t *testing.T) {
+ st1 := StructOf([]StructField{
+ {Name: "X", Tag: "x", Type: TypeOf(int64(0))},
+ {Name: "Y", Type: TypeOf(string(""))},
+ })
+ st := StructOf([]StructField{
+ {Name: "S0", Type: st1},
+ {Name: "S1", Type: st1},
+ })
+
+ tests := []struct {
+ rt Type
+ idx []int
+ }{
+ {
+ rt: st,
+ idx: []int{0, 1},
+ },
+ {
+ rt: st1,
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf([0]int{})},
+ {Name: "YY", Type: TypeOf("")},
+ },
+ ),
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf([0]int{})},
+ {Name: "YY", Type: TypeOf("")},
+ {Name: "ZZ", Type: TypeOf([2]int{})},
+ },
+ ),
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf([1]int{})},
+ {Name: "YY", Type: TypeOf("")},
+ },
+ ),
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf([1]int{})},
+ {Name: "YY", Type: TypeOf("")},
+ {Name: "ZZ", Type: TypeOf([1]int{})},
+ },
+ ),
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf([2]int{})},
+ {Name: "YY", Type: TypeOf("")},
+ {Name: "ZZ", Type: TypeOf([2]int{})},
+ },
+ ),
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf(int64(0))},
+ {Name: "YY", Type: TypeOf(byte(0))},
+ {Name: "ZZ", Type: TypeOf("")},
+ },
+ ),
+ idx: []int{2},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf(int64(0))},
+ {Name: "YY", Type: TypeOf(int64(0))},
+ {Name: "ZZ", Type: TypeOf("")},
+ {Name: "AA", Type: TypeOf([1]int64{})},
+ },
+ ),
+ idx: []int{2},
+ },
+ }
+
+ for _, table := range tests {
+ v1 := New(table.rt).Elem()
+ v2 := New(table.rt).Elem()
+
+ if !DeepEqual(v1.Interface(), v1.Interface()) {
+ t.Errorf("constructed struct %v not equal to itself", v1.Interface())
+ }
+
+ v1.FieldByIndex(table.idx).Set(ValueOf("abc"))
+ v2.FieldByIndex(table.idx).Set(ValueOf("def"))
+ if i1, i2 := v1.Interface(), v2.Interface(); DeepEqual(i1, i2) {
+ t.Errorf("constructed structs %v and %v should not be equal", i1, i2)
+ }
+
+ abc := "abc"
+ v1.FieldByIndex(table.idx).Set(ValueOf(abc))
+ val := "+" + abc + "-"
+ v2.FieldByIndex(table.idx).Set(ValueOf(val[1:4]))
+ if i1, i2 := v1.Interface(), v2.Interface(); !DeepEqual(i1, i2) {
+ t.Errorf("constructed structs %v and %v should be equal", i1, i2)
+ }
+
+ // Test hash
+ m := MakeMap(MapOf(table.rt, TypeOf(int(0))))
+ m.SetMapIndex(v1, ValueOf(1))
+ if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() {
+ t.Errorf("constructed structs %#v and %#v have different hashes", i1, i2)
+ }
+
+ v2.FieldByIndex(table.idx).Set(ValueOf("abc"))
+ if i1, i2 := v1.Interface(), v2.Interface(); !DeepEqual(i1, i2) {
+ t.Errorf("constructed structs %v and %v should be equal", i1, i2)
+ }
+
+ if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() {
+ t.Errorf("constructed structs %v and %v have different hashes", i1, i2)
+ }
+ }
+}
+
+func TestStructOfDirectIface(t *testing.T) {
+ {
+ type T struct{ X [1]*byte }
+ i1 := Zero(TypeOf(T{})).Interface()
+ v1 := ValueOf(&i1).Elem()
+ p1 := v1.InterfaceData()[1]
+
+ i2 := Zero(StructOf([]StructField{
+ {
+ Name: "X",
+ Type: ArrayOf(1, TypeOf((*int8)(nil))),
+ },
+ })).Interface()
+ v2 := ValueOf(&i2).Elem()
+ p2 := v2.InterfaceData()[1]
+
+ if p1 != 0 {
+ t.Errorf("got p1=%v. want=%v", p1, nil)
+ }
+
+ if p2 != 0 {
+ t.Errorf("got p2=%v. want=%v", p2, nil)
+ }
+ }
+ {
+ type T struct{ X [0]*byte }
+ i1 := Zero(TypeOf(T{})).Interface()
+ v1 := ValueOf(&i1).Elem()
+ p1 := v1.InterfaceData()[1]
+
+ i2 := Zero(StructOf([]StructField{
+ {
+ Name: "X",
+ Type: ArrayOf(0, TypeOf((*int8)(nil))),
+ },
+ })).Interface()
+ v2 := ValueOf(&i2).Elem()
+ p2 := v2.InterfaceData()[1]
+
+ if p1 == 0 {
+ t.Errorf("got p1=%v. want=not-%v", p1, nil)
+ }
+
+ if p2 == 0 {
+ t.Errorf("got p2=%v. want=not-%v", p2, nil)
+ }
+ }
+}
+
+type StructI int
+
+func (i StructI) Get() int { return int(i) }
+
+type StructIPtr int
+
+func (i *StructIPtr) Get() int { return int(*i) }
+func (i *StructIPtr) Set(v int) { *(*int)(i) = v }
+
+type SettableStruct struct {
+ SettableField int
+}
+
+func (p *SettableStruct) Set(v int) { p.SettableField = v }
+
+type SettablePointer struct {
+ SettableField *int
+}
+
+func (p *SettablePointer) Set(v int) { *p.SettableField = v }
+
+func TestStructOfWithInterface(t *testing.T) {
+ const want = 42
+ type Iface interface {
+ Get() int
+ }
+ type IfaceSet interface {
+ Set(int)
+ }
+ tests := []struct {
+ name string
+ typ Type
+ val Value
+ impl bool
+ }{
+ {
+ name: "StructI",
+ typ: TypeOf(StructI(want)),
+ val: ValueOf(StructI(want)),
+ impl: true,
+ },
+ {
+ name: "StructI",
+ typ: PointerTo(TypeOf(StructI(want))),
+ val: ValueOf(func() any {
+ v := StructI(want)
+ return &v
+ }()),
+ impl: true,
+ },
+ {
+ name: "StructIPtr",
+ typ: PointerTo(TypeOf(StructIPtr(want))),
+ val: ValueOf(func() any {
+ v := StructIPtr(want)
+ return &v
+ }()),
+ impl: true,
+ },
+ {
+ name: "StructIPtr",
+ typ: TypeOf(StructIPtr(want)),
+ val: ValueOf(StructIPtr(want)),
+ impl: false,
+ },
+ // {
+ // typ: TypeOf((*Iface)(nil)).Elem(), // FIXME(sbinet): fix method.ifn/tfn
+ // val: ValueOf(StructI(want)),
+ // impl: true,
+ // },
+ }
+
+ for i, table := range tests {
+ for j := 0; j < 2; j++ {
+ var fields []StructField
+ if j == 1 {
+ fields = append(fields, StructField{
+ Name: "Dummy",
+ PkgPath: "",
+ Type: TypeOf(int(0)),
+ })
+ }
+ fields = append(fields, StructField{
+ Name: table.name,
+ Anonymous: true,
+ PkgPath: "",
+ Type: table.typ,
+ })
+
+ // We currently do not correctly implement methods
+ // for embedded fields other than the first.
+ // Therefore, for now, we expect those methods
+ // to not exist. See issues 15924 and 20824.
+ // When those issues are fixed, this test of panic
+ // should be removed.
+ if j == 1 && table.impl {
+ func() {
+ defer func() {
+ if err := recover(); err == nil {
+ t.Errorf("test-%d-%d did not panic", i, j)
+ }
+ }()
+ _ = StructOf(fields)
+ }()
+ continue
+ }
+
+ rt := StructOf(fields)
+ rv := New(rt).Elem()
+ rv.Field(j).Set(table.val)
+
+ if _, ok := rv.Interface().(Iface); ok != table.impl {
+ if table.impl {
+ t.Errorf("test-%d-%d: type=%v fails to implement Iface.\n", i, j, table.typ)
+ } else {
+ t.Errorf("test-%d-%d: type=%v should NOT implement Iface\n", i, j, table.typ)
+ }
+ continue
+ }
+
+ if !table.impl {
+ continue
+ }
+
+ v := rv.Interface().(Iface).Get()
+ if v != want {
+ t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, v, want)
+ }
+
+ fct := rv.MethodByName("Get")
+ out := fct.Call(nil)
+ if !DeepEqual(out[0].Interface(), want) {
+ t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, out[0].Interface(), want)
+ }
+ }
+ }
+
+ // Test an embedded nil pointer with pointer methods.
+ fields := []StructField{{
+ Name: "StructIPtr",
+ Anonymous: true,
+ Type: PointerTo(TypeOf(StructIPtr(want))),
+ }}
+ rt := StructOf(fields)
+ rv := New(rt).Elem()
+ // This should panic since the pointer is nil.
+ shouldPanic("", func() {
+ rv.Interface().(IfaceSet).Set(want)
+ })
+
+ // Test an embedded nil pointer to a struct with pointer methods.
+
+ fields = []StructField{{
+ Name: "SettableStruct",
+ Anonymous: true,
+ Type: PointerTo(TypeOf(SettableStruct{})),
+ }}
+ rt = StructOf(fields)
+ rv = New(rt).Elem()
+ // This should panic since the pointer is nil.
+ shouldPanic("", func() {
+ rv.Interface().(IfaceSet).Set(want)
+ })
+
+ // The behavior is different if there is a second field,
+ // since now an interface value holds a pointer to the struct
+ // rather than just holding a copy of the struct.
+ fields = []StructField{
+ {
+ Name: "SettableStruct",
+ Anonymous: true,
+ Type: PointerTo(TypeOf(SettableStruct{})),
+ },
+ {
+ Name: "EmptyStruct",
+ Anonymous: true,
+ Type: StructOf(nil),
+ },
+ }
+ // With the current implementation this is expected to panic.
+ // Ideally it should work and we should be able to see a panic
+ // if we call the Set method.
+ shouldPanic("", func() {
+ StructOf(fields)
+ })
+
+ // Embed a field that can be stored directly in an interface,
+ // with a second field.
+ fields = []StructField{
+ {
+ Name: "SettablePointer",
+ Anonymous: true,
+ Type: TypeOf(SettablePointer{}),
+ },
+ {
+ Name: "EmptyStruct",
+ Anonymous: true,
+ Type: StructOf(nil),
+ },
+ }
+ // With the current implementation this is expected to panic.
+ // Ideally it should work and we should be able to call the
+ // Set and Get methods.
+ shouldPanic("", func() {
+ StructOf(fields)
+ })
+}
+
+func TestStructOfTooManyFields(t *testing.T) {
+ // Bug Fix: #25402 - this should not panic
+ tt := StructOf([]StructField{
+ {Name: "Time", Type: TypeOf(time.Time{}), Anonymous: true},
+ })
+
+ if _, present := tt.MethodByName("After"); !present {
+ t.Errorf("Expected method `After` to be found")
+ }
+}
+
+func TestStructOfDifferentPkgPath(t *testing.T) {
+ fields := []StructField{
+ {
+ Name: "f1",
+ PkgPath: "p1",
+ Type: TypeOf(int(0)),
+ },
+ {
+ Name: "f2",
+ PkgPath: "p2",
+ Type: TypeOf(int(0)),
+ },
+ }
+ shouldPanic("different PkgPath", func() {
+ StructOf(fields)
+ })
+}
+
+func TestStructOfTooLarge(t *testing.T) {
+ t1 := TypeOf(byte(0))
+ t2 := TypeOf(int16(0))
+ t4 := TypeOf(int32(0))
+ t0 := ArrayOf(0, t1)
+
+ // 2^64-3 sized type (or 2^32-3 on 32-bit archs)
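+ // On 64-bit, F1 is 2^63-1 bytes and F2 is 2^63-2 bytes, so bigType is
+ // 2^64-3 bytes: two more bytes still fit in the address space, three
+ // overflow it.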
+ bigType := StructOf([]StructField{
+ {Name: "F1", Type: ArrayOf(int(^uintptr(0)>>1), t1)},
+ {Name: "F2", Type: ArrayOf(int(^uintptr(0)>>1-1), t1)},
+ })
+
+ type test struct {
+ shouldPanic bool
+ fields []StructField
+ }
+
+ tests := [...]test{
+ {
+ shouldPanic: false, // 2^64-1, ok
+ fields: []StructField{
+ {Name: "F1", Type: bigType},
+ {Name: "F2", Type: ArrayOf(2, t1)},
+ },
+ },
+ {
+ shouldPanic: true, // overflow in total size
+ fields: []StructField{
+ {Name: "F1", Type: bigType},
+ {Name: "F2", Type: ArrayOf(3, t1)},
+ },
+ },
+ {
+ shouldPanic: true, // overflow while aligning F2
+ fields: []StructField{
+ {Name: "F1", Type: bigType},
+ {Name: "F2", Type: t4},
+ },
+ },
+ {
+ shouldPanic: true, // overflow while adding trailing byte for zero-sized fields
+ fields: []StructField{
+ {Name: "F1", Type: bigType},
+ {Name: "F2", Type: ArrayOf(2, t1)},
+ {Name: "F3", Type: t0},
+ },
+ },
+ {
+ shouldPanic: true, // overflow while aligning total size
+ fields: []StructField{
+ {Name: "F1", Type: t2},
+ {Name: "F2", Type: bigType},
+ },
+ },
+ }
+
+ for i, tt := range tests {
+ func() {
+ defer func() {
+ err := recover()
+ if !tt.shouldPanic {
+ if err != nil {
+ t.Errorf("test %d should not panic, got %s", i, err)
+ }
+ return
+ }
+ if err == nil {
+ t.Errorf("test %d expected to panic", i)
+ return
+ }
+ s := fmt.Sprintf("%s", err)
+ if s != "reflect.StructOf: struct size would exceed virtual address space" {
+ t.Errorf("test %d wrong panic message: %s", i, s)
+ return
+ }
+ }()
+ _ = StructOf(tt.fields)
+ }()
+ }
+}
+
+func TestChanOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type T string
+ ct := ChanOf(BothDir, TypeOf(T("")))
+ v := MakeChan(ct, 2)
+ runtime.GC()
+ v.Send(ValueOf(T("hello")))
+ runtime.GC()
+ v.Send(ValueOf(T("world")))
+ runtime.GC()
+
+ sv1, _ := v.Recv()
+ sv2, _ := v.Recv()
+ s1 := sv1.String()
+ s2 := sv2.String()
+ if s1 != "hello" || s2 != "world" {
+ t.Errorf("constructed chan: have %q, %q, want %q, %q", s1, s2, "hello", "world")
+ }
+
+ // check that type already in binary is found
+ type T1 int
+ checkSameType(t, ChanOf(BothDir, TypeOf(T1(1))), (chan T1)(nil))
+
+ // Check arrow token association in undefined chan types.
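+ // chan<- chan T associates as chan<- (chan T); binding the arrow to
+ // the inner channel requires explicit parentheses, chan (<-chan T).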
+ var left chan<- chan T
+ var right chan (<-chan T)
+ tLeft := ChanOf(SendDir, ChanOf(BothDir, TypeOf(T(""))))
+ tRight := ChanOf(BothDir, ChanOf(RecvDir, TypeOf(T(""))))
+ if tLeft != TypeOf(left) {
+ t.Errorf("chan<-chan: have %s, want %T", tLeft, left)
+ }
+ if tRight != TypeOf(right) {
+ t.Errorf("chan<-chan: have %s, want %T", tRight, right)
+ }
+}
+
+func TestChanOfDir(t *testing.T) {
+ // check construction and use of type not in binary
+ type T string
+ crt := ChanOf(RecvDir, TypeOf(T("")))
+ cst := ChanOf(SendDir, TypeOf(T("")))
+
+ // check that type already in binary is found
+ type T1 int
+ checkSameType(t, ChanOf(RecvDir, TypeOf(T1(1))), (<-chan T1)(nil))
+ checkSameType(t, ChanOf(SendDir, TypeOf(T1(1))), (chan<- T1)(nil))
+
+ // check String form of ChanDir
+ if crt.ChanDir().String() != "<-chan" {
+ t.Errorf("chan dir: have %q, want %q", crt.ChanDir().String(), "<-chan")
+ }
+ if cst.ChanDir().String() != "chan<-" {
+ t.Errorf("chan dir: have %q, want %q", cst.ChanDir().String(), "chan<-")
+ }
+}
+
+func TestChanOfGC(t *testing.T) {
+ done := make(chan bool, 1)
+ go func() {
+ select {
+ case <-done:
+ case <-time.After(5 * time.Second):
+ panic("deadlock in TestChanOfGC")
+ }
+ }()
+
+ defer func() {
+ done <- true
+ }()
+
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ ct := ChanOf(BothDir, tt)
+
+ // NOTE: The garbage collector handles allocated channels specially,
+ // so we have to save pointers to channels in x; the pointer code will
+ // use the gc info in the newly constructed chan type.
+ const n = 100
+ var x []any
+ for i := 0; i < n; i++ {
+ v := MakeChan(ct, n)
+ for j := 0; j < n; j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.Send(ValueOf(p).Convert(tt))
+ }
+ pv := New(ct)
+ pv.Elem().Set(v)
+ x = append(x, pv.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi).Elem()
+ for j := 0; j < n; j++ {
+ pv, _ := v.Recv()
+ k := pv.Elem().Interface()
+ if k != uintptr(i*n+j) {
+ t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestMapOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type K string
+ type V float64
+
+ v := MakeMap(MapOf(TypeOf(K("")), TypeOf(V(0))))
+ runtime.GC()
+ v.SetMapIndex(ValueOf(K("a")), ValueOf(V(1)))
+ runtime.GC()
+
+ s := fmt.Sprint(v.Interface())
+ want := "map[a:1]"
+ if s != want {
+ t.Errorf("constructed map = %s, want %s", s, want)
+ }
+
+ // check that type already in binary is found
+ checkSameType(t, MapOf(TypeOf(V(0)), TypeOf(K(""))), map[V]K(nil))
+
+ // check that invalid key type panics
+ shouldPanic("invalid key type", func() { MapOf(TypeOf((func())(nil)), TypeOf(false)) })
+}
+
+func TestMapOfGCKeys(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ mt := MapOf(tt, TypeOf(false))
+
+ // NOTE: The garbage collector handles allocated maps specially,
+ // so we have to save pointers to maps in x; the pointer code will
+ // use the gc info in the newly constructed map type.
+ const n = 100
+ var x []any
+ for i := 0; i < n; i++ {
+ v := MakeMap(mt)
+ for j := 0; j < n; j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.SetMapIndex(ValueOf(p).Convert(tt), ValueOf(true))
+ }
+ pv := New(mt)
+ pv.Elem().Set(v)
+ x = append(x, pv.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi).Elem()
+ var out []int
+ for _, kv := range v.MapKeys() {
+ out = append(out, int(kv.Elem().Interface().(uintptr)))
+ }
+ sort.Ints(out)
+ for j, k := range out {
+ if k != i*n+j {
+ t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestMapOfGCValues(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ mt := MapOf(TypeOf(1), tt)
+
+ // NOTE: The garbage collector handles allocated maps specially,
+ // so we have to save pointers to maps in x; the pointer code will
+ // use the gc info in the newly constructed map type.
+ const n = 100
+ var x []any
+ for i := 0; i < n; i++ {
+ v := MakeMap(mt)
+ for j := 0; j < n; j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.SetMapIndex(ValueOf(j), ValueOf(p).Convert(tt))
+ }
+ pv := New(mt)
+ pv.Elem().Set(v)
+ x = append(x, pv.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi).Elem()
+ for j := 0; j < n; j++ {
+ k := v.MapIndex(ValueOf(j)).Elem().Interface().(uintptr)
+ if k != uintptr(i*n+j) {
+ t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestTypelinksSorted(t *testing.T) {
+ var last string
+ for i, n := range TypeLinks() {
+ if n < last {
+ t.Errorf("typelinks not sorted: %q [%d] > %q [%d]", last, i-1, n, i)
+ }
+ last = n
+ }
+}
+
+func TestFuncOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type K string
+ type V float64
+
+ fn := func(args []Value) []Value {
+ if len(args) != 1 {
+ t.Errorf("args == %v, want exactly one arg", args)
+ } else if args[0].Type() != TypeOf(K("")) {
+ t.Errorf("args[0] is type %v, want %v", args[0].Type(), TypeOf(K("")))
+ } else if args[0].String() != "gopher" {
+ t.Errorf("args[0] = %q, want %q", args[0].String(), "gopher")
+ }
+ return []Value{ValueOf(V(3.14))}
+ }
+ v := MakeFunc(FuncOf([]Type{TypeOf(K(""))}, []Type{TypeOf(V(0))}, false), fn)
+
+ outs := v.Call([]Value{ValueOf(K("gopher"))})
+ if len(outs) != 1 {
+ t.Fatalf("v.Call returned %v, want exactly one result", outs)
+ } else if outs[0].Type() != TypeOf(V(0)) {
+ t.Fatalf("c.Call[0] is type %v, want %v", outs[0].Type(), TypeOf(V(0)))
+ }
+ f := outs[0].Float()
+ if f != 3.14 {
+ t.Errorf("constructed func returned %f, want %f", f, 3.14)
+ }
+
+ // check that types already in binary are found
+ type T1 int
+ testCases := []struct {
+ in, out []Type
+ variadic bool
+ want any
+ }{
+ {in: []Type{TypeOf(T1(0))}, want: (func(T1))(nil)},
+ {in: []Type{TypeOf(int(0))}, want: (func(int))(nil)},
+ {in: []Type{SliceOf(TypeOf(int(0)))}, variadic: true, want: (func(...int))(nil)},
+ {in: []Type{TypeOf(int(0))}, out: []Type{TypeOf(false)}, want: (func(int) bool)(nil)},
+ {in: []Type{TypeOf(int(0))}, out: []Type{TypeOf(false), TypeOf("")}, want: (func(int) (bool, string))(nil)},
+ }
+ for _, tt := range testCases {
+ checkSameType(t, FuncOf(tt.in, tt.out, tt.variadic), tt.want)
+ }
+
+ // check that variadic requires last element be a slice.
+ FuncOf([]Type{TypeOf(1), TypeOf(""), SliceOf(TypeOf(false))}, nil, true)
+ shouldPanic("must be slice", func() { FuncOf([]Type{TypeOf(0), TypeOf(""), TypeOf(false)}, nil, true) })
+ shouldPanic("must be slice", func() { FuncOf(nil, nil, true) })
+
+ // test case for #54669
+ var in []Type
+ for i := 0; i < 51; i++ {
+ in = append(in, TypeOf(1))
+ }
+ FuncOf(in, nil, false)
+}
+
+type R0 struct {
+ *R1
+ *R2
+ *R3
+ *R4
+}
+
+type R1 struct {
+ *R5
+ *R6
+ *R7
+ *R8
+}
+
+type R2 R1
+type R3 R1
+type R4 R1
+
+type R5 struct {
+ *R9
+ *R10
+ *R11
+ *R12
+}
+
+type R6 R5
+type R7 R5
+type R8 R5
+
+type R9 struct {
+ *R13
+ *R14
+ *R15
+ *R16
+}
+
+type R10 R9
+type R11 R9
+type R12 R9
+
+type R13 struct {
+ *R17
+ *R18
+ *R19
+ *R20
+}
+
+type R14 R13
+type R15 R13
+type R16 R13
+
+type R17 struct {
+ *R21
+ *R22
+ *R23
+ *R24
+}
+
+type R18 R17
+type R19 R17
+type R20 R17
+
+type R21 struct {
+ X int
+}
+
+type R22 R21
+type R23 R21
+type R24 R21
+
+func TestEmbed(t *testing.T) {
+ typ := TypeOf(R0{})
+ f, ok := typ.FieldByName("X")
+ if ok {
+ t.Fatalf(`FieldByName("X") should fail, returned %v`, f.Index)
+ }
+}
+
+func TestAllocsInterfaceBig(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping malloc count in short mode")
+ }
+ v := ValueOf(S{})
+ if allocs := testing.AllocsPerRun(100, func() { v.Interface() }); allocs > 0 {
+ t.Error("allocs:", allocs)
+ }
+}
+
+func TestAllocsInterfaceSmall(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping malloc count in short mode")
+ }
+ v := ValueOf(int64(0))
+ if allocs := testing.AllocsPerRun(100, func() { v.Interface() }); allocs > 0 {
+ t.Error("allocs:", allocs)
+ }
+}
+
+// An exhaustive is a mechanism for writing exhaustive or stochastic tests.
+// The basic usage is:
+//
+// for x.Next() {
+ // ... code using x.Maybe() or x.Choose(n) to create test cases ...
+// }
+//
+// Each iteration of the loop returns a different set of results, until all
+// possible result sets have been explored. It is okay for different code paths
+// to make different method call sequences on x, but there must be no
+// other source of non-determinism in the call sequences.
+//
+// When faced with a new decision, x chooses randomly. Future explorations
+// of that path will choose successive values for the result. Thus, stopping
+// the loop after a fixed number of iterations gives somewhat stochastic
+// testing.
+//
+// Example:
+//
+// for x.Next() {
+// v := make([]bool, x.Choose(4))
+// for i := range v {
+// v[i] = x.Maybe()
+// }
+// fmt.Println(v)
+// }
+//
+// prints (in some order):
+//
+// []
+// [false]
+// [true]
+// [false false]
+// [false true]
+// ...
+// [true true]
+// [false false false]
+// ...
+// [true true true]
+// [false false false false]
+// ...
+// [true true true true]
+type exhaustive struct {
+ r *rand.Rand
+ pos int
+ last []choice
+}
+
+type choice struct {
+ off int
+ n int
+ max int
+}
+
+func (x *exhaustive) Next() bool {
+ if x.r == nil {
+ x.r = rand.New(rand.NewSource(time.Now().UnixNano()))
+ }
+ x.pos = 0
+ if x.last == nil {
+ x.last = []choice{}
+ return true
+ }
+ for i := len(x.last) - 1; i >= 0; i-- {
+ c := &x.last[i]
+ if c.n+1 < c.max {
+ c.n++
+ x.last = x.last[:i+1]
+ return true
+ }
+ }
+ return false
+}
+
+func (x *exhaustive) Choose(max int) int {
+ if x.pos >= len(x.last) {
+ x.last = append(x.last, choice{x.r.Intn(max), 0, max})
+ }
+ c := &x.last[x.pos]
+ x.pos++
+ if c.max != max {
+ panic("inconsistent use of exhaustive tester")
+ }
+ return (c.n + c.off) % max
+}
+
+func (x *exhaustive) Maybe() bool {
+ return x.Choose(2) == 1
+}
+
+func GCFunc(args []Value) []Value {
+ runtime.GC()
+ return []Value{}
+}
+
+func TestReflectFuncTraceback(t *testing.T) {
+ f := MakeFunc(TypeOf(func() {}), GCFunc)
+ f.Call([]Value{})
+}
+
+func TestReflectMethodTraceback(t *testing.T) {
+ p := Point{3, 4}
+ m := ValueOf(p).MethodByName("GCMethod")
+ i := ValueOf(m.Interface()).Call([]Value{ValueOf(5)})[0].Int()
+ if i != 8 {
+ t.Errorf("Call returned %d; want 8", i)
+ }
+}
+
+func TestSmallZero(t *testing.T) {
+ type T [10]byte
+ typ := TypeOf(T{})
+ if allocs := testing.AllocsPerRun(100, func() { Zero(typ) }); allocs > 0 {
+ t.Errorf("Creating small zero values caused %f allocs, want 0", allocs)
+ }
+}
+
+func TestBigZero(t *testing.T) {
+ const size = 1 << 10
+ var v [size]byte
+ z := Zero(ValueOf(v).Type()).Interface().([size]byte)
+ for i := 0; i < size; i++ {
+ if z[i] != 0 {
+ t.Fatalf("Zero object not all zero, index %d", i)
+ }
+ }
+}
+
+func TestZeroSet(t *testing.T) {
+ type T [16]byte
+ type S struct {
+ a uint64
+ T T
+ b uint64
+ }
+ v := S{
+ a: 0xaaaaaaaaaaaaaaaa,
+ T: T{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9},
+ b: 0xbbbbbbbbbbbbbbbb,
+ }
+ ValueOf(&v).Elem().Field(1).Set(Zero(TypeOf(T{})))
+ if v != (S{
+ a: 0xaaaaaaaaaaaaaaaa,
+ b: 0xbbbbbbbbbbbbbbbb,
+ }) {
+ t.Fatalf("Setting a field to a Zero value didn't work")
+ }
+}
+
+func TestFieldByIndexNil(t *testing.T) {
+ type P struct {
+ F int
+ }
+ type T struct {
+ *P
+ }
+ v := ValueOf(T{})
+
+ v.FieldByName("P") // should be fine
+
+ defer func() {
+ if err := recover(); err == nil {
+ t.Fatalf("no error")
+ } else if !strings.Contains(fmt.Sprint(err), "nil pointer to embedded struct") {
+ t.Fatalf(`err=%q, wanted error containing "nil pointer to embedded struct"`, err)
+ }
+ }()
+ v.FieldByName("F") // should panic
+
+ t.Fatalf("did not panic")
+}
+
+// Given
+// type Outer struct {
+// *Inner
+// ...
+// }
+// the compiler generates the implementation of (*Outer).M dispatching to the embedded Inner.
+// The implementation is logically:
+// func (p *Outer) M() {
+// (p.Inner).M()
+// }
+// but since the only change here is the replacement of one pointer receiver with another,
+// the actual generated code overwrites the original receiver with the p.Inner pointer and
+// then jumps to the M method expecting the *Inner receiver.
+//
+// During reflect.Value.Call, we create an argument frame and the associated data structures
+// to describe it to the garbage collector, populate the frame, call reflect.call to
+// run a function call using that frame, and then copy the results back out of the frame.
+// The reflect.call function does a memmove of the frame structure onto the
+ // stack (to set up the inputs), runs the call, and then memmoves the stack back to
+// the frame structure (to preserve the outputs).
+//
+// Originally reflect.call did not distinguish inputs from outputs: both memmoves
+// were for the full stack frame. However, in the case where the called function was
+// one of these wrappers, the rewritten receiver is almost certainly a different type
+// than the original receiver. This is not a problem on the stack, where we use the
+// program counter to determine the type information and understand that
+// during (*Outer).M the receiver is an *Outer while during (*Inner).M the receiver in the same
+// memory word is now an *Inner. But in the statically typed argument frame created
+// by reflect, the receiver is always an *Outer. Copying the modified receiver pointer
+// off the stack into the frame will store an *Inner there, and then if a garbage collection
+// happens to scan that argument frame before it is discarded, it will scan the *Inner
+// memory as if it were an *Outer. If the two have different memory layouts, the
+// collection will interpret the memory incorrectly.
+//
+// One such possible incorrect interpretation is to treat two arbitrary memory words
+// (Inner.P1 and Inner.P2 below) as an interface (Outer.R below). Because interpreting
+// an interface requires dereferencing the itab word, the misinterpretation will try to
+ // dereference Inner.P1, causing a crash during garbage collection.
+//
+// This came up in a real program in issue 7725.
+
+type Outer struct {
+ *Inner
+ R io.Reader
+}
+
+type Inner struct {
+ X *Outer
+ P1 uintptr
+ P2 uintptr
+}
+
+func (pi *Inner) M() {
+ // Clear references to pi so that the only way the
+ // garbage collector will find the pointer is in the
+ // argument frame, typed as a *Outer.
+ pi.X.Inner = nil
+
+ // Set up an interface value that will cause a crash.
+ // P1 = 1 is non-zero, so the interface looks non-nil.
+ // P2 = pi ensures that the data word points into the
+ // allocated heap; if not, the collection skips the interface
+ // value as irrelevant, without dereferencing P1.
+ pi.P1 = 1
+ pi.P2 = uintptr(unsafe.Pointer(pi))
+}
+
+func TestCallMethodJump(t *testing.T) {
+ // In reflect.Value.Call, trigger a garbage collection after reflect.call
+ // returns but before the args frame has been discarded.
+ // This is a little clumsy but makes the failure repeatable.
+ *CallGC = true
+
+ p := &Outer{Inner: new(Inner)}
+ p.Inner.X = p
+ ValueOf(p).Method(0).Call(nil)
+
+ // Stop garbage collecting during reflect.call.
+ *CallGC = false
+}
+
+func TestCallArgLive(t *testing.T) {
+ type T struct{ X, Y *string } // pointerful aggregate
+
+ F := func(t T) { *t.X = "ok" }
+
+ // In reflect.Value.Call, trigger a garbage collection in reflect.call
+ // between marshaling the arguments and the actual call.
+ *CallGC = true
+
+ x := new(string)
+ runtime.SetFinalizer(x, func(p *string) {
+ if *p != "ok" {
+ t.Errorf("x dead prematurely")
+ }
+ })
+ v := T{x, nil}
+
+ ValueOf(F).Call([]Value{ValueOf(v)})
+
+ // Stop garbage collecting during reflect.call.
+ *CallGC = false
+}
+
+func TestMakeFuncStackCopy(t *testing.T) {
+ target := func(in []Value) []Value {
+ runtime.GC()
+ useStack(16)
+ return []Value{ValueOf(9)}
+ }
+
+ var concrete func(*int, int) int
+ fn := MakeFunc(ValueOf(concrete).Type(), target)
+ ValueOf(&concrete).Elem().Set(fn)
+ x := concrete(nil, 7)
+ if x != 9 {
+ t.Errorf("have %#q want 9", x)
+ }
+}
+
+// use about n KB of stack
+func useStack(n int) {
+ if n == 0 {
+ return
+ }
+ var b [1024]byte // makes frame about 1KB
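+ // b[99] is always zero; referencing it should keep b from being
+ // optimized away, so each frame really occupies about 1KB.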
+ useStack(n - 1 + int(b[99]))
+}
+
+type Impl struct{}
+
+func (Impl) F() {}
+
+func TestValueString(t *testing.T) {
+ rv := ValueOf(Impl{})
+ if rv.String() != "<reflect_test.Impl Value>" {
+ t.Errorf("ValueOf(Impl{}).String() = %q, want %q", rv.String(), "<reflect_test.Impl Value>")
+ }
+
+ method := rv.Method(0)
+ if method.String() != "<func() Value>" {
+ t.Errorf("ValueOf(Impl{}).Method(0).String() = %q, want %q", method.String(), "<func() Value>")
+ }
+}
+
+func TestInvalid(t *testing.T) {
+ // Used to have inconsistency between IsValid() and Kind() != Invalid.
+ type T struct{ v any }
+
+ v := ValueOf(T{}).Field(0)
+ if v.IsValid() != true || v.Kind() != Interface {
+ t.Errorf("field: IsValid=%v, Kind=%v, want true, Interface", v.IsValid(), v.Kind())
+ }
+ v = v.Elem()
+ if v.IsValid() != false || v.Kind() != Invalid {
+ t.Errorf("field elem: IsValid=%v, Kind=%v, want false, Invalid", v.IsValid(), v.Kind())
+ }
+}
+
+// Issue 8917.
+func TestLargeGCProg(t *testing.T) {
+ fv := ValueOf(func([256]*byte) {})
+ fv.Call([]Value{ValueOf([256]*byte{})})
+}
+
+func fieldIndexRecover(t Type, i int) (recovered any) {
+ defer func() {
+ recovered = recover()
+ }()
+
+ t.Field(i)
+ return
+}
+
+// Issue 15046.
+func TestTypeFieldOutOfRangePanic(t *testing.T) {
+ typ := TypeOf(struct{ X int }{10})
+ testIndices := [...]struct {
+ i int
+ mustPanic bool
+ }{
+ 0: {-2, true},
+ 1: {0, false},
+ 2: {1, true},
+ 3: {1 << 10, true},
+ }
+ for i, tt := range testIndices {
+ recoveredErr := fieldIndexRecover(typ, tt.i)
+ if tt.mustPanic {
+ if recoveredErr == nil {
+ t.Errorf("#%d: fieldIndex %d expected to panic", i, tt.i)
+ }
+ } else {
+ if recoveredErr != nil {
+ t.Errorf("#%d: got err=%v, expected no panic", i, recoveredErr)
+ }
+ }
+ }
+}
+
+// Issue 9179.
+func TestCallGC(t *testing.T) {
+ f := func(a, b, c, d, e string) {
+ }
+ g := func(in []Value) []Value {
+ runtime.GC()
+ return nil
+ }
+ typ := ValueOf(f).Type()
+ f2 := MakeFunc(typ, g).Interface().(func(string, string, string, string, string))
+ f2("four", "five5", "six666", "seven77", "eight888")
+}
+
+// Issue 18635 (function version).
+func TestKeepFuncLive(t *testing.T) {
+ // Test that we keep makeFuncImpl live as long as it is
+ // referenced on the stack.
+ typ := TypeOf(func(i int) {})
+ var f, g func(in []Value) []Value
+ f = func(in []Value) []Value {
+ clobber()
+ i := int(in[0].Int())
+ if i > 0 {
+ // We can't use Value.Call here because
+ // runtime.call* will keep the makeFuncImpl
+ // alive. However, by converting it to an
+ // interface value and calling that,
+ // reflect.callReflect is the only thing that
+ // can keep the makeFuncImpl live.
+ //
+ // Alternate between f and g so that if we do
+ // reuse the memory prematurely it's more
+ // likely to get obviously corrupted.
+ MakeFunc(typ, g).Interface().(func(i int))(i - 1)
+ }
+ return nil
+ }
+ g = func(in []Value) []Value {
+ clobber()
+ i := int(in[0].Int())
+ MakeFunc(typ, f).Interface().(func(i int))(i)
+ return nil
+ }
+ MakeFunc(typ, f).Call([]Value{ValueOf(10)})
+}
+
+type UnExportedFirst int
+
+func (i UnExportedFirst) ΦExported() {}
+func (i UnExportedFirst) unexported() {}
+
+// Issue 21177
+func TestMethodByNameUnExportedFirst(t *testing.T) {
+ defer func() {
+ if recover() != nil {
+ t.Errorf("should not panic")
+ }
+ }()
+ typ := TypeOf(UnExportedFirst(0))
+ m, _ := typ.MethodByName("ΦExported")
+ if m.Name != "ΦExported" {
+ t.Errorf("got %s, expected ΦExported", m.Name)
+ }
+}
+
+// Issue 18635 (method version).
+type KeepMethodLive struct{}
+
+func (k KeepMethodLive) Method1(i int) {
+ clobber()
+ if i > 0 {
+ ValueOf(k).MethodByName("Method2").Interface().(func(i int))(i - 1)
+ }
+}
+
+func (k KeepMethodLive) Method2(i int) {
+ clobber()
+ ValueOf(k).MethodByName("Method1").Interface().(func(i int))(i)
+}
+
+func TestKeepMethodLive(t *testing.T) {
+ // Test that we keep methodValue live as long as it is
+ // referenced on the stack.
+ KeepMethodLive{}.Method1(10)
+}
+
+// clobber tries to clobber unreachable memory.
+func clobber() {
+ runtime.GC()
+ for i := 1; i < 32; i++ {
+ for j := 0; j < 10; j++ {
+ obj := make([]*byte, i)
+ sink = obj
+ }
+ }
+ runtime.GC()
+}
+
+func TestFuncLayout(t *testing.T) {
+ align := func(x uintptr) uintptr {
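+ // Round x up to the next multiple of goarch.PtrSize (a power of two).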
+ return (x + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
+ }
+ var r []byte
+ if goarch.PtrSize == 4 {
+ r = []byte{0, 0, 0, 1}
+ } else {
+ r = []byte{0, 0, 1}
+ }
+
+ type S struct {
+ a, b uintptr
+ c, d *byte
+ }
+
+ type test struct {
+ rcvr, typ Type
+ size, argsize, retOffset uintptr
+ stack, gc, inRegs, outRegs []byte // pointer bitmap: 1 is pointer, 0 is scalar
+ intRegs, floatRegs int
+ floatRegSize uintptr
+ }
+ tests := []test{
+ {
+ typ: ValueOf(func(a, b string) string { return "" }).Type(),
+ size: 6 * goarch.PtrSize,
+ argsize: 4 * goarch.PtrSize,
+ retOffset: 4 * goarch.PtrSize,
+ stack: []byte{1, 0, 1, 0, 1},
+ gc: []byte{1, 0, 1, 0, 1},
+ },
+ {
+ typ: ValueOf(func(a, b, c uint32, p *byte, d uint16) {}).Type(),
+ size: align(align(3*4) + goarch.PtrSize + 2),
+ argsize: align(3*4) + goarch.PtrSize + 2,
+ retOffset: align(align(3*4) + goarch.PtrSize + 2),
+ stack: r,
+ gc: r,
+ },
+ {
+ typ: ValueOf(func(a map[int]int, b uintptr, c any) {}).Type(),
+ size: 4 * goarch.PtrSize,
+ argsize: 4 * goarch.PtrSize,
+ retOffset: 4 * goarch.PtrSize,
+ stack: []byte{1, 0, 1, 1},
+ gc: []byte{1, 0, 1, 1},
+ },
+ {
+ typ: ValueOf(func(a S) {}).Type(),
+ size: 4 * goarch.PtrSize,
+ argsize: 4 * goarch.PtrSize,
+ retOffset: 4 * goarch.PtrSize,
+ stack: []byte{0, 0, 1, 1},
+ gc: []byte{0, 0, 1, 1},
+ },
+ {
+ rcvr: ValueOf((*byte)(nil)).Type(),
+ typ: ValueOf(func(a uintptr, b *int) {}).Type(),
+ size: 3 * goarch.PtrSize,
+ argsize: 3 * goarch.PtrSize,
+ retOffset: 3 * goarch.PtrSize,
+ stack: []byte{1, 0, 1},
+ gc: []byte{1, 0, 1},
+ },
+ {
+ typ: ValueOf(func(a uintptr) {}).Type(),
+ size: goarch.PtrSize,
+ argsize: goarch.PtrSize,
+ retOffset: goarch.PtrSize,
+ stack: []byte{},
+ gc: []byte{},
+ },
+ {
+ typ: ValueOf(func() uintptr { return 0 }).Type(),
+ size: goarch.PtrSize,
+ argsize: 0,
+ retOffset: 0,
+ stack: []byte{},
+ gc: []byte{},
+ },
+ {
+ rcvr: ValueOf(uintptr(0)).Type(),
+ typ: ValueOf(func(a uintptr) {}).Type(),
+ size: 2 * goarch.PtrSize,
+ argsize: 2 * goarch.PtrSize,
+ retOffset: 2 * goarch.PtrSize,
+ stack: []byte{1},
+ gc: []byte{1},
+ // Note: this one is tricky, as the receiver is not a pointer. But we
+ // pass the receiver by reference to the autogenerated pointer-receiver
+ // version of the function.
+ },
+ // TODO(mknyszek): Add tests for non-zero register count.
+ }
+ for _, lt := range tests {
+ name := lt.typ.String()
+ if lt.rcvr != nil {
+ name = lt.rcvr.String() + "." + name
+ }
+ t.Run(name, func(t *testing.T) {
+ defer SetArgRegs(SetArgRegs(lt.intRegs, lt.floatRegs, lt.floatRegSize))
+
+ typ, argsize, retOffset, stack, gc, inRegs, outRegs, ptrs := FuncLayout(lt.typ, lt.rcvr)
+ if typ.Size() != lt.size {
+ t.Errorf("funcLayout(%v, %v).size=%d, want %d", lt.typ, lt.rcvr, typ.Size(), lt.size)
+ }
+ if argsize != lt.argsize {
+ t.Errorf("funcLayout(%v, %v).argsize=%d, want %d", lt.typ, lt.rcvr, argsize, lt.argsize)
+ }
+ if retOffset != lt.retOffset {
+ t.Errorf("funcLayout(%v, %v).retOffset=%d, want %d", lt.typ, lt.rcvr, retOffset, lt.retOffset)
+ }
+ if !bytes.Equal(stack, lt.stack) {
+ t.Errorf("funcLayout(%v, %v).stack=%v, want %v", lt.typ, lt.rcvr, stack, lt.stack)
+ }
+ if !bytes.Equal(gc, lt.gc) {
+ t.Errorf("funcLayout(%v, %v).gc=%v, want %v", lt.typ, lt.rcvr, gc, lt.gc)
+ }
+ if !bytes.Equal(inRegs, lt.inRegs) {
+ t.Errorf("funcLayout(%v, %v).inRegs=%v, want %v", lt.typ, lt.rcvr, inRegs, lt.inRegs)
+ }
+ if !bytes.Equal(outRegs, lt.outRegs) {
+ t.Errorf("funcLayout(%v, %v).outRegs=%v, want %v", lt.typ, lt.rcvr, outRegs, lt.outRegs)
+ }
+ if ptrs && len(stack) == 0 || !ptrs && len(stack) > 0 {
+ t.Errorf("funcLayout(%v, %v) pointers flag=%v, want %v", lt.typ, lt.rcvr, ptrs, !ptrs)
+ }
+ })
+ }
+}
+
+// trimBitmap removes trailing 0 elements from b and returns the result.
+func trimBitmap(b []byte) []byte {
+ for len(b) > 0 && b[len(b)-1] == 0 {
+ b = b[:len(b)-1]
+ }
+ return b
+}
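+
+// For illustration (hypothetical input), trimBitmap([]byte{1, 0, 1, 0, 0})
+// returns []byte{1, 0, 1}.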
+
+func verifyGCBits(t *testing.T, typ Type, bits []byte) {
+ heapBits := GCBits(New(typ).Interface())
+
+ // Trim scalars at the end, as bits might end in zero,
+ // e.g. with rep(2, lit(1, 0)).
+ bits = trimBitmap(bits)
+
+ if !bytes.Equal(heapBits, bits) {
+ _, _, line, _ := runtime.Caller(1)
+ t.Errorf("line %d: heapBits incorrect for %v\nhave %v\nwant %v", line, typ, heapBits, bits)
+ }
+}
+
+func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) {
+ // Creating a slice causes the runtime to repeat a bitmap,
+ // which exercises a different path from making the compiler
+ // repeat a bitmap for a small array or executing a repeat in
+ // a GC program.
+ val := MakeSlice(typ, 0, cap)
+ data := NewAt(ArrayOf(cap, typ), val.UnsafePointer())
+ heapBits := GCBits(data.Interface())
+ // Repeat the bitmap for the slice size, trimming scalars in
+ // the last element.
+ bits = trimBitmap(rep(cap, bits))
+ if !bytes.Equal(heapBits, bits) {
+ _, _, line, _ := runtime.Caller(1)
+ t.Errorf("line %d: heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", line, typ, cap, heapBits, bits)
+ }
+}
+
+func TestGCBits(t *testing.T) {
+ verifyGCBits(t, TypeOf((*byte)(nil)), []byte{1})
+
+ // Building blocks for types seen by the compiler (like [2]Xscalar).
+ // The compiler will create the type structures for the derived types,
+ // including their GC metadata.
+ type Xscalar struct{ x uintptr }
+ type Xptr struct{ x *byte }
+ type Xptrscalar struct {
+ *byte
+ uintptr
+ }
+ type Xscalarptr struct {
+ uintptr
+ *byte
+ }
+ type Xbigptrscalar struct {
+ _ [100]*byte
+ _ [100]uintptr
+ }
+
+ var Tscalar, Tint64, Tptr, Tscalarptr, Tptrscalar, Tbigptrscalar Type
+ {
+ // Building blocks for types constructed by reflect.
+ // This code is in a separate block so that code below
+ // cannot accidentally refer to these.
+ // The compiler must NOT see types derived from these
+ // (for example, [2]Scalar must NOT appear in the program),
+ // or else reflect will use it instead of having to construct one.
+ // The goal is to test the construction.
+ type Scalar struct{ x uintptr }
+ type Ptr struct{ x *byte }
+ type Ptrscalar struct {
+ *byte
+ uintptr
+ }
+ type Scalarptr struct {
+ uintptr
+ *byte
+ }
+ type Bigptrscalar struct {
+ _ [100]*byte
+ _ [100]uintptr
+ }
+ type Int64 int64
+ Tscalar = TypeOf(Scalar{})
+ Tint64 = TypeOf(Int64(0))
+ Tptr = TypeOf(Ptr{})
+ Tscalarptr = TypeOf(Scalarptr{})
+ Tptrscalar = TypeOf(Ptrscalar{})
+ Tbigptrscalar = TypeOf(Bigptrscalar{})
+ }
+
+ empty := []byte{}
+
+ verifyGCBits(t, TypeOf(Xscalar{}), empty)
+ verifyGCBits(t, Tscalar, empty)
+ verifyGCBits(t, TypeOf(Xptr{}), lit(1))
+ verifyGCBits(t, Tptr, lit(1))
+ verifyGCBits(t, TypeOf(Xscalarptr{}), lit(0, 1))
+ verifyGCBits(t, Tscalarptr, lit(0, 1))
+ verifyGCBits(t, TypeOf(Xptrscalar{}), lit(1))
+ verifyGCBits(t, Tptrscalar, lit(1))
+
+ verifyGCBits(t, TypeOf([0]Xptr{}), empty)
+ verifyGCBits(t, ArrayOf(0, Tptr), empty)
+ verifyGCBits(t, TypeOf([1]Xptrscalar{}), lit(1))
+ verifyGCBits(t, ArrayOf(1, Tptrscalar), lit(1))
+ verifyGCBits(t, TypeOf([2]Xscalar{}), empty)
+ verifyGCBits(t, ArrayOf(2, Tscalar), empty)
+ verifyGCBits(t, TypeOf([10000]Xscalar{}), empty)
+ verifyGCBits(t, ArrayOf(10000, Tscalar), empty)
+ verifyGCBits(t, TypeOf([2]Xptr{}), lit(1, 1))
+ verifyGCBits(t, ArrayOf(2, Tptr), lit(1, 1))
+ verifyGCBits(t, TypeOf([10000]Xptr{}), rep(10000, lit(1)))
+ verifyGCBits(t, ArrayOf(10000, Tptr), rep(10000, lit(1)))
+ verifyGCBits(t, TypeOf([2]Xscalarptr{}), lit(0, 1, 0, 1))
+ verifyGCBits(t, ArrayOf(2, Tscalarptr), lit(0, 1, 0, 1))
+ verifyGCBits(t, TypeOf([10000]Xscalarptr{}), rep(10000, lit(0, 1)))
+ verifyGCBits(t, ArrayOf(10000, Tscalarptr), rep(10000, lit(0, 1)))
+ verifyGCBits(t, TypeOf([2]Xptrscalar{}), lit(1, 0, 1))
+ verifyGCBits(t, ArrayOf(2, Tptrscalar), lit(1, 0, 1))
+ verifyGCBits(t, TypeOf([10000]Xptrscalar{}), rep(10000, lit(1, 0)))
+ verifyGCBits(t, ArrayOf(10000, Tptrscalar), rep(10000, lit(1, 0)))
+ verifyGCBits(t, TypeOf([1][10000]Xptrscalar{}), rep(10000, lit(1, 0)))
+ verifyGCBits(t, ArrayOf(1, ArrayOf(10000, Tptrscalar)), rep(10000, lit(1, 0)))
+ verifyGCBits(t, TypeOf([2][10000]Xptrscalar{}), rep(2*10000, lit(1, 0)))
+ verifyGCBits(t, ArrayOf(2, ArrayOf(10000, Tptrscalar)), rep(2*10000, lit(1, 0)))
+ verifyGCBits(t, TypeOf([4]Xbigptrscalar{}), join(rep(3, join(rep(100, lit(1)), rep(100, lit(0)))), rep(100, lit(1))))
+ verifyGCBits(t, ArrayOf(4, Tbigptrscalar), join(rep(3, join(rep(100, lit(1)), rep(100, lit(0)))), rep(100, lit(1))))
+
+ verifyGCBitsSlice(t, TypeOf([]Xptr{}), 0, empty)
+ verifyGCBitsSlice(t, SliceOf(Tptr), 0, empty)
+ verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 1, lit(1))
+ verifyGCBitsSlice(t, SliceOf(Tptrscalar), 1, lit(1))
+ verifyGCBitsSlice(t, TypeOf([]Xscalar{}), 2, lit(0))
+ verifyGCBitsSlice(t, SliceOf(Tscalar), 2, lit(0))
+ verifyGCBitsSlice(t, TypeOf([]Xscalar{}), 10000, lit(0))
+ verifyGCBitsSlice(t, SliceOf(Tscalar), 10000, lit(0))
+ verifyGCBitsSlice(t, TypeOf([]Xptr{}), 2, lit(1))
+ verifyGCBitsSlice(t, SliceOf(Tptr), 2, lit(1))
+ verifyGCBitsSlice(t, TypeOf([]Xptr{}), 10000, lit(1))
+ verifyGCBitsSlice(t, SliceOf(Tptr), 10000, lit(1))
+ verifyGCBitsSlice(t, TypeOf([]Xscalarptr{}), 2, lit(0, 1))
+ verifyGCBitsSlice(t, SliceOf(Tscalarptr), 2, lit(0, 1))
+ verifyGCBitsSlice(t, TypeOf([]Xscalarptr{}), 10000, lit(0, 1))
+ verifyGCBitsSlice(t, SliceOf(Tscalarptr), 10000, lit(0, 1))
+ verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 2, lit(1, 0))
+ verifyGCBitsSlice(t, SliceOf(Tptrscalar), 2, lit(1, 0))
+ verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 10000, lit(1, 0))
+ verifyGCBitsSlice(t, SliceOf(Tptrscalar), 10000, lit(1, 0))
+ verifyGCBitsSlice(t, TypeOf([][10000]Xptrscalar{}), 1, rep(10000, lit(1, 0)))
+ verifyGCBitsSlice(t, SliceOf(ArrayOf(10000, Tptrscalar)), 1, rep(10000, lit(1, 0)))
+ verifyGCBitsSlice(t, TypeOf([][10000]Xptrscalar{}), 2, rep(10000, lit(1, 0)))
+ verifyGCBitsSlice(t, SliceOf(ArrayOf(10000, Tptrscalar)), 2, rep(10000, lit(1, 0)))
+ verifyGCBitsSlice(t, TypeOf([]Xbigptrscalar{}), 4, join(rep(100, lit(1)), rep(100, lit(0))))
+ verifyGCBitsSlice(t, SliceOf(Tbigptrscalar), 4, join(rep(100, lit(1)), rep(100, lit(0))))
+
+ verifyGCBits(t, TypeOf((chan [100]Xscalar)(nil)), lit(1))
+ verifyGCBits(t, ChanOf(BothDir, ArrayOf(100, Tscalar)), lit(1))
+
+ verifyGCBits(t, TypeOf((func([10000]Xscalarptr))(nil)), lit(1))
+ verifyGCBits(t, FuncOf([]Type{ArrayOf(10000, Tscalarptr)}, nil, false), lit(1))
+
+ verifyGCBits(t, TypeOf((map[[10000]Xscalarptr]Xscalar)(nil)), lit(1))
+ verifyGCBits(t, MapOf(ArrayOf(10000, Tscalarptr), Tscalar), lit(1))
+
+ verifyGCBits(t, TypeOf((*[10000]Xscalar)(nil)), lit(1))
+ verifyGCBits(t, PointerTo(ArrayOf(10000, Tscalar)), lit(1))
+
+ verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
+ verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
+
+ hdr := make([]byte, 8/goarch.PtrSize)
+
+ verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
+ verifyGCBits(t, MapBucketOf(k, e), want)
+ verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
+ }
+ verifyMapBucket(t,
+ Tscalar, Tptr,
+ map[Xscalar]Xptr(nil),
+ join(hdr, rep(8, lit(0)), rep(8, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ Tscalarptr, Tptr,
+ map[Xscalarptr]Xptr(nil),
+ join(hdr, rep(8, lit(0, 1)), rep(8, lit(1)), lit(1)))
+ verifyMapBucket(t, Tint64, Tptr,
+ map[int64]Xptr(nil),
+ join(hdr, rep(8, rep(8/goarch.PtrSize, lit(0))), rep(8, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ Tscalar, Tscalar,
+ map[Xscalar]Xscalar(nil),
+ empty)
+ verifyMapBucket(t,
+ ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
+ map[[2]Xscalarptr][3]Xptrscalar(nil),
+ join(hdr, rep(8*2, lit(0, 1)), rep(8*3, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+ map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+ join(hdr, rep(8*64/goarch.PtrSize, lit(0, 1)), rep(8*64/goarch.PtrSize, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+ map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+ join(hdr, rep(8, lit(1)), rep(8*64/goarch.PtrSize, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+ map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
+ join(hdr, rep(8*64/goarch.PtrSize, lit(0, 1)), rep(8, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+ map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
+ join(hdr, rep(8, lit(1)), rep(8, lit(1)), lit(1)))
+}
+
+func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) }
+func join(b ...[]byte) []byte { return bytes.Join(b, nil) }
+func lit(x ...byte) []byte { return x }
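+
+// Illustrative note: the helpers above compose the expected pointer bitmaps,
+// e.g. join(rep(2, lit(1, 0)), lit(1)) yields []byte{1, 0, 1, 0, 1}, i.e. two
+// pointer/scalar pairs followed by a trailing pointer word.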
+
+func TestTypeOfTypeOf(t *testing.T) {
+ // Check that all the type constructors return concrete *rtype implementations.
+ // It's difficult to test directly because the reflect package is only at arm's length.
+ // The easiest thing to do is just call a function that crashes if it doesn't get an *rtype.
+ check := func(name string, typ Type) {
+ if underlying := TypeOf(typ).String(); underlying != "*reflect.rtype" {
+ t.Errorf("%v returned %v, not *reflect.rtype", name, underlying)
+ }
+ }
+
+ type T struct{ int }
+ check("TypeOf", TypeOf(T{}))
+
+ check("ArrayOf", ArrayOf(10, TypeOf(T{})))
+ check("ChanOf", ChanOf(BothDir, TypeOf(T{})))
+ check("FuncOf", FuncOf([]Type{TypeOf(T{})}, nil, false))
+ check("MapOf", MapOf(TypeOf(T{}), TypeOf(T{})))
+ check("PtrTo", PointerTo(TypeOf(T{})))
+ check("SliceOf", SliceOf(TypeOf(T{})))
+}
+
+type XM struct{ _ bool }
+
+func (*XM) String() string { return "" }
+
+func TestPtrToMethods(t *testing.T) {
+ var y struct{ XM }
+ yp := New(TypeOf(y)).Interface()
+ _, ok := yp.(fmt.Stringer)
+ if !ok {
+ t.Fatal("does not implement Stringer, but should")
+ }
+}
+
+func TestMapAlloc(t *testing.T) {
+ m := ValueOf(make(map[int]int, 10))
+ k := ValueOf(5)
+ v := ValueOf(7)
+ allocs := testing.AllocsPerRun(100, func() {
+ m.SetMapIndex(k, v)
+ })
+ if allocs > 0.5 {
+ t.Errorf("allocs per map assignment: want 0 got %f", allocs)
+ }
+
+ const size = 1000
+ tmp := 0
+ val := ValueOf(&tmp).Elem()
+ allocs = testing.AllocsPerRun(100, func() {
+ mv := MakeMapWithSize(TypeOf(map[int]int{}), size)
+ // Only add half of the capacity to avoid triggering re-allocations due to too many overloaded buckets.
+ for i := 0; i < size/2; i++ {
+ val.SetInt(int64(i))
+ mv.SetMapIndex(val, val)
+ }
+ })
+ if allocs > 10 {
+ t.Errorf("allocs per map assignment: want at most 10 got %f", allocs)
+ }
+ // Empirical testing shows that with a capacity hint a single run triggers 3 allocations, and without one it
+ // triggers 91. The threshold is set to 10 so the test is not overly brittle if the initial allocation of the
+ // map changes, but still catches a regression where we keep re-allocating in the hashmap as new entries are added.
+}
+
+func TestChanAlloc(t *testing.T) {
+ // Note: for a chan int, the return Value must be allocated, so we
+ // use a chan *int instead.
+ c := ValueOf(make(chan *int, 1))
+ v := ValueOf(new(int))
+ allocs := testing.AllocsPerRun(100, func() {
+ c.Send(v)
+ _, _ = c.Recv()
+ })
+ if allocs < 0.5 || allocs > 1.5 {
+ t.Errorf("allocs per chan send/recv: want 1 got %f", allocs)
+ }
+ // Note: there is one allocation in reflect.recv which seems to be
+ // a limitation of escape analysis. If that is ever fixed the
+ // allocs < 0.5 condition will trigger and this test should be fixed.
+}
+
+type TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678 int
+
+type nameTest struct {
+ v any
+ want string
+}
+
+var nameTests = []nameTest{
+ {(*int32)(nil), "int32"},
+ {(*D1)(nil), "D1"},
+ {(*[]D1)(nil), ""},
+ {(*chan D1)(nil), ""},
+ {(*func() D1)(nil), ""},
+ {(*<-chan D1)(nil), ""},
+ {(*chan<- D1)(nil), ""},
+ {(*any)(nil), ""},
+ {(*interface {
+ F()
+ })(nil), ""},
+ {(*TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678)(nil), "TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678"},
+}
+
+func TestNames(t *testing.T) {
+ for _, test := range nameTests {
+ typ := TypeOf(test.v).Elem()
+ if got := typ.Name(); got != test.want {
+ t.Errorf("%v Name()=%q, want %q", typ, got, test.want)
+ }
+ }
+}
+
+func TestExported(t *testing.T) {
+ type ΦExported struct{}
+ type φUnexported struct{}
+ type BigP *big
+ type P int
+ type p *P
+ type P2 p
+ type p3 p
+
+ type exportTest struct {
+ v any
+ want bool
+ }
+ exportTests := []exportTest{
+ {D1{}, true},
+ {(*D1)(nil), true},
+ {big{}, false},
+ {(*big)(nil), false},
+ {(BigP)(nil), true},
+ {(*BigP)(nil), true},
+ {ΦExported{}, true},
+ {φUnexported{}, false},
+ {P(0), true},
+ {(p)(nil), false},
+ {(P2)(nil), true},
+ {(p3)(nil), false},
+ }
+
+ for i, test := range exportTests {
+ typ := TypeOf(test.v)
+ if got := IsExported(typ); got != test.want {
+ t.Errorf("%d: %s exported=%v, want %v", i, typ.Name(), got, test.want)
+ }
+ }
+}
+
+func TestTypeStrings(t *testing.T) {
+ type stringTest struct {
+ typ Type
+ want string
+ }
+ stringTests := []stringTest{
+ {TypeOf(func(int) {}), "func(int)"},
+ {FuncOf([]Type{TypeOf(int(0))}, nil, false), "func(int)"},
+ {TypeOf(XM{}), "reflect_test.XM"},
+ {TypeOf(new(XM)), "*reflect_test.XM"},
+ {TypeOf(new(XM).String), "func() string"},
+ {TypeOf(new(XM)).Method(0).Type, "func(*reflect_test.XM) string"},
+ {ChanOf(3, TypeOf(XM{})), "chan reflect_test.XM"},
+ {MapOf(TypeOf(int(0)), TypeOf(XM{})), "map[int]reflect_test.XM"},
+ {ArrayOf(3, TypeOf(XM{})), "[3]reflect_test.XM"},
+ {ArrayOf(3, TypeOf(struct{}{})), "[3]struct {}"},
+ }
+
+ for i, test := range stringTests {
+ if got, want := test.typ.String(), test.want; got != want {
+ t.Errorf("type %d String()=%q, want %q", i, got, want)
+ }
+ }
+}
+
+func TestOffsetLock(t *testing.T) {
+ var wg sync.WaitGroup
+ for i := 0; i < 4; i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ for j := 0; j < 50; j++ {
+ ResolveReflectName(fmt.Sprintf("OffsetLockName:%d:%d", i, j))
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func TestSwapper(t *testing.T) {
+ type I int
+ var a, b, c I
+ type pair struct {
+ x, y int
+ }
+ type pairPtr struct {
+ x, y int
+ p *I
+ }
+ type S string
+
+ tests := []struct {
+ in any
+ i, j int
+ want any
+ }{
+ {
+ in: []int{1, 20, 300},
+ i: 0,
+ j: 2,
+ want: []int{300, 20, 1},
+ },
+ {
+ in: []uintptr{1, 20, 300},
+ i: 0,
+ j: 2,
+ want: []uintptr{300, 20, 1},
+ },
+ {
+ in: []int16{1, 20, 300},
+ i: 0,
+ j: 2,
+ want: []int16{300, 20, 1},
+ },
+ {
+ in: []int8{1, 20, 100},
+ i: 0,
+ j: 2,
+ want: []int8{100, 20, 1},
+ },
+ {
+ in: []*I{&a, &b, &c},
+ i: 0,
+ j: 2,
+ want: []*I{&c, &b, &a},
+ },
+ {
+ in: []string{"eric", "sergey", "larry"},
+ i: 0,
+ j: 2,
+ want: []string{"larry", "sergey", "eric"},
+ },
+ {
+ in: []S{"eric", "sergey", "larry"},
+ i: 0,
+ j: 2,
+ want: []S{"larry", "sergey", "eric"},
+ },
+ {
+ in: []pair{{1, 2}, {3, 4}, {5, 6}},
+ i: 0,
+ j: 2,
+ want: []pair{{5, 6}, {3, 4}, {1, 2}},
+ },
+ {
+ in: []pairPtr{{1, 2, &a}, {3, 4, &b}, {5, 6, &c}},
+ i: 0,
+ j: 2,
+ want: []pairPtr{{5, 6, &c}, {3, 4, &b}, {1, 2, &a}},
+ },
+ }
+
+ for i, tt := range tests {
+ inStr := fmt.Sprint(tt.in)
+ Swapper(tt.in)(tt.i, tt.j)
+ if !DeepEqual(tt.in, tt.want) {
+ t.Errorf("%d. swapping %v and %v of %v = %v; want %v", i, tt.i, tt.j, inStr, tt.in, tt.want)
+ }
+ }
+}
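+
+// Illustrative sketch (hypothetical usage, as seen from a client package) of
+// the Swapper API exercised above; Swapper panics if its argument is not a
+// slice:
+//
+//	s := []int{3, 1, 2}
+//	swap := reflect.Swapper(s)
+//	swap(0, 2) // s is now []int{2, 1, 3}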
+
+// TestUnaddressableField tests that the reflect package will not allow
+// a type from another package to be used as a named type with an
+// unexported field.
+//
+// This ensures that unexported fields cannot be modified by other packages.
+func TestUnaddressableField(t *testing.T) {
+ var b Buffer // type defined in reflect, a different package
+ var localBuffer struct {
+ buf []byte
+ }
+ lv := ValueOf(&localBuffer).Elem()
+ rv := ValueOf(b)
+ shouldPanic("Set", func() {
+ lv.Set(rv)
+ })
+}
+
+type Tint int
+
+type Tint2 = Tint
+
+type Talias1 struct {
+ byte
+ uint8
+ int
+ int32
+ rune
+}
+
+type Talias2 struct {
+ Tint
+ Tint2
+}
+
+func TestAliasNames(t *testing.T) {
+ t1 := Talias1{byte: 1, uint8: 2, int: 3, int32: 4, rune: 5}
+ out := fmt.Sprintf("%#v", t1)
+ want := "reflect_test.Talias1{byte:0x1, uint8:0x2, int:3, int32:4, rune:5}"
+ if out != want {
+ t.Errorf("Talias1 print:\nhave: %s\nwant: %s", out, want)
+ }
+
+ t2 := Talias2{Tint: 1, Tint2: 2}
+ out = fmt.Sprintf("%#v", t2)
+ want = "reflect_test.Talias2{Tint:1, Tint2:2}"
+ if out != want {
+ t.Errorf("Talias2 print:\nhave: %s\nwant: %s", out, want)
+ }
+}
+
+func TestIssue22031(t *testing.T) {
+ type s []struct{ C int }
+
+ type t1 struct{ s }
+ type t2 struct{ f s }
+
+ tests := []Value{
+ ValueOf(t1{s{{}}}).Field(0).Index(0).Field(0),
+ ValueOf(t2{s{{}}}).Field(0).Index(0).Field(0),
+ }
+
+ for i, test := range tests {
+ if test.CanSet() {
+ t.Errorf("%d: CanSet: got true, want false", i)
+ }
+ }
+}
+
+type NonExportedFirst int
+
+func (i NonExportedFirst) ΦExported() {}
+func (i NonExportedFirst) nonexported() int { panic("wrong") }
+
+func TestIssue22073(t *testing.T) {
+ m := ValueOf(NonExportedFirst(0)).Method(0)
+
+ if got := m.Type().NumOut(); got != 0 {
+ t.Errorf("NumOut: got %v, want 0", got)
+ }
+
+ // Shouldn't panic.
+ m.Call(nil)
+}
+
+func TestMapIterNonEmptyMap(t *testing.T) {
+ m := map[string]int{"one": 1, "two": 2, "three": 3}
+ iter := ValueOf(m).MapRange()
+ if got, want := iterateToString(iter), `[one: 1, three: 3, two: 2]`; got != want {
+ t.Errorf("iterator returned %s (after sorting), want %s", got, want)
+ }
+}
+
+func TestMapIterNilMap(t *testing.T) {
+ var m map[string]int
+ iter := ValueOf(m).MapRange()
+ if got, want := iterateToString(iter), `[]`; got != want {
+ t.Errorf("non-empty result iteratoring nil map: %s", got)
+ }
+}
+
+func TestMapIterReset(t *testing.T) {
+ iter := new(MapIter)
+
+ // Use of zero iterator should panic.
+ func() {
+ defer func() { recover() }()
+ iter.Next()
+ t.Error("Next did not panic")
+ }()
+
+ // Reset to new Map should work.
+ m := map[string]int{"one": 1, "two": 2, "three": 3}
+ iter.Reset(ValueOf(m))
+ if got, want := iterateToString(iter), `[one: 1, three: 3, two: 2]`; got != want {
+ t.Errorf("iterator returned %s (after sorting), want %s", got, want)
+ }
+
+ // Reset to Zero value should work, but iterating over it should panic.
+ iter.Reset(Value{})
+ func() {
+ defer func() { recover() }()
+ iter.Next()
+ t.Error("Next did not panic")
+ }()
+
+ // Reset to a different Map with different types should work.
+ m2 := map[int]string{1: "one", 2: "two", 3: "three"}
+ iter.Reset(ValueOf(m2))
+ if got, want := iterateToString(iter), `[1: one, 2: two, 3: three]`; got != want {
+ t.Errorf("iterator returned %s (after sorting), want %s", got, want)
+ }
+
+ // Check that Reset, Next, and SetIterKey/SetIterValue play nicely together.
+ m3 := map[uint64]uint64{
+ 1 << 0: 1 << 1,
+ 1 << 1: 1 << 2,
+ 1 << 2: 1 << 3,
+ }
+ kv := New(TypeOf(uint64(0))).Elem()
+ for i := 0; i < 5; i++ {
+ var seenk, seenv uint64
+ iter.Reset(ValueOf(m3))
+ for iter.Next() {
+ kv.SetIterKey(iter)
+ seenk ^= kv.Uint()
+ kv.SetIterValue(iter)
+ seenv ^= kv.Uint()
+ }
+ if seenk != 0b111 {
+ t.Errorf("iteration yielded keys %b, want %b", seenk, 0b111)
+ }
+ if seenv != 0b1110 {
+ t.Errorf("iteration yielded values %b, want %b", seenv, 0b1110)
+ }
+ }
+
+ // Reset should not allocate.
+ n := int(testing.AllocsPerRun(10, func() {
+ iter.Reset(ValueOf(m2))
+ iter.Reset(Value{})
+ }))
+ if n > 0 {
+ t.Errorf("MapIter.Reset allocated %d times", n)
+ }
+}
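+
+// Illustrative sketch (hypothetical usage, as seen from a client package) of
+// the Reset pattern exercised above: one MapIter can be reused across maps so
+// no new iterator is allocated per map. Here ms is a hypothetical
+// []reflect.Value of map values.
+//
+//	iter := new(reflect.MapIter)
+//	for _, m := range ms {
+//		iter.Reset(m)
+//		for iter.Next() {
+//			_, _ = iter.Key(), iter.Value()
+//		}
+//	}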
+
+func TestMapIterSafety(t *testing.T) {
+ // Using a zero MapIter causes a panic, but not a crash.
+ func() {
+ defer func() { recover() }()
+ new(MapIter).Key()
+ t.Fatal("Key did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ new(MapIter).Value()
+ t.Fatal("Value did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ new(MapIter).Next()
+ t.Fatal("Next did not panic")
+ }()
+
+ // Calling Key/Value on a MapIter before Next
+ // causes a panic, but not a crash.
+ var m map[string]int
+ iter := ValueOf(m).MapRange()
+
+ func() {
+ defer func() { recover() }()
+ iter.Key()
+ t.Fatal("Key did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ iter.Value()
+ t.Fatal("Value did not panic")
+ }()
+
+ // Calling Next, Key, or Value on an exhausted iterator
+ // causes a panic, but not a crash.
+ iter.Next() // -> false
+ func() {
+ defer func() { recover() }()
+ iter.Key()
+ t.Fatal("Key did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ iter.Value()
+ t.Fatal("Value did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ iter.Next()
+ t.Fatal("Next did not panic")
+ }()
+}
+
+func TestMapIterNext(t *testing.T) {
+ // The first call to Next should reflect any
+ // insertions to the map since the iterator was created.
+ m := map[string]int{}
+ iter := ValueOf(m).MapRange()
+ m["one"] = 1
+ if got, want := iterateToString(iter), `[one: 1]`; got != want {
+ t.Errorf("iterator returned deleted elements: got %s, want %s", got, want)
+ }
+}
+
+func TestMapIterDelete0(t *testing.T) {
+ // Delete all elements before first iteration.
+ m := map[string]int{"one": 1, "two": 2, "three": 3}
+ iter := ValueOf(m).MapRange()
+ delete(m, "one")
+ delete(m, "two")
+ delete(m, "three")
+ if got, want := iterateToString(iter), `[]`; got != want {
+ t.Errorf("iterator returned deleted elements: got %s, want %s", got, want)
+ }
+}
+
+func TestMapIterDelete1(t *testing.T) {
+ // Delete all elements after first iteration.
+ m := map[string]int{"one": 1, "two": 2, "three": 3}
+ iter := ValueOf(m).MapRange()
+ var got []string
+ for iter.Next() {
+ got = append(got, fmt.Sprint(iter.Key(), iter.Value()))
+ delete(m, "one")
+ delete(m, "two")
+ delete(m, "three")
+ }
+ if len(got) != 1 {
+ t.Errorf("iterator returned wrong number of elements: got %d, want 1", len(got))
+ }
+}
+
+// iterateToString returns the set of elements
+// returned by an iterator in readable form.
+func iterateToString(it *MapIter) string {
+ var got []string
+ for it.Next() {
+ line := fmt.Sprintf("%v: %v", it.Key(), it.Value())
+ got = append(got, line)
+ }
+ sort.Strings(got)
+ return "[" + strings.Join(got, ", ") + "]"
+}
+
+func TestConvertibleTo(t *testing.T) {
+ t1 := ValueOf(example1.MyStruct{}).Type()
+ t2 := ValueOf(example2.MyStruct{}).Type()
+
+ // Shouldn't cause a stack overflow
+ if t1.ConvertibleTo(t2) {
+ t.Fatalf("(%s).ConvertibleTo(%s) = true, want false", t1, t2)
+ }
+
+ t3 := ValueOf([]example1.MyStruct{}).Type()
+ t4 := ValueOf([]example2.MyStruct{}).Type()
+
+ if t3.ConvertibleTo(t4) {
+ t.Fatalf("(%s).ConvertibleTo(%s) = true, want false", t3, t4)
+ }
+}
+
+func TestSetIter(t *testing.T) {
+ data := map[string]int{
+ "foo": 1,
+ "bar": 2,
+ "baz": 3,
+ }
+
+ m := ValueOf(data)
+ i := m.MapRange()
+ k := New(TypeOf("")).Elem()
+ v := New(TypeOf(0)).Elem()
+ shouldPanic("Value.SetIterKey called before Next", func() {
+ k.SetIterKey(i)
+ })
+ shouldPanic("Value.SetIterValue called before Next", func() {
+ v.SetIterValue(i)
+ })
+ data2 := map[string]int{}
+ for i.Next() {
+ k.SetIterKey(i)
+ v.SetIterValue(i)
+ data2[k.Interface().(string)] = v.Interface().(int)
+ }
+ if !DeepEqual(data, data2) {
+ t.Errorf("maps not equal, got %v want %v", data2, data)
+ }
+ shouldPanic("Value.SetIterKey called on exhausted iterator", func() {
+ k.SetIterKey(i)
+ })
+ shouldPanic("Value.SetIterValue called on exhausted iterator", func() {
+ v.SetIterValue(i)
+ })
+
+ i.Reset(m)
+ i.Next()
+ shouldPanic("Value.SetIterKey using unaddressable value", func() {
+ ValueOf("").SetIterKey(i)
+ })
+ shouldPanic("Value.SetIterValue using unaddressable value", func() {
+ ValueOf(0).SetIterValue(i)
+ })
+ shouldPanic("value of type string is not assignable to type int", func() {
+ New(TypeOf(0)).Elem().SetIterKey(i)
+ })
+ shouldPanic("value of type int is not assignable to type string", func() {
+ New(TypeOf("")).Elem().SetIterValue(i)
+ })
+
+ // Make sure assignment conversion works.
+ var x any
+ y := ValueOf(&x).Elem()
+ y.SetIterKey(i)
+ if _, ok := data[x.(string)]; !ok {
+ t.Errorf("got key %s which is not in map", x)
+ }
+ y.SetIterValue(i)
+ if x.(int) < 1 || x.(int) > 3 {
+ t.Errorf("got value %d which is not in map", x)
+ }
+
+ // Try some key/value types which are direct interfaces.
+ a := 88
+ b := 99
+ pp := map[*int]*int{
+ &a: &b,
+ }
+ i = ValueOf(pp).MapRange()
+ i.Next()
+ y.SetIterKey(i)
+ if got := *y.Interface().(*int); got != a {
+ t.Errorf("pointer incorrect: got %d want %d", got, a)
+ }
+ y.SetIterValue(i)
+ if got := *y.Interface().(*int); got != b {
+ t.Errorf("pointer incorrect: got %d want %d", got, b)
+ }
+
+ // Make sure we panic assigning from an unexported field.
+ m = ValueOf(struct{ m map[string]int }{data}).Field(0)
+ for iter := m.MapRange(); iter.Next(); {
+ shouldPanic("using value obtained using unexported field", func() {
+ k.SetIterKey(iter)
+ })
+ shouldPanic("using value obtained using unexported field", func() {
+ v.SetIterValue(iter)
+ })
+ }
+}
+
+func TestMethodCallValueCodePtr(t *testing.T) {
+ m := ValueOf(Point{}).Method(1)
+ want := MethodValueCallCodePtr()
+ if got := uintptr(m.UnsafePointer()); got != want {
+ t.Errorf("methodValueCall code pointer mismatched, want: %v, got: %v", want, got)
+ }
+ if got := m.Pointer(); got != want {
+ t.Errorf("methodValueCall code pointer mismatched, want: %v, got: %v", want, got)
+ }
+}
+
+type A struct{}
+type B[T any] struct{}
+
+func TestIssue50208(t *testing.T) {
+ want1 := "B[reflect_test.A]"
+ if got := TypeOf(new(B[A])).Elem().Name(); got != want1 {
+ t.Errorf("name of type parameter mismatched, want:%s, got:%s", want1, got)
+ }
+ want2 := "B[reflect_test.B[reflect_test.A]]"
+ if got := TypeOf(new(B[B[A]])).Elem().Name(); got != want2 {
+ t.Errorf("name of type parameter mismatched, want:%s, got:%s", want2, got)
+ }
+}
+
+func TestNegativeKindString(t *testing.T) {
+ x := -1
+ s := Kind(x).String()
+ want := "kind-1"
+ if s != want {
+ t.Fatalf("Kind(-1).String() = %q, want %q", s, want)
+ }
+}
+
+type (
+ namedBool bool
+ namedBytes []byte
+)
+
+func TestValue_Cap(t *testing.T) {
+ a := &[3]int{1, 2, 3}
+ v := ValueOf(a)
+ if v.Cap() != cap(a) {
+ t.Errorf("Cap = %d want %d", v.Cap(), cap(a))
+ }
+
+ a = nil
+ v = ValueOf(a)
+ if v.Cap() != cap(a) {
+ t.Errorf("Cap = %d want %d", v.Cap(), cap(a))
+ }
+
+ getError := func(f func()) (errorStr string) {
+ defer func() {
+ e := recover()
+ if str, ok := e.(string); ok {
+ errorStr = str
+ }
+ }()
+ f()
+ return
+ }
+ e := getError(func() {
+ var ptr *int
+ ValueOf(ptr).Cap()
+ })
+ wantStr := "reflect: call of reflect.Value.Cap on ptr to non-array Value"
+ if e != wantStr {
+ t.Errorf("error is %q, want %q", e, wantStr)
+ }
+}
+
+func TestValue_Len(t *testing.T) {
+ a := &[3]int{1, 2, 3}
+ v := ValueOf(a)
+ if v.Len() != len(a) {
+ t.Errorf("Len = %d want %d", v.Len(), len(a))
+ }
+
+ a = nil
+ v = ValueOf(a)
+ if v.Len() != len(a) {
+ t.Errorf("Len = %d want %d", v.Len(), len(a))
+ }
+
+ getError := func(f func()) (errorStr string) {
+ defer func() {
+ e := recover()
+ if str, ok := e.(string); ok {
+ errorStr = str
+ }
+ }()
+ f()
+ return
+ }
+ e := getError(func() {
+ var ptr *int
+ ValueOf(ptr).Len()
+ })
+ wantStr := "reflect: call of reflect.Value.Len on ptr to non-array Value"
+ if e != wantStr {
+ t.Errorf("error is %q, want %q", e, wantStr)
+ }
+}
+
+func TestValue_Comparable(t *testing.T) {
+ var a int
+ var s []int
+ var i interface{} = a
+ var iSlice interface{} = s
+ var iArrayFalse interface{} = [2]interface{}{1, map[int]int{}}
+ var iArrayTrue interface{} = [2]interface{}{1, struct{ I interface{} }{1}}
+ var testcases = []struct {
+ value Value
+ comparable bool
+ deref bool
+ }{
+ {
+ ValueOf(32),
+ true,
+ false,
+ },
+ {
+ ValueOf(int8(1)),
+ true,
+ false,
+ },
+ {
+ ValueOf(int16(1)),
+ true,
+ false,
+ },
+ {
+ ValueOf(int32(1)),
+ true,
+ false,
+ },
+ {
+ ValueOf(int64(1)),
+ true,
+ false,
+ },
+ {
+ ValueOf(uint8(1)),
+ true,
+ false,
+ },
+ {
+ ValueOf(uint16(1)),
+ true,
+ false,
+ },
+ {
+ ValueOf(uint32(1)),
+ true,
+ false,
+ },
+ {
+ ValueOf(uint64(1)),
+ true,
+ false,
+ },
+ {
+ ValueOf(float32(1)),
+ true,
+ false,
+ },
+ {
+ ValueOf(float64(1)),
+ true,
+ false,
+ },
+ {
+ ValueOf(complex(float32(1), float32(1))),
+ true,
+ false,
+ },
+ {
+ ValueOf(complex(float64(1), float64(1))),
+ true,
+ false,
+ },
+ {
+ ValueOf("abc"),
+ true,
+ false,
+ },
+ {
+ ValueOf(true),
+ true,
+ false,
+ },
+ {
+ ValueOf(map[int]int{}),
+ false,
+ false,
+ },
+ {
+ ValueOf([]int{}),
+ false,
+ false,
+ },
+ {
+ Value{},
+ false,
+ false,
+ },
+ {
+ ValueOf(&a),
+ true,
+ false,
+ },
+ {
+ ValueOf(&s),
+ true,
+ false,
+ },
+ {
+ ValueOf(&i),
+ true,
+ true,
+ },
+ {
+ ValueOf(&iSlice),
+ false,
+ true,
+ },
+ {
+ ValueOf([2]int{}),
+ true,
+ false,
+ },
+ {
+ ValueOf([2]map[int]int{}),
+ false,
+ false,
+ },
+ {
+ ValueOf([0]func(){}),
+ false,
+ false,
+ },
+ {
+ ValueOf([2]struct{ I interface{} }{{1}, {1}}),
+ true,
+ false,
+ },
+ {
+ ValueOf([2]struct{ I interface{} }{{[]int{}}, {1}}),
+ false,
+ false,
+ },
+ {
+ ValueOf([2]interface{}{1, struct{ I int }{1}}),
+ true,
+ false,
+ },
+ {
+ ValueOf([2]interface{}{[1]interface{}{map[int]int{}}, struct{ I int }{1}}),
+ false,
+ false,
+ },
+ {
+ ValueOf(&iArrayFalse),
+ false,
+ true,
+ },
+ {
+ ValueOf(&iArrayTrue),
+ true,
+ true,
+ },
+ }
+
+ for _, cas := range testcases {
+ v := cas.value
+ if cas.deref {
+ v = v.Elem()
+ }
+ got := v.Comparable()
+ if got != cas.comparable {
+ t.Errorf("%T.Comparable = %t, want %t", v, got, cas.comparable)
+ }
+ }
+}
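+
+// Illustrative sketch (hypothetical usage, as seen from a client package):
+// Value.Comparable reports whether the value can safely be used as an
+// operand of ==.
+//
+//	reflect.ValueOf(42).Comparable()      // true
+//	reflect.ValueOf([]int{}).Comparable() // false: slices are not comparable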
+
+type ValueEqualTest struct {
+ v, u any
+ eq bool
+ vDeref, uDeref bool
+}
+
+var equalI interface{} = 1
+var equalSlice interface{} = []int{1}
+var nilInterface interface{}
+var mapInterface interface{} = map[int]int{}
+
+var valueEqualTests = []ValueEqualTest{
+ {
+ Value{}, Value{},
+ true,
+ false, false,
+ },
+ {
+ true, true,
+ true,
+ false, false,
+ },
+ {
+ 1, 1,
+ true,
+ false, false,
+ },
+ {
+ int8(1), int8(1),
+ true,
+ false, false,
+ },
+ {
+ int16(1), int16(1),
+ true,
+ false, false,
+ },
+ {
+ int32(1), int32(1),
+ true,
+ false, false,
+ },
+ {
+ int64(1), int64(1),
+ true,
+ false, false,
+ },
+ {
+ uint(1), uint(1),
+ true,
+ false, false,
+ },
+ {
+ uint8(1), uint8(1),
+ true,
+ false, false,
+ },
+ {
+ uint16(1), uint16(1),
+ true,
+ false, false,
+ },
+ {
+ uint32(1), uint32(1),
+ true,
+ false, false,
+ },
+ {
+ uint64(1), uint64(1),
+ true,
+ false, false,
+ },
+ {
+ float32(1), float32(1),
+ true,
+ false, false,
+ },
+ {
+ float64(1), float64(1),
+ true,
+ false, false,
+ },
+ {
+ complex(1, 1), complex(1, 1),
+ true,
+ false, false,
+ },
+ {
+ complex128(1 + 1i), complex128(1 + 1i),
+ true,
+ false, false,
+ },
+ {
+ func() {}, nil,
+ false,
+ false, false,
+ },
+ {
+ &equalI, 1,
+ true,
+ true, false,
+ },
+ {
+ (chan int)(nil), nil,
+ false,
+ false, false,
+ },
+ {
+ (chan int)(nil), (chan int)(nil),
+ true,
+ false, false,
+ },
+ {
+ &equalI, &equalI,
+ true,
+ false, false,
+ },
+ {
+ struct{ i int }{1}, struct{ i int }{1},
+ true,
+ false, false,
+ },
+ {
+ struct{ i int }{1}, struct{ i int }{2},
+ false,
+ false, false,
+ },
+ {
+ &nilInterface, &nilInterface,
+ true,
+ true, true,
+ },
+ {
+ 1, ValueOf(struct{ i int }{1}).Field(0),
+ true,
+ false, false,
+ },
+}
+
+func TestValue_Equal(t *testing.T) {
+ for _, test := range valueEqualTests {
+ var v, u Value
+ if vv, ok := test.v.(Value); ok {
+ v = vv
+ } else {
+ v = ValueOf(test.v)
+ }
+
+ if uu, ok := test.u.(Value); ok {
+ u = uu
+ } else {
+ u = ValueOf(test.u)
+ }
+ if test.vDeref {
+ v = v.Elem()
+ }
+
+ if test.uDeref {
+ u = u.Elem()
+ }
+
+ if r := v.Equal(u); r != test.eq {
+ t.Errorf("%s == %s got %t, want %t", v.Type(), u.Type(), r, test.eq)
+ }
+ }
+}
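+
+// Illustrative sketch (hypothetical usage, as seen from a client package):
+// Value.Equal compares two Values as == would, and reports false for Values
+// of different types.
+//
+//	reflect.ValueOf(1).Equal(reflect.ValueOf(1)) // true
+//	reflect.ValueOf(1).Equal(reflect.ValueOf(2)) // false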
+
+func TestValue_EqualNonComparable(t *testing.T) {
+ var invalid = Value{} // ValueOf(nil)
+ var values = []Value{
+ // Value of slice is non-comparable.
+ ValueOf([]int(nil)),
+ ValueOf(([]int{})),
+
+ // Value of map is non-comparable.
+ ValueOf(map[int]int(nil)),
+ ValueOf((map[int]int{})),
+
+ // Value of func is non-comparable.
+ ValueOf(((func())(nil))),
+ ValueOf(func() {}),
+
+ // Value of struct is non-comparable because of non-comparable elements.
+ ValueOf((NonComparableStruct{})),
+
+ // Value of array is non-comparable because of non-comparable elements.
+ ValueOf([0]map[int]int{}),
+ ValueOf([0]func(){}),
+ ValueOf(([1]struct{ I interface{} }{{[]int{}}})),
+ ValueOf(([1]interface{}{[1]interface{}{map[int]int{}}})),
+ }
+ for _, value := range values {
+ // reflect.Value.Equal must panic when comparing two valid but non-comparable values.
+ shouldPanic("are not comparable", func() { value.Equal(value) })
+
+ // If one is non-comparable and the other is invalid, the expected result is always false.
+ if r := value.Equal(invalid); r != false {
+ t.Errorf("%s == invalid got %t, want false", value.Type(), r)
+ }
+ }
+}
+
+func TestInitFuncTypes(t *testing.T) {
+ n := 100
+ var wg sync.WaitGroup
+
+ wg.Add(n)
+ for i := 0; i < n; i++ {
+ go func() {
+ defer wg.Done()
+ ipT := TypeOf(net.IP{})
+ for i := 0; i < ipT.NumMethod(); i++ {
+ _ = ipT.Method(i)
+ }
+ }()
+ }
+ wg.Wait()
+}
diff --git a/src/reflect/arena.go b/src/reflect/arena.go
new file mode 100644
index 0000000..694a3a1
--- /dev/null
+++ b/src/reflect/arena.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.arenas
+
+package reflect
+
+import "arena"
+
+// ArenaNew returns a Value representing a pointer to a new zero value for the
+// specified type, allocating storage for it in the provided arena. That is,
+// the returned Value's Type is PointerTo(typ).
+func ArenaNew(a *arena.Arena, typ Type) Value {
+ return ValueOf(arena_New(a, typ))
+}
+
+func arena_New(a *arena.Arena, typ any) any
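+
+// Illustrative sketch (hypothetical usage, assuming GOEXPERIMENT=arenas is
+// enabled so that this file builds):
+//
+//	a := arena.NewArena()
+//	defer a.Free()
+//	v := reflect.ArenaNew(a, reflect.TypeOf(0)) // v's Type is *int
+//	v.Elem().SetInt(42)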
diff --git a/src/reflect/asm_386.s b/src/reflect/asm_386.s
new file mode 100644
index 0000000..5bedea5
--- /dev/null
+++ b/src/reflect/asm_386.s
@@ -0,0 +1,38 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No argsize here, gc generates argsize info at call site.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$20
+ NO_LOCAL_POINTERS
+ MOVL DX, 0(SP)
+ LEAL argframe+0(FP), CX
+ MOVL CX, 4(SP)
+ MOVB $0, 16(SP)
+ LEAL 16(SP), AX
+ MOVL AX, 8(SP)
+ MOVL $0, 12(SP)
+ CALL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No argsize here, gc generates argsize info at call site.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$20
+ NO_LOCAL_POINTERS
+ MOVL DX, 0(SP)
+ LEAL argframe+0(FP), CX
+ MOVL CX, 4(SP)
+ MOVB $0, 16(SP)
+ LEAL 16(SP), AX
+ MOVL AX, 8(SP)
+ MOVL $0, 12(SP)
+ CALL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_amd64.s b/src/reflect/asm_amd64.s
new file mode 100644
index 0000000..d21d498
--- /dev/null
+++ b/src/reflect/asm_amd64.s
@@ -0,0 +1,79 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+// The frames of each of the two functions below contain two locals, at offsets
+// that are known to the runtime.
+//
+// The first local is a bool called retValid with a whole pointer-word reserved
+// for it on the stack. The purpose of this word is so that the runtime knows
+// whether the stack-allocated return space contains valid values for stack
+// scanning.
+//
+// The second local is an abi.RegArgs value whose offset is also known to the
+// runtime, so that a stack map for it can be constructed, since it contains
+// pointers visible to the GC.
+#define LOCAL_RETVALID 32
+#define LOCAL_REGARGS 40
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+// This frame contains two locals. See the comment above LOCAL_RETVALID.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$312
+ NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ LEAQ LOCAL_REGARGS(SP), R12
+ CALL runtime·spillArgs(SB)
+ MOVQ DX, 24(SP) // outside of moveMakeFuncArgPtrs's arg area
+ MOVQ DX, 0(SP)
+ MOVQ R12, 8(SP)
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOVQ 24(SP), DX
+ MOVQ DX, 0(SP)
+ LEAQ argframe+0(FP), CX
+ MOVQ CX, 8(SP)
+ MOVB $0, LOCAL_RETVALID(SP)
+ LEAQ LOCAL_RETVALID(SP), AX
+ MOVQ AX, 16(SP)
+ LEAQ LOCAL_REGARGS(SP), AX
+ MOVQ AX, 24(SP)
+ CALL ·callReflect(SB)
+ LEAQ LOCAL_REGARGS(SP), R12
+ CALL runtime·unspillArgs(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+// This frame contains two locals. See the comment above LOCAL_RETVALID.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$312
+ NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ LEAQ LOCAL_REGARGS(SP), R12
+ CALL runtime·spillArgs(SB)
+ MOVQ DX, 24(SP) // outside of moveMakeFuncArgPtrs's arg area
+ MOVQ DX, 0(SP)
+ MOVQ R12, 8(SP)
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOVQ 24(SP), DX
+ MOVQ DX, 0(SP)
+ LEAQ argframe+0(FP), CX
+ MOVQ CX, 8(SP)
+ MOVB $0, LOCAL_RETVALID(SP)
+ LEAQ LOCAL_RETVALID(SP), AX
+ MOVQ AX, 16(SP)
+ LEAQ LOCAL_REGARGS(SP), AX
+ MOVQ AX, 24(SP)
+ CALL ·callMethod(SB)
+ LEAQ LOCAL_REGARGS(SP), R12
+ CALL runtime·unspillArgs(SB)
+ RET
diff --git a/src/reflect/asm_arm.s b/src/reflect/asm_arm.s
new file mode 100644
index 0000000..057c941
--- /dev/null
+++ b/src/reflect/asm_arm.s
@@ -0,0 +1,42 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is jumped to by the code generated by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No argsize here, gc generates argsize info at call site.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$20
+ NO_LOCAL_POINTERS
+ MOVW R7, 4(R13)
+ MOVW $argframe+0(FP), R1
+ MOVW R1, 8(R13)
+ MOVW $0, R1
+ MOVB R1, 20(R13)
+ ADD $20, R13, R1
+ MOVW R1, 12(R13)
+ MOVW $0, R1
+ MOVW R1, 16(R13)
+ BL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No argsize here, gc generates argsize info at call site.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$20
+ NO_LOCAL_POINTERS
+ MOVW R7, 4(R13)
+ MOVW $argframe+0(FP), R1
+ MOVW R1, 8(R13)
+ MOVW $0, R1
+ MOVB R1, 20(R13)
+ ADD $20, R13, R1
+ MOVW R1, 12(R13)
+ MOVW $0, R1
+ MOVW R1, 16(R13)
+ BL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_arm64.s b/src/reflect/asm_arm64.s
new file mode 100644
index 0000000..5e91e62
--- /dev/null
+++ b/src/reflect/asm_arm64.s
@@ -0,0 +1,79 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// The frames of each of the two functions below contain two locals, at offsets
+// that are known to the runtime.
+//
+// The first local is a bool called retValid with a whole pointer-word reserved
+// for it on the stack. The purpose of this word is so that the runtime knows
+// whether the stack-allocated return space contains valid values for stack
+// scanning.
+//
+// The second local is an abi.RegArgs value whose offset is also known to the
+// runtime, so that a stack map for it can be constructed, since it contains
+// pointers visible to the GC.
+#define LOCAL_RETVALID 40
+#define LOCAL_REGARGS 48
+
+// The frame size of the functions below is
+// 32 (args of callReflect) + 8 (bool + padding) + 392 (abi.RegArgs) = 432.
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here, runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$432
+ NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·spillArgs(SB)
+ MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area
+ MOVD R26, R0
+ MOVD R20, R1
+ CALL ·moveMakeFuncArgPtrs<ABIInternal>(SB)
+ MOVD 32(RSP), R26
+ MOVD R26, 8(RSP)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, 16(RSP)
+ MOVB $0, LOCAL_RETVALID(RSP)
+ ADD $LOCAL_RETVALID, RSP, R3
+ MOVD R3, 24(RSP)
+ ADD $LOCAL_REGARGS, RSP, R3
+ MOVD R3, 32(RSP)
+ CALL ·callReflect(SB)
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·unspillArgs(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$432
+ NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·spillArgs(SB)
+ MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area
+ MOVD R26, R0
+ MOVD R20, R1
+ CALL ·moveMakeFuncArgPtrs<ABIInternal>(SB)
+ MOVD 32(RSP), R26
+ MOVD R26, 8(RSP)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, 16(RSP)
+ MOVB $0, LOCAL_RETVALID(RSP)
+ ADD $LOCAL_RETVALID, RSP, R3
+ MOVD R3, 24(RSP)
+ ADD $LOCAL_REGARGS, RSP, R3
+ MOVD R3, 32(RSP)
+ CALL ·callMethod(SB)
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·unspillArgs(SB)
+ RET
diff --git a/src/reflect/asm_loong64.s b/src/reflect/asm_loong64.s
new file mode 100644
index 0000000..341a6d5
--- /dev/null
+++ b/src/reflect/asm_loong64.s
@@ -0,0 +1,40 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+#define REGCTXT R29
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here, runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40
+ NO_LOCAL_POINTERS
+ MOVV REGCTXT, 8(R3)
+ MOVV $argframe+0(FP), R19
+ MOVV R19, 16(R3)
+ MOVB R0, 40(R3)
+ ADDV $40, R3, R19
+ MOVV R19, 24(R3)
+ MOVV R0, 32(R3)
+ JAL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40
+ NO_LOCAL_POINTERS
+ MOVV REGCTXT, 8(R3)
+ MOVV $argframe+0(FP), R19
+ MOVV R19, 16(R3)
+ MOVB R0, 40(R3)
+ ADDV $40, R3, R19
+ MOVV R19, 24(R3)
+ MOVV R0, 32(R3)
+ JAL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_mips64x.s b/src/reflect/asm_mips64x.s
new file mode 100644
index 0000000..f21e34d
--- /dev/null
+++ b/src/reflect/asm_mips64x.s
@@ -0,0 +1,42 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips64 || mips64le
+
+#include "textflag.h"
+#include "funcdata.h"
+
+#define REGCTXT R22
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here, runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40
+ NO_LOCAL_POINTERS
+ MOVV REGCTXT, 8(R29)
+ MOVV $argframe+0(FP), R1
+ MOVV R1, 16(R29)
+ MOVB R0, 40(R29)
+ ADDV $40, R29, R1
+ MOVV R1, 24(R29)
+ MOVV R0, 32(R29)
+ JAL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40
+ NO_LOCAL_POINTERS
+ MOVV REGCTXT, 8(R29)
+ MOVV $argframe+0(FP), R1
+ MOVV R1, 16(R29)
+ MOVB R0, 40(R29)
+ ADDV $40, R29, R1
+ MOVV R1, 24(R29)
+ MOVV R0, 32(R29)
+ JAL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_mipsx.s b/src/reflect/asm_mipsx.s
new file mode 100644
index 0000000..636c8a5
--- /dev/null
+++ b/src/reflect/asm_mipsx.s
@@ -0,0 +1,42 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips || mipsle
+
+#include "textflag.h"
+#include "funcdata.h"
+
+#define REGCTXT R22
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here, runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$20
+ NO_LOCAL_POINTERS
+ MOVW REGCTXT, 4(R29)
+ MOVW $argframe+0(FP), R1
+ MOVW R1, 8(R29)
+ MOVB R0, 20(R29)
+ ADD $20, R29, R1
+ MOVW R1, 12(R29)
+ MOVW R0, 16(R29)
+ JAL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$20
+ NO_LOCAL_POINTERS
+ MOVW REGCTXT, 4(R29)
+ MOVW $argframe+0(FP), R1
+ MOVW R1, 8(R29)
+ MOVB R0, 20(R29)
+ ADD $20, R29, R1
+ MOVW R1, 12(R29)
+ MOVW R0, 16(R29)
+ JAL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_ppc64x.s b/src/reflect/asm_ppc64x.s
new file mode 100644
index 0000000..3b529be
--- /dev/null
+++ b/src/reflect/asm_ppc64x.s
@@ -0,0 +1,83 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "asm_ppc64x.h"
+
+// The frames of each of the two functions below contain two locals, at offsets
+// that are known to the runtime.
+//
+// The first local is a bool called retValid with a whole pointer-word reserved
+// for it on the stack. The purpose of this word is so that the runtime knows
+// whether the stack-allocated return space contains valid values for stack
+// scanning.
+//
+// The second local is an abi.RegArgs value whose offset is also known to the
+// runtime, so that a stack map for it can be constructed, since it contains
+// pointers visible to the GC.
+
+#define LOCAL_RETVALID 32+FIXED_FRAME
+#define LOCAL_REGARGS 40+FIXED_FRAME
+
+// The frame size of the functions below is
+// 32 (args of callReflect) + 8 (bool + padding) + 296 (abi.RegArgs) = 336.
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here, runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$336
+ NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ ADD $LOCAL_REGARGS, R1, R20
+ CALL runtime·spillArgs(SB)
+ MOVD R11, FIXED_FRAME+32(R1) // save R11
+ MOVD R11, FIXED_FRAME+0(R1) // arg for moveMakeFuncArgPtrs
+ MOVD R20, FIXED_FRAME+8(R1) // arg for local args
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOVD FIXED_FRAME+32(R1), R11 // restore R11 ctxt
+ MOVD R11, FIXED_FRAME+0(R1) // ctxt (arg0)
+ MOVD $argframe+0(FP), R3 // save arg to callArg
+ MOVD R3, FIXED_FRAME+8(R1) // frame (arg1)
+ ADD $LOCAL_RETVALID, R1, R3 // addr of return flag
+ MOVB R0, (R3) // clear flag
+ MOVD R3, FIXED_FRAME+16(R1) // addr retvalid (arg2)
+ ADD $LOCAL_REGARGS, R1, R3
+ MOVD R3, FIXED_FRAME+24(R1) // abiregargs (arg3)
+ BL ·callReflect(SB)
+ ADD $LOCAL_REGARGS, R1, R20 // set address of spill area
+ CALL runtime·unspillArgs(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$336
+ NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ ADD $LOCAL_REGARGS, R1, R20
+ CALL runtime·spillArgs(SB)
+ MOVD R11, FIXED_FRAME+0(R1) // arg0 ctxt
+ MOVD R11, FIXED_FRAME+32(R1) // save for later
+ MOVD R20, FIXED_FRAME+8(R1) // arg1 abiregargs
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOVD FIXED_FRAME+32(R1), R11 // restore ctxt
+ MOVD R11, FIXED_FRAME+0(R1) // set as arg0
+ MOVD $argframe+0(FP), R3 // frame pointer
+ MOVD R3, FIXED_FRAME+8(R1) // set as arg1
+ ADD $LOCAL_RETVALID, R1, R3
+ MOVB $0, (R3) // clear ret flag
+ MOVD R3, FIXED_FRAME+16(R1) // addr of return flag
+ ADD $LOCAL_REGARGS, R1, R3 // addr of abiregargs
+ MOVD R3, FIXED_FRAME+24(R1) // set as arg3
+ BL ·callMethod(SB)
+ ADD $LOCAL_REGARGS, R1, R20
+ CALL runtime·unspillArgs(SB)
+ RET
diff --git a/src/reflect/asm_riscv64.s b/src/reflect/asm_riscv64.s
new file mode 100644
index 0000000..1200b4d
--- /dev/null
+++ b/src/reflect/asm_riscv64.s
@@ -0,0 +1,76 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// The frames of each of the two functions below contain two locals, at offsets
+// that are known to the runtime.
+//
+// The first local is a bool called retValid with a whole pointer-word reserved
+// for it on the stack. The purpose of this word is so that the runtime knows
+// whether the stack-allocated return space contains valid values for stack
+// scanning.
+//
+// The second local is an abi.RegArgs value whose offset is also known to the
+// runtime, so that a stack map for it can be constructed, since it contains
+// pointers visible to the GC.
+#define LOCAL_RETVALID 40
+#define LOCAL_REGARGS 48
+
+// The frame size of the functions below is
+// 32 (args of callReflect/callMethod) + (8 bool with padding) + 392 (abi.RegArgs) = 432.
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here, runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$432
+ NO_LOCAL_POINTERS
+ ADD $LOCAL_REGARGS, SP, X25 // spillArgs using X25
+ CALL runtime·spillArgs(SB)
+ MOV CTXT, 32(SP) // save CTXT > args of moveMakeFuncArgPtrs < LOCAL_REGARGS
+ MOV CTXT, 8(SP)
+ MOV X25, 16(SP)
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOV 32(SP), CTXT // restore CTXT
+
+ MOV CTXT, 8(SP)
+ MOV $argframe+0(FP), T0
+ MOV T0, 16(SP)
+ MOV ZERO, LOCAL_RETVALID(SP)
+ ADD $LOCAL_RETVALID, SP, T1
+ MOV T1, 24(SP)
+ ADD $LOCAL_REGARGS, SP, T1
+ MOV T1, 32(SP)
+ CALL ·callReflect(SB)
+ ADD $LOCAL_REGARGS, SP, X25 // unspillArgs using X25
+ CALL runtime·unspillArgs(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$432
+ NO_LOCAL_POINTERS
+ ADD $LOCAL_REGARGS, SP, X25 // spillArgs using X25
+ CALL runtime·spillArgs(SB)
+ MOV CTXT, 32(SP) // save CTXT
+ MOV CTXT, 8(SP)
+ MOV X25, 16(SP)
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOV 32(SP), CTXT // restore CTXT
+ MOV CTXT, 8(SP)
+ MOV $argframe+0(FP), T0
+ MOV T0, 16(SP)
+ MOV ZERO, LOCAL_RETVALID(SP)
+ ADD $LOCAL_RETVALID, SP, T1
+ MOV T1, 24(SP)
+ ADD $LOCAL_REGARGS, SP, T1
+ MOV T1, 32(SP) // frame size to 32+SP as callreflect args
+ CALL ·callMethod(SB)
+ ADD $LOCAL_REGARGS, SP, X25 // unspillArgs using X25
+ CALL runtime·unspillArgs(SB)
+ RET
diff --git a/src/reflect/asm_s390x.s b/src/reflect/asm_s390x.s
new file mode 100644
index 0000000..4bd6613
--- /dev/null
+++ b/src/reflect/asm_s390x.s
@@ -0,0 +1,38 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here, runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40
+ NO_LOCAL_POINTERS
+ MOVD R12, 8(R15)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, 16(R15)
+ MOVB $0, 40(R15)
+ ADD $40, R15, R3
+ MOVD R3, 24(R15)
+ MOVD $0, 32(R15)
+ BL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40
+ NO_LOCAL_POINTERS
+ MOVD R12, 8(R15)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, 16(R15)
+ MOVB $0, 40(R15)
+ ADD $40, R15, R3
+ MOVD R3, 24(R15)
+ MOVD $0, 32(R15)
+ BL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_wasm.s b/src/reflect/asm_wasm.s
new file mode 100644
index 0000000..71abe67
--- /dev/null
+++ b/src/reflect/asm_wasm.s
@@ -0,0 +1,52 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40
+ NO_LOCAL_POINTERS
+
+ MOVD CTXT, 0(SP)
+
+ Get SP
+ Get SP
+ I64ExtendI32U
+ I64Const $argframe+0(FP)
+ I64Add
+ I64Store $8
+
+ MOVB $0, 32(SP)
+ MOVD $32(SP), 16(SP)
+ MOVD $0, 24(SP)
+
+ CALL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40
+ NO_LOCAL_POINTERS
+
+ MOVD CTXT, 0(SP)
+
+ Get SP
+ Get SP
+ I64ExtendI32U
+ I64Const $argframe+0(FP)
+ I64Add
+ I64Store $8
+
+ MOVB $0, 32(SP)
+ MOVD $32(SP), 16(SP)
+ MOVD $0, 24(SP)
+
+ CALL ·callMethod(SB)
+ RET
diff --git a/src/reflect/benchmark_test.go b/src/reflect/benchmark_test.go
new file mode 100644
index 0000000..51634ab
--- /dev/null
+++ b/src/reflect/benchmark_test.go
@@ -0,0 +1,397 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect_test
+
+import (
+ "fmt"
+ . "reflect"
+ "strconv"
+ "testing"
+)
+
+var sourceAll = struct {
+ Bool Value
+ String Value
+ Bytes Value
+ NamedBytes Value
+ BytesArray Value
+ SliceAny Value
+ MapStringAny Value
+}{
+ Bool: ValueOf(new(bool)).Elem(),
+ String: ValueOf(new(string)).Elem(),
+ Bytes: ValueOf(new([]byte)).Elem(),
+ NamedBytes: ValueOf(new(namedBytes)).Elem(),
+ BytesArray: ValueOf(new([32]byte)).Elem(),
+ SliceAny: ValueOf(new([]any)).Elem(),
+ MapStringAny: ValueOf(new(map[string]any)).Elem(),
+}
+
+var sinkAll struct {
+ RawBool bool
+ RawString string
+ RawBytes []byte
+ RawInt int
+}
+
+func BenchmarkBool(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sinkAll.RawBool = sourceAll.Bool.Bool()
+ }
+}
+
+func BenchmarkString(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sinkAll.RawString = sourceAll.String.String()
+ }
+}
+
+func BenchmarkBytes(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sinkAll.RawBytes = sourceAll.Bytes.Bytes()
+ }
+}
+
+func BenchmarkNamedBytes(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sinkAll.RawBytes = sourceAll.NamedBytes.Bytes()
+ }
+}
+
+func BenchmarkBytesArray(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sinkAll.RawBytes = sourceAll.BytesArray.Bytes()
+ }
+}
+
+func BenchmarkSliceLen(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sinkAll.RawInt = sourceAll.SliceAny.Len()
+ }
+}
+
+func BenchmarkMapLen(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sinkAll.RawInt = sourceAll.MapStringAny.Len()
+ }
+}
+
+func BenchmarkStringLen(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sinkAll.RawInt = sourceAll.String.Len()
+ }
+}
+
+func BenchmarkArrayLen(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sinkAll.RawInt = sourceAll.BytesArray.Len()
+ }
+}
+
+func BenchmarkSliceCap(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sinkAll.RawInt = sourceAll.SliceAny.Cap()
+ }
+}
+
+func BenchmarkDeepEqual(b *testing.B) {
+ for _, bb := range deepEqualPerfTests {
+ b.Run(ValueOf(bb.x).Type().String(), func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ sink = DeepEqual(bb.x, bb.y)
+ }
+ })
+ }
+}
+
+func BenchmarkIsZero(b *testing.B) {
+ source := ValueOf(struct {
+ ArrayComparable [4]T
+ ArrayIncomparable [4]_Complex
+ StructComparable T
+ StructIncomparable _Complex
+ }{})
+
+ for i := 0; i < source.NumField(); i++ {
+ name := source.Type().Field(i).Name
+ value := source.Field(i)
+ b.Run(name, func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sink = value.IsZero()
+ }
+ })
+ }
+}
+
+func BenchmarkSetZero(b *testing.B) {
+ source := ValueOf(new(struct {
+ Bool bool
+ Int int64
+ Uint uint64
+ Float float64
+ Complex complex128
+ Array [4]Value
+ Chan chan Value
+ Func func() Value
+ Interface interface{ String() string }
+ Map map[string]Value
+ Pointer *Value
+ Slice []Value
+ String string
+ Struct Value
+ })).Elem()
+
+ for i := 0; i < source.NumField(); i++ {
+ name := source.Type().Field(i).Name
+ value := source.Field(i)
+ zero := Zero(value.Type())
+ b.Run(name+"/Direct", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ value.SetZero()
+ }
+ })
+ b.Run(name+"/CachedZero", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ value.Set(zero)
+ }
+ })
+ b.Run(name+"/NewZero", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ value.Set(Zero(value.Type()))
+ }
+ })
+ }
+}
+
+func BenchmarkSelect(b *testing.B) {
+ channel := make(chan int)
+ close(channel)
+ var cases []SelectCase
+ for i := 0; i < 8; i++ {
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ValueOf(channel),
+ })
+ }
+ for _, numCases := range []int{1, 4, 8} {
+ b.Run(strconv.Itoa(numCases), func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ _, _, _ = Select(cases[:numCases])
+ }
+ })
+ }
+}
+
+func BenchmarkCall(b *testing.B) {
+ fv := ValueOf(func(a, b string) {})
+ b.ReportAllocs()
+ b.RunParallel(func(pb *testing.PB) {
+ args := []Value{ValueOf("a"), ValueOf("b")}
+ for pb.Next() {
+ fv.Call(args)
+ }
+ })
+}
+
+type myint int64
+
+func (i *myint) inc() {
+ *i = *i + 1
+}
+
+func BenchmarkCallMethod(b *testing.B) {
+ b.ReportAllocs()
+ z := new(myint)
+
+ v := ValueOf(z.inc)
+ for i := 0; i < b.N; i++ {
+ v.Call(nil)
+ }
+}
+
+func BenchmarkCallArgCopy(b *testing.B) {
+ byteArray := func(n int) Value {
+ return Zero(ArrayOf(n, TypeOf(byte(0))))
+ }
+ sizes := [...]struct {
+ fv Value
+ arg Value
+ }{
+ {ValueOf(func(a [128]byte) {}), byteArray(128)},
+ {ValueOf(func(a [256]byte) {}), byteArray(256)},
+ {ValueOf(func(a [1024]byte) {}), byteArray(1024)},
+ {ValueOf(func(a [4096]byte) {}), byteArray(4096)},
+ {ValueOf(func(a [65536]byte) {}), byteArray(65536)},
+ }
+ for _, size := range sizes {
+ bench := func(b *testing.B) {
+ args := []Value{size.arg}
+ b.SetBytes(int64(size.arg.Len()))
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ size.fv.Call(args)
+ }
+ })
+ }
+ name := fmt.Sprintf("size=%v", size.arg.Len())
+ b.Run(name, bench)
+ }
+}
+
+func BenchmarkPtrTo(b *testing.B) {
+ // Construct a type with a zero ptrToThis.
+ type T struct{ int }
+ t := SliceOf(TypeOf(T{}))
+ ptrToThis := ValueOf(t).Elem().FieldByName("ptrToThis")
+ if !ptrToThis.IsValid() {
+ b.Fatalf("%v has no ptrToThis field; was it removed from rtype?", t)
+ }
+ if ptrToThis.Int() != 0 {
+ b.Fatalf("%v.ptrToThis unexpectedly nonzero", t)
+ }
+ b.ResetTimer()
+
+ // Now benchmark calling PointerTo on it: we'll have to hit the ptrMap cache on
+ // every call.
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ PointerTo(t)
+ }
+ })
+}
+
+type B1 struct {
+ X int
+ Y int
+ Z int
+}
+
+func BenchmarkFieldByName1(b *testing.B) {
+ t := TypeOf(B1{})
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("Z")
+ }
+ })
+}
+
+func BenchmarkFieldByName2(b *testing.B) {
+ t := TypeOf(S3{})
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("B")
+ }
+ })
+}
+
+func BenchmarkFieldByName3(b *testing.B) {
+ t := TypeOf(R0{})
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("X")
+ }
+ })
+}
+
+type S struct {
+ i1 int64
+ i2 int64
+}
+
+func BenchmarkInterfaceBig(b *testing.B) {
+ v := ValueOf(S{})
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ v.Interface()
+ }
+ })
+ b.StopTimer()
+}
+
+func BenchmarkInterfaceSmall(b *testing.B) {
+ v := ValueOf(int64(0))
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ v.Interface()
+ }
+ })
+}
+
+func BenchmarkNew(b *testing.B) {
+ v := TypeOf(XM{})
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ New(v)
+ }
+ })
+}
+
+func BenchmarkMap(b *testing.B) {
+ type V *int
+ type S string
+ value := ValueOf((V)(nil))
+ stringKeys := []string{}
+ mapOfStrings := map[string]V{}
+ uint64Keys := []uint64{}
+ mapOfUint64s := map[uint64]V{}
+ userStringKeys := []S{}
+ mapOfUserStrings := map[S]V{}
+ for i := 0; i < 100; i++ {
+ stringKey := fmt.Sprintf("key%d", i)
+ stringKeys = append(stringKeys, stringKey)
+ mapOfStrings[stringKey] = nil
+
+ uint64Key := uint64(i)
+ uint64Keys = append(uint64Keys, uint64Key)
+ mapOfUint64s[uint64Key] = nil
+
+ userStringKey := S(fmt.Sprintf("key%d", i))
+ userStringKeys = append(userStringKeys, userStringKey)
+ mapOfUserStrings[userStringKey] = nil
+ }
+
+ tests := []struct {
+ label string
+ m, keys, value Value
+ }{
+ {"StringKeys", ValueOf(mapOfStrings), ValueOf(stringKeys), value},
+ {"Uint64Keys", ValueOf(mapOfUint64s), ValueOf(uint64Keys), value},
+ {"UserStringKeys", ValueOf(mapOfUserStrings), ValueOf(userStringKeys), value},
+ }
+
+ for _, tt := range tests {
+ b.Run(tt.label, func(b *testing.B) {
+ b.Run("MapIndex", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for j := tt.keys.Len() - 1; j >= 0; j-- {
+ tt.m.MapIndex(tt.keys.Index(j))
+ }
+ }
+ })
+ b.Run("SetMapIndex", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for j := tt.keys.Len() - 1; j >= 0; j-- {
+ tt.m.SetMapIndex(tt.keys.Index(j), tt.value)
+ }
+ }
+ })
+ })
+ }
+}
+
+func BenchmarkMapIterNext(b *testing.B) {
+ m := ValueOf(map[string]int{"a": 0, "b": 1, "c": 2, "d": 3})
+ it := m.MapRange()
+ for i := 0; i < b.N; i++ {
+ for it.Next() {
+ }
+ it.Reset(m)
+ }
+}
diff --git a/src/reflect/deepequal.go b/src/reflect/deepequal.go
new file mode 100644
index 0000000..c898bc8
--- /dev/null
+++ b/src/reflect/deepequal.go
@@ -0,0 +1,238 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Deep equality test via reflection
+
+package reflect
+
+import (
+ "internal/bytealg"
+ "unsafe"
+)
+
+// During deepValueEqual, must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited comparisons are stored in a map indexed by visit.
+type visit struct {
+ a1 unsafe.Pointer
+ a2 unsafe.Pointer
+ typ Type
+}
+
+// Tests for deep equality using reflected types. The map argument tracks
+// comparisons that have already been seen, which allows short circuiting on
+// recursive types.
+func deepValueEqual(v1, v2 Value, visited map[visit]bool) bool {
+ if !v1.IsValid() || !v2.IsValid() {
+ return v1.IsValid() == v2.IsValid()
+ }
+ if v1.Type() != v2.Type() {
+ return false
+ }
+
+ // We want to avoid putting more in the visited map than we need to.
+ // For any possible reference cycle that might be encountered,
+ // hard(v1, v2) needs to return true for at least one of the types in the cycle,
+	// and for those types it must be safe and valid to get the Value's internal pointer.
+ hard := func(v1, v2 Value) bool {
+ switch v1.Kind() {
+ case Pointer:
+ if v1.typ.ptrdata == 0 {
+ // not-in-heap pointers can't be cyclic.
+ // At least, all of our current uses of runtime/internal/sys.NotInHeap
+ // have that property. The runtime ones aren't cyclic (and we don't use
+ // DeepEqual on them anyway), and the cgo-generated ones are
+ // all empty structs.
+ return false
+ }
+ fallthrough
+ case Map, Slice, Interface:
+ // Nil pointers cannot be cyclic. Avoid putting them in the visited map.
+ return !v1.IsNil() && !v2.IsNil()
+ }
+ return false
+ }
+
+ if hard(v1, v2) {
+ // For a Pointer or Map value, we need to check flagIndir,
+ // which we do by calling the pointer method.
+ // For Slice or Interface, flagIndir is always set,
+ // and using v.ptr suffices.
+ ptrval := func(v Value) unsafe.Pointer {
+ switch v.Kind() {
+ case Pointer, Map:
+ return v.pointer()
+ default:
+ return v.ptr
+ }
+ }
+ addr1 := ptrval(v1)
+ addr2 := ptrval(v2)
+ if uintptr(addr1) > uintptr(addr2) {
+ // Canonicalize order to reduce number of entries in visited.
+ // Assumes non-moving garbage collector.
+ addr1, addr2 = addr2, addr1
+ }
+
+ // Short circuit if references are already seen.
+ typ := v1.Type()
+ v := visit{addr1, addr2, typ}
+ if visited[v] {
+ return true
+ }
+
+ // Remember for later.
+ visited[v] = true
+ }
+
+ switch v1.Kind() {
+ case Array:
+ for i := 0; i < v1.Len(); i++ {
+ if !deepValueEqual(v1.Index(i), v2.Index(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Slice:
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.UnsafePointer() == v2.UnsafePointer() {
+ return true
+ }
+ // Special case for []byte, which is common.
+ if v1.Type().Elem().Kind() == Uint8 {
+ return bytealg.Equal(v1.Bytes(), v2.Bytes())
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !deepValueEqual(v1.Index(i), v2.Index(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Interface:
+ if v1.IsNil() || v2.IsNil() {
+ return v1.IsNil() == v2.IsNil()
+ }
+ return deepValueEqual(v1.Elem(), v2.Elem(), visited)
+ case Pointer:
+ if v1.UnsafePointer() == v2.UnsafePointer() {
+ return true
+ }
+ return deepValueEqual(v1.Elem(), v2.Elem(), visited)
+ case Struct:
+ for i, n := 0, v1.NumField(); i < n; i++ {
+ if !deepValueEqual(v1.Field(i), v2.Field(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Map:
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.UnsafePointer() == v2.UnsafePointer() {
+ return true
+ }
+ for _, k := range v1.MapKeys() {
+ val1 := v1.MapIndex(k)
+ val2 := v2.MapIndex(k)
+ if !val1.IsValid() || !val2.IsValid() || !deepValueEqual(val1, val2, visited) {
+ return false
+ }
+ }
+ return true
+ case Func:
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ // Can't do better than this:
+ return false
+ case Int, Int8, Int16, Int32, Int64:
+ return v1.Int() == v2.Int()
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return v1.Uint() == v2.Uint()
+ case String:
+ return v1.String() == v2.String()
+ case Bool:
+ return v1.Bool() == v2.Bool()
+ case Float32, Float64:
+ return v1.Float() == v2.Float()
+ case Complex64, Complex128:
+ return v1.Complex() == v2.Complex()
+ default:
+ // Normal equality suffices
+ return valueInterface(v1, false) == valueInterface(v2, false)
+ }
+}
+
+// DeepEqual reports whether x and y are “deeply equal,” defined as follows.
+// Two values of identical type are deeply equal if one of the following cases applies.
+// Values of distinct types are never deeply equal.
+//
+// Array values are deeply equal when their corresponding elements are deeply equal.
+//
+// Struct values are deeply equal if their corresponding fields,
+// both exported and unexported, are deeply equal.
+//
+// Func values are deeply equal if both are nil; otherwise they are not deeply equal.
+//
+// Interface values are deeply equal if they hold deeply equal concrete values.
+//
+// Map values are deeply equal when all of the following are true:
+// they are both nil or both non-nil, they have the same length,
+// and either they are the same map object or their corresponding keys
+// (matched using Go equality) map to deeply equal values.
+//
+// Pointer values are deeply equal if they are equal using Go's == operator
+// or if they point to deeply equal values.
+//
+// Slice values are deeply equal when all of the following are true:
+// they are both nil or both non-nil, they have the same length,
+// and either they point to the same initial entry of the same underlying array
+// (that is, &x[0] == &y[0]) or their corresponding elements (up to length) are deeply equal.
+// Note that a non-nil empty slice and a nil slice (for example, []byte{} and []byte(nil))
+// are not deeply equal.
+//
+// Other values - numbers, bools, strings, and channels - are deeply equal
+// if they are equal using Go's == operator.
+//
+// In general DeepEqual is a recursive relaxation of Go's == operator.
+// However, this idea is impossible to implement without some inconsistency.
+// Specifically, it is possible for a value to be unequal to itself,
+// either because it is of func type (uncomparable in general)
+// or because it is a floating-point NaN value (not equal to itself in floating-point comparison),
+// or because it is an array, struct, or interface containing
+// such a value.
+// On the other hand, pointer values are always equal to themselves,
+// even if they point at or contain such problematic values,
+// because they compare equal using Go's == operator, and that
+// is a sufficient condition to be deeply equal, regardless of content.
+// DeepEqual has been defined so that the same short-cut applies
+// to slices and maps: if x and y are the same slice or the same map,
+// they are deeply equal regardless of content.
+//
+// As DeepEqual traverses the data values it may find a cycle. The
+// second and subsequent times that DeepEqual compares two pointer
+// values that have been compared before, it treats the values as
+// equal rather than examining the values to which they point.
+// This ensures that DeepEqual terminates.
+func DeepEqual(x, y any) bool {
+ if x == nil || y == nil {
+ return x == y
+ }
+ v1 := ValueOf(x)
+ v2 := ValueOf(y)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ return deepValueEqual(v1, v2, make(map[visit]bool))
+}
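A minimal sketch of the corner cases documented above, using only the exported API: a nil slice versus a non-nil empty slice, NaN values, and two distinct pointer cycles (which terminate and compare equal because revisited pointer pairs are treated as equal):

package main

import (
	"fmt"
	"math"
	"reflect"
)

type node struct{ next *node }

func main() {
	// A non-nil empty slice and a nil slice are not deeply equal.
	fmt.Println(reflect.DeepEqual([]byte(nil), []byte{})) // false

	// NaN is not equal to itself, so it is not deeply equal to itself either.
	fmt.Println(reflect.DeepEqual(math.NaN(), math.NaN())) // false

	// Two distinct self-referential cycles: DeepEqual terminates and reports
	// true, because the second comparison of the same pointer pair is
	// treated as equal.
	a, b := &node{}, &node{}
	a.next, b.next = a, b
	fmt.Println(reflect.DeepEqual(a, b)) // true
}
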
diff --git a/src/reflect/example_test.go b/src/reflect/example_test.go
new file mode 100644
index 0000000..b4f3b29
--- /dev/null
+++ b/src/reflect/example_test.go
@@ -0,0 +1,209 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+func ExampleKind() {
+ for _, v := range []any{"hi", 42, func() {}} {
+ switch v := reflect.ValueOf(v); v.Kind() {
+ case reflect.String:
+ fmt.Println(v.String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ fmt.Println(v.Int())
+ default:
+ fmt.Printf("unhandled kind %s", v.Kind())
+ }
+ }
+
+ // Output:
+ // hi
+ // 42
+ // unhandled kind func
+}
+
+func ExampleMakeFunc() {
+ // swap is the implementation passed to MakeFunc.
+ // It must work in terms of reflect.Values so that it is possible
+ // to write code without knowing beforehand what the types
+ // will be.
+ swap := func(in []reflect.Value) []reflect.Value {
+ return []reflect.Value{in[1], in[0]}
+ }
+
+ // makeSwap expects fptr to be a pointer to a nil function.
+ // It sets that pointer to a new function created with MakeFunc.
+ // When the function is invoked, reflect turns the arguments
+ // into Values, calls swap, and then turns swap's result slice
+ // into the values returned by the new function.
+ makeSwap := func(fptr any) {
+ // fptr is a pointer to a function.
+ // Obtain the function value itself (likely nil) as a reflect.Value
+ // so that we can query its type and then set the value.
+ fn := reflect.ValueOf(fptr).Elem()
+
+ // Make a function of the right type.
+ v := reflect.MakeFunc(fn.Type(), swap)
+
+ // Assign it to the value fn represents.
+ fn.Set(v)
+ }
+
+ // Make and call a swap function for ints.
+ var intSwap func(int, int) (int, int)
+ makeSwap(&intSwap)
+ fmt.Println(intSwap(0, 1))
+
+ // Make and call a swap function for float64s.
+ var floatSwap func(float64, float64) (float64, float64)
+ makeSwap(&floatSwap)
+ fmt.Println(floatSwap(2.72, 3.14))
+
+ // Output:
+ // 1 0
+ // 3.14 2.72
+}
+
+func ExampleStructTag() {
+ type S struct {
+ F string `species:"gopher" color:"blue"`
+ }
+
+ s := S{}
+ st := reflect.TypeOf(s)
+ field := st.Field(0)
+ fmt.Println(field.Tag.Get("color"), field.Tag.Get("species"))
+
+ // Output:
+ // blue gopher
+}
+
+func ExampleStructTag_Lookup() {
+ type S struct {
+ F0 string `alias:"field_0"`
+ F1 string `alias:""`
+ F2 string
+ }
+
+ s := S{}
+ st := reflect.TypeOf(s)
+ for i := 0; i < st.NumField(); i++ {
+ field := st.Field(i)
+ if alias, ok := field.Tag.Lookup("alias"); ok {
+ if alias == "" {
+ fmt.Println("(blank)")
+ } else {
+ fmt.Println(alias)
+ }
+ } else {
+ fmt.Println("(not specified)")
+ }
+ }
+
+ // Output:
+ // field_0
+ // (blank)
+ // (not specified)
+}
+
+func ExampleTypeOf() {
+ // As interface types are only used for static typing, a
+ // common idiom to find the reflection Type for an interface
+ // type Foo is to use a *Foo value.
+ writerType := reflect.TypeOf((*io.Writer)(nil)).Elem()
+
+ fileType := reflect.TypeOf((*os.File)(nil))
+ fmt.Println(fileType.Implements(writerType))
+
+ // Output:
+ // true
+}
+
+func ExampleStructOf() {
+ typ := reflect.StructOf([]reflect.StructField{
+ {
+ Name: "Height",
+ Type: reflect.TypeOf(float64(0)),
+ Tag: `json:"height"`,
+ },
+ {
+ Name: "Age",
+ Type: reflect.TypeOf(int(0)),
+ Tag: `json:"age"`,
+ },
+ })
+
+ v := reflect.New(typ).Elem()
+ v.Field(0).SetFloat(0.4)
+ v.Field(1).SetInt(2)
+ s := v.Addr().Interface()
+
+ w := new(bytes.Buffer)
+ if err := json.NewEncoder(w).Encode(s); err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("value: %+v\n", s)
+ fmt.Printf("json: %s", w.Bytes())
+
+ r := bytes.NewReader([]byte(`{"height":1.5,"age":10}`))
+ if err := json.NewDecoder(r).Decode(s); err != nil {
+ panic(err)
+ }
+ fmt.Printf("value: %+v\n", s)
+
+ // Output:
+ // value: &{Height:0.4 Age:2}
+ // json: {"height":0.4,"age":2}
+ // value: &{Height:1.5 Age:10}
+}
+
+func ExampleValue_FieldByIndex() {
+ // This example shows a case in which the name of a promoted field
+ // is hidden by another field: FieldByName will not work, so
+ // FieldByIndex must be used instead.
+ type user struct {
+ firstName string
+ lastName string
+ }
+
+ type data struct {
+ user
+ firstName string
+ lastName string
+ }
+
+ u := data{
+ user: user{"Embedded John", "Embedded Doe"},
+ firstName: "John",
+ lastName: "Doe",
+ }
+
+ s := reflect.ValueOf(u).FieldByIndex([]int{0, 1})
+ fmt.Println("embedded last name:", s)
+
+ // Output:
+ // embedded last name: Embedded Doe
+}
+
+func ExampleValue_FieldByName() {
+ type user struct {
+ firstName string
+ lastName string
+ }
+ u := user{firstName: "John", lastName: "Doe"}
+ s := reflect.ValueOf(u)
+
+ fmt.Println("Name:", s.FieldByName("firstName"))
+ // Output:
+ // Name: John
+}
diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go
new file mode 100644
index 0000000..f7d2cc3
--- /dev/null
+++ b/src/reflect/export_test.go
@@ -0,0 +1,165 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+import (
+ "internal/goarch"
+ "sync"
+ "unsafe"
+)
+
+// MakeRO returns a copy of v with the read-only flag set.
+func MakeRO(v Value) Value {
+ v.flag |= flagStickyRO
+ return v
+}
+
+// IsRO reports whether v's read-only flag is set.
+func IsRO(v Value) bool {
+ return v.flag&flagStickyRO != 0
+}
+
+var CallGC = &callGC
+
+// FuncLayout calls funcLayout and returns a subset of the results for testing.
+//
+// Bitmaps like stack, gc, inReg, and outReg are expanded such that each bit
+// takes up one byte, so that writing out test cases is a little clearer.
+// If ptrs is false, gc will be nil.
+func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, stack, gc, inReg, outReg []byte, ptrs bool) {
+ var ft *rtype
+ var abid abiDesc
+ if rcvr != nil {
+ ft, _, abid = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), rcvr.(*rtype))
+ } else {
+ ft, _, abid = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), nil)
+ }
+ // Extract size information.
+ argSize = abid.stackCallArgsSize
+ retOffset = abid.retOffset
+ frametype = ft
+
+ // Expand stack pointer bitmap into byte-map.
+ for i := uint32(0); i < abid.stackPtrs.n; i++ {
+ stack = append(stack, abid.stackPtrs.data[i/8]>>(i%8)&1)
+ }
+
+ // Expand register pointer bitmaps into byte-maps.
+ bool2byte := func(b bool) byte {
+ if b {
+ return 1
+ }
+ return 0
+ }
+ for i := 0; i < intArgRegs; i++ {
+ inReg = append(inReg, bool2byte(abid.inRegPtrs.Get(i)))
+ outReg = append(outReg, bool2byte(abid.outRegPtrs.Get(i)))
+ }
+ if ft.kind&kindGCProg != 0 {
+ panic("can't handle gc programs")
+ }
+
+ // Expand frame type's GC bitmap into byte-map.
+ ptrs = ft.ptrdata != 0
+ if ptrs {
+ nptrs := ft.ptrdata / goarch.PtrSize
+ gcdata := ft.gcSlice(0, (nptrs+7)/8)
+ for i := uintptr(0); i < nptrs; i++ {
+ gc = append(gc, gcdata[i/8]>>(i%8)&1)
+ }
+ }
+ return
+}
+
+func TypeLinks() []string {
+ var r []string
+ sections, offset := typelinks()
+ for i, offs := range offset {
+ rodata := sections[i]
+ for _, off := range offs {
+ typ := (*rtype)(resolveTypeOff(unsafe.Pointer(rodata), off))
+ r = append(r, typ.String())
+ }
+ }
+ return r
+}
+
+var GCBits = gcbits
+
+func gcbits(any) []byte // provided by runtime
+
+func MapBucketOf(x, y Type) Type {
+ return bucketOf(x.(*rtype), y.(*rtype))
+}
+
+func CachedBucketOf(m Type) Type {
+ t := m.(*rtype)
+ if Kind(t.kind&kindMask) != Map {
+ panic("not map")
+ }
+ tt := (*mapType)(unsafe.Pointer(t))
+ return tt.bucket
+}
+
+type EmbedWithUnexpMeth struct{}
+
+func (EmbedWithUnexpMeth) f() {}
+
+type pinUnexpMeth interface {
+ f()
+}
+
+var pinUnexpMethI = pinUnexpMeth(EmbedWithUnexpMeth{})
+
+func FirstMethodNameBytes(t Type) *byte {
+ _ = pinUnexpMethI
+
+ ut := t.uncommon()
+ if ut == nil {
+ panic("type has no methods")
+ }
+ m := ut.methods()[0]
+ mname := t.(*rtype).nameOff(m.name)
+ if *mname.data(0, "name flag field")&(1<<2) == 0 {
+ panic("method name does not have pkgPath *string")
+ }
+ return mname.bytes
+}
+
+type OtherPkgFields struct {
+ OtherExported int
+ otherUnexported int
+}
+
+func IsExported(t Type) bool {
+ typ := t.(*rtype)
+ n := typ.nameOff(typ.str)
+ return n.isExported()
+}
+
+func ResolveReflectName(s string) {
+ resolveReflectName(newName(s, "", false, false))
+}
+
+type Buffer struct {
+ buf []byte
+}
+
+func clearLayoutCache() {
+ layoutCache = sync.Map{}
+}
+
+func SetArgRegs(ints, floats int, floatSize uintptr) (oldInts, oldFloats int, oldFloatSize uintptr) {
+ oldInts = intArgRegs
+ oldFloats = floatArgRegs
+ oldFloatSize = floatRegSize
+ intArgRegs = ints
+ floatArgRegs = floats
+ floatRegSize = floatSize
+ clearLayoutCache()
+ return
+}
+
+var MethodValueCallCodePtr = methodValueCallCodePtr
diff --git a/src/reflect/float32reg_generic.go b/src/reflect/float32reg_generic.go
new file mode 100644
index 0000000..23ad4bf
--- /dev/null
+++ b/src/reflect/float32reg_generic.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !ppc64 && !ppc64le && !riscv64
+
+package reflect
+
+import "unsafe"
+
+// This file implements a straightforward conversion of a float32
+// value into its representation in a register. This conversion
+// applies to amd64 and arm64. It is also built for architectures
+// with zero argument registers, where it is simply never called.
+
+func archFloat32FromReg(reg uint64) float32 {
+ i := uint32(reg)
+ return *(*float32)(unsafe.Pointer(&i))
+}
+
+func archFloat32ToReg(val float32) uint64 {
+ return uint64(*(*uint32)(unsafe.Pointer(&val)))
+}
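The unsafe casts above are bit-for-bit equivalent to widening math.Float32bits into a uint64; a small sketch of that equivalence (using math only for illustration, since reflect itself avoids the import):

package main

import (
	"fmt"
	"math"
)

// Equivalent of archFloat32ToReg/archFloat32FromReg for the generic case:
// the float32 bit pattern occupies the low 32 bits of the register word.
func float32ToReg(val float32) uint64   { return uint64(math.Float32bits(val)) }
func float32FromReg(reg uint64) float32 { return math.Float32frombits(uint32(reg)) }

func main() {
	r := float32ToReg(3.5)
	fmt.Printf("%#x\n", r)                // 0x40600000
	fmt.Println(float32FromReg(r) == 3.5) // true
}
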
diff --git a/src/reflect/float32reg_ppc64x.s b/src/reflect/float32reg_ppc64x.s
new file mode 100644
index 0000000..a4deb18
--- /dev/null
+++ b/src/reflect/float32reg_ppc64x.s
@@ -0,0 +1,30 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+#include "textflag.h"
+
+// On PPC64, a float32 becomes a float64
+// when loaded into a register, unlike on
+// other platforms. These functions are
+// needed to ensure correct conversions on PPC64.
+
+// Convert float32->uint64
+TEXT ·archFloat32ToReg(SB),NOSPLIT,$0-16
+ FMOVS val+0(FP), F1
+ FMOVD F1, ret+8(FP)
+ RET
+
+// Convert uint64->float32
+TEXT ·archFloat32FromReg(SB),NOSPLIT,$0-12
+ FMOVD reg+0(FP), F1
+	// Normally a float64->float32 conversion
+	// would need rounding, but that is not needed
+	// here since the uint64 was originally converted
+	// from a float32, and skipping it also
+	// preserves SNaN values.
+ FMOVS F1, ret+8(FP)
+ RET
+
diff --git a/src/reflect/float32reg_riscv64.s b/src/reflect/float32reg_riscv64.s
new file mode 100644
index 0000000..8fcf3ba
--- /dev/null
+++ b/src/reflect/float32reg_riscv64.s
@@ -0,0 +1,27 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// riscv64 allows 32-bit floats to live in the bottom
+// part of the register, but expects them to be NaN-boxed.
+// These functions are needed to ensure correct conversions
+// on riscv64.
+
+// Convert float32->uint64
+TEXT ·archFloat32ToReg(SB),NOSPLIT,$0-16
+ MOVF val+0(FP), F1
+ MOVD F1, ret+8(FP)
+ RET
+
+// Convert uint64->float32
+TEXT ·archFloat32FromReg(SB),NOSPLIT,$0-12
+	// Normally a float64->float32 conversion
+	// would need rounding, but riscv64 stores a valid
+	// float32 in the lower 32 bits, so we only need to
+	// undo the NaN-boxing by storing a float32.
+ MOVD reg+0(FP), F1
+ MOVF F1, ret+8(FP)
+ RET
+
diff --git a/src/reflect/internal/example1/example.go b/src/reflect/internal/example1/example.go
new file mode 100644
index 0000000..181dd99
--- /dev/null
+++ b/src/reflect/internal/example1/example.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package example1
+
+type MyStruct struct {
+ MyStructs []MyStruct
+ MyStruct *MyStruct
+}
diff --git a/src/reflect/internal/example2/example.go b/src/reflect/internal/example2/example.go
new file mode 100644
index 0000000..8a55826
--- /dev/null
+++ b/src/reflect/internal/example2/example.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package example2
+
+type MyStruct struct {
+ MyStructs []MyStruct
+ MyStruct *MyStruct
+}
diff --git a/src/reflect/makefunc.go b/src/reflect/makefunc.go
new file mode 100644
index 0000000..ee07299
--- /dev/null
+++ b/src/reflect/makefunc.go
@@ -0,0 +1,176 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// MakeFunc implementation.
+
+package reflect
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+// makeFuncImpl is the closure value implementing the function
+// returned by MakeFunc.
+// The first three words of this type must be kept in sync with
+// methodValue and runtime.reflectMethodValue.
+// Any changes should be reflected in all three.
+type makeFuncImpl struct {
+ makeFuncCtxt
+ ftyp *funcType
+ fn func([]Value) []Value
+}
+
+// MakeFunc returns a new function of the given Type
+// that wraps the function fn. When called, that new function
+// does the following:
+//
+// - converts its arguments to a slice of Values.
+// - runs results := fn(args).
+// - returns the results as a slice of Values, one per formal result.
+//
+// The implementation fn can assume that the argument Value slice
+// has the number and type of arguments given by typ.
+// If typ describes a variadic function, the final Value is itself
+// a slice representing the variadic arguments, as in the
+// body of a variadic function. The result Value slice returned by fn
+// must have the number and type of results given by typ.
+//
+// The Value.Call method allows the caller to invoke a typed function
+// in terms of Values; in contrast, MakeFunc allows the caller to implement
+// a typed function in terms of Values.
+//
+// The Examples section of the documentation includes an illustration
+// of how to use MakeFunc to build a swap function for different types.
+func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
+ if typ.Kind() != Func {
+ panic("reflect: call of MakeFunc with non-Func type")
+ }
+
+ t := typ.common()
+ ftyp := (*funcType)(unsafe.Pointer(t))
+
+ code := abi.FuncPCABI0(makeFuncStub)
+
+ // makeFuncImpl contains a stack map for use by the runtime
+ _, _, abid := funcLayout(ftyp, nil)
+
+ impl := &makeFuncImpl{
+ makeFuncCtxt: makeFuncCtxt{
+ fn: code,
+ stack: abid.stackPtrs,
+ argLen: abid.stackCallArgsSize,
+ regPtrs: abid.inRegPtrs,
+ },
+ ftyp: ftyp,
+ fn: fn,
+ }
+
+ return Value{t, unsafe.Pointer(impl), flag(Func)}
+}
+
+// makeFuncStub is an assembly function that is the code half of
+// the function returned from MakeFunc. It expects a *makeFuncImpl
+// as its context register, and its job is to invoke callReflect(ctxt, frame)
+// where ctxt is the context register and frame is a pointer to the first
+// word in the passed-in argument frame.
+func makeFuncStub()
+
+// The first 3 words of this type must be kept in sync with
+// makeFuncImpl and runtime.reflectMethodValue.
+// Any changes should be reflected in all three.
+type methodValue struct {
+ makeFuncCtxt
+ method int
+ rcvr Value
+}
+
+// makeMethodValue converts v from the rcvr+method index representation
+// of a method value (which is basically the receiver value with a
+// special bit set) into a true func value - a value holding an actual
+// func. The output is semantically equivalent to the input as far as
+// the user of package reflect can tell, but the true func
+// representation can be handled by code like Convert and Interface
+// and Assign.
+func makeMethodValue(op string, v Value) Value {
+ if v.flag&flagMethod == 0 {
+ panic("reflect: internal error: invalid use of makeMethodValue")
+ }
+
+ // Ignoring the flagMethod bit, v describes the receiver, not the method type.
+ fl := v.flag & (flagRO | flagAddr | flagIndir)
+ fl |= flag(v.typ.Kind())
+ rcvr := Value{v.typ, v.ptr, fl}
+
+ // v.Type returns the actual type of the method value.
+ ftyp := (*funcType)(unsafe.Pointer(v.Type().(*rtype)))
+
+ code := methodValueCallCodePtr()
+
+ // methodValue contains a stack map for use by the runtime
+ _, _, abid := funcLayout(ftyp, nil)
+ fv := &methodValue{
+ makeFuncCtxt: makeFuncCtxt{
+ fn: code,
+ stack: abid.stackPtrs,
+ argLen: abid.stackCallArgsSize,
+ regPtrs: abid.inRegPtrs,
+ },
+ method: int(v.flag) >> flagMethodShift,
+ rcvr: rcvr,
+ }
+
+ // Cause panic if method is not appropriate.
+ // The panic would still happen during the call if we omit this,
+ // but we want Interface() and other operations to fail early.
+ methodReceiver(op, fv.rcvr, fv.method)
+
+ return Value{&ftyp.rtype, unsafe.Pointer(fv), v.flag&flagRO | flag(Func)}
+}
+
+func methodValueCallCodePtr() uintptr {
+ return abi.FuncPCABI0(methodValueCall)
+}
+
+// methodValueCall is an assembly function that is the code half of
+// the function returned from makeMethodValue. It expects a *methodValue
+// as its context register, and its job is to invoke callMethod(ctxt, frame)
+// where ctxt is the context register and frame is a pointer to the first
+// word in the passed-in argument frame.
+func methodValueCall()
+
+// This structure must be kept in sync with runtime.reflectMethodValue.
+// Any changes should be reflected in both.
+type makeFuncCtxt struct {
+ fn uintptr
+ stack *bitVector // ptrmap for both stack args and results
+ argLen uintptr // just args
+ regPtrs abi.IntArgRegBitmap
+}
+
+// moveMakeFuncArgPtrs uses ctxt.regPtrs to copy integer pointer arguments
+// in args.Ints to args.Ptrs where the GC can see them.
+//
+// This is similar to what reflectcallmove does in the runtime, except
+// that happens on the return path, whereas this happens on the call path.
+//
+// nosplit because pointers are being held in uintptr slots in args, so
+// having our stack scanned now could lead to accidentally freeing
+// memory.
+//
+//go:nosplit
+func moveMakeFuncArgPtrs(ctxt *makeFuncCtxt, args *abi.RegArgs) {
+ for i, arg := range args.Ints {
+ // Avoid write barriers! Because our write barrier enqueues what
+ // was there before, we might enqueue garbage.
+ if ctxt.regPtrs.Get(i) {
+ *(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = arg
+ } else {
+ // We *must* zero this space ourselves because it's defined in
+ // assembly code and the GC will scan these pointers. Otherwise,
+ // there will be garbage here.
+ *(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = 0
+ }
+ }
+}
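A short sketch of how this machinery surfaces through the exported API: MethodByName yields a method value (receiver plus method index), and Interface turns it into a true func value via makeMethodValue and methodValueCall:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

func main() {
	var b strings.Builder

	// A method value: the receiver (&b) plus a method index.
	write := reflect.ValueOf(&b).MethodByName("WriteString")

	// Interface needs a real func value, which is where makeMethodValue
	// and methodValueCall come into play.
	f := write.Interface().(func(string) (int, error))
	f("hello, ")
	f("world")
	fmt.Println(b.String()) // hello, world
}
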
diff --git a/src/reflect/nih_test.go b/src/reflect/nih_test.go
new file mode 100644
index 0000000..f503939
--- /dev/null
+++ b/src/reflect/nih_test.go
@@ -0,0 +1,38 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo
+
+package reflect_test
+
+import (
+ . "reflect"
+ "runtime/cgo"
+ "testing"
+ "unsafe"
+)
+
+type nih struct {
+ _ cgo.Incomplete
+ x int
+}
+
+var global_nih = nih{x: 7}
+
+func TestNotInHeapDeref(t *testing.T) {
+ // See issue 48399.
+ v := ValueOf((*nih)(nil))
+ v.Elem()
+ shouldPanic("reflect: call of reflect.Value.Field on zero Value", func() { v.Elem().Field(0) })
+
+ v = ValueOf(&global_nih)
+ if got := v.Elem().Field(1).Int(); got != 7 {
+ t.Fatalf("got %d, want 7", got)
+ }
+
+ v = ValueOf((*nih)(unsafe.Pointer(new(int))))
+ shouldPanic("reflect: reflect.Value.Elem on an invalid notinheap pointer", func() { v.Elem() })
+ shouldPanic("reflect: reflect.Value.Pointer on an invalid notinheap pointer", func() { v.Pointer() })
+ shouldPanic("reflect: reflect.Value.UnsafePointer on an invalid notinheap pointer", func() { v.UnsafePointer() })
+}
diff --git a/src/reflect/set_test.go b/src/reflect/set_test.go
new file mode 100644
index 0000000..028c051
--- /dev/null
+++ b/src/reflect/set_test.go
@@ -0,0 +1,227 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect_test
+
+import (
+ "bytes"
+ "go/ast"
+ "go/token"
+ "io"
+ . "reflect"
+ "strings"
+ "testing"
+ "unsafe"
+)
+
+func TestImplicitMapConversion(t *testing.T) {
+ // Test implicit conversions in MapIndex and SetMapIndex.
+ {
+ // direct
+ m := make(map[int]int)
+ mv := ValueOf(m)
+ mv.SetMapIndex(ValueOf(1), ValueOf(2))
+ x, ok := m[1]
+ if x != 2 {
+ t.Errorf("#1 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m)
+ }
+ if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 {
+ t.Errorf("#1 MapIndex(1) = %d", n)
+ }
+ }
+ {
+ // convert interface key
+ m := make(map[any]int)
+ mv := ValueOf(m)
+ mv.SetMapIndex(ValueOf(1), ValueOf(2))
+ x, ok := m[1]
+ if x != 2 {
+ t.Errorf("#2 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m)
+ }
+ if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 {
+ t.Errorf("#2 MapIndex(1) = %d", n)
+ }
+ }
+ {
+ // convert interface value
+ m := make(map[int]any)
+ mv := ValueOf(m)
+ mv.SetMapIndex(ValueOf(1), ValueOf(2))
+ x, ok := m[1]
+ if x != 2 {
+ t.Errorf("#3 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m)
+ }
+ if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 {
+ t.Errorf("#3 MapIndex(1) = %d", n)
+ }
+ }
+ {
+ // convert both interface key and interface value
+ m := make(map[any]any)
+ mv := ValueOf(m)
+ mv.SetMapIndex(ValueOf(1), ValueOf(2))
+ x, ok := m[1]
+ if x != 2 {
+ t.Errorf("#4 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m)
+ }
+ if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 {
+ t.Errorf("#4 MapIndex(1) = %d", n)
+ }
+ }
+ {
+ // convert both, with non-empty interfaces
+ m := make(map[io.Reader]io.Writer)
+ mv := ValueOf(m)
+ b1 := new(bytes.Buffer)
+ b2 := new(bytes.Buffer)
+ mv.SetMapIndex(ValueOf(b1), ValueOf(b2))
+ x, ok := m[b1]
+ if x != b2 {
+ t.Errorf("#5 after SetMapIndex(b1, b2): %p (!= %p), %t (map=%v)", x, b2, ok, m)
+ }
+ if p := mv.MapIndex(ValueOf(b1)).Elem().UnsafePointer(); p != unsafe.Pointer(b2) {
+ t.Errorf("#5 MapIndex(b1) = %#x want %p", p, b2)
+ }
+ }
+ {
+ // convert channel direction
+ m := make(map[<-chan int]chan int)
+ mv := ValueOf(m)
+ c1 := make(chan int)
+ c2 := make(chan int)
+ mv.SetMapIndex(ValueOf(c1), ValueOf(c2))
+ x, ok := m[c1]
+ if x != c2 {
+ t.Errorf("#6 after SetMapIndex(c1, c2): %p (!= %p), %t (map=%v)", x, c2, ok, m)
+ }
+ if p := mv.MapIndex(ValueOf(c1)).UnsafePointer(); p != ValueOf(c2).UnsafePointer() {
+ t.Errorf("#6 MapIndex(c1) = %#x want %p", p, c2)
+ }
+ }
+ {
+ // convert identical underlying types
+ type MyBuffer bytes.Buffer
+ m := make(map[*MyBuffer]*bytes.Buffer)
+ mv := ValueOf(m)
+ b1 := new(MyBuffer)
+ b2 := new(bytes.Buffer)
+ mv.SetMapIndex(ValueOf(b1), ValueOf(b2))
+ x, ok := m[b1]
+ if x != b2 {
+ t.Errorf("#7 after SetMapIndex(b1, b2): %p (!= %p), %t (map=%v)", x, b2, ok, m)
+ }
+ if p := mv.MapIndex(ValueOf(b1)).UnsafePointer(); p != unsafe.Pointer(b2) {
+ t.Errorf("#7 MapIndex(b1) = %#x want %p", p, b2)
+ }
+ }
+
+}
+
+func TestImplicitSetConversion(t *testing.T) {
+ // Assume TestImplicitMapConversion covered the basics.
+ // Just make sure conversions are being applied at all.
+ var r io.Reader
+ b := new(bytes.Buffer)
+ rv := ValueOf(&r).Elem()
+ rv.Set(ValueOf(b))
+ if r != b {
+ t.Errorf("after Set: r=%T(%v)", r, r)
+ }
+}
+
+func TestImplicitSendConversion(t *testing.T) {
+ c := make(chan io.Reader, 10)
+ b := new(bytes.Buffer)
+ ValueOf(c).Send(ValueOf(b))
+ if bb := <-c; bb != b {
+ t.Errorf("Received %p != %p", bb, b)
+ }
+}
+
+func TestImplicitCallConversion(t *testing.T) {
+ // Arguments must be assignable to parameter types.
+ fv := ValueOf(io.WriteString)
+ b := new(strings.Builder)
+ fv.Call([]Value{ValueOf(b), ValueOf("hello world")})
+ if b.String() != "hello world" {
+ t.Errorf("After call: string=%q want %q", b.String(), "hello world")
+ }
+}
+
+func TestImplicitAppendConversion(t *testing.T) {
+ // Arguments must be assignable to the slice's element type.
+ s := []io.Reader{}
+ sv := ValueOf(&s).Elem()
+ b := new(bytes.Buffer)
+ sv.Set(Append(sv, ValueOf(b)))
+ if len(s) != 1 || s[0] != b {
+ t.Errorf("after append: s=%v want [%p]", s, b)
+ }
+}
+
+var implementsTests = []struct {
+ x any
+ t any
+ b bool
+}{
+ {new(*bytes.Buffer), new(io.Reader), true},
+ {new(bytes.Buffer), new(io.Reader), false},
+ {new(*bytes.Buffer), new(io.ReaderAt), false},
+ {new(*ast.Ident), new(ast.Expr), true},
+ {new(*notAnExpr), new(ast.Expr), false},
+ {new(*ast.Ident), new(notASTExpr), false},
+ {new(notASTExpr), new(ast.Expr), false},
+ {new(ast.Expr), new(notASTExpr), false},
+ {new(*notAnExpr), new(notASTExpr), true},
+}
+
+type notAnExpr struct{}
+
+func (notAnExpr) Pos() token.Pos { return token.NoPos }
+func (notAnExpr) End() token.Pos { return token.NoPos }
+func (notAnExpr) exprNode() {}
+
+type notASTExpr interface {
+ Pos() token.Pos
+ End() token.Pos
+ exprNode()
+}
+
+func TestImplements(t *testing.T) {
+ for _, tt := range implementsTests {
+ xv := TypeOf(tt.x).Elem()
+ xt := TypeOf(tt.t).Elem()
+ if b := xv.Implements(xt); b != tt.b {
+ t.Errorf("(%s).Implements(%s) = %v, want %v", xv.String(), xt.String(), b, tt.b)
+ }
+ }
+}
+
+var assignableTests = []struct {
+ x any
+ t any
+ b bool
+}{
+ {new(chan int), new(<-chan int), true},
+ {new(<-chan int), new(chan int), false},
+ {new(*int), new(IntPtr), true},
+ {new(IntPtr), new(*int), true},
+ {new(IntPtr), new(IntPtr1), false},
+ {new(Ch), new(<-chan any), true},
+ // test runs implementsTests too
+}
+
+type IntPtr *int
+type IntPtr1 *int
+type Ch <-chan any
+
+func TestAssignableTo(t *testing.T) {
+ for _, tt := range append(assignableTests, implementsTests...) {
+ xv := TypeOf(tt.x).Elem()
+ xt := TypeOf(tt.t).Elem()
+ if b := xv.AssignableTo(xt); b != tt.b {
+ t.Errorf("(%s).AssignableTo(%s) = %v, want %v", xv.String(), xt.String(), b, tt.b)
+ }
+ }
+}
diff --git a/src/reflect/stubs_ppc64x.go b/src/reflect/stubs_ppc64x.go
new file mode 100644
index 0000000..06c8bf5
--- /dev/null
+++ b/src/reflect/stubs_ppc64x.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64le || ppc64
+
+package reflect
+
+func archFloat32FromReg(reg uint64) float32
+func archFloat32ToReg(val float32) uint64
diff --git a/src/reflect/stubs_riscv64.go b/src/reflect/stubs_riscv64.go
new file mode 100644
index 0000000..a72ebab
--- /dev/null
+++ b/src/reflect/stubs_riscv64.go
@@ -0,0 +1,8 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+func archFloat32FromReg(reg uint64) float32
+func archFloat32ToReg(val float32) uint64
diff --git a/src/reflect/swapper.go b/src/reflect/swapper.go
new file mode 100644
index 0000000..745c7b9
--- /dev/null
+++ b/src/reflect/swapper.go
@@ -0,0 +1,78 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+import (
+ "internal/goarch"
+ "internal/unsafeheader"
+ "unsafe"
+)
+
+// Swapper returns a function that swaps the elements in the provided
+// slice.
+//
+// Swapper panics if the provided interface is not a slice.
+func Swapper(slice any) func(i, j int) {
+ v := ValueOf(slice)
+ if v.Kind() != Slice {
+ panic(&ValueError{Method: "Swapper", Kind: v.Kind()})
+ }
+ // Fast path for slices of size 0 and 1. Nothing to swap.
+ switch v.Len() {
+ case 0:
+ return func(i, j int) { panic("reflect: slice index out of range") }
+ case 1:
+ return func(i, j int) {
+ if i != 0 || j != 0 {
+ panic("reflect: slice index out of range")
+ }
+ }
+ }
+
+ typ := v.Type().Elem().(*rtype)
+ size := typ.Size()
+ hasPtr := typ.ptrdata != 0
+
+ // Some common & small cases, without using memmove:
+ if hasPtr {
+ if size == goarch.PtrSize {
+ ps := *(*[]unsafe.Pointer)(v.ptr)
+ return func(i, j int) { ps[i], ps[j] = ps[j], ps[i] }
+ }
+ if typ.Kind() == String {
+ ss := *(*[]string)(v.ptr)
+ return func(i, j int) { ss[i], ss[j] = ss[j], ss[i] }
+ }
+ } else {
+ switch size {
+ case 8:
+ is := *(*[]int64)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ case 4:
+ is := *(*[]int32)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ case 2:
+ is := *(*[]int16)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ case 1:
+ is := *(*[]int8)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ }
+ }
+
+ s := (*unsafeheader.Slice)(v.ptr)
+ tmp := unsafe_New(typ) // swap scratch space
+
+ return func(i, j int) {
+ if uint(i) >= uint(s.Len) || uint(j) >= uint(s.Len) {
+ panic("reflect: slice index out of range")
+ }
+ val1 := arrayAt(s.Data, i, size, "i < s.Len")
+ val2 := arrayAt(s.Data, j, size, "j < s.Len")
+ typedmemmove(typ, tmp, val1)
+ typedmemmove(typ, val1, val2)
+ typedmemmove(typ, val2, tmp)
+ }
+}
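A short usage sketch: the returned closure swaps elements of a slice whose element type is not known statically, for example to reverse it in place:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	s := []int{1, 2, 3, 4, 5}
	swap := reflect.Swapper(s)
	// Reverse s in place using the type-agnostic swap function.
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		swap(i, j)
	}
	fmt.Println(s) // [5 4 3 2 1]
}
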
diff --git a/src/reflect/tostring_test.go b/src/reflect/tostring_test.go
new file mode 100644
index 0000000..193484a
--- /dev/null
+++ b/src/reflect/tostring_test.go
@@ -0,0 +1,95 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Formatting of reflection types and values for debugging.
+// Not defined as methods so they do not need to be linked into most binaries;
+// the functions are not used by the library itself, only in tests.
+
+package reflect_test
+
+import (
+ . "reflect"
+ "strconv"
+)
+
+// valueToString returns a textual representation of the reflection value val.
+// For debugging only.
+func valueToString(val Value) string {
+ var str string
+ if !val.IsValid() {
+ return "<zero Value>"
+ }
+ typ := val.Type()
+ switch val.Kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ return strconv.FormatInt(val.Int(), 10)
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return strconv.FormatUint(val.Uint(), 10)
+ case Float32, Float64:
+ return strconv.FormatFloat(val.Float(), 'g', -1, 64)
+ case Complex64, Complex128:
+ c := val.Complex()
+ return strconv.FormatFloat(real(c), 'g', -1, 64) + "+" + strconv.FormatFloat(imag(c), 'g', -1, 64) + "i"
+ case String:
+ return val.String()
+ case Bool:
+ if val.Bool() {
+ return "true"
+ } else {
+ return "false"
+ }
+ case Pointer:
+ v := val
+ str = typ.String() + "("
+ if v.IsNil() {
+ str += "0"
+ } else {
+ str += "&" + valueToString(v.Elem())
+ }
+ str += ")"
+ return str
+ case Array, Slice:
+ v := val
+ str += typ.String()
+ str += "{"
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ str += ", "
+ }
+ str += valueToString(v.Index(i))
+ }
+ str += "}"
+ return str
+ case Map:
+ t := typ
+ str = t.String()
+ str += "{"
+ str += "<can't iterate on maps>"
+ str += "}"
+ return str
+ case Chan:
+ str = typ.String()
+ return str
+ case Struct:
+ t := typ
+ v := val
+ str += t.String()
+ str += "{"
+ for i, n := 0, v.NumField(); i < n; i++ {
+ if i > 0 {
+ str += ", "
+ }
+ str += valueToString(v.Field(i))
+ }
+ str += "}"
+ return str
+ case Interface:
+ return typ.String() + "(" + valueToString(val.Elem()) + ")"
+ case Func:
+ v := val
+ return typ.String() + "(" + strconv.FormatUint(uint64(v.Pointer()), 10) + ")"
+ default:
+ panic("valueToString: can't print type " + typ.String())
+ }
+}
diff --git a/src/reflect/type.go b/src/reflect/type.go
new file mode 100644
index 0000000..01d1456
--- /dev/null
+++ b/src/reflect/type.go
@@ -0,0 +1,3186 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package reflect implements run-time reflection, allowing a program to
+// manipulate objects with arbitrary types. The typical use is to take a value
+// with static type interface{} and extract its dynamic type information by
+// calling TypeOf, which returns a Type.
+//
+// A call to ValueOf returns a Value representing the run-time data.
+// Zero takes a Type and returns a Value representing a zero value
+// for that type.
+//
+// See "The Laws of Reflection" for an introduction to reflection in Go:
+// https://golang.org/doc/articles/laws_of_reflection.html
+package reflect
+
+import (
+ "internal/goarch"
+ "strconv"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+ "unsafe"
+)
+
+// Type is the representation of a Go type.
+//
+// Not all methods apply to all kinds of types. Restrictions,
+// if any, are noted in the documentation for each method.
+// Use the Kind method to find out the kind of type before
+// calling kind-specific methods. Calling a method
+// inappropriate to the kind of type causes a run-time panic.
+//
+// Type values are comparable, such as with the == operator,
+// so they can be used as map keys.
+// Two Type values are equal if they represent identical types.
+type Type interface {
+ // Methods applicable to all types.
+
+ // Align returns the alignment in bytes of a value of
+ // this type when allocated in memory.
+ Align() int
+
+ // FieldAlign returns the alignment in bytes of a value of
+ // this type when used as a field in a struct.
+ FieldAlign() int
+
+ // Method returns the i'th method in the type's method set.
+ // It panics if i is not in the range [0, NumMethod()).
+ //
+ // For a non-interface type T or *T, the returned Method's Type and Func
+ // fields describe a function whose first argument is the receiver,
+ // and only exported methods are accessible.
+ //
+ // For an interface type, the returned Method's Type field gives the
+ // method signature, without a receiver, and the Func field is nil.
+ //
+ // Methods are sorted in lexicographic order.
+ Method(int) Method
+
+ // MethodByName returns the method with that name in the type's
+ // method set and a boolean indicating if the method was found.
+ //
+ // For a non-interface type T or *T, the returned Method's Type and Func
+ // fields describe a function whose first argument is the receiver.
+ //
+ // For an interface type, the returned Method's Type field gives the
+ // method signature, without a receiver, and the Func field is nil.
+ MethodByName(string) (Method, bool)
+
+ // NumMethod returns the number of methods accessible using Method.
+ //
+ // For a non-interface type, it returns the number of exported methods.
+ //
+ // For an interface type, it returns the number of exported and unexported methods.
+ NumMethod() int
+
+ // Name returns the type's name within its package for a defined type.
+ // For other (non-defined) types it returns the empty string.
+ Name() string
+
+ // PkgPath returns a defined type's package path, that is, the import path
+ // that uniquely identifies the package, such as "encoding/base64".
+ // If the type was predeclared (string, error) or not defined (*T, struct{},
+ // []int, or A where A is an alias for a non-defined type), the package path
+ // will be the empty string.
+ PkgPath() string
+
+ // Size returns the number of bytes needed to store
+ // a value of the given type; it is analogous to unsafe.Sizeof.
+ Size() uintptr
+
+ // String returns a string representation of the type.
+ // The string representation may use shortened package names
+ // (e.g., base64 instead of "encoding/base64") and is not
+ // guaranteed to be unique among types. To test for type identity,
+ // compare the Types directly.
+ String() string
+
+ // Kind returns the specific kind of this type.
+ Kind() Kind
+
+ // Implements reports whether the type implements the interface type u.
+ Implements(u Type) bool
+
+ // AssignableTo reports whether a value of the type is assignable to type u.
+ AssignableTo(u Type) bool
+
+ // ConvertibleTo reports whether a value of the type is convertible to type u.
+ // Even if ConvertibleTo returns true, the conversion may still panic.
+ // For example, a slice of type []T is convertible to *[N]T,
+ // but the conversion will panic if its length is less than N.
+ ConvertibleTo(u Type) bool
+
+ // Comparable reports whether values of this type are comparable.
+ // Even if Comparable returns true, the comparison may still panic.
+ // For example, values of interface type are comparable,
+ // but the comparison will panic if their dynamic type is not comparable.
+ Comparable() bool
+
+ // Methods applicable only to some types, depending on Kind.
+ // The methods allowed for each kind are:
+ //
+ // Int*, Uint*, Float*, Complex*: Bits
+ // Array: Elem, Len
+ // Chan: ChanDir, Elem
+ // Func: In, NumIn, Out, NumOut, IsVariadic.
+ // Map: Key, Elem
+ // Pointer: Elem
+ // Slice: Elem
+ // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
+
+ // Bits returns the size of the type in bits.
+ // It panics if the type's Kind is not one of the
+ // sized or unsized Int, Uint, Float, or Complex kinds.
+ Bits() int
+
+ // ChanDir returns a channel type's direction.
+ // It panics if the type's Kind is not Chan.
+ ChanDir() ChanDir
+
+ // IsVariadic reports whether a function type's final input parameter
+ // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
+ // implicit actual type []T.
+ //
+ // For concreteness, if t represents func(x int, y ... float64), then
+ //
+ // t.NumIn() == 2
+ // t.In(0) is the reflect.Type for "int"
+ // t.In(1) is the reflect.Type for "[]float64"
+ // t.IsVariadic() == true
+ //
+ // IsVariadic panics if the type's Kind is not Func.
+ IsVariadic() bool
+
+ // Elem returns a type's element type.
+ // It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice.
+ Elem() Type
+
+ // Field returns a struct type's i'th field.
+ // It panics if the type's Kind is not Struct.
+ // It panics if i is not in the range [0, NumField()).
+ Field(i int) StructField
+
+ // FieldByIndex returns the nested field corresponding
+ // to the index sequence. It is equivalent to calling Field
+ // successively for each index i.
+ // It panics if the type's Kind is not Struct.
+ FieldByIndex(index []int) StructField
+
+ // FieldByName returns the struct field with the given name
+ // and a boolean indicating if the field was found.
+ FieldByName(name string) (StructField, bool)
+
+ // FieldByNameFunc returns the struct field with a name
+ // that satisfies the match function and a boolean indicating if
+ // the field was found.
+ //
+ // FieldByNameFunc considers the fields in the struct itself
+ // and then the fields in any embedded structs, in breadth first order,
+ // stopping at the shallowest nesting depth containing one or more
+ // fields satisfying the match function. If multiple fields at that depth
+ // satisfy the match function, they cancel each other
+ // and FieldByNameFunc returns no match.
+ // This behavior mirrors Go's handling of name lookup in
+ // structs containing embedded fields.
+ FieldByNameFunc(match func(string) bool) (StructField, bool)
+
+ // In returns the type of a function type's i'th input parameter.
+ // It panics if the type's Kind is not Func.
+ // It panics if i is not in the range [0, NumIn()).
+ In(i int) Type
+
+ // Key returns a map type's key type.
+ // It panics if the type's Kind is not Map.
+ Key() Type
+
+ // Len returns an array type's length.
+ // It panics if the type's Kind is not Array.
+ Len() int
+
+ // NumField returns a struct type's field count.
+ // It panics if the type's Kind is not Struct.
+ NumField() int
+
+ // NumIn returns a function type's input parameter count.
+ // It panics if the type's Kind is not Func.
+ NumIn() int
+
+ // NumOut returns a function type's output parameter count.
+ // It panics if the type's Kind is not Func.
+ NumOut() int
+
+ // Out returns the type of a function type's i'th output parameter.
+ // It panics if the type's Kind is not Func.
+ // It panics if i is not in the range [0, NumOut()).
+ Out(i int) Type
+
+ common() *rtype
+ uncommon() *uncommonType
+}
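+
+// For illustration, client code typically drives this interface through the
+// kind-specific accessors above, for example:
+//
+//	t := reflect.TypeOf(map[string][]int(nil))
+//	t.Kind()          // reflect.Map
+//	t.Key().Kind()    // reflect.String
+//	t.Elem().String() // "[]int"
+//	t.Elem().Elem()   // identical to reflect.TypeOf(0)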
+
+// BUG(rsc): FieldByName and related functions consider struct field names to be equal
+// if the names are equal, even if they are unexported names originating
+// in different packages. The practical effect of this is that the result of
+// t.FieldByName("x") is not well defined if the struct type t contains
+// multiple fields named x (embedded from different packages).
+// FieldByName may return one of the fields named x or may report that there are none.
+// See https://golang.org/issue/4876 for more details.
+
+/*
+ * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go).
+ * A few are also known to ../runtime/type.go to convey type information to debuggers.
+ */
+
+// A Kind represents the specific kind of type that a Type represents.
+// The zero Kind is not a valid kind.
+type Kind uint
+
+const (
+ Invalid Kind = iota
+ Bool
+ Int
+ Int8
+ Int16
+ Int32
+ Int64
+ Uint
+ Uint8
+ Uint16
+ Uint32
+ Uint64
+ Uintptr
+ Float32
+ Float64
+ Complex64
+ Complex128
+ Array
+ Chan
+ Func
+ Interface
+ Map
+ Pointer
+ Slice
+ String
+ Struct
+ UnsafePointer
+)
+
+// Ptr is the old name for the Pointer kind.
+const Ptr = Pointer
+
+// tflag is used by an rtype to signal what extra type information is
+// available in the memory directly following the rtype value.
+//
+// tflag values must be kept in sync with copies in:
+//
+// cmd/compile/internal/reflectdata/reflect.go
+// cmd/link/internal/ld/decodesym.go
+// runtime/type.go
+type tflag uint8
+
+const (
+ // tflagUncommon means that there is a pointer, *uncommonType,
+ // just beyond the outer type structure.
+ //
+ // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
+ // then t has uncommonType data and it can be accessed as:
+ //
+ // type tUncommon struct {
+ // structType
+ // u uncommonType
+ // }
+ // u := &(*tUncommon)(unsafe.Pointer(t)).u
+ tflagUncommon tflag = 1 << 0
+
+ // tflagExtraStar means the name in the str field has an
+ // extraneous '*' prefix. This is because for most types T in
+ // a program, the type *T also exists and reusing the str data
+ // saves binary size.
+ tflagExtraStar tflag = 1 << 1
+
+ // tflagNamed means the type has a name.
+ tflagNamed tflag = 1 << 2
+
+ // tflagRegularMemory means that equal and hash functions can treat
+ // this type as a single region of t.size bytes.
+ tflagRegularMemory tflag = 1 << 3
+)
+
+// rtype is the common implementation of most values.
+// It is embedded in other struct types.
+//
+// rtype must be kept in sync with ../runtime/type.go:/^type._type.
+type rtype struct {
+ size uintptr
+ ptrdata uintptr // number of bytes in the type that can contain pointers
+ hash uint32 // hash of type; avoids computation in hash tables
+ tflag tflag // extra type information flags
+ align uint8 // alignment of variable with this type
+ fieldAlign uint8 // alignment of struct field with this type
+ kind uint8 // enumeration for C
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
+ gcdata *byte // garbage collection data
+ str nameOff // string form
+ ptrToThis typeOff // type for pointer to this type, may be zero
+}
+
+// method represents a method on a non-interface type.
+type method struct {
+ name nameOff // name of method
+ mtyp typeOff // method type (without receiver)
+ ifn textOff // fn used in interface call (one-word receiver)
+ tfn textOff // fn used for normal method call
+}
+
+// uncommonType is present only for defined types or types with methods
+// (if T is a defined type, the uncommonTypes for T and *T have methods).
+// Using a pointer to this struct reduces the overall size required
+// to describe a non-defined type with no methods.
+type uncommonType struct {
+ pkgPath nameOff // import path; empty for built-in types like int, string
+ mcount uint16 // number of methods
+ xcount uint16 // number of exported methods
+ moff uint32 // offset from this uncommontype to [mcount]method
+ _ uint32 // unused
+}
+
+// ChanDir represents a channel type's direction.
+type ChanDir int
+
+const (
+ RecvDir ChanDir = 1 << iota // <-chan
+ SendDir // chan<-
+ BothDir = RecvDir | SendDir // chan
+)
+
+// arrayType represents a fixed array type.
+type arrayType struct {
+ rtype
+ elem *rtype // array element type
+ slice *rtype // slice type
+ len uintptr
+}
+
+// chanType represents a channel type.
+type chanType struct {
+ rtype
+ elem *rtype // channel element type
+ dir uintptr // channel direction (ChanDir)
+}
+
+// funcType represents a function type.
+//
+// A *rtype for each in and out parameter is stored in an array that
+// directly follows the funcType (and possibly its uncommonType). So
+// a function type with one method, one input, and one output is:
+//
+// struct {
+// funcType
+// uncommonType
+// [2]*rtype // [0] is in, [1] is out
+// }
+type funcType struct {
+ rtype
+ inCount uint16
+ outCount uint16 // top bit is set if last input parameter is ...
+}
+
+// imethod represents a method on an interface type.
+type imethod struct {
+ name nameOff // name of method
+ typ typeOff // .(*FuncType) underneath
+}
+
+// interfaceType represents an interface type.
+type interfaceType struct {
+ rtype
+ pkgPath name // import path
+ methods []imethod // sorted by hash
+}
+
+// mapType represents a map type.
+type mapType struct {
+ rtype
+ key *rtype // map key type
+ elem *rtype // map element (value) type
+ bucket *rtype // internal bucket structure
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
+ keysize uint8 // size of key slot
+ valuesize uint8 // size of value slot
+ bucketsize uint16 // size of bucket
+ flags uint32
+}
+
+// ptrType represents a pointer type.
+type ptrType struct {
+ rtype
+ elem *rtype // pointer element (pointed at) type
+}
+
+// sliceType represents a slice type.
+type sliceType struct {
+ rtype
+ elem *rtype // slice element type
+}
+
+// structField describes a single field in a struct type.
+type structField struct {
+ name name // name is always non-empty
+ typ *rtype // type of field
+ offset uintptr // byte offset of field
+}
+
+func (f *structField) embedded() bool {
+ return f.name.embedded()
+}
+
+// structType represents a struct type.
+type structType struct {
+ rtype
+ pkgPath name
+ fields []structField // sorted by offset
+}
+
+// name is an encoded type name with optional extra data.
+//
+// The first byte is a bit field containing:
+//
+// 1<<0 the name is exported
+// 1<<1 tag data follows the name
+// 1<<2 pkgPath nameOff follows the name and tag
+// 1<<3 the name is of an embedded (a.k.a. anonymous) field
+//
+// Following that, there is a varint-encoded length of the name,
+// followed by the name itself.
+//
+// If tag data is present, it also has a varint-encoded length
+// followed by the tag itself.
+//
+// If the import path follows, then 4 bytes at the end of
+// the data form a nameOff. The import path is only set for concrete
+// methods that are defined in a different package than their type.
+//
+// If a name starts with "*", then the exported bit represents
+// whether the pointed to type is exported.
+//
+// Note: this encoding must match here and in:
+// cmd/compile/internal/reflectdata/reflect.go
+// runtime/type.go
+// internal/reflectlite/type.go
+// cmd/link/internal/ld/decodesym.go
+
+type name struct {
+ bytes *byte
+}
+
+func (n name) data(off int, whySafe string) *byte {
+ return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
+}
+
+func (n name) isExported() bool {
+ return (*n.bytes)&(1<<0) != 0
+}
+
+func (n name) hasTag() bool {
+ return (*n.bytes)&(1<<1) != 0
+}
+
+func (n name) embedded() bool {
+ return (*n.bytes)&(1<<3) != 0
+}
+
+// readVarint parses a varint as encoded by encoding/binary.
+// It returns the number of encoded bytes and the encoded value.
+func (n name) readVarint(off int) (int, int) {
+ v := 0
+ for i := 0; ; i++ {
+ x := *n.data(off+i, "read varint")
+ v += int(x&0x7f) << (7 * i)
+ if x&0x80 == 0 {
+ return i + 1, v
+ }
+ }
+}
+
+// writeVarint writes n to buf in varint form. Returns the
+// number of bytes written. n must be nonnegative.
+// Writes at most 10 bytes.
+func writeVarint(buf []byte, n int) int {
+ for i := 0; ; i++ {
+ b := byte(n & 0x7f)
+ n >>= 7
+ if n == 0 {
+ buf[i] = b
+ return i + 1
+ }
+ buf[i] = b | 0x80
+ }
+}
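+
+// For illustration, this is the standard little-endian base-128 encoding:
+// the value 300 is written as the two bytes 0xAC, 0x02, and readVarint over
+// those bytes returns (2, 300).
+//
+//	var buf [10]byte
+//	n := writeVarint(buf[:], 300) // n == 2, buf[0] == 0xAC, buf[1] == 0x02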
+
+func (n name) name() string {
+ if n.bytes == nil {
+ return ""
+ }
+ i, l := n.readVarint(1)
+ return unsafe.String(n.data(1+i, "non-empty string"), l)
+}
+
+func (n name) tag() string {
+ if !n.hasTag() {
+ return ""
+ }
+ i, l := n.readVarint(1)
+ i2, l2 := n.readVarint(1 + i + l)
+ return unsafe.String(n.data(1+i+l+i2, "non-empty string"), l2)
+}
+
+func (n name) pkgPath() string {
+ if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
+ return ""
+ }
+ i, l := n.readVarint(1)
+ off := 1 + i + l
+ if n.hasTag() {
+ i2, l2 := n.readVarint(off)
+ off += i2 + l2
+ }
+ var nameOff int32
+ // Note that this field may not be aligned in memory,
+ // so we cannot use a direct int32 assignment here.
+ copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
+ pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
+ return pkgPathName.name()
+}
+
+func newName(n, tag string, exported, embedded bool) name {
+ if len(n) >= 1<<29 {
+ panic("reflect.nameFrom: name too long: " + n[:1024] + "...")
+ }
+ if len(tag) >= 1<<29 {
+ panic("reflect.nameFrom: tag too long: " + tag[:1024] + "...")
+ }
+ var nameLen [10]byte
+ var tagLen [10]byte
+ nameLenLen := writeVarint(nameLen[:], len(n))
+ tagLenLen := writeVarint(tagLen[:], len(tag))
+
+ var bits byte
+ l := 1 + nameLenLen + len(n)
+ if exported {
+ bits |= 1 << 0
+ }
+ if len(tag) > 0 {
+ l += tagLenLen + len(tag)
+ bits |= 1 << 1
+ }
+ if embedded {
+ bits |= 1 << 3
+ }
+
+ b := make([]byte, l)
+ b[0] = bits
+ copy(b[1:], nameLen[:nameLenLen])
+ copy(b[1+nameLenLen:], n)
+ if len(tag) > 0 {
+ tb := b[1+nameLenLen+len(n):]
+ copy(tb, tagLen[:tagLenLen])
+ copy(tb[tagLenLen:], tag)
+ }
+
+ return name{bytes: &b[0]}
+}
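+
+// For illustration, newName and the accessors above round-trip (the field
+// name and tag here are made up):
+//
+//	n := newName("Count", `json:"count"`, true, false)
+//	n.name()       // "Count"
+//	n.tag()        // `json:"count"`
+//	n.isExported() // true
+//	n.embedded()   // false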
+
+/*
+ * The compiler knows the exact layout of all the data structures above.
+ * The compiler does not know about the data structures and methods below.
+ */
+
+// Method represents a single method.
+type Method struct {
+ // Name is the method name.
+ Name string
+
+ // PkgPath is the package path that qualifies a lower case (unexported)
+ // method name. It is empty for upper case (exported) method names.
+ // The combination of PkgPath and Name uniquely identifies a method
+ // in a method set.
+ // See https://golang.org/ref/spec#Uniqueness_of_identifiers
+ PkgPath string
+
+ Type Type // method type
+ Func Value // func with receiver as first argument
+ Index int // index for Type.Method
+}
+
+// IsExported reports whether the method is exported.
+func (m Method) IsExported() bool {
+ return m.PkgPath == ""
+}
+
+const (
+ kindDirectIface = 1 << 5
+ kindGCProg = 1 << 6 // Type.gc points to GC program
+ kindMask = (1 << 5) - 1
+)
+
+// String returns the name of k.
+func (k Kind) String() string {
+ if uint(k) < uint(len(kindNames)) {
+ return kindNames[uint(k)]
+ }
+ return "kind" + strconv.Itoa(int(k))
+}
+
+var kindNames = []string{
+ Invalid: "invalid",
+ Bool: "bool",
+ Int: "int",
+ Int8: "int8",
+ Int16: "int16",
+ Int32: "int32",
+ Int64: "int64",
+ Uint: "uint",
+ Uint8: "uint8",
+ Uint16: "uint16",
+ Uint32: "uint32",
+ Uint64: "uint64",
+ Uintptr: "uintptr",
+ Float32: "float32",
+ Float64: "float64",
+ Complex64: "complex64",
+ Complex128: "complex128",
+ Array: "array",
+ Chan: "chan",
+ Func: "func",
+ Interface: "interface",
+ Map: "map",
+ Pointer: "ptr",
+ Slice: "slice",
+ String: "string",
+ Struct: "struct",
+ UnsafePointer: "unsafe.Pointer",
+}
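+
+// For example, known kinds map to their lower-case names and anything else
+// falls back to a numeric form:
+//
+//	Pointer.String()   // "ptr"
+//	Slice.String()     // "slice"
+//	Kind(127).String() // "kind127"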
+
+func (t *uncommonType) methods() []method {
+ if t.mcount == 0 {
+ return nil
+ }
+ return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
+}
+
+func (t *uncommonType) exportedMethods() []method {
+ if t.xcount == 0 {
+ return nil
+ }
+ return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount]
+}
+
+// resolveNameOff resolves a name offset from a base pointer.
+// The (*rtype).nameOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
+
+// resolveTypeOff resolves an *rtype offset from a base type.
+// The (*rtype).typeOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// resolveTextOff resolves a function pointer offset from a base type.
+// The (*rtype).textOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// addReflectOff adds a pointer to the reflection lookup map in the runtime.
+// It returns a new ID that can be used as a typeOff or textOff, and will
+// be resolved correctly. Implemented in the runtime package.
+func addReflectOff(ptr unsafe.Pointer) int32
+
+// resolveReflectName adds a name to the reflection lookup map in the runtime.
+// It returns a new nameOff that can be used to refer to the pointer.
+func resolveReflectName(n name) nameOff {
+ return nameOff(addReflectOff(unsafe.Pointer(n.bytes)))
+}
+
+// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
+// It returns a new typeOff that can be used to refer to the pointer.
+func resolveReflectType(t *rtype) typeOff {
+ return typeOff(addReflectOff(unsafe.Pointer(t)))
+}
+
+// resolveReflectText adds a function pointer to the reflection lookup map in
+// the runtime. It returns a new textOff that can be used to refer to the
+// pointer.
+func resolveReflectText(ptr unsafe.Pointer) textOff {
+ return textOff(addReflectOff(ptr))
+}
+
+type nameOff int32 // offset to a name
+type typeOff int32 // offset to an *rtype
+type textOff int32 // offset from top of text section
+
+func (t *rtype) nameOff(off nameOff) name {
+ return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
+}
+
+func (t *rtype) typeOff(off typeOff) *rtype {
+ return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
+}
+
+func (t *rtype) textOff(off textOff) unsafe.Pointer {
+ return resolveTextOff(unsafe.Pointer(t), int32(off))
+}
+
+func (t *rtype) uncommon() *uncommonType {
+ if t.tflag&tflagUncommon == 0 {
+ return nil
+ }
+ switch t.Kind() {
+ case Struct:
+ return &(*structTypeUncommon)(unsafe.Pointer(t)).u
+ case Pointer:
+ type u struct {
+ ptrType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Func:
+ type u struct {
+ funcType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Slice:
+ type u struct {
+ sliceType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Array:
+ type u struct {
+ arrayType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Chan:
+ type u struct {
+ chanType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Map:
+ type u struct {
+ mapType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Interface:
+ type u struct {
+ interfaceType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ default:
+ type u struct {
+ rtype
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ }
+}
+
+func (t *rtype) String() string {
+ s := t.nameOff(t.str).name()
+ if t.tflag&tflagExtraStar != 0 {
+ return s[1:]
+ }
+ return s
+}
+
+func (t *rtype) Size() uintptr { return t.size }
+
+func (t *rtype) Bits() int {
+ if t == nil {
+ panic("reflect: Bits of nil Type")
+ }
+ k := t.Kind()
+ if k < Int || k > Complex128 {
+ panic("reflect: Bits of non-arithmetic Type " + t.String())
+ }
+ return int(t.size) * 8
+}
+
+func (t *rtype) Align() int { return int(t.align) }
+
+func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
+
+func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
+
+func (t *rtype) pointers() bool { return t.ptrdata != 0 }
+
+func (t *rtype) common() *rtype { return t }
+
+func (t *rtype) exportedMethods() []method {
+ ut := t.uncommon()
+ if ut == nil {
+ return nil
+ }
+ return ut.exportedMethods()
+}
+
+func (t *rtype) NumMethod() int {
+ if t.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(t))
+ return tt.NumMethod()
+ }
+ return len(t.exportedMethods())
+}
+
+func (t *rtype) Method(i int) (m Method) {
+ if t.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(t))
+ return tt.Method(i)
+ }
+ methods := t.exportedMethods()
+ if i < 0 || i >= len(methods) {
+ panic("reflect: Method index out of range")
+ }
+ p := methods[i]
+ pname := t.nameOff(p.name)
+ m.Name = pname.name()
+ fl := flag(Func)
+ mtyp := t.typeOff(p.mtyp)
+ ft := (*funcType)(unsafe.Pointer(mtyp))
+ in := make([]Type, 0, 1+len(ft.in()))
+ in = append(in, t)
+ for _, arg := range ft.in() {
+ in = append(in, arg)
+ }
+ out := make([]Type, 0, len(ft.out()))
+ for _, ret := range ft.out() {
+ out = append(out, ret)
+ }
+ mt := FuncOf(in, out, ft.IsVariadic())
+ m.Type = mt
+ tfn := t.textOff(p.tfn)
+ fn := unsafe.Pointer(&tfn)
+ m.Func = Value{mt.(*rtype), fn, fl}
+
+ m.Index = i
+ return m
+}
+
+func (t *rtype) MethodByName(name string) (m Method, ok bool) {
+ if t.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(t))
+ return tt.MethodByName(name)
+ }
+ ut := t.uncommon()
+ if ut == nil {
+ return Method{}, false
+ }
+
+ methods := ut.exportedMethods()
+
+	// We are looking for the first index i where the method name becomes >= name.
+	// This is a copy of sort.Search, with f(h) replaced by (t.nameOff(methods[h].name).name() >= name).
+ i, j := 0, len(methods)
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if !(t.nameOff(methods[h].name).name() >= name) {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+ if i < len(methods) && name == t.nameOff(methods[i].name).name() {
+ return t.Method(i), true
+ }
+
+ return Method{}, false
+}
+
+func (t *rtype) PkgPath() string {
+ if t.tflag&tflagNamed == 0 {
+ return ""
+ }
+ ut := t.uncommon()
+ if ut == nil {
+ return ""
+ }
+ return t.nameOff(ut.pkgPath).name()
+}
+
+func (t *rtype) hasName() bool {
+ return t.tflag&tflagNamed != 0
+}
+
+func (t *rtype) Name() string {
+ if !t.hasName() {
+ return ""
+ }
+ s := t.String()
+ i := len(s) - 1
+ sqBrackets := 0
+ for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
+ switch s[i] {
+ case ']':
+ sqBrackets++
+ case '[':
+ sqBrackets--
+ }
+ i--
+ }
+ return s[i+1:]
+}
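+
+// For illustration, Name strips the package qualifier from String while
+// leaving any type-argument brackets intact (Pair is a hypothetical generic
+// type declared in package main):
+//
+//	reflect.TypeOf(Pair[string, int]{}).String() // e.g. "main.Pair[string,int]"
+//	reflect.TypeOf(Pair[string, int]{}).Name()   // "Pair[string,int]"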
+
+func (t *rtype) ChanDir() ChanDir {
+ if t.Kind() != Chan {
+ panic("reflect: ChanDir of non-chan type " + t.String())
+ }
+ tt := (*chanType)(unsafe.Pointer(t))
+ return ChanDir(tt.dir)
+}
+
+func (t *rtype) IsVariadic() bool {
+ if t.Kind() != Func {
+ panic("reflect: IsVariadic of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return tt.outCount&(1<<15) != 0
+}
+
+func (t *rtype) Elem() Type {
+ switch t.Kind() {
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Chan:
+ tt := (*chanType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Map:
+ tt := (*mapType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Pointer:
+ tt := (*ptrType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Slice:
+ tt := (*sliceType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ }
+ panic("reflect: Elem of invalid type " + t.String())
+}
+
+func (t *rtype) Field(i int) StructField {
+ if t.Kind() != Struct {
+ panic("reflect: Field of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.Field(i)
+}
+
+func (t *rtype) FieldByIndex(index []int) StructField {
+ if t.Kind() != Struct {
+ panic("reflect: FieldByIndex of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.FieldByIndex(index)
+}
+
+func (t *rtype) FieldByName(name string) (StructField, bool) {
+ if t.Kind() != Struct {
+ panic("reflect: FieldByName of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.FieldByName(name)
+}
+
+func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
+ if t.Kind() != Struct {
+ panic("reflect: FieldByNameFunc of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.FieldByNameFunc(match)
+}
+
+func (t *rtype) In(i int) Type {
+ if t.Kind() != Func {
+ panic("reflect: In of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return toType(tt.in()[i])
+}
+
+func (t *rtype) Key() Type {
+ if t.Kind() != Map {
+ panic("reflect: Key of non-map type " + t.String())
+ }
+ tt := (*mapType)(unsafe.Pointer(t))
+ return toType(tt.key)
+}
+
+func (t *rtype) Len() int {
+ if t.Kind() != Array {
+ panic("reflect: Len of non-array type " + t.String())
+ }
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return int(tt.len)
+}
+
+func (t *rtype) NumField() int {
+ if t.Kind() != Struct {
+ panic("reflect: NumField of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return len(tt.fields)
+}
+
+func (t *rtype) NumIn() int {
+ if t.Kind() != Func {
+ panic("reflect: NumIn of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return int(tt.inCount)
+}
+
+func (t *rtype) NumOut() int {
+ if t.Kind() != Func {
+ panic("reflect: NumOut of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return len(tt.out())
+}
+
+func (t *rtype) Out(i int) Type {
+ if t.Kind() != Func {
+ panic("reflect: Out of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return toType(tt.out()[i])
+}
+
+func (t *funcType) in() []*rtype {
+ uadd := unsafe.Sizeof(*t)
+ if t.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommonType{})
+ }
+ if t.inCount == 0 {
+ return nil
+ }
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount:t.inCount]
+}
+
+func (t *funcType) out() []*rtype {
+ uadd := unsafe.Sizeof(*t)
+ if t.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommonType{})
+ }
+ outCount := t.outCount & (1<<15 - 1)
+ if outCount == 0 {
+ return nil
+ }
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount : t.inCount+outCount]
+}
+
+// add returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(p) + x)
+}
+
+func (d ChanDir) String() string {
+ switch d {
+ case SendDir:
+ return "chan<-"
+ case RecvDir:
+ return "<-chan"
+ case BothDir:
+ return "chan"
+ }
+ return "ChanDir" + strconv.Itoa(int(d))
+}
+
+// Method returns the i'th method in the type's method set.
+func (t *interfaceType) Method(i int) (m Method) {
+ if i < 0 || i >= len(t.methods) {
+ return
+ }
+ p := &t.methods[i]
+ pname := t.nameOff(p.name)
+ m.Name = pname.name()
+ if !pname.isExported() {
+ m.PkgPath = pname.pkgPath()
+ if m.PkgPath == "" {
+ m.PkgPath = t.pkgPath.name()
+ }
+ }
+ m.Type = toType(t.typeOff(p.typ))
+ m.Index = i
+ return
+}
+
+// NumMethod returns the number of interface methods in the type's method set.
+func (t *interfaceType) NumMethod() int { return len(t.methods) }
+
+// MethodByName returns the method with the given name in the type's method set
+// and a boolean indicating whether the method was found.
+func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
+ if t == nil {
+ return
+ }
+ var p *imethod
+ for i := range t.methods {
+ p = &t.methods[i]
+ if t.nameOff(p.name).name() == name {
+ return t.Method(i), true
+ }
+ }
+ return
+}
+
+// A StructField describes a single field in a struct.
+type StructField struct {
+ // Name is the field name.
+ Name string
+
+ // PkgPath is the package path that qualifies a lower case (unexported)
+ // field name. It is empty for upper case (exported) field names.
+ // See https://golang.org/ref/spec#Uniqueness_of_identifiers
+ PkgPath string
+
+ Type Type // field type
+ Tag StructTag // field tag string
+ Offset uintptr // offset within struct, in bytes
+ Index []int // index sequence for Type.FieldByIndex
+ Anonymous bool // is an embedded field
+}
+
+// IsExported reports whether the field is exported.
+func (f StructField) IsExported() bool {
+ return f.PkgPath == ""
+}
+
+// A StructTag is the tag string in a struct field.
+//
+// By convention, tag strings are a concatenation of
+// optionally space-separated key:"value" pairs.
+// Each key is a non-empty string consisting of non-control
+// characters other than space (U+0020 ' '), quote (U+0022 '"'),
+// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
+// characters and Go string literal syntax.
+type StructTag string
+
+// Get returns the value associated with key in the tag string.
+// If there is no such key in the tag, Get returns the empty string.
+// If the tag does not have the conventional format, the value
+// returned by Get is unspecified. To determine whether a tag is
+// explicitly set to the empty string, use Lookup.
+func (tag StructTag) Get(key string) string {
+ v, _ := tag.Lookup(key)
+ return v
+}
+
+// Lookup returns the value associated with key in the tag string.
+// If the key is present in the tag the value (which may be empty)
+// is returned. Otherwise the returned value will be the empty string.
+// The ok return value reports whether the value was explicitly set in
+// the tag string. If the tag does not have the conventional format,
+// the value returned by Lookup is unspecified.
+func (tag StructTag) Lookup(key string) (value string, ok bool) {
+ // When modifying this code, also update the validateStructTag code
+ // in cmd/vet/structtag.go.
+
+ for tag != "" {
+ // Skip leading space.
+ i := 0
+ for i < len(tag) && tag[i] == ' ' {
+ i++
+ }
+ tag = tag[i:]
+ if tag == "" {
+ break
+ }
+
+ // Scan to colon. A space, a quote or a control character is a syntax error.
+ // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
+ // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
+ // as it is simpler to inspect the tag's bytes than the tag's runes.
+ i = 0
+ for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
+ i++
+ }
+ if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
+ break
+ }
+ name := string(tag[:i])
+ tag = tag[i+1:]
+
+ // Scan quoted string to find value.
+ i = 1
+ for i < len(tag) && tag[i] != '"' {
+ if tag[i] == '\\' {
+ i++
+ }
+ i++
+ }
+ if i >= len(tag) {
+ break
+ }
+ qvalue := string(tag[:i+1])
+ tag = tag[i+1:]
+
+ if key == name {
+ value, err := strconv.Unquote(qvalue)
+ if err != nil {
+ break
+ }
+ return value, true
+ }
+ }
+ return "", false
+}
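+
+// For illustration (the struct and its tags are made up):
+//
+//	type User struct {
+//		Name string `json:"name,omitempty" db:""`
+//	}
+//	f, _ := reflect.TypeOf(User{}).FieldByName("Name")
+//	f.Tag.Get("json")    // "name,omitempty"
+//	f.Tag.Lookup("db")   // "", true  (explicitly set to the empty string)
+//	f.Tag.Lookup("yaml") // "", false (key not present)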
+
+// Field returns the i'th struct field.
+func (t *structType) Field(i int) (f StructField) {
+ if i < 0 || i >= len(t.fields) {
+ panic("reflect: Field index out of bounds")
+ }
+ p := &t.fields[i]
+ f.Type = toType(p.typ)
+ f.Name = p.name.name()
+ f.Anonymous = p.embedded()
+ if !p.name.isExported() {
+ f.PkgPath = t.pkgPath.name()
+ }
+ if tag := p.name.tag(); tag != "" {
+ f.Tag = StructTag(tag)
+ }
+ f.Offset = p.offset
+
+ // NOTE(rsc): This is the only allocation in the interface
+ // presented by a reflect.Type. It would be nice to avoid,
+ // at least in the common cases, but we need to make sure
+ // that misbehaving clients of reflect cannot affect other
+ // uses of reflect. One possibility is CL 5371098, but we
+ // postponed that ugliness until there is a demonstrated
+ // need for the performance. This is issue 2320.
+ f.Index = []int{i}
+ return
+}
+
+// TODO(gri): Should there be an error/bool indicator if the index
+// is wrong for FieldByIndex?
+
+// FieldByIndex returns the nested field corresponding to index.
+func (t *structType) FieldByIndex(index []int) (f StructField) {
+ f.Type = toType(&t.rtype)
+ for i, x := range index {
+ if i > 0 {
+ ft := f.Type
+ if ft.Kind() == Pointer && ft.Elem().Kind() == Struct {
+ ft = ft.Elem()
+ }
+ f.Type = ft
+ }
+ f = f.Type.Field(x)
+ }
+ return
+}
+
+// A fieldScan represents an item on the fieldByNameFunc scan work list.
+type fieldScan struct {
+ typ *structType
+ index []int
+}
+
+// FieldByNameFunc returns the struct field with a name that satisfies the
+// match function and a boolean to indicate if the field was found.
+func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
+ // This uses the same condition that the Go language does: there must be a unique instance
+ // of the match at a given depth level. If there are multiple instances of a match at the
+ // same depth, they annihilate each other and inhibit any possible match at a lower level.
+ // The algorithm is breadth first search, one depth level at a time.
+
+ // The current and next slices are work queues:
+ // current lists the fields to visit on this depth level,
+ // and next lists the fields on the next lower level.
+ current := []fieldScan{}
+ next := []fieldScan{{typ: t}}
+
+ // nextCount records the number of times an embedded type has been
+ // encountered and considered for queueing in the 'next' slice.
+ // We only queue the first one, but we increment the count on each.
+ // If a struct type T can be reached more than once at a given depth level,
+ // then it annihilates itself and need not be considered at all when we
+ // process that next depth level.
+ var nextCount map[*structType]int
+
+ // visited records the structs that have been considered already.
+ // Embedded pointer fields can create cycles in the graph of
+ // reachable embedded types; visited avoids following those cycles.
+ // It also avoids duplicated effort: if we didn't find the field in an
+ // embedded type T at level 2, we won't find it in one at level 4 either.
+ visited := map[*structType]bool{}
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count := nextCount
+ nextCount = nil
+
+ // Process all the fields at this depth, now listed in 'current'.
+ // The loop queues embedded fields found in 'next', for processing during the next
+ // iteration. The multiplicity of the 'current' field counts is recorded
+ // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
+ for _, scan := range current {
+ t := scan.typ
+ if visited[t] {
+ // We've looked through this type before, at a higher level.
+ // That higher level would shadow the lower level we're now at,
+ // so this one can't be useful to us. Ignore it.
+ continue
+ }
+ visited[t] = true
+ for i := range t.fields {
+ f := &t.fields[i]
+ // Find name and (for embedded field) type for field f.
+ fname := f.name.name()
+ var ntyp *rtype
+ if f.embedded() {
+ // Embedded field of type T or *T.
+ ntyp = f.typ
+ if ntyp.Kind() == Pointer {
+ ntyp = ntyp.Elem().common()
+ }
+ }
+
+ // Does it match?
+ if match(fname) {
+ // Potential match
+ if count[t] > 1 || ok {
+ // Name appeared multiple times at this level: annihilate.
+ return StructField{}, false
+ }
+ result = t.Field(i)
+ result.Index = nil
+ result.Index = append(result.Index, scan.index...)
+ result.Index = append(result.Index, i)
+ ok = true
+ continue
+ }
+
+ // Queue embedded struct fields for processing with next level,
+ // but only if we haven't seen a match yet at this level and only
+ // if the embedded types haven't already been queued.
+ if ok || ntyp == nil || ntyp.Kind() != Struct {
+ continue
+ }
+ styp := (*structType)(unsafe.Pointer(ntyp))
+ if nextCount[styp] > 0 {
+ nextCount[styp] = 2 // exact multiple doesn't matter
+ continue
+ }
+ if nextCount == nil {
+ nextCount = map[*structType]int{}
+ }
+ nextCount[styp] = 1
+ if count[t] > 1 {
+ nextCount[styp] = 2 // exact multiple doesn't matter
+ }
+ var index []int
+ index = append(index, scan.index...)
+ index = append(index, i)
+ next = append(next, fieldScan{styp, index})
+ }
+ }
+ if ok {
+ break
+ }
+ }
+ return
+}
+
+// FieldByName returns the struct field with the given name
+// and a boolean to indicate if the field was found.
+func (t *structType) FieldByName(name string) (f StructField, present bool) {
+ // Quick check for top-level name, or struct without embedded fields.
+ hasEmbeds := false
+ if name != "" {
+ for i := range t.fields {
+ tf := &t.fields[i]
+ if tf.name.name() == name {
+ return t.Field(i), true
+ }
+ if tf.embedded() {
+ hasEmbeds = true
+ }
+ }
+ }
+ if !hasEmbeds {
+ return
+ }
+ return t.FieldByNameFunc(func(s string) bool { return s == name })
+}
+
+// TypeOf returns the reflection Type that represents the dynamic type of i.
+// If i is a nil interface value, TypeOf returns nil.
+func TypeOf(i any) Type {
+ eface := *(*emptyInterface)(unsafe.Pointer(&i))
+ return toType(eface.typ)
+}
+
+// rtypeOf directly extracts the *rtype of the provided value.
+func rtypeOf(i any) *rtype {
+ eface := *(*emptyInterface)(unsafe.Pointer(&i))
+ return eface.typ
+}
+
+// ptrMap is the cache for PointerTo.
+var ptrMap sync.Map // map[*rtype]*ptrType
+
+// PtrTo returns the pointer type with element t.
+// For example, if t represents type Foo, PtrTo(t) represents *Foo.
+//
+// PtrTo is the old spelling of PointerTo.
+// The two functions behave identically.
+func PtrTo(t Type) Type { return PointerTo(t) }
+
+// PointerTo returns the pointer type with element t.
+// For example, if t represents type Foo, PointerTo(t) represents *Foo.
+func PointerTo(t Type) Type {
+ return t.(*rtype).ptrTo()
+}
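+
+// For example:
+//
+//	pt := reflect.PointerTo(reflect.TypeOf(0))
+//	pt.Kind()   // reflect.Pointer
+//	pt.String() // "*int"
+//	pt.Elem()   // identical to reflect.TypeOf(0)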
+
+func (t *rtype) ptrTo() *rtype {
+ if t.ptrToThis != 0 {
+ return t.typeOff(t.ptrToThis)
+ }
+
+ // Check the cache.
+ if pi, ok := ptrMap.Load(t); ok {
+ return &pi.(*ptrType).rtype
+ }
+
+ // Look in known types.
+ s := "*" + t.String()
+ for _, tt := range typesByString(s) {
+ p := (*ptrType)(unsafe.Pointer(tt))
+ if p.elem != t {
+ continue
+ }
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
+ }
+
+ // Create a new ptrType starting with the description
+ // of an *unsafe.Pointer.
+ var iptr any = (*unsafe.Pointer)(nil)
+ prototype := *(**ptrType)(unsafe.Pointer(&iptr))
+ pp := *prototype
+
+ pp.str = resolveReflectName(newName(s, "", false, false))
+ pp.ptrToThis = 0
+
+ // For the type structures linked into the binary, the
+ // compiler provides a good hash of the string.
+ // Create a good hash for the new string by using
+ // the FNV-1 hash's mixing function to combine the
+ // old hash and the new "*".
+ pp.hash = fnv1(t.hash, '*')
+
+ pp.elem = t
+
+ pi, _ := ptrMap.LoadOrStore(t, &pp)
+ return &pi.(*ptrType).rtype
+}
+
+// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
+func fnv1(x uint32, list ...byte) uint32 {
+ for _, b := range list {
+ x = x*16777619 ^ uint32(b)
+ }
+ return x
+}
+
+func (t *rtype) Implements(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.Implements")
+ }
+ if u.Kind() != Interface {
+ panic("reflect: non-interface type passed to Type.Implements")
+ }
+ return implements(u.(*rtype), t)
+}
+
+func (t *rtype) AssignableTo(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.AssignableTo")
+ }
+ uu := u.(*rtype)
+ return directlyAssignable(uu, t) || implements(uu, t)
+}
+
+func (t *rtype) ConvertibleTo(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.ConvertibleTo")
+ }
+ uu := u.(*rtype)
+ return convertOp(uu, t) != nil
+}
+
+func (t *rtype) Comparable() bool {
+ return t.equal != nil
+}
+
+// implements reports whether the type V implements the interface type T.
+func implements(T, V *rtype) bool {
+ if T.Kind() != Interface {
+ return false
+ }
+ t := (*interfaceType)(unsafe.Pointer(T))
+ if len(t.methods) == 0 {
+ return true
+ }
+
+ // The same algorithm applies in both cases, but the
+ // method tables for an interface type and a concrete type
+ // are different, so the code is duplicated.
+ // In both cases the algorithm is a linear scan over the two
+ // lists - T's methods and V's methods - simultaneously.
+ // Since method tables are stored in a unique sorted order
+ // (alphabetical, with no duplicate method names), the scan
+ // through V's methods must hit a match for each of T's
+ // methods along the way, or else V does not implement T.
+ // This lets us run the scan in overall linear time instead of
+ // the quadratic time a naive search would require.
+ // See also ../runtime/iface.go.
+ if V.Kind() == Interface {
+ v := (*interfaceType)(unsafe.Pointer(V))
+ i := 0
+ for j := 0; j < len(v.methods); j++ {
+ tm := &t.methods[i]
+ tmName := t.nameOff(tm.name)
+ vm := &v.methods[j]
+ vmName := V.nameOff(vm.name)
+ if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
+ if !tmName.isExported() {
+ tmPkgPath := tmName.pkgPath()
+ if tmPkgPath == "" {
+ tmPkgPath = t.pkgPath.name()
+ }
+ vmPkgPath := vmName.pkgPath()
+ if vmPkgPath == "" {
+ vmPkgPath = v.pkgPath.name()
+ }
+ if tmPkgPath != vmPkgPath {
+ continue
+ }
+ }
+ if i++; i >= len(t.methods) {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ v := V.uncommon()
+ if v == nil {
+ return false
+ }
+ i := 0
+ vmethods := v.methods()
+ for j := 0; j < int(v.mcount); j++ {
+ tm := &t.methods[i]
+ tmName := t.nameOff(tm.name)
+ vm := vmethods[j]
+ vmName := V.nameOff(vm.name)
+ if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
+ if !tmName.isExported() {
+ tmPkgPath := tmName.pkgPath()
+ if tmPkgPath == "" {
+ tmPkgPath = t.pkgPath.name()
+ }
+ vmPkgPath := vmName.pkgPath()
+ if vmPkgPath == "" {
+ vmPkgPath = V.nameOff(v.pkgPath).name()
+ }
+ if tmPkgPath != vmPkgPath {
+ continue
+ }
+ }
+ if i++; i >= len(t.methods) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// specialChannelAssignability reports whether a value x of channel type V
+// can be directly assigned (using memmove) to another channel type T.
+// https://golang.org/doc/go_spec.html#Assignability
+// Both T and V must be of Chan kind.
+func specialChannelAssignability(T, V *rtype) bool {
+ // Special case:
+ // x is a bidirectional channel value, T is a channel type,
+ // x's type V and T have identical element types,
+ // and at least one of V or T is not a defined type.
+ return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
+}
+
+// directlyAssignable reports whether a value x of type V can be directly
+// assigned (using memmove) to a value of type T.
+// https://golang.org/doc/go_spec.html#Assignability
+// Ignoring the interface rules (implemented elsewhere)
+// and the ideal constant rules (no ideal constants at run time).
+func directlyAssignable(T, V *rtype) bool {
+ // x's type V is identical to T?
+ if T == V {
+ return true
+ }
+
+ // Otherwise at least one of T and V must not be defined
+ // and they must have the same kind.
+ if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
+ return false
+ }
+
+ if T.Kind() == Chan && specialChannelAssignability(T, V) {
+ return true
+ }
+
+ // x's type T and V must have identical underlying types.
+ return haveIdenticalUnderlyingType(T, V, true)
+}
+
+func haveIdenticalType(T, V Type, cmpTags bool) bool {
+ if cmpTags {
+ return T == V
+ }
+
+ if T.Name() != V.Name() || T.Kind() != V.Kind() || T.PkgPath() != V.PkgPath() {
+ return false
+ }
+
+ return haveIdenticalUnderlyingType(T.common(), V.common(), false)
+}
+
+func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
+ if T == V {
+ return true
+ }
+
+ kind := T.Kind()
+ if kind != V.Kind() {
+ return false
+ }
+
+ // Non-composite types of equal kind have same underlying type
+ // (the predefined instance of the type).
+ if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
+ return true
+ }
+
+ // Composite types.
+ switch kind {
+ case Array:
+ return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Chan:
+ return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Func:
+ t := (*funcType)(unsafe.Pointer(T))
+ v := (*funcType)(unsafe.Pointer(V))
+ if t.outCount != v.outCount || t.inCount != v.inCount {
+ return false
+ }
+ for i := 0; i < t.NumIn(); i++ {
+ if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
+ return false
+ }
+ }
+ for i := 0; i < t.NumOut(); i++ {
+ if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
+ return false
+ }
+ }
+ return true
+
+ case Interface:
+ t := (*interfaceType)(unsafe.Pointer(T))
+ v := (*interfaceType)(unsafe.Pointer(V))
+ if len(t.methods) == 0 && len(v.methods) == 0 {
+ return true
+ }
+ // Might have the same methods but still
+ // need a run time conversion.
+ return false
+
+ case Map:
+ return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Pointer, Slice:
+ return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Struct:
+ t := (*structType)(unsafe.Pointer(T))
+ v := (*structType)(unsafe.Pointer(V))
+ if len(t.fields) != len(v.fields) {
+ return false
+ }
+ if t.pkgPath.name() != v.pkgPath.name() {
+ return false
+ }
+ for i := range t.fields {
+ tf := &t.fields[i]
+ vf := &v.fields[i]
+ if tf.name.name() != vf.name.name() {
+ return false
+ }
+ if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
+ return false
+ }
+ if cmpTags && tf.name.tag() != vf.name.tag() {
+ return false
+ }
+ if tf.offset != vf.offset {
+ return false
+ }
+ if tf.embedded() != vf.embedded() {
+ return false
+ }
+ }
+ return true
+ }
+
+ return false
+}
+
+// typelinks is implemented in package runtime.
+// It returns a slice of the sections in each module,
+// and a slice of *rtype offsets in each module.
+//
+// The types in each module are sorted by string. That is, the first
+// two linked types of the first module are:
+//
+// d0 := sections[0]
+// t1 := (*rtype)(add(d0, offset[0][0]))
+// t2 := (*rtype)(add(d0, offset[0][1]))
+//
+// and
+//
+// t1.String() < t2.String()
+//
+// Note that strings are not unique identifiers for types:
+// there can be more than one with a given string.
+// Only types we might want to look up are included:
+// pointers, channels, maps, slices, and arrays.
+func typelinks() (sections []unsafe.Pointer, offset [][]int32)
+
+func rtypeOff(section unsafe.Pointer, off int32) *rtype {
+ return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
+}
+
+// typesByString returns the subslice of typelinks() whose elements have
+// the given string representation.
+// It may be empty (no known types with that string) or may have
+// multiple elements (multiple types with that string).
+func typesByString(s string) []*rtype {
+ sections, offset := typelinks()
+ var ret []*rtype
+
+ for offsI, offs := range offset {
+ section := sections[offsI]
+
+ // We are looking for the first index i where the string becomes >= s.
+ // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
+ i, j := 0, len(offs)
+ for i < j {
+ h := i + (j-i)>>1 // avoid overflow when computing h
+ // i ≤ h < j
+ if !(rtypeOff(section, offs[h]).String() >= s) {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+
+ // Having found the first, linear scan forward to find the last.
+ // We could do a second binary search, but the caller is going
+ // to do a linear scan anyway.
+ for j := i; j < len(offs); j++ {
+ typ := rtypeOff(section, offs[j])
+ if typ.String() != s {
+ break
+ }
+ ret = append(ret, typ)
+ }
+ }
+ return ret
+}
+
+// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
+var lookupCache sync.Map // map[cacheKey]*rtype
+
+// A cacheKey is the key for use in the lookupCache.
+// Four values describe any of the types we are looking for:
+// type kind, one or two subtypes, and an extra integer.
+type cacheKey struct {
+ kind Kind
+ t1 *rtype
+ t2 *rtype
+ extra uintptr
+}
+
+// The funcLookupCache caches FuncOf lookups.
+// FuncOf does not share the common lookupCache since cacheKey is not
+// sufficient to represent functions unambiguously.
+var funcLookupCache struct {
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
+ // Elements of m are append-only and thus safe for concurrent reading.
+ m sync.Map
+}
+
+// ChanOf returns the channel type with the given direction and element type.
+// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
+//
+// The gc runtime imposes a limit of 64 kB on channel element types.
+// If t's size is equal to or exceeds this limit, ChanOf panics.
+func ChanOf(dir ChanDir, t Type) Type {
+ typ := t.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
+ if ch, ok := lookupCache.Load(ckey); ok {
+ return ch.(*rtype)
+ }
+
+ // This restriction is imposed by the gc compiler and the runtime.
+ if typ.size >= 1<<16 {
+ panic("reflect.ChanOf: element size too large")
+ }
+
+ // Look in known types.
+ var s string
+ switch dir {
+ default:
+ panic("reflect.ChanOf: invalid dir")
+ case SendDir:
+ s = "chan<- " + typ.String()
+ case RecvDir:
+ s = "<-chan " + typ.String()
+ case BothDir:
+ typeStr := typ.String()
+ if typeStr[0] == '<' {
+ // typ is recv chan, need parentheses as "<-" associates with leftmost
+ // chan possible, see:
+ // * https://golang.org/ref/spec#Channel_types
+ // * https://github.com/golang/go/issues/39897
+ s = "chan (" + typeStr + ")"
+ } else {
+ s = "chan " + typeStr
+ }
+ }
+ for _, tt := range typesByString(s) {
+ ch := (*chanType)(unsafe.Pointer(tt))
+ if ch.elem == typ && ch.dir == uintptr(dir) {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make a channel type.
+ var ichan any = (chan unsafe.Pointer)(nil)
+ prototype := *(**chanType)(unsafe.Pointer(&ichan))
+ ch := *prototype
+ ch.tflag = tflagRegularMemory
+ ch.dir = uintptr(dir)
+ ch.str = resolveReflectName(newName(s, "", false, false))
+ ch.hash = fnv1(typ.hash, 'c', byte(dir))
+ ch.elem = typ
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
+ return ti.(Type)
+}
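+
+// For example:
+//
+//	reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(0)).String() // "<-chan int"
+//	reflect.ChanOf(reflect.BothDir, reflect.TypeOf(0)).String() // "chan int"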
+
+// MapOf returns the map type with the given key and element types.
+// For example, if k represents int and e represents string,
+// MapOf(k, e) represents map[int]string.
+//
+// If the key type is not a valid map key type (that is, if it does
+// not implement Go's == operator), MapOf panics.
+func MapOf(key, elem Type) Type {
+ ktyp := key.(*rtype)
+ etyp := elem.(*rtype)
+
+ if ktyp.equal == nil {
+ panic("reflect.MapOf: invalid key type " + ktyp.String())
+ }
+
+ // Look in cache.
+ ckey := cacheKey{Map, ktyp, etyp, 0}
+ if mt, ok := lookupCache.Load(ckey); ok {
+ return mt.(Type)
+ }
+
+ // Look in known types.
+ s := "map[" + ktyp.String() + "]" + etyp.String()
+ for _, tt := range typesByString(s) {
+ mt := (*mapType)(unsafe.Pointer(tt))
+ if mt.key == ktyp && mt.elem == etyp {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make a map type.
+ // Note: flag values must match those used in the TMAP case
+ // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
+ var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
+ mt := **(**mapType)(unsafe.Pointer(&imap))
+ mt.str = resolveReflectName(newName(s, "", false, false))
+ mt.tflag = 0
+ mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
+ mt.key = ktyp
+ mt.elem = etyp
+ mt.bucket = bucketOf(ktyp, etyp)
+ mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
+ return typehash(ktyp, p, seed)
+ }
+ mt.flags = 0
+ if ktyp.size > maxKeySize {
+ mt.keysize = uint8(goarch.PtrSize)
+ mt.flags |= 1 // indirect key
+ } else {
+ mt.keysize = uint8(ktyp.size)
+ }
+ if etyp.size > maxValSize {
+ mt.valuesize = uint8(goarch.PtrSize)
+ mt.flags |= 2 // indirect value
+ } else {
+ mt.valuesize = uint8(etyp.size)
+ }
+ mt.bucketsize = uint16(mt.bucket.size)
+ if isReflexive(ktyp) {
+ mt.flags |= 4
+ }
+ if needKeyUpdate(ktyp) {
+ mt.flags |= 8
+ }
+ if hashMightPanic(ktyp) {
+ mt.flags |= 16
+ }
+ mt.ptrToThis = 0
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+ return ti.(Type)
+}
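+
+// For example:
+//
+//	m := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
+//	m.String() // "map[string]int"
+//
+//	// Key types without a == operation, such as []byte, make MapOf panic:
+//	reflect.MapOf(reflect.TypeOf([]byte(nil)), reflect.TypeOf(0)) // panics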
+
+var funcTypes []Type
+var funcTypesMutex sync.Mutex
+
+func initFuncTypes(n int) Type {
+ funcTypesMutex.Lock()
+ defer funcTypesMutex.Unlock()
+ if n >= len(funcTypes) {
+ newFuncTypes := make([]Type, n+1)
+ copy(newFuncTypes, funcTypes)
+ funcTypes = newFuncTypes
+ }
+ if funcTypes[n] != nil {
+ return funcTypes[n]
+ }
+
+ funcTypes[n] = StructOf([]StructField{
+ {
+ Name: "FuncType",
+ Type: TypeOf(funcType{}),
+ },
+ {
+ Name: "Args",
+ Type: ArrayOf(n, TypeOf(&rtype{})),
+ },
+ })
+ return funcTypes[n]
+}
+
+// FuncOf returns the function type with the given argument and result types.
+// For example if k represents int and e represents string,
+// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
+//
+// The variadic argument controls whether the function is variadic. FuncOf
+// panics if variadic is true and in[len(in)-1] does not represent a slice.
+func FuncOf(in, out []Type, variadic bool) Type {
+ if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
+ panic("reflect.FuncOf: last arg of variadic func must be slice")
+ }
+
+ // Make a func type.
+ var ifunc any = (func())(nil)
+ prototype := *(**funcType)(unsafe.Pointer(&ifunc))
+ n := len(in) + len(out)
+
+ if n > 128 {
+ panic("reflect.FuncOf: too many arguments")
+ }
+
+ o := New(initFuncTypes(n)).Elem()
+ ft := (*funcType)(unsafe.Pointer(o.Field(0).Addr().Pointer()))
+ args := unsafe.Slice((**rtype)(unsafe.Pointer(o.Field(1).Addr().Pointer())), n)[0:0:n]
+ *ft = *prototype
+
+ // Build a hash and minimally populate ft.
+ var hash uint32
+ for _, in := range in {
+ t := in.(*rtype)
+ args = append(args, t)
+ hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
+ }
+ if variadic {
+ hash = fnv1(hash, 'v')
+ }
+ hash = fnv1(hash, '.')
+ for _, out := range out {
+ t := out.(*rtype)
+ args = append(args, t)
+ hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
+ }
+
+ ft.tflag = 0
+ ft.hash = hash
+ ft.inCount = uint16(len(in))
+ ft.outCount = uint16(len(out))
+ if variadic {
+ ft.outCount |= 1 << 15
+ }
+
+ // Look in cache.
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ // Not in cache, lock and retry.
+ funcLookupCache.Lock()
+ defer funcLookupCache.Unlock()
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ addToCache := func(tt *rtype) Type {
+ var rts []*rtype
+ if rti, ok := funcLookupCache.m.Load(hash); ok {
+ rts = rti.([]*rtype)
+ }
+ funcLookupCache.m.Store(hash, append(rts, tt))
+ return tt
+ }
+
+ // Look in known types for the same string representation.
+ str := funcStr(ft)
+ for _, tt := range typesByString(str) {
+ if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
+ return addToCache(tt)
+ }
+ }
+
+ // Populate the remaining fields of ft and store in cache.
+ ft.str = resolveReflectName(newName(str, "", false, false))
+ ft.ptrToThis = 0
+ return addToCache(&ft.rtype)
+}
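+
+// For example:
+//
+//	intT, strT := reflect.TypeOf(0), reflect.TypeOf("")
+//	reflect.FuncOf([]reflect.Type{intT}, []reflect.Type{strT}, false).String()     // "func(int) string"
+//	reflect.FuncOf([]reflect.Type{reflect.TypeOf([]int(nil))}, nil, true).String() // "func(...int)"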
+
+// funcStr builds a string representation of a funcType.
+func funcStr(ft *funcType) string {
+ repr := make([]byte, 0, 64)
+ repr = append(repr, "func("...)
+ for i, t := range ft.in() {
+ if i > 0 {
+ repr = append(repr, ", "...)
+ }
+ if ft.IsVariadic() && i == int(ft.inCount)-1 {
+ repr = append(repr, "..."...)
+ repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...)
+ } else {
+ repr = append(repr, t.String()...)
+ }
+ }
+ repr = append(repr, ')')
+ out := ft.out()
+ if len(out) == 1 {
+ repr = append(repr, ' ')
+ } else if len(out) > 1 {
+ repr = append(repr, " ("...)
+ }
+ for i, t := range out {
+ if i > 0 {
+ repr = append(repr, ", "...)
+ }
+ repr = append(repr, t.String()...)
+ }
+ if len(out) > 1 {
+ repr = append(repr, ')')
+ }
+ return string(repr)
+}
+
+// isReflexive reports whether the == operation on the type is reflexive.
+// That is, x == x for all values x of type t.
+func isReflexive(t *rtype) bool {
+ switch t.Kind() {
+ case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
+ return true
+ case Float32, Float64, Complex64, Complex128, Interface:
+ return false
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return isReflexive(tt.elem)
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if !isReflexive(f.typ) {
+ return false
+ }
+ }
+ return true
+ default:
+ // Func, Map, Slice, Invalid
+ panic("isReflexive called on non-key type " + t.String())
+ }
+}
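+
+// For illustration, floating-point keys are the classic non-reflexive case,
+// which is why the map implementation must handle them specially:
+//
+//	nan := math.NaN()
+//	nan == nan // false, so a map key holding NaN can never be found again by lookup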
+
+// needKeyUpdate reports whether map overwrites require the key to be copied.
+func needKeyUpdate(t *rtype) bool {
+ switch t.Kind() {
+ case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer:
+ return false
+ case Float32, Float64, Complex64, Complex128, Interface, String:
+ // Float keys can be updated from +0 to -0.
+ // String keys can be updated to use a smaller backing store.
+	// Interfaces might have floats or strings in them.
+ return true
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return needKeyUpdate(tt.elem)
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if needKeyUpdate(f.typ) {
+ return true
+ }
+ }
+ return false
+ default:
+ // Func, Map, Slice, Invalid
+ panic("needKeyUpdate called on non-key type " + t.String())
+ }
+}
+
+// hashMightPanic reports whether the hash of a map key of type t might panic.
+func hashMightPanic(t *rtype) bool {
+ switch t.Kind() {
+ case Interface:
+ return true
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return hashMightPanic(tt.elem)
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if hashMightPanic(f.typ) {
+ return true
+ }
+ }
+ return false
+ default:
+ return false
+ }
+}
+
+// Make sure these routines stay in sync with ../runtime/map.go!
+// These types exist only for GC, so we only fill out GC relevant info.
+// Currently, that's just size and the GC program. We also fill in string
+// for possible debugging use.
+const (
+ bucketSize uintptr = 8
+ maxKeySize uintptr = 128
+ maxValSize uintptr = 128
+)
+
+func bucketOf(ktyp, etyp *rtype) *rtype {
+ if ktyp.size > maxKeySize {
+ ktyp = PointerTo(ktyp).(*rtype)
+ }
+ if etyp.size > maxValSize {
+ etyp = PointerTo(etyp).(*rtype)
+ }
+
+ // Prepare GC data if any.
+ // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
+ // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
+ // Note that since the key and value are known to be <= 128 bytes,
+ // they're guaranteed to have bitmaps instead of GC programs.
+ var gcdata *byte
+ var ptrdata uintptr
+
+ size := bucketSize*(1+ktyp.size+etyp.size) + goarch.PtrSize
+ if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
+ panic("reflect: bad size computation in MapOf")
+ }
+
+ if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
+ nptr := (bucketSize*(1+ktyp.size+etyp.size) + goarch.PtrSize) / goarch.PtrSize
+ n := (nptr + 7) / 8
+ // Runtime needs pointer masks to be a multiple of uintptr in size.
+ n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
+ mask := make([]byte, n)
+ base := bucketSize / goarch.PtrSize
+
+ if ktyp.ptrdata != 0 {
+ emitGCMask(mask, base, ktyp, bucketSize)
+ }
+ base += bucketSize * ktyp.size / goarch.PtrSize
+
+ if etyp.ptrdata != 0 {
+ emitGCMask(mask, base, etyp, bucketSize)
+ }
+ base += bucketSize * etyp.size / goarch.PtrSize
+
+ word := base
+ mask[word/8] |= 1 << (word % 8)
+ gcdata = &mask[0]
+ ptrdata = (word + 1) * goarch.PtrSize
+
+ // overflow word must be last
+ if ptrdata != size {
+ panic("reflect: bad layout computation in MapOf")
+ }
+ }
+
+ b := &rtype{
+ align: goarch.PtrSize,
+ size: size,
+ kind: uint8(Struct),
+ ptrdata: ptrdata,
+ gcdata: gcdata,
+ }
+ s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
+ b.str = resolveReflectName(newName(s, "", false, false))
+ return b
+}
+
+func (t *rtype) gcSlice(begin, end uintptr) []byte {
+ return (*[1 << 30]byte)(unsafe.Pointer(t.gcdata))[begin:end:end]
+}
+
+// emitGCMask writes the GC mask for [n]typ into out, starting at bit
+// offset base.
+func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) {
+ if typ.kind&kindGCProg != 0 {
+ panic("reflect: unexpected GC program")
+ }
+ ptrs := typ.ptrdata / goarch.PtrSize
+ words := typ.size / goarch.PtrSize
+ mask := typ.gcSlice(0, (ptrs+7)/8)
+ for j := uintptr(0); j < ptrs; j++ {
+ if (mask[j/8]>>(j%8))&1 != 0 {
+ for i := uintptr(0); i < n; i++ {
+ k := base + i*words + j
+ out[k/8] |= 1 << (k % 8)
+ }
+ }
+ }
+}
+
+// appendGCProg appends the GC program for the first ptrdata bytes of
+// typ to dst and returns the extended slice.
+func appendGCProg(dst []byte, typ *rtype) []byte {
+ if typ.kind&kindGCProg != 0 {
+ // Element has GC program; emit one element.
+ n := uintptr(*(*uint32)(unsafe.Pointer(typ.gcdata)))
+ prog := typ.gcSlice(4, 4+n-1)
+ return append(dst, prog...)
+ }
+
+ // Element is small with pointer mask; use as literal bits.
+ ptrs := typ.ptrdata / goarch.PtrSize
+ mask := typ.gcSlice(0, (ptrs+7)/8)
+
+ // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
+ for ; ptrs > 120; ptrs -= 120 {
+ dst = append(dst, 120)
+ dst = append(dst, mask[:15]...)
+ mask = mask[15:]
+ }
+
+ dst = append(dst, byte(ptrs))
+ dst = append(dst, mask...)
+ return dst
+}
+
+// SliceOf returns the slice type with element type t.
+// For example, if t represents int, SliceOf(t) represents []int.
+func SliceOf(t Type) Type {
+ typ := t.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Slice, typ, nil, 0}
+ if slice, ok := lookupCache.Load(ckey); ok {
+ return slice.(Type)
+ }
+
+ // Look in known types.
+ s := "[]" + typ.String()
+ for _, tt := range typesByString(s) {
+ slice := (*sliceType)(unsafe.Pointer(tt))
+ if slice.elem == typ {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make a slice type.
+ var islice any = ([]unsafe.Pointer)(nil)
+ prototype := *(**sliceType)(unsafe.Pointer(&islice))
+ slice := *prototype
+ slice.tflag = 0
+ slice.str = resolveReflectName(newName(s, "", false, false))
+ slice.hash = fnv1(typ.hash, '[')
+ slice.elem = typ
+ slice.ptrToThis = 0
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+ return ti.(Type)
+}
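+
+// A minimal usage sketch (assuming a client program importing "fmt" and "reflect"):
+//
+//	t := reflect.SliceOf(reflect.TypeOf(0)) // []int
+//	s := reflect.MakeSlice(t, 0, 4)
+//	s = reflect.Append(s, reflect.ValueOf(7))
+//	fmt.Println(t, s.Interface()) // []int [7]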
+
+// The structLookupCache caches StructOf lookups.
+// StructOf does not share the common lookupCache since we need to pin
+// the memory associated with *structTypeFixedN.
+var structLookupCache struct {
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
+ // Elements in m are append-only and thus safe for concurrent reading.
+ m sync.Map
+}
+
+type structTypeUncommon struct {
+ structType
+ u uncommonType
+}
+
+// isLetter reports whether a given 'rune' is classified as a Letter.
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
+}
+
+// isValidFieldName checks if a string is a valid (struct) field name or not.
+//
+// According to the language spec, a field name should be an identifier.
+//
+// identifier = letter { letter | unicode_digit } .
+// letter = unicode_letter | "_" .
+func isValidFieldName(fieldName string) bool {
+ for i, c := range fieldName {
+ if i == 0 && !isLetter(c) {
+ return false
+ }
+
+ if !(isLetter(c) || unicode.IsDigit(c)) {
+ return false
+ }
+ }
+
+ return len(fieldName) > 0
+}
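+
+// For example, "Name", "_tmp", and "名前" are accepted, while "", "1x", and
+// "a-b" are rejected.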
+
+// StructOf returns the struct type containing fields.
+// The Offset and Index fields are ignored and computed as they would be
+// by the compiler.
+//
+// StructOf currently does not generate wrapper methods for embedded
+// fields and panics if passed unexported StructFields.
+// These limitations may be lifted in a future version.
+func StructOf(fields []StructField) Type {
+ var (
+ hash = fnv1(0, []byte("struct {")...)
+ size uintptr
+ typalign uint8
+ comparable = true
+ methods []method
+
+ fs = make([]structField, len(fields))
+ repr = make([]byte, 0, 64)
+ fset = map[string]struct{}{} // fields' names
+
+ hasGCProg = false // records whether a struct-field type has a GCProg
+ )
+
+ lastzero := uintptr(0)
+ repr = append(repr, "struct {"...)
+ pkgpath := ""
+ for i, field := range fields {
+ if field.Name == "" {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
+ }
+ if !isValidFieldName(field.Name) {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
+ }
+ if field.Type == nil {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
+ }
+ f, fpkgpath := runtimeStructField(field)
+ ft := f.typ
+ if ft.kind&kindGCProg != 0 {
+ hasGCProg = true
+ }
+ if fpkgpath != "" {
+ if pkgpath == "" {
+ pkgpath = fpkgpath
+ } else if pkgpath != fpkgpath {
+ panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
+ }
+ }
+
+ // Update string and hash
+ name := f.name.name()
+ hash = fnv1(hash, []byte(name)...)
+ repr = append(repr, (" " + name)...)
+ if f.embedded() {
+ // Embedded field
+ if f.typ.Kind() == Pointer {
+ // Embedded ** and *interface{} are illegal
+ elem := ft.Elem()
+ if k := elem.Kind(); k == Pointer || k == Interface {
+ panic("reflect.StructOf: illegal embedded field type " + ft.String())
+ }
+ }
+
+ switch f.typ.Kind() {
+ case Interface:
+ ift := (*interfaceType)(unsafe.Pointer(ft))
+ for im, m := range ift.methods {
+ if ift.nameOff(m.name).pkgPath() != "" {
+ // TODO(sbinet). Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+
+ var (
+ mtyp = ift.typeOff(m.typ)
+ ifield = i
+ imethod = im
+ ifn Value
+ tfn Value
+ )
+
+ if ft.kind&kindDirectIface != 0 {
+ tfn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = in[0]
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ ifn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = in[0]
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ } else {
+ tfn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = in[0]
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ ifn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = Indirect(in[0])
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ }
+
+ methods = append(methods, method{
+ name: resolveReflectName(ift.nameOff(m.name)),
+ mtyp: resolveReflectType(mtyp),
+ ifn: resolveReflectText(unsafe.Pointer(&ifn)),
+ tfn: resolveReflectText(unsafe.Pointer(&tfn)),
+ })
+ }
+ case Pointer:
+ ptr := (*ptrType)(unsafe.Pointer(ft))
+ if unt := ptr.uncommon(); unt != nil {
+ if i > 0 && unt.mcount > 0 {
+ // Issue 15924.
+ panic("reflect: embedded type with methods not implemented if type is not first field")
+ }
+ if len(fields) > 1 {
+ panic("reflect: embedded type with methods not implemented if there is more than one field")
+ }
+ for _, m := range unt.methods() {
+ mname := ptr.nameOff(m.name)
+ if mname.pkgPath() != "" {
+ // TODO(sbinet).
+ // Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+ methods = append(methods, method{
+ name: resolveReflectName(mname),
+ mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ptr.textOff(m.ifn)),
+ tfn: resolveReflectText(ptr.textOff(m.tfn)),
+ })
+ }
+ }
+ if unt := ptr.elem.uncommon(); unt != nil {
+ for _, m := range unt.methods() {
+ mname := ptr.nameOff(m.name)
+ if mname.pkgPath() != "" {
+ // TODO(sbinet)
+ // Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+ methods = append(methods, method{
+ name: resolveReflectName(mname),
+ mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ptr.elem.textOff(m.ifn)),
+ tfn: resolveReflectText(ptr.elem.textOff(m.tfn)),
+ })
+ }
+ }
+ default:
+ if unt := ft.uncommon(); unt != nil {
+ if i > 0 && unt.mcount > 0 {
+ // Issue 15924.
+ panic("reflect: embedded type with methods not implemented if type is not first field")
+ }
+ if len(fields) > 1 && ft.kind&kindDirectIface != 0 {
+ panic("reflect: embedded type with methods not implemented for non-pointer type")
+ }
+ for _, m := range unt.methods() {
+ mname := ft.nameOff(m.name)
+ if mname.pkgPath() != "" {
+ // TODO(sbinet)
+ // Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+ methods = append(methods, method{
+ name: resolveReflectName(mname),
+ mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ft.textOff(m.ifn)),
+ tfn: resolveReflectText(ft.textOff(m.tfn)),
+ })
+
+ }
+ }
+ }
+ }
+ if _, dup := fset[name]; dup && name != "_" {
+ panic("reflect.StructOf: duplicate field " + name)
+ }
+ fset[name] = struct{}{}
+
+ hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash))
+
+ repr = append(repr, (" " + ft.String())...)
+ if f.name.hasTag() {
+ hash = fnv1(hash, []byte(f.name.tag())...)
+ repr = append(repr, (" " + strconv.Quote(f.name.tag()))...)
+ }
+ if i < len(fields)-1 {
+ repr = append(repr, ';')
+ }
+
+ comparable = comparable && (ft.equal != nil)
+
+ offset := align(size, uintptr(ft.align))
+ if offset < size {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
+ if ft.align > typalign {
+ typalign = ft.align
+ }
+ size = offset + ft.size
+ if size < offset {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
+ f.offset = offset
+
+ if ft.size == 0 {
+ lastzero = size
+ }
+
+ fs[i] = f
+ }
+
+ if size > 0 && lastzero == size {
+ // This is a non-zero sized struct that ends in a
+ // zero-sized field. We add an extra byte of padding,
+ // to ensure that taking the address of the final
+ // zero-sized field can't manufacture a pointer to the
+ // next object in the heap. See issue 9401.
+ size++
+ if size == 0 {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
+ }
+
+ var typ *structType
+ var ut *uncommonType
+
+ if len(methods) == 0 {
+ t := new(structTypeUncommon)
+ typ = &t.structType
+ ut = &t.u
+ } else {
+ // A *rtype representing a struct is followed directly in memory by an
+ // array of method objects representing the methods attached to the
+ // struct. To get the same layout for a run time generated type, we
+ // need an array directly following the uncommonType memory.
+ // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
+ tt := New(StructOf([]StructField{
+ {Name: "S", Type: TypeOf(structType{})},
+ {Name: "U", Type: TypeOf(uncommonType{})},
+ {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))},
+ }))
+
+ typ = (*structType)(tt.Elem().Field(0).Addr().UnsafePointer())
+ ut = (*uncommonType)(tt.Elem().Field(1).Addr().UnsafePointer())
+
+ copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]method), methods)
+ }
+ // TODO(sbinet): Once we allow embedding multiple types,
+ // methods will need to be sorted like the compiler does.
+ // TODO(sbinet): Once we allow non-exported methods, we will
+ // need to compute xcount as the number of exported methods.
+ ut.mcount = uint16(len(methods))
+ ut.xcount = ut.mcount
+ ut.moff = uint32(unsafe.Sizeof(uncommonType{}))
+
+ if len(fs) > 0 {
+ repr = append(repr, ' ')
+ }
+ repr = append(repr, '}')
+ hash = fnv1(hash, '}')
+ str := string(repr)
+
+ // Round the size up to be a multiple of the alignment.
+ s := align(size, uintptr(typalign))
+ if s < size {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
+ size = s
+
+ // Make the struct type.
+ var istruct any = struct{}{}
+ prototype := *(**structType)(unsafe.Pointer(&istruct))
+ *typ = *prototype
+ typ.fields = fs
+ if pkgpath != "" {
+ typ.pkgPath = newName(pkgpath, "", false, false)
+ }
+
+ // Look in cache.
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ // Not in cache, lock and retry.
+ structLookupCache.Lock()
+ defer structLookupCache.Unlock()
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ addToCache := func(t Type) Type {
+ var ts []Type
+ if ti, ok := structLookupCache.m.Load(hash); ok {
+ ts = ti.([]Type)
+ }
+ structLookupCache.m.Store(hash, append(ts, t))
+ return t
+ }
+
+ // Look in known types.
+ for _, t := range typesByString(str) {
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ // even if 't' wasn't a structType with methods, we should be ok
+ // as the 'u uncommonType' field won't be accessed except when
+ // tflag&tflagUncommon is set.
+ return addToCache(t)
+ }
+ }
+
+ typ.str = resolveReflectName(newName(str, "", false, false))
+ typ.tflag = 0 // TODO: set tflagRegularMemory
+ typ.hash = hash
+ typ.size = size
+ typ.ptrdata = typeptrdata(typ.common())
+ typ.align = typalign
+ typ.fieldAlign = typalign
+ typ.ptrToThis = 0
+ if len(methods) > 0 {
+ typ.tflag |= tflagUncommon
+ }
+
+ if hasGCProg {
+ lastPtrField := 0
+ for i, ft := range fs {
+ if ft.typ.pointers() {
+ lastPtrField = i
+ }
+ }
+ prog := []byte{0, 0, 0, 0} // will be length of prog
+ var off uintptr
+ for i, ft := range fs {
+ if i > lastPtrField {
+ // gcprog should not include anything for any field after
+ // the last field that contains pointer data
+ break
+ }
+ if !ft.typ.pointers() {
+ // Ignore pointerless fields.
+ continue
+ }
+ // Pad to start of this field with zeros.
+ if ft.offset > off {
+ n := (ft.offset - off) / goarch.PtrSize
+ prog = append(prog, 0x01, 0x00) // emit a 0 bit
+ if n > 1 {
+ prog = append(prog, 0x81) // repeat previous bit
+ prog = appendVarint(prog, n-1) // n-1 times
+ }
+ off = ft.offset
+ }
+
+ prog = appendGCProg(prog, ft.typ)
+ off += ft.typ.ptrdata
+ }
+ prog = append(prog, 0)
+ *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
+ typ.kind |= kindGCProg
+ typ.gcdata = &prog[0]
+ } else {
+ typ.kind &^= kindGCProg
+ bv := new(bitVector)
+ addTypeBits(bv, 0, typ.common())
+ if len(bv.data) > 0 {
+ typ.gcdata = &bv.data[0]
+ }
+ }
+ typ.equal = nil
+ if comparable {
+ typ.equal = func(p, q unsafe.Pointer) bool {
+ for _, ft := range typ.fields {
+ pi := add(p, ft.offset, "&x.field safe")
+ qi := add(q, ft.offset, "&x.field safe")
+ if !ft.typ.equal(pi, qi) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ switch {
+ case len(fs) == 1 && !ifaceIndir(fs[0].typ):
+ // structs of 1 direct iface type can be direct
+ typ.kind |= kindDirectIface
+ default:
+ typ.kind &^= kindDirectIface
+ }
+
+ return addToCache(&typ.rtype)
+}
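+
+// A minimal usage sketch (assuming a client program importing "fmt" and "reflect"):
+//
+//	t := reflect.StructOf([]reflect.StructField{
+//		{Name: "Name", Type: reflect.TypeOf(""), Tag: `json:"name"`},
+//		{Name: "Age", Type: reflect.TypeOf(0), Tag: `json:"age"`},
+//	})
+//	v := reflect.New(t).Elem()
+//	v.Field(0).SetString("gopher")
+//	v.Field(1).SetInt(11)
+//	fmt.Printf("%+v\n", v.Interface()) // {Name:gopher Age:11}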
+
+// runtimeStructField takes a StructField value passed to StructOf and
+// returns both the corresponding internal representation, of type
+// structField, and the pkgpath value to use for this field.
+func runtimeStructField(field StructField) (structField, string) {
+ if field.Anonymous && field.PkgPath != "" {
+ panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
+ }
+
+ if field.IsExported() {
+ // Best-effort check for misuse.
+ // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through.
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
+ }
+ }
+
+ resolveReflectType(field.Type.common()) // install in runtime
+ f := structField{
+ name: newName(field.Name, string(field.Tag), field.IsExported(), field.Anonymous),
+ typ: field.Type.common(),
+ offset: 0,
+ }
+ return f, field.PkgPath
+}
+
+// typeptrdata returns the length in bytes of the prefix of t
+// containing pointer data. Anything after this offset is scalar data.
+// keep in sync with ../cmd/compile/internal/reflectdata/reflect.go
+func typeptrdata(t *rtype) uintptr {
+ switch t.Kind() {
+ case Struct:
+ st := (*structType)(unsafe.Pointer(t))
+ // find the last field that has pointers.
+ field := -1
+ for i := range st.fields {
+ ft := st.fields[i].typ
+ if ft.pointers() {
+ field = i
+ }
+ }
+ if field == -1 {
+ return 0
+ }
+ f := st.fields[field]
+ return f.offset + f.typ.ptrdata
+
+ default:
+ panic("reflect.typeptrdata: unexpected type, " + t.String())
+ }
+}
+
+// See cmd/compile/internal/reflectdata/reflect.go for derivation of constant.
+const maxPtrmaskBytes = 2048
+
+// ArrayOf returns the array type with the given length and element type.
+// For example, if t represents int, ArrayOf(5, t) represents [5]int.
+//
+// If the resulting type would be larger than the available address space,
+// ArrayOf panics.
+func ArrayOf(length int, elem Type) Type {
+ if length < 0 {
+ panic("reflect: negative length passed to ArrayOf")
+ }
+
+ typ := elem.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Array, typ, nil, uintptr(length)}
+ if array, ok := lookupCache.Load(ckey); ok {
+ return array.(Type)
+ }
+
+ // Look in known types.
+ s := "[" + strconv.Itoa(length) + "]" + typ.String()
+ for _, tt := range typesByString(s) {
+ array := (*arrayType)(unsafe.Pointer(tt))
+ if array.elem == typ {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make an array type.
+ var iarray any = [1]unsafe.Pointer{}
+ prototype := *(**arrayType)(unsafe.Pointer(&iarray))
+ array := *prototype
+ array.tflag = typ.tflag & tflagRegularMemory
+ array.str = resolveReflectName(newName(s, "", false, false))
+ array.hash = fnv1(typ.hash, '[')
+ for n := uint32(length); n > 0; n >>= 8 {
+ array.hash = fnv1(array.hash, byte(n))
+ }
+ array.hash = fnv1(array.hash, ']')
+ array.elem = typ
+ array.ptrToThis = 0
+ if typ.size > 0 {
+ max := ^uintptr(0) / typ.size
+ if uintptr(length) > max {
+ panic("reflect.ArrayOf: array size would exceed virtual address space")
+ }
+ }
+ array.size = typ.size * uintptr(length)
+ if length > 0 && typ.ptrdata != 0 {
+ array.ptrdata = typ.size*uintptr(length-1) + typ.ptrdata
+ }
+ array.align = typ.align
+ array.fieldAlign = typ.fieldAlign
+ array.len = uintptr(length)
+ array.slice = SliceOf(elem).(*rtype)
+
+ switch {
+ case typ.ptrdata == 0 || array.size == 0:
+ // No pointers.
+ array.gcdata = nil
+ array.ptrdata = 0
+
+ case length == 1:
+ // In memory, 1-element array looks just like the element.
+ array.kind |= typ.kind & kindGCProg
+ array.gcdata = typ.gcdata
+ array.ptrdata = typ.ptrdata
+
+ case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*goarch.PtrSize:
+ // Element is small with pointer mask; array is still small.
+ // Create direct pointer mask by turning each 1 bit in elem
+ // into length 1 bits in larger mask.
+ n := (array.ptrdata/goarch.PtrSize + 7) / 8
+ // Runtime needs pointer masks to be a multiple of uintptr in size.
+ n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
+ mask := make([]byte, n)
+ emitGCMask(mask, 0, typ, array.len)
+ array.gcdata = &mask[0]
+
+ default:
+ // Create program that emits one element
+ // and then repeats to make the array.
+ prog := []byte{0, 0, 0, 0} // will be length of prog
+ prog = appendGCProg(prog, typ)
+ // Pad from ptrdata to size.
+ elemPtrs := typ.ptrdata / goarch.PtrSize
+ elemWords := typ.size / goarch.PtrSize
+ if elemPtrs < elemWords {
+ // Emit literal 0 bit, then repeat as needed.
+ prog = append(prog, 0x01, 0x00)
+ if elemPtrs+1 < elemWords {
+ prog = append(prog, 0x81)
+ prog = appendVarint(prog, elemWords-elemPtrs-1)
+ }
+ }
+ // Repeat length-1 times.
+ if elemWords < 0x80 {
+ prog = append(prog, byte(elemWords|0x80))
+ } else {
+ prog = append(prog, 0x80)
+ prog = appendVarint(prog, elemWords)
+ }
+ prog = appendVarint(prog, uintptr(length)-1)
+ prog = append(prog, 0)
+ *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
+ array.kind |= kindGCProg
+ array.gcdata = &prog[0]
+ array.ptrdata = array.size // overestimate but ok; must match program
+ }
+
+ etyp := typ.common()
+ esize := etyp.Size()
+
+ array.equal = nil
+ if eequal := etyp.equal; eequal != nil {
+ array.equal = func(p, q unsafe.Pointer) bool {
+ for i := 0; i < length; i++ {
+ pi := arrayAt(p, i, esize, "i < length")
+ qi := arrayAt(q, i, esize, "i < length")
+ if !eequal(pi, qi) {
+ return false
+ }
+
+ }
+ return true
+ }
+ }
+
+ switch {
+ case length == 1 && !ifaceIndir(typ):
+ // array of 1 direct iface type can be direct
+ array.kind |= kindDirectIface
+ default:
+ array.kind &^= kindDirectIface
+ }
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
+ return ti.(Type)
+}
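+
+// A minimal usage sketch (assuming a client program importing "fmt" and "reflect"):
+//
+//	t := reflect.ArrayOf(4, reflect.TypeOf(byte(0))) // [4]uint8
+//	a := reflect.New(t).Elem()
+//	a.Index(0).SetUint('g')
+//	fmt.Println(t, a.Len()) // [4]uint8 4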
+
+func appendVarint(x []byte, v uintptr) []byte {
+ for ; v >= 0x80; v >>= 7 {
+ x = append(x, byte(v|0x80))
+ }
+ x = append(x, byte(v))
+ return x
+}
+
+// toType converts from a *rtype to a Type that can be returned
+// to the client of package reflect. In gc, the only concern is that
+// a nil *rtype must be replaced by a nil Type, but in gccgo this
+// function takes care of ensuring that multiple *rtype for the same
+// type are coalesced into a single Type.
+func toType(t *rtype) Type {
+ if t == nil {
+ return nil
+ }
+ return t
+}
+
+type layoutKey struct {
+ ftyp *funcType // function signature
+ rcvr *rtype // receiver type, or nil if none
+}
+
+type layoutType struct {
+ t *rtype
+ framePool *sync.Pool
+ abid abiDesc
+}
+
+var layoutCache sync.Map // map[layoutKey]layoutType
+
+// funcLayout computes a struct type representing the layout of the
+// stack-assigned function arguments and return values for the function
+// type t.
+// If rcvr != nil, rcvr specifies the type of the receiver.
+// The returned type exists only for GC, so we only fill out GC relevant info.
+// Currently, that's just size and the GC program. We also fill in
+// the name for possible debugging use.
+func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Pool, abid abiDesc) {
+ if t.Kind() != Func {
+ panic("reflect: funcLayout of non-func type " + t.String())
+ }
+ if rcvr != nil && rcvr.Kind() == Interface {
+ panic("reflect: funcLayout with interface receiver " + rcvr.String())
+ }
+ k := layoutKey{t, rcvr}
+ if lti, ok := layoutCache.Load(k); ok {
+ lt := lti.(layoutType)
+ return lt.t, lt.framePool, lt.abid
+ }
+
+ // Compute the ABI layout.
+ abid = newAbiDesc(t, rcvr)
+
+ // build dummy rtype holding gc program
+ x := &rtype{
+ align: goarch.PtrSize,
+ // Don't add spill space here; it's only necessary in
+ // reflectcall's frame, not in the allocated frame.
+ // TODO(mknyszek): Remove this comment when register
+ // spill space in the frame is no longer required.
+ size: align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
+ ptrdata: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
+ }
+ if abid.stackPtrs.n > 0 {
+ x.gcdata = &abid.stackPtrs.data[0]
+ }
+
+ var s string
+ if rcvr != nil {
+ s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
+ } else {
+ s = "funcargs(" + t.String() + ")"
+ }
+ x.str = resolveReflectName(newName(s, "", false, false))
+
+ // cache result for future callers
+ framePool = &sync.Pool{New: func() any {
+ return unsafe_New(x)
+ }}
+ lti, _ := layoutCache.LoadOrStore(k, layoutType{
+ t: x,
+ framePool: framePool,
+ abid: abid,
+ })
+ lt := lti.(layoutType)
+ return lt.t, lt.framePool, lt.abid
+}
+
+// ifaceIndir reports whether t is stored indirectly in an interface value.
+func ifaceIndir(t *rtype) bool {
+ return t.kind&kindDirectIface == 0
+}
+
+// Note: this type must agree with runtime.bitvector.
+type bitVector struct {
+ n uint32 // number of bits
+ data []byte
+}
+
+// append a bit to the bitmap.
+func (bv *bitVector) append(bit uint8) {
+ if bv.n%(8*goarch.PtrSize) == 0 {
+ // Runtime needs pointer masks to be a multiple of uintptr in size.
+ // Since reflect passes bv.data directly to the runtime as a pointer mask,
+ // we append a full uintptr of zeros at a time.
+ for i := 0; i < goarch.PtrSize; i++ {
+ bv.data = append(bv.data, 0)
+ }
+ }
+ bv.data[bv.n/8] |= bit << (bv.n % 8)
+ bv.n++
+}
+
+func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
+ if t.ptrdata == 0 {
+ return
+ }
+
+ switch Kind(t.kind & kindMask) {
+ case Chan, Func, Map, Pointer, Slice, String, UnsafePointer:
+ // 1 pointer at start of representation
+ for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
+ bv.append(0)
+ }
+ bv.append(1)
+
+ case Interface:
+ // 2 pointers
+ for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
+ bv.append(0)
+ }
+ bv.append(1)
+ bv.append(1)
+
+ case Array:
+ // repeat inner type
+ tt := (*arrayType)(unsafe.Pointer(t))
+ for i := 0; i < int(tt.len); i++ {
+ addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
+ }
+
+ case Struct:
+ // apply fields
+ tt := (*structType)(unsafe.Pointer(t))
+ for i := range tt.fields {
+ f := &tt.fields[i]
+ addTypeBits(bv, offset+f.offset, f.typ)
+ }
+ }
+}
diff --git a/src/reflect/value.go b/src/reflect/value.go
new file mode 100644
index 0000000..42bb5ea
--- /dev/null
+++ b/src/reflect/value.go
@@ -0,0 +1,3860 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+import (
+ "errors"
+ "internal/abi"
+ "internal/goarch"
+ "internal/itoa"
+ "internal/unsafeheader"
+ "math"
+ "runtime"
+ "unsafe"
+)
+
+// Value is the reflection interface to a Go value.
+//
+// Not all methods apply to all kinds of values. Restrictions,
+// if any, are noted in the documentation for each method.
+// Use the Kind method to find out the kind of value before
+// calling kind-specific methods. Calling a method
+// inappropriate to the kind of type causes a run time panic.
+//
+// The zero Value represents no value.
+// Its IsValid method returns false, its Kind method returns Invalid,
+// its String method returns "<invalid Value>", and all other methods panic.
+// Most functions and methods never return an invalid value.
+// If one does, its documentation states the conditions explicitly.
+//
+// A Value can be used concurrently by multiple goroutines provided that
+// the underlying Go value can be used concurrently for the equivalent
+// direct operations.
+//
+// To compare two Values, compare the results of the Interface method.
+// Using == on two Values does not compare the underlying values
+// they represent.
+type Value struct {
+ // typ holds the type of the value represented by a Value.
+ typ *rtype
+
+ // Pointer-valued data or, if flagIndir is set, pointer to data.
+ // Valid when either flagIndir is set or typ.pointers() is true.
+ ptr unsafe.Pointer
+
+ // flag holds metadata about the value.
+ //
+ // The lowest five bits give the Kind of the value, mirroring typ.Kind().
+ //
+ // The next set of bits are flag bits:
+ // - flagStickyRO: obtained via unexported not embedded field, so read-only
+ // - flagEmbedRO: obtained via unexported embedded field, so read-only
+ // - flagIndir: val holds a pointer to the data
+ // - flagAddr: v.CanAddr is true (implies flagIndir and ptr is non-nil)
+ // - flagMethod: v is a method value.
+ // If ifaceIndir(typ), code can assume that flagIndir is set.
+ //
+ // The remaining 22+ bits give a method number for method values.
+ // If flag.kind() != Func, code can assume that flagMethod is unset.
+ flag
+
+ // A method value represents a curried method invocation
+ // like r.Read for some receiver r. The typ+val+flag bits describe
+ // the receiver r, but the flag's Kind bits say Func (methods are
+ // functions), and the top bits of the flag give the method number
+ // in r's type's method table.
+}
+
+type flag uintptr
+
+const (
+ flagKindWidth = 5 // there are 27 kinds
+ flagKindMask flag = 1<<flagKindWidth - 1
+ flagStickyRO flag = 1 << 5
+ flagEmbedRO flag = 1 << 6
+ flagIndir flag = 1 << 7
+ flagAddr flag = 1 << 8
+ flagMethod flag = 1 << 9
+ flagMethodShift = 10
+ flagRO flag = flagStickyRO | flagEmbedRO
+)
+
+func (f flag) kind() Kind {
+ return Kind(f & flagKindMask)
+}
+
+func (f flag) ro() flag {
+ if f&flagRO != 0 {
+ return flagStickyRO
+ }
+ return 0
+}
+
+// pointer returns the underlying pointer represented by v.
+// v.Kind() must be Pointer, Map, Chan, Func, or UnsafePointer.
+// If v.Kind() == Pointer, the base type must not be not-in-heap.
+func (v Value) pointer() unsafe.Pointer {
+ if v.typ.size != goarch.PtrSize || !v.typ.pointers() {
+ panic("can't call pointer on a non-pointer Value")
+ }
+ if v.flag&flagIndir != 0 {
+ return *(*unsafe.Pointer)(v.ptr)
+ }
+ return v.ptr
+}
+
+// packEface converts v to the empty interface.
+func packEface(v Value) any {
+ t := v.typ
+ var i any
+ e := (*emptyInterface)(unsafe.Pointer(&i))
+ // First, fill in the data portion of the interface.
+ switch {
+ case ifaceIndir(t):
+ if v.flag&flagIndir == 0 {
+ panic("bad indir")
+ }
+ // Value is indirect, and so is the interface we're making.
+ ptr := v.ptr
+ if v.flag&flagAddr != 0 {
+ // TODO: pass safe boolean from valueInterface so
+ // we don't need to copy if safe==true?
+ c := unsafe_New(t)
+ typedmemmove(t, c, ptr)
+ ptr = c
+ }
+ e.word = ptr
+ case v.flag&flagIndir != 0:
+ // Value is indirect, but interface is direct. We need
+ // to load the data at v.ptr into the interface data word.
+ e.word = *(*unsafe.Pointer)(v.ptr)
+ default:
+ // Value is direct, and so is the interface.
+ e.word = v.ptr
+ }
+ // Now, fill in the type portion. We're very careful here not
+ // to have any operation between the e.word and e.typ assignments
+ // that would let the garbage collector observe the partially-built
+ // interface value.
+ e.typ = t
+ return i
+}
+
+// unpackEface converts the empty interface i to a Value.
+func unpackEface(i any) Value {
+ e := (*emptyInterface)(unsafe.Pointer(&i))
+ // NOTE: don't read e.word until we know whether it is really a pointer or not.
+ t := e.typ
+ if t == nil {
+ return Value{}
+ }
+ f := flag(t.Kind())
+ if ifaceIndir(t) {
+ f |= flagIndir
+ }
+ return Value{t, e.word, f}
+}
+
+// A ValueError occurs when a Value method is invoked on
+// a Value that does not support it. Such cases are documented
+// in the description of each method.
+type ValueError struct {
+ Method string
+ Kind Kind
+}
+
+func (e *ValueError) Error() string {
+ if e.Kind == 0 {
+ return "reflect: call of " + e.Method + " on zero Value"
+ }
+ return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
+}
+
+// valueMethodName returns the name of the exported calling method on Value.
+func valueMethodName() string {
+ var pc [5]uintptr
+ n := runtime.Callers(1, pc[:])
+ frames := runtime.CallersFrames(pc[:n])
+ var frame runtime.Frame
+ for more := true; more; {
+ const prefix = "reflect.Value."
+ frame, more = frames.Next()
+ name := frame.Function
+ if len(name) > len(prefix) && name[:len(prefix)] == prefix {
+ methodName := name[len(prefix):]
+ if len(methodName) > 0 && 'A' <= methodName[0] && methodName[0] <= 'Z' {
+ return name
+ }
+ }
+ }
+ return "unknown method"
+}
+
+// emptyInterface is the header for an interface{} value.
+type emptyInterface struct {
+ typ *rtype
+ word unsafe.Pointer
+}
+
+// nonEmptyInterface is the header for an interface value with methods.
+type nonEmptyInterface struct {
+ // see ../runtime/iface.go:/Itab
+ itab *struct {
+ ityp *rtype // static interface type
+ typ *rtype // dynamic concrete type
+ hash uint32 // copy of typ.hash
+ _ [4]byte
+ fun [100000]unsafe.Pointer // method table
+ }
+ word unsafe.Pointer
+}
+
+// mustBe panics if f's kind is not expected.
+// Making this a method on flag instead of on Value
+// (and embedding flag in Value) means that we can write
+// the very clear v.mustBe(Bool) and have it compile into
+// v.flag.mustBe(Bool), which will only bother to copy the
+// single important word for the receiver.
+func (f flag) mustBe(expected Kind) {
+ // TODO(mvdan): use f.kind() again once mid-stack inlining gets better
+ if Kind(f&flagKindMask) != expected {
+ panic(&ValueError{valueMethodName(), f.kind()})
+ }
+}
+
+// mustBeExported panics if f records that the value was obtained using
+// an unexported field.
+func (f flag) mustBeExported() {
+ if f == 0 || f&flagRO != 0 {
+ f.mustBeExportedSlow()
+ }
+}
+
+func (f flag) mustBeExportedSlow() {
+ if f == 0 {
+ panic(&ValueError{valueMethodName(), Invalid})
+ }
+ if f&flagRO != 0 {
+ panic("reflect: " + valueMethodName() + " using value obtained using unexported field")
+ }
+}
+
+// mustBeAssignable panics if f records that the value is not assignable,
+// which is to say that either it was obtained using an unexported field
+// or it is not addressable.
+func (f flag) mustBeAssignable() {
+ if f&flagRO != 0 || f&flagAddr == 0 {
+ f.mustBeAssignableSlow()
+ }
+}
+
+func (f flag) mustBeAssignableSlow() {
+ if f == 0 {
+ panic(&ValueError{valueMethodName(), Invalid})
+ }
+ // Assignable if addressable and not read-only.
+ if f&flagRO != 0 {
+ panic("reflect: " + valueMethodName() + " using value obtained using unexported field")
+ }
+ if f&flagAddr == 0 {
+ panic("reflect: " + valueMethodName() + " using unaddressable value")
+ }
+}
+
+// Addr returns a pointer value representing the address of v.
+// It panics if CanAddr() returns false.
+// Addr is typically used to obtain a pointer to a struct field
+// or slice element in order to call a method that requires a
+// pointer receiver.
+func (v Value) Addr() Value {
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.Addr of unaddressable value")
+ }
+ // Preserve flagRO instead of using v.flag.ro() so that
+ // v.Addr().Elem() is equivalent to v (#32772)
+ fl := v.flag & flagRO
+ return Value{v.typ.ptrTo(), v.ptr, fl | flag(Pointer)}
+}
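+
+// A minimal usage sketch (assuming a client program importing "fmt" and "reflect"):
+//
+//	type counter struct{ N int }
+//	c := counter{}
+//	v := reflect.ValueOf(&c).Elem() // addressable struct value
+//	p := v.FieldByName("N").Addr().Interface().(*int)
+//	*p = 42
+//	fmt.Println(c.N) // 42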
+
+// Bool returns v's underlying value.
+// It panics if v's kind is not Bool.
+func (v Value) Bool() bool {
+ // panicNotBool is split out to keep Bool inlineable.
+ if v.kind() != Bool {
+ v.panicNotBool()
+ }
+ return *(*bool)(v.ptr)
+}
+
+func (v Value) panicNotBool() {
+ v.mustBe(Bool)
+}
+
+var bytesType = rtypeOf(([]byte)(nil))
+
+// Bytes returns v's underlying value.
+// It panics if v's underlying value is not a slice of bytes or
+// an addressable array of bytes.
+func (v Value) Bytes() []byte {
+ // bytesSlow is split out to keep Bytes inlineable for unnamed []byte.
+ if v.typ == bytesType {
+ return *(*[]byte)(v.ptr)
+ }
+ return v.bytesSlow()
+}
+
+func (v Value) bytesSlow() []byte {
+ switch v.kind() {
+ case Slice:
+ if v.typ.Elem().Kind() != Uint8 {
+ panic("reflect.Value.Bytes of non-byte slice")
+ }
+ // Slice is always bigger than a word; assume flagIndir.
+ return *(*[]byte)(v.ptr)
+ case Array:
+ if v.typ.Elem().Kind() != Uint8 {
+ panic("reflect.Value.Bytes of non-byte array")
+ }
+ if !v.CanAddr() {
+ panic("reflect.Value.Bytes of unaddressable byte array")
+ }
+ p := (*byte)(v.ptr)
+ n := int((*arrayType)(unsafe.Pointer(v.typ)).len)
+ return unsafe.Slice(p, n)
+ }
+ panic(&ValueError{"reflect.Value.Bytes", v.kind()})
+}
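+
+// A minimal sketch of both accepted cases (assuming a client program importing
+// "fmt" and "reflect"):
+//
+//	b := []byte("gopher")
+//	fmt.Println(string(reflect.ValueOf(b).Bytes())) // gopher
+//
+//	var a [3]byte
+//	av := reflect.ValueOf(&a).Elem() // addressable byte array
+//	copy(av.Bytes(), "abc")
+//	fmt.Println(a) // [97 98 99]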
+
+// runes returns v's underlying value.
+// It panics if v's underlying value is not a slice of runes (int32s).
+func (v Value) runes() []rune {
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Int32 {
+ panic("reflect.Value.Bytes of non-rune slice")
+ }
+ // Slice is always bigger than a word; assume flagIndir.
+ return *(*[]rune)(v.ptr)
+}
+
+// CanAddr reports whether the value's address can be obtained with Addr.
+// Such values are called addressable. A value is addressable if it is
+// an element of a slice, an element of an addressable array,
+// a field of an addressable struct, or the result of dereferencing a pointer.
+// If CanAddr returns false, calling Addr will panic.
+func (v Value) CanAddr() bool {
+ return v.flag&flagAddr != 0
+}
+
+// CanSet reports whether the value of v can be changed.
+// A Value can be changed only if it is addressable and was not
+// obtained by the use of unexported struct fields.
+// If CanSet returns false, calling Set or any type-specific
+// setter (e.g., SetBool, SetInt) will panic.
+func (v Value) CanSet() bool {
+ return v.flag&(flagAddr|flagRO) == flagAddr
+}
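+
+// A minimal sketch of the distinction (assuming a client program importing
+// "fmt" and "reflect"):
+//
+//	x := 1
+//	fmt.Println(reflect.ValueOf(x).CanSet())         // false: the Value holds a copy of x
+//	fmt.Println(reflect.ValueOf(&x).Elem().CanSet()) // true: addressable and exported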
+
+// Call calls the function v with the input arguments in.
+// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]).
+// Call panics if v's Kind is not Func.
+// It returns the output results as Values.
+// As in Go, each input argument must be assignable to the
+// type of the function's corresponding input parameter.
+// If v is a variadic function, Call creates the variadic slice parameter
+// itself, copying in the corresponding values.
+func (v Value) Call(in []Value) []Value {
+ v.mustBe(Func)
+ v.mustBeExported()
+ return v.call("Call", in)
+}
+
+// CallSlice calls the variadic function v with the input arguments in,
+// assigning the slice in[len(in)-1] to v's final variadic argument.
+// For example, if len(in) == 3, v.CallSlice(in) represents the Go call v(in[0], in[1], in[2]...).
+// CallSlice panics if v's Kind is not Func or if v is not variadic.
+// It returns the output results as Values.
+// As in Go, each input argument must be assignable to the
+// type of the function's corresponding input parameter.
+func (v Value) CallSlice(in []Value) []Value {
+ v.mustBe(Func)
+ v.mustBeExported()
+ return v.call("CallSlice", in)
+}
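+
+// A minimal sketch contrasting Call and CallSlice on a variadic function
+// (assuming a client program importing "fmt" and "reflect"):
+//
+//	sum := func(xs ...int) int {
+//		total := 0
+//		for _, x := range xs {
+//			total += x
+//		}
+//		return total
+//	}
+//	fv := reflect.ValueOf(sum)
+//	out := fv.Call([]reflect.Value{reflect.ValueOf(1), reflect.ValueOf(2)})
+//	fmt.Println(out[0].Int()) // 3: Call builds the []int itself
+//	out = fv.CallSlice([]reflect.Value{reflect.ValueOf([]int{1, 2, 3})})
+//	fmt.Println(out[0].Int()) // 6: CallSlice passes the slice through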
+
+var callGC bool // for testing; see TestCallMethodJump and TestCallArgLive
+
+const debugReflectCall = false
+
+func (v Value) call(op string, in []Value) []Value {
+ // Get function pointer, type.
+ t := (*funcType)(unsafe.Pointer(v.typ))
+ var (
+ fn unsafe.Pointer
+ rcvr Value
+ rcvrtype *rtype
+ )
+ if v.flag&flagMethod != 0 {
+ rcvr = v
+ rcvrtype, t, fn = methodReceiver(op, v, int(v.flag)>>flagMethodShift)
+ } else if v.flag&flagIndir != 0 {
+ fn = *(*unsafe.Pointer)(v.ptr)
+ } else {
+ fn = v.ptr
+ }
+
+ if fn == nil {
+ panic("reflect.Value.Call: call of nil function")
+ }
+
+ isSlice := op == "CallSlice"
+ n := t.NumIn()
+ isVariadic := t.IsVariadic()
+ if isSlice {
+ if !isVariadic {
+ panic("reflect: CallSlice of non-variadic function")
+ }
+ if len(in) < n {
+ panic("reflect: CallSlice with too few input arguments")
+ }
+ if len(in) > n {
+ panic("reflect: CallSlice with too many input arguments")
+ }
+ } else {
+ if isVariadic {
+ n--
+ }
+ if len(in) < n {
+ panic("reflect: Call with too few input arguments")
+ }
+ if !isVariadic && len(in) > n {
+ panic("reflect: Call with too many input arguments")
+ }
+ }
+ for _, x := range in {
+ if x.Kind() == Invalid {
+ panic("reflect: " + op + " using zero Value argument")
+ }
+ }
+ for i := 0; i < n; i++ {
+ if xt, targ := in[i].Type(), t.In(i); !xt.AssignableTo(targ) {
+ panic("reflect: " + op + " using " + xt.String() + " as type " + targ.String())
+ }
+ }
+ if !isSlice && isVariadic {
+ // prepare slice for remaining values
+ m := len(in) - n
+ slice := MakeSlice(t.In(n), m, m)
+ elem := t.In(n).Elem()
+ for i := 0; i < m; i++ {
+ x := in[n+i]
+ if xt := x.Type(); !xt.AssignableTo(elem) {
+ panic("reflect: cannot use " + xt.String() + " as type " + elem.String() + " in " + op)
+ }
+ slice.Index(i).Set(x)
+ }
+ origIn := in
+ in = make([]Value, n+1)
+ copy(in[:n], origIn)
+ in[n] = slice
+ }
+
+ nin := len(in)
+ if nin != t.NumIn() {
+ panic("reflect.Value.Call: wrong argument count")
+ }
+ nout := t.NumOut()
+
+ // Register argument space.
+ var regArgs abi.RegArgs
+
+ // Compute frame type.
+ frametype, framePool, abid := funcLayout(t, rcvrtype)
+
+ // Allocate a chunk of memory for frame if needed.
+ var stackArgs unsafe.Pointer
+ if frametype.size != 0 {
+ if nout == 0 {
+ stackArgs = framePool.Get().(unsafe.Pointer)
+ } else {
+ // Can't use pool if the function has return values.
+ // We will leak pointer to args in ret, so its lifetime is not scoped.
+ stackArgs = unsafe_New(frametype)
+ }
+ }
+ frameSize := frametype.size
+
+ if debugReflectCall {
+ println("reflect.call", t.String())
+ abid.dump()
+ }
+
+ // Copy inputs into args.
+
+ // Handle receiver.
+ inStart := 0
+ if rcvrtype != nil {
+ // Guaranteed to only be one word in size,
+ // so it will only take up exactly 1 abiStep (either
+ // in a register or on the stack).
+ switch st := abid.call.steps[0]; st.kind {
+ case abiStepStack:
+ storeRcvr(rcvr, stackArgs)
+ case abiStepPointer:
+ storeRcvr(rcvr, unsafe.Pointer(&regArgs.Ptrs[st.ireg]))
+ fallthrough
+ case abiStepIntReg:
+ storeRcvr(rcvr, unsafe.Pointer(&regArgs.Ints[st.ireg]))
+ case abiStepFloatReg:
+ storeRcvr(rcvr, unsafe.Pointer(&regArgs.Floats[st.freg]))
+ default:
+ panic("unknown ABI parameter kind")
+ }
+ inStart = 1
+ }
+
+ // Handle arguments.
+ for i, v := range in {
+ v.mustBeExported()
+ targ := t.In(i).(*rtype)
+ // TODO(mknyszek): Figure out if it's possible to get some
+ // scratch space for this assignment check. Previously, it
+ // was possible to use space in the argument frame.
+ v = v.assignTo("reflect.Value.Call", targ, nil)
+ stepsLoop:
+ for _, st := range abid.call.stepsForValue(i + inStart) {
+ switch st.kind {
+ case abiStepStack:
+ // Copy values to the "stack."
+ addr := add(stackArgs, st.stkOff, "precomputed stack arg offset")
+ if v.flag&flagIndir != 0 {
+ typedmemmove(targ, addr, v.ptr)
+ } else {
+ *(*unsafe.Pointer)(addr) = v.ptr
+ }
+ // There's only one step for a stack-allocated value.
+ break stepsLoop
+ case abiStepIntReg, abiStepPointer:
+ // Copy values to "integer registers."
+ if v.flag&flagIndir != 0 {
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ if st.kind == abiStepPointer {
+ // Duplicate this pointer in the pointer area of the
+ // register space. Otherwise, there's the potential for
+ // this to be the last reference to v.ptr.
+ regArgs.Ptrs[st.ireg] = *(*unsafe.Pointer)(offset)
+ }
+ intToReg(&regArgs, st.ireg, st.size, offset)
+ } else {
+ if st.kind == abiStepPointer {
+ // See the comment in abiStepPointer case above.
+ regArgs.Ptrs[st.ireg] = v.ptr
+ }
+ regArgs.Ints[st.ireg] = uintptr(v.ptr)
+ }
+ case abiStepFloatReg:
+ // Copy values to "float registers."
+ if v.flag&flagIndir == 0 {
+ panic("attempted to copy pointer to FP register")
+ }
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ floatToReg(&regArgs, st.freg, st.size, offset)
+ default:
+ panic("unknown ABI part kind")
+ }
+ }
+ }
+ // TODO(mknyszek): Remove this when we no longer have
+ // caller reserved spill space.
+ frameSize = align(frameSize, goarch.PtrSize)
+ frameSize += abid.spill
+
+ // Mark pointers in registers for the return path.
+ regArgs.ReturnIsPtr = abid.outRegPtrs
+
+ if debugReflectCall {
+ regArgs.Dump()
+ }
+
+ // For testing; see TestCallArgLive.
+ if callGC {
+ runtime.GC()
+ }
+
+ // Call.
+ call(frametype, fn, stackArgs, uint32(frametype.size), uint32(abid.retOffset), uint32(frameSize), &regArgs)
+
+ // For testing; see TestCallMethodJump.
+ if callGC {
+ runtime.GC()
+ }
+
+ var ret []Value
+ if nout == 0 {
+ if stackArgs != nil {
+ typedmemclr(frametype, stackArgs)
+ framePool.Put(stackArgs)
+ }
+ } else {
+ if stackArgs != nil {
+ // Zero the now unused input area of args,
+ // because the Values returned by this function contain pointers to the args object,
+ // and will thus keep the args object alive indefinitely.
+ typedmemclrpartial(frametype, stackArgs, 0, abid.retOffset)
+ }
+
+ // Wrap Values around return values in args.
+ ret = make([]Value, nout)
+ for i := 0; i < nout; i++ {
+ tv := t.Out(i)
+ if tv.Size() == 0 {
+ // For zero-sized return value, args+off may point to the next object.
+ // In this case, return the zero value instead.
+ ret[i] = Zero(tv)
+ continue
+ }
+ steps := abid.ret.stepsForValue(i)
+ if st := steps[0]; st.kind == abiStepStack {
+ // This value is on the stack. If part of a value is stack
+ // allocated, the entire value is according to the ABI. So
+ // just make an indirection into the allocated frame.
+ fl := flagIndir | flag(tv.Kind())
+ ret[i] = Value{tv.common(), add(stackArgs, st.stkOff, "tv.Size() != 0"), fl}
+ // Note: this does introduce false sharing between results -
+ // if any result is live, they are all live.
+ // (And the space for the args is live as well, but as we've
+ // cleared that space it isn't as big a deal.)
+ continue
+ }
+
+ // Handle pointers passed in registers.
+ if !ifaceIndir(tv.common()) {
+ // Pointer-valued data gets put directly
+ // into v.ptr.
+ if steps[0].kind != abiStepPointer {
+ print("kind=", steps[0].kind, ", type=", tv.String(), "\n")
+ panic("mismatch between ABI description and types")
+ }
+ ret[i] = Value{tv.common(), regArgs.Ptrs[steps[0].ireg], flag(tv.Kind())}
+ continue
+ }
+
+ // All that's left is values passed in registers that we need to
+ // create space for and copy values back into.
+ //
+ // TODO(mknyszek): We make a new allocation for each register-allocated
+ // value, but previously we could always point into the heap-allocated
+ // stack frame. This is a regression that could be fixed by adding
+ // additional space to the allocated stack frame and storing the
+ // register-allocated return values into the allocated stack frame and
+ // referring there in the resulting Value.
+ s := unsafe_New(tv.common())
+ for _, st := range steps {
+ switch st.kind {
+ case abiStepIntReg:
+ offset := add(s, st.offset, "precomputed value offset")
+ intFromReg(&regArgs, st.ireg, st.size, offset)
+ case abiStepPointer:
+ s := add(s, st.offset, "precomputed value offset")
+ *((*unsafe.Pointer)(s)) = regArgs.Ptrs[st.ireg]
+ case abiStepFloatReg:
+ offset := add(s, st.offset, "precomputed value offset")
+ floatFromReg(&regArgs, st.freg, st.size, offset)
+ case abiStepStack:
+ panic("register-based return value has stack component")
+ default:
+ panic("unknown ABI part kind")
+ }
+ }
+ ret[i] = Value{tv.common(), s, flagIndir | flag(tv.Kind())}
+ }
+ }
+
+ return ret
+}
+
+// callReflect is the call implementation used by a function
+// returned by MakeFunc. In many ways it is the opposite of the
+// method Value.call above. The method above converts a call using Values
+// into a call of a function with a concrete argument frame, while
+// callReflect converts a call of a function with a concrete argument
+// frame into a call using Values.
+// It is in this file so that it can be next to the call method above.
+// The remainder of the MakeFunc implementation is in makefunc.go.
+//
+// NOTE: This function must be marked as a "wrapper" in the generated code,
+// so that the linker can make it work correctly for panic and recover.
+// The gc compilers know to do that for the name "reflect.callReflect".
+//
+// ctxt is the "closure" generated by MakeFunc.
+// frame is a pointer to the arguments to that closure on the stack.
+// retValid points to a boolean which should be set when the results
+// section of frame is set.
+//
+// regs contains the argument values passed in registers and will contain
+// the values returned from ctxt.fn in registers.
+func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs *abi.RegArgs) {
+ if callGC {
+ // Call GC upon entry during testing.
+ // Getting our stack scanned here is the biggest hazard, because
+ // our caller (makeFuncStub) could have failed to place the last
+ // pointer to a value in regs' pointer space, in which case it
+ // won't be visible to the GC.
+ runtime.GC()
+ }
+ ftyp := ctxt.ftyp
+ f := ctxt.fn
+
+ _, _, abid := funcLayout(ftyp, nil)
+
+ // Copy arguments into Values.
+ ptr := frame
+ in := make([]Value, 0, int(ftyp.inCount))
+ for i, typ := range ftyp.in() {
+ if typ.Size() == 0 {
+ in = append(in, Zero(typ))
+ continue
+ }
+ v := Value{typ, nil, flag(typ.Kind())}
+ steps := abid.call.stepsForValue(i)
+ if st := steps[0]; st.kind == abiStepStack {
+ if ifaceIndir(typ) {
+ // value cannot be inlined in interface data.
+ // Must make a copy, because f might keep a reference to it,
+ // and we cannot let f keep a reference to the stack frame
+ // after this function returns, not even a read-only reference.
+ v.ptr = unsafe_New(typ)
+ if typ.size > 0 {
+ typedmemmove(typ, v.ptr, add(ptr, st.stkOff, "typ.size > 0"))
+ }
+ v.flag |= flagIndir
+ } else {
+ v.ptr = *(*unsafe.Pointer)(add(ptr, st.stkOff, "1-ptr"))
+ }
+ } else {
+ if ifaceIndir(typ) {
+ // All that's left is values passed in registers
+ // that we need to create space for.
+ v.flag |= flagIndir
+ v.ptr = unsafe_New(typ)
+ for _, st := range steps {
+ switch st.kind {
+ case abiStepIntReg:
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ intFromReg(regs, st.ireg, st.size, offset)
+ case abiStepPointer:
+ s := add(v.ptr, st.offset, "precomputed value offset")
+ *((*unsafe.Pointer)(s)) = regs.Ptrs[st.ireg]
+ case abiStepFloatReg:
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ floatFromReg(regs, st.freg, st.size, offset)
+ case abiStepStack:
+ panic("register-based return value has stack component")
+ default:
+ panic("unknown ABI part kind")
+ }
+ }
+ } else {
+ // Pointer-valued data gets put directly
+ // into v.ptr.
+ if steps[0].kind != abiStepPointer {
+ print("kind=", steps[0].kind, ", type=", typ.String(), "\n")
+ panic("mismatch between ABI description and types")
+ }
+ v.ptr = regs.Ptrs[steps[0].ireg]
+ }
+ }
+ in = append(in, v)
+ }
+
+ // Call underlying function.
+ out := f(in)
+ numOut := ftyp.NumOut()
+ if len(out) != numOut {
+ panic("reflect: wrong return count from function created by MakeFunc")
+ }
+
+ // Copy results back into argument frame and register space.
+ if numOut > 0 {
+ for i, typ := range ftyp.out() {
+ v := out[i]
+ if v.typ == nil {
+ panic("reflect: function created by MakeFunc using " + funcName(f) +
+ " returned zero Value")
+ }
+ if v.flag&flagRO != 0 {
+ panic("reflect: function created by MakeFunc using " + funcName(f) +
+ " returned value obtained from unexported field")
+ }
+ if typ.size == 0 {
+ continue
+ }
+
+ // Convert v to type typ if v is assignable to a variable
+ // of type typ in the language spec.
+ // See issue 28761.
+ //
+ // TODO(mknyszek): In the switch to the register ABI we lost
+ // the scratch space here for the register cases (and
+ // temporarily for all the cases).
+ //
+ // If/when this happens, take note of the following:
+ //
+ // We must clear the destination before calling assignTo,
+ // in case assignTo writes (with memory barriers) to the
+ // target location used as scratch space. See issue 39541.
+ v = v.assignTo("reflect.MakeFunc", typ, nil)
+ stepsLoop:
+ for _, st := range abid.ret.stepsForValue(i) {
+ switch st.kind {
+ case abiStepStack:
+ // Copy values to the "stack."
+ addr := add(ptr, st.stkOff, "precomputed stack arg offset")
+ // Do not use write barriers. The stack space used
+ // for this call is not adequately zeroed, and we
+ // are careful to keep the arguments alive until we
+ // return to makeFuncStub's caller.
+ if v.flag&flagIndir != 0 {
+ memmove(addr, v.ptr, st.size)
+ } else {
+ // This case must be a pointer type.
+ *(*uintptr)(addr) = uintptr(v.ptr)
+ }
+ // There's only one step for a stack-allocated value.
+ break stepsLoop
+ case abiStepIntReg, abiStepPointer:
+ // Copy values to "integer registers."
+ if v.flag&flagIndir != 0 {
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ intToReg(regs, st.ireg, st.size, offset)
+ } else {
+ // Only populate the Ints space on the return path.
+ // This is safe because out is kept alive until the
+ // end of this function, and the return path through
+ // makeFuncStub has no preemption, so these pointers
+ // are always visible to the GC.
+ regs.Ints[st.ireg] = uintptr(v.ptr)
+ }
+ case abiStepFloatReg:
+ // Copy values to "float registers."
+ if v.flag&flagIndir == 0 {
+ panic("attempted to copy pointer to FP register")
+ }
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ floatToReg(regs, st.freg, st.size, offset)
+ default:
+ panic("unknown ABI part kind")
+ }
+ }
+ }
+ }
+
+ // Announce that the return values are valid.
+ // After this point the runtime can depend on the return values being valid.
+ *retValid = true
+
+ // We have to make sure that the out slice lives at least until
+ // the runtime knows the return values are valid. Otherwise, the
+ // return values might not be scanned by anyone during a GC.
+ // (out would be dead, and the return slots not yet alive.)
+ runtime.KeepAlive(out)
+
+ // runtime.getArgInfo expects to be able to find ctxt on the
+ // stack when it finds our caller, makeFuncStub. Make sure it
+ // doesn't get garbage collected.
+ runtime.KeepAlive(ctxt)
+}
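+
+// For context, a minimal sketch of the MakeFunc path that ends up in
+// callReflect (assuming a client program importing "fmt" and "reflect"):
+//
+//	swap := func(in []reflect.Value) []reflect.Value {
+//		return []reflect.Value{in[1], in[0]}
+//	}
+//	var f func(int, string) (string, int)
+//	fv := reflect.MakeFunc(reflect.TypeOf(f), swap)
+//	reflect.ValueOf(&f).Elem().Set(fv)
+//	s, n := f(7, "hi")
+//	fmt.Println(s, n) // hi 7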
+
+// methodReceiver returns information about the receiver
+// described by v. The Value v may or may not have the
+// flagMethod bit set, so the kind cached in v.flag should
+// not be used.
+// The return value rcvrtype gives the method's actual receiver type.
+// The return value t gives the method type signature (without the receiver).
+// The return value fn is a pointer to the method code.
+func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *rtype, t *funcType, fn unsafe.Pointer) {
+ i := methodIndex
+ if v.typ.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(len(tt.methods)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := &tt.methods[i]
+ if !tt.nameOff(m.name).isExported() {
+ panic("reflect: " + op + " of unexported method")
+ }
+ iface := (*nonEmptyInterface)(v.ptr)
+ if iface.itab == nil {
+ panic("reflect: " + op + " of method on nil interface value")
+ }
+ rcvrtype = iface.itab.typ
+ fn = unsafe.Pointer(&iface.itab.fun[i])
+ t = (*funcType)(unsafe.Pointer(tt.typeOff(m.typ)))
+ } else {
+ rcvrtype = v.typ
+ ms := v.typ.exportedMethods()
+ if uint(i) >= uint(len(ms)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := ms[i]
+ if !v.typ.nameOff(m.name).isExported() {
+ panic("reflect: " + op + " of unexported method")
+ }
+ ifn := v.typ.textOff(m.ifn)
+ fn = unsafe.Pointer(&ifn)
+ t = (*funcType)(unsafe.Pointer(v.typ.typeOff(m.mtyp)))
+ }
+ return
+}
+
+// v is a method receiver. Store at p the word which is used to
+// encode that receiver at the start of the argument list.
+// Reflect uses the "interface" calling convention for
+// methods, which always uses one word to record the receiver.
+func storeRcvr(v Value, p unsafe.Pointer) {
+ t := v.typ
+ if t.Kind() == Interface {
+ // the interface data word becomes the receiver word
+ iface := (*nonEmptyInterface)(v.ptr)
+ *(*unsafe.Pointer)(p) = iface.word
+ } else if v.flag&flagIndir != 0 && !ifaceIndir(t) {
+ *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr)
+ } else {
+ *(*unsafe.Pointer)(p) = v.ptr
+ }
+}
+
+// align returns the result of rounding x up to a multiple of n.
+// n must be a power of two.
+func align(x, n uintptr) uintptr {
+ return (x + n - 1) &^ (n - 1)
+}
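+
+// For example, align(9, 8) == 16, align(16, 8) == 16, and align(0, 8) == 0.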
+
+// callMethod is the call implementation used by a function returned
+// by makeMethodValue (used by v.Method(i).Interface()).
+// It is a streamlined version of the usual reflect call: the caller has
+// already laid out the argument frame for us, so we don't have
+// to deal with individual Values for each argument.
+// It is in this file so that it can be next to the two similar functions above.
+// The remainder of the makeMethodValue implementation is in makefunc.go.
+//
+// NOTE: This function must be marked as a "wrapper" in the generated code,
+// so that the linker can make it work correctly for panic and recover.
+// The gc compilers know to do that for the name "reflect.callMethod".
+//
+// ctxt is the "closure" generated by makeMethodValue.
+// frame is a pointer to the arguments to that closure on the stack.
+// retValid points to a boolean which should be set when the results
+// section of frame is set.
+//
+// regs contains the argument values passed in registers and will contain
+// the values returned from ctxt.fn in registers.
+func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool, regs *abi.RegArgs) {
+ rcvr := ctxt.rcvr
+ rcvrType, valueFuncType, methodFn := methodReceiver("call", rcvr, ctxt.method)
+
+ // There are two ABIs at play here.
+ //
+ // methodValueCall was invoked with the ABI assuming there was no
+ // receiver ("value ABI") and that's what frame and regs are holding.
+ //
+ // Meanwhile, we need to actually call the method with a receiver, which
+ // has its own ABI ("method ABI"). Everything that follows is a translation
+ // between the two.
+ _, _, valueABI := funcLayout(valueFuncType, nil)
+ valueFrame, valueRegs := frame, regs
+ methodFrameType, methodFramePool, methodABI := funcLayout(valueFuncType, rcvrType)
+
+ // Make a new frame that is one word bigger so we can store the receiver.
+ // This space is used for both arguments and return values.
+ methodFrame := methodFramePool.Get().(unsafe.Pointer)
+ var methodRegs abi.RegArgs
+
+ // Deal with the receiver. It's guaranteed to only be one word in size.
+ switch st := methodABI.call.steps[0]; st.kind {
+ case abiStepStack:
+ // Only copy the receiver to the stack if the ABI says so.
+ // Otherwise, it'll be in a register already.
+ storeRcvr(rcvr, methodFrame)
+ case abiStepPointer:
+ // Put the receiver in a register.
+ storeRcvr(rcvr, unsafe.Pointer(&methodRegs.Ptrs[st.ireg]))
+ fallthrough
+ case abiStepIntReg:
+ storeRcvr(rcvr, unsafe.Pointer(&methodRegs.Ints[st.ireg]))
+ case abiStepFloatReg:
+ storeRcvr(rcvr, unsafe.Pointer(&methodRegs.Floats[st.freg]))
+ default:
+ panic("unknown ABI parameter kind")
+ }
+
+ // Translate the rest of the arguments.
+ for i, t := range valueFuncType.in() {
+ valueSteps := valueABI.call.stepsForValue(i)
+ methodSteps := methodABI.call.stepsForValue(i + 1)
+
+ // Zero-sized types are trivial: nothing to do.
+ if len(valueSteps) == 0 {
+ if len(methodSteps) != 0 {
+ panic("method ABI and value ABI do not align")
+ }
+ continue
+ }
+
+ // There are four cases to handle in translating each
+ // argument:
+ // 1. Stack -> stack translation.
+ // 2. Stack -> registers translation.
+ // 3. Registers -> stack translation.
+ // 4. Registers -> registers translation.
+
+ // If the value ABI passes the value on the stack,
+ // then the method ABI does too, because it has strictly
+ // fewer arguments. Simply copy between the two.
+ if vStep := valueSteps[0]; vStep.kind == abiStepStack {
+ mStep := methodSteps[0]
+ // Handle stack -> stack translation.
+ if mStep.kind == abiStepStack {
+ if vStep.size != mStep.size {
+ panic("method ABI and value ABI do not align")
+ }
+ typedmemmove(t,
+ add(methodFrame, mStep.stkOff, "precomputed stack offset"),
+ add(valueFrame, vStep.stkOff, "precomputed stack offset"))
+ continue
+ }
+ // Handle stack -> register translation.
+ for _, mStep := range methodSteps {
+ from := add(valueFrame, vStep.stkOff+mStep.offset, "precomputed stack offset")
+ switch mStep.kind {
+ case abiStepPointer:
+ // Do the pointer copy directly so we get a write barrier.
+ methodRegs.Ptrs[mStep.ireg] = *(*unsafe.Pointer)(from)
+ fallthrough // We need to make sure this ends up in Ints, too.
+ case abiStepIntReg:
+ intToReg(&methodRegs, mStep.ireg, mStep.size, from)
+ case abiStepFloatReg:
+ floatToReg(&methodRegs, mStep.freg, mStep.size, from)
+ default:
+ panic("unexpected method step")
+ }
+ }
+ continue
+ }
+ // Handle register -> stack translation.
+ if mStep := methodSteps[0]; mStep.kind == abiStepStack {
+ for _, vStep := range valueSteps {
+ to := add(methodFrame, mStep.stkOff+vStep.offset, "precomputed stack offset")
+ switch vStep.kind {
+ case abiStepPointer:
+ // Do the pointer copy directly so we get a write barrier.
+ *(*unsafe.Pointer)(to) = valueRegs.Ptrs[vStep.ireg]
+ case abiStepIntReg:
+ intFromReg(valueRegs, vStep.ireg, vStep.size, to)
+ case abiStepFloatReg:
+ floatFromReg(valueRegs, vStep.freg, vStep.size, to)
+ default:
+ panic("unexpected value step")
+ }
+ }
+ continue
+ }
+ // Handle register -> register translation.
+ if len(valueSteps) != len(methodSteps) {
+ // Because it's the same type for the value, and it's assigned
+ // to registers both times, it should always take up the same
+ // number of registers for each ABI.
+ panic("method ABI and value ABI don't align")
+ }
+ for i, vStep := range valueSteps {
+ mStep := methodSteps[i]
+ if mStep.kind != vStep.kind {
+ panic("method ABI and value ABI don't align")
+ }
+ switch vStep.kind {
+ case abiStepPointer:
+ // Copy this too, so we get a write barrier.
+ methodRegs.Ptrs[mStep.ireg] = valueRegs.Ptrs[vStep.ireg]
+ fallthrough
+ case abiStepIntReg:
+ methodRegs.Ints[mStep.ireg] = valueRegs.Ints[vStep.ireg]
+ case abiStepFloatReg:
+ methodRegs.Floats[mStep.freg] = valueRegs.Floats[vStep.freg]
+ default:
+ panic("unexpected value step")
+ }
+ }
+ }
+
+ methodFrameSize := methodFrameType.size
+ // TODO(mknyszek): Remove this when we no longer have
+ // caller reserved spill space.
+ methodFrameSize = align(methodFrameSize, goarch.PtrSize)
+ methodFrameSize += methodABI.spill
+
+ // Mark pointers in registers for the return path.
+ methodRegs.ReturnIsPtr = methodABI.outRegPtrs
+
+ // Call.
+ // Call copies the arguments from scratch to the stack, calls fn,
+ // and then copies the results back into scratch.
+ call(methodFrameType, methodFn, methodFrame, uint32(methodFrameType.size), uint32(methodABI.retOffset), uint32(methodFrameSize), &methodRegs)
+
+ // Copy return values.
+ //
+ // This is somewhat simpler because both ABIs have an identical
+ // return value ABI (the types are identical). As a result, register
+ // results can simply be copied over. Stack-allocated values are laid
+ // out the same, but are at different offsets from the start of the frame
+ // because the arguments may be laid out differently.
+ // Ignore any changes to args.
+ // Avoid constructing out-of-bounds pointers if there are no return values.
+ if valueRegs != nil {
+ *valueRegs = methodRegs
+ }
+ if retSize := methodFrameType.size - methodABI.retOffset; retSize > 0 {
+ valueRet := add(valueFrame, valueABI.retOffset, "valueFrame's size > retOffset")
+ methodRet := add(methodFrame, methodABI.retOffset, "methodFrame's size > retOffset")
+ // This copies to the stack. Write barriers are not needed.
+ memmove(valueRet, methodRet, retSize)
+ }
+
+ // Tell the runtime it can now depend on the return values
+ // being properly initialized.
+ *retValid = true
+
+ // Clear the scratch space and put it back in the pool.
+ // This must happen after the statement above, so that the return
+ // values will always be scanned by someone.
+ typedmemclr(methodFrameType, methodFrame)
+ methodFramePool.Put(methodFrame)
+
+ // See the comment in callReflect.
+ runtime.KeepAlive(ctxt)
+
+ // Keep valueRegs alive because it may hold live pointer results.
+ // The caller (methodValueCall) has it as a stack object, which is only
+ // scanned when there is a reference to it.
+ runtime.KeepAlive(valueRegs)
+}
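+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// callMethod is what ultimately runs when a method value obtained through
+// reflection is invoked as an ordinary function. For example, assuming a
+// hypothetical value x with an exported method M() string:
+//
+//	f := reflect.ValueOf(x).MethodByName("M").Interface().(func() string)
+//	_ = f() // the call is routed through callMethod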
+
+// funcName returns the name of f, for use in error messages.
+func funcName(f func([]Value) []Value) string {
+ pc := *(*uintptr)(unsafe.Pointer(&f))
+ rf := runtime.FuncForPC(pc)
+ if rf != nil {
+ return rf.Name()
+ }
+ return "closure"
+}
+
+// Cap returns v's capacity.
+// It panics if v's Kind is not Array, Chan, Slice or pointer to Array.
+func (v Value) Cap() int {
+ // capNonSlice is split out to keep Cap inlineable for slice kinds.
+ if v.kind() == Slice {
+ return (*unsafeheader.Slice)(v.ptr).Cap
+ }
+ return v.capNonSlice()
+}
+
+func (v Value) capNonSlice() int {
+ k := v.kind()
+ switch k {
+ case Array:
+ return v.typ.Len()
+ case Chan:
+ return chancap(v.pointer())
+ case Ptr:
+ if v.typ.Elem().Kind() == Array {
+ return v.typ.Elem().Len()
+ }
+ panic("reflect: call of reflect.Value.Cap on ptr to non-array Value")
+ }
+ panic(&ValueError{"reflect.Value.Cap", v.kind()})
+}
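+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// Cap reports capacity for the supported kinds. For example, assuming a
+// local slice s and array a:
+//
+//	s := make([]int, 2, 8)
+//	a := [4]int{}
+//	_ = reflect.ValueOf(s).Cap()  // 8
+//	_ = reflect.ValueOf(&a).Cap() // 4 (pointer to array)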
+
+// Close closes the channel v.
+// It panics if v's Kind is not Chan.
+func (v Value) Close() {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ chanclose(v.pointer())
+}
+
+// CanComplex reports whether Complex can be used without panicking.
+func (v Value) CanComplex() bool {
+ switch v.kind() {
+ case Complex64, Complex128:
+ return true
+ default:
+ return false
+ }
+}
+
+// Complex returns v's underlying value, as a complex128.
+// It panics if v's Kind is not Complex64 or Complex128.
+func (v Value) Complex() complex128 {
+ k := v.kind()
+ switch k {
+ case Complex64:
+ return complex128(*(*complex64)(v.ptr))
+ case Complex128:
+ return *(*complex128)(v.ptr)
+ }
+ panic(&ValueError{"reflect.Value.Complex", v.kind()})
+}
+
+// Elem returns the value that the interface v contains
+// or that the pointer v points to.
+// It panics if v's Kind is not Interface or Pointer.
+// It returns the zero Value if v is nil.
+func (v Value) Elem() Value {
+ k := v.kind()
+ switch k {
+ case Interface:
+ var eface any
+ if v.typ.NumMethod() == 0 {
+ eface = *(*any)(v.ptr)
+ } else {
+ eface = (any)(*(*interface {
+ M()
+ })(v.ptr))
+ }
+ x := unpackEface(eface)
+ if x.flag != 0 {
+ x.flag |= v.flag.ro()
+ }
+ return x
+ case Pointer:
+ ptr := v.ptr
+ if v.flag&flagIndir != 0 {
+ if ifaceIndir(v.typ) {
+ // This is a pointer to a not-in-heap object. ptr points to a uintptr
+ // in the heap. That uintptr is the address of a not-in-heap object.
+ // In general, pointers to not-in-heap objects can be total junk.
+ // But Elem() is asking to dereference it, so the user has asserted
+ // that at least it is a valid pointer (not just an integer stored in
+ // a pointer slot). So let's check, to make sure that it isn't a pointer
+ // that the runtime will crash on if it sees it during GC or write barriers.
+ // Since it is a not-in-heap pointer, all pointers to the heap are
+ // forbidden! That makes the test pretty easy.
+ // See issue 48399.
+ if !verifyNotInHeapPtr(*(*uintptr)(ptr)) {
+ panic("reflect: reflect.Value.Elem on an invalid notinheap pointer")
+ }
+ }
+ ptr = *(*unsafe.Pointer)(ptr)
+ }
+ // The returned value's address is v's value.
+ if ptr == nil {
+ return Value{}
+ }
+ tt := (*ptrType)(unsafe.Pointer(v.typ))
+ typ := tt.elem
+ fl := v.flag&flagRO | flagIndir | flagAddr
+ fl |= flag(typ.Kind())
+ return Value{typ, ptr, fl}
+ }
+ panic(&ValueError{"reflect.Value.Elem", v.kind()})
+}
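+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// Elem is how settability is reached through a pointer. Assuming a local
+// variable x:
+//
+//	x := 7
+//	v := reflect.ValueOf(&x).Elem() // addressable int Value
+//	v.SetInt(42)                    // x is now 42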
+
+// Field returns the i'th field of the struct v.
+// It panics if v's Kind is not Struct or i is out of range.
+func (v Value) Field(i int) Value {
+ if v.kind() != Struct {
+ panic(&ValueError{"reflect.Value.Field", v.kind()})
+ }
+ tt := (*structType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(len(tt.fields)) {
+ panic("reflect: Field index out of range")
+ }
+ field := &tt.fields[i]
+ typ := field.typ
+
+ // Inherit permission bits from v, but clear flagEmbedRO.
+ fl := v.flag&(flagStickyRO|flagIndir|flagAddr) | flag(typ.Kind())
+ // Using an unexported field forces flagRO.
+ if !field.name.isExported() {
+ if field.embedded() {
+ fl |= flagEmbedRO
+ } else {
+ fl |= flagStickyRO
+ }
+ }
+ // Either flagIndir is set and v.ptr points at struct,
+ // or flagIndir is not set and v.ptr is the actual struct data.
+ // In the former case, we want v.ptr + offset.
+ // In the latter case, we must have field.offset = 0,
+ // so v.ptr + field.offset is still the correct address.
+ ptr := add(v.ptr, field.offset, "same as non-reflect &v.field")
+ return Value{typ, ptr, fl}
+}
+
+// FieldByIndex returns the nested field corresponding to index.
+// It panics if evaluation requires stepping through a nil
+// pointer or a field that is not a struct.
+func (v Value) FieldByIndex(index []int) Value {
+ if len(index) == 1 {
+ return v.Field(index[0])
+ }
+ v.mustBe(Struct)
+ for i, x := range index {
+ if i > 0 {
+ if v.Kind() == Pointer && v.typ.Elem().Kind() == Struct {
+ if v.IsNil() {
+ panic("reflect: indirection through nil pointer to embedded struct")
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v
+}
+
+// FieldByIndexErr returns the nested field corresponding to index.
+// It returns an error if evaluation requires stepping through a nil
+// pointer, but panics if it must step through a field that
+// is not a struct.
+func (v Value) FieldByIndexErr(index []int) (Value, error) {
+ if len(index) == 1 {
+ return v.Field(index[0]), nil
+ }
+ v.mustBe(Struct)
+ for i, x := range index {
+ if i > 0 {
+ if v.Kind() == Ptr && v.typ.Elem().Kind() == Struct {
+ if v.IsNil() {
+ return Value{}, errors.New("reflect: indirection through nil pointer to embedded struct field " + v.typ.Elem().Name())
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v, nil
+}
+
+// FieldByName returns the struct field with the given name.
+// It returns the zero Value if no field was found.
+// It panics if v's Kind is not struct.
+func (v Value) FieldByName(name string) Value {
+ v.mustBe(Struct)
+ if f, ok := v.typ.FieldByName(name); ok {
+ return v.FieldByIndex(f.Index)
+ }
+ return Value{}
+}
+
+// FieldByNameFunc returns the struct field with a name
+// that satisfies the match function.
+// It panics if v's Kind is not struct.
+// It returns the zero Value if no field was found.
+func (v Value) FieldByNameFunc(match func(string) bool) Value {
+ if f, ok := v.typ.FieldByNameFunc(match); ok {
+ return v.FieldByIndex(f.Index)
+ }
+ return Value{}
+}
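+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// Field access by index and by name, assuming a hypothetical struct type T:
+//
+//	type T struct{ A int; B string }
+//	v := reflect.ValueOf(T{A: 1, B: "x"})
+//	_ = v.Field(0).Int()            // 1
+//	_ = v.FieldByName("B").String() // "x"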
+
+// CanFloat reports whether Float can be used without panicking.
+func (v Value) CanFloat() bool {
+ switch v.kind() {
+ case Float32, Float64:
+ return true
+ default:
+ return false
+ }
+}
+
+// Float returns v's underlying value, as a float64.
+// It panics if v's Kind is not Float32 or Float64.
+func (v Value) Float() float64 {
+ k := v.kind()
+ switch k {
+ case Float32:
+ return float64(*(*float32)(v.ptr))
+ case Float64:
+ return *(*float64)(v.ptr)
+ }
+ panic(&ValueError{"reflect.Value.Float", v.kind()})
+}
+
+var uint8Type = rtypeOf(uint8(0))
+
+// Index returns v's i'th element.
+// It panics if v's Kind is not Array, Slice, or String, or if i is out of range.
+func (v Value) Index(i int) Value {
+ switch v.kind() {
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(tt.len) {
+ panic("reflect: array index out of range")
+ }
+ typ := tt.elem
+ offset := uintptr(i) * typ.size
+
+ // Either flagIndir is set and v.ptr points at array,
+ // or flagIndir is not set and v.ptr is the actual array data.
+ // In the former case, we want v.ptr + offset.
+ // In the latter case, we must be doing Index(0), so offset = 0,
+ // so v.ptr + offset is still the correct address.
+ val := add(v.ptr, offset, "same as &v[i], i < tt.len")
+ fl := v.flag&(flagIndir|flagAddr) | v.flag.ro() | flag(typ.Kind()) // bits same as overall array
+ return Value{typ, val, fl}
+
+ case Slice:
+ // Element flag same as Elem of Pointer.
+ // Addressable, indirect, possibly read-only.
+ s := (*unsafeheader.Slice)(v.ptr)
+ if uint(i) >= uint(s.Len) {
+ panic("reflect: slice index out of range")
+ }
+ tt := (*sliceType)(unsafe.Pointer(v.typ))
+ typ := tt.elem
+ val := arrayAt(s.Data, i, typ.size, "i < s.Len")
+ fl := flagAddr | flagIndir | v.flag.ro() | flag(typ.Kind())
+ return Value{typ, val, fl}
+
+ case String:
+ s := (*unsafeheader.String)(v.ptr)
+ if uint(i) >= uint(s.Len) {
+ panic("reflect: string index out of range")
+ }
+ p := arrayAt(s.Data, i, 1, "i < s.Len")
+ fl := v.flag.ro() | flag(Uint8) | flagIndir
+ return Value{uint8Type, p, fl}
+ }
+ panic(&ValueError{"reflect.Value.Index", v.kind()})
+}
+
+// CanInt reports whether Int can be used without panicking.
+func (v Value) CanInt() bool {
+ switch v.kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ return true
+ default:
+ return false
+ }
+}
+
+// Int returns v's underlying value, as an int64.
+// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
+func (v Value) Int() int64 {
+ k := v.kind()
+ p := v.ptr
+ switch k {
+ case Int:
+ return int64(*(*int)(p))
+ case Int8:
+ return int64(*(*int8)(p))
+ case Int16:
+ return int64(*(*int16)(p))
+ case Int32:
+ return int64(*(*int32)(p))
+ case Int64:
+ return *(*int64)(p)
+ }
+ panic(&ValueError{"reflect.Value.Int", v.kind()})
+}
+
+// CanInterface reports whether Interface can be used without panicking.
+func (v Value) CanInterface() bool {
+ if v.flag == 0 {
+ panic(&ValueError{"reflect.Value.CanInterface", Invalid})
+ }
+ return v.flag&flagRO == 0
+}
+
+// Interface returns v's current value as an interface{}.
+// It is equivalent to:
+//
+// var i interface{} = (v's underlying value)
+//
+// It panics if the Value was obtained by accessing
+// unexported struct fields.
+func (v Value) Interface() (i any) {
+ return valueInterface(v, true)
+}
+
+func valueInterface(v Value, safe bool) any {
+ if v.flag == 0 {
+ panic(&ValueError{"reflect.Value.Interface", Invalid})
+ }
+ if safe && v.flag&flagRO != 0 {
+ // Do not allow access to unexported values via Interface,
+ // because they might be pointers that should not be
+ // writable or methods or function that should not be callable.
+ panic("reflect.Value.Interface: cannot return value obtained from unexported field or method")
+ }
+ if v.flag&flagMethod != 0 {
+ v = makeMethodValue("Interface", v)
+ }
+
+ if v.kind() == Interface {
+ // Special case: return the element inside the interface.
+ // Empty interface has one layout, all interfaces with
+ // methods have a second layout.
+ if v.NumMethod() == 0 {
+ return *(*any)(v.ptr)
+ }
+ return *(*interface {
+ M()
+ })(v.ptr)
+ }
+
+ // TODO: pass safe to packEface so we don't need to copy if safe==true?
+ return packEface(v)
+}
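+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// Interface reverses ValueOf, modulo the usual type assertion:
+//
+//	v := reflect.ValueOf(3.14)
+//	f := v.Interface().(float64) // f == 3.14
+//	_ = f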
+
+// InterfaceData returns a pair of unspecified uintptr values.
+// It panics if v's Kind is not Interface.
+//
+// In earlier versions of Go, this function returned the interface's
+// value as a uintptr pair. As of Go 1.4, the implementation of
+// interface values precludes any defined use of InterfaceData.
+//
+// Deprecated: The memory representation of interface values is not
+// compatible with InterfaceData.
+func (v Value) InterfaceData() [2]uintptr {
+ v.mustBe(Interface)
+ // We treat this as a read operation, so we allow
+ // it even for unexported data, because the caller
+ // has to import "unsafe" to turn it into something
+ // that can be abused.
+ // Interface value is always bigger than a word; assume flagIndir.
+ return *(*[2]uintptr)(v.ptr)
+}
+
+// IsNil reports whether its argument v is nil. The argument must be
+// a chan, func, interface, map, pointer, or slice value; if it is
+// not, IsNil panics. Note that IsNil is not always equivalent to a
+// regular comparison with nil in Go. For example, if v was created
+// by calling ValueOf with an uninitialized interface variable i,
+// i==nil will be true but v.IsNil will panic as v will be the zero
+// Value.
+func (v Value) IsNil() bool {
+ k := v.kind()
+ switch k {
+ case Chan, Func, Map, Pointer, UnsafePointer:
+ if v.flag&flagMethod != 0 {
+ return false
+ }
+ ptr := v.ptr
+ if v.flag&flagIndir != 0 {
+ ptr = *(*unsafe.Pointer)(ptr)
+ }
+ return ptr == nil
+ case Interface, Slice:
+ // Both interface and slice are nil if first word is 0.
+ // Both are always bigger than a word; assume flagIndir.
+ return *(*unsafe.Pointer)(v.ptr) == nil
+ }
+ panic(&ValueError{"reflect.Value.IsNil", v.kind()})
+}
+
+// IsValid reports whether v represents a value.
+// It returns false if v is the zero Value.
+// If IsValid returns false, all other methods except String panic.
+// Most functions and methods never return an invalid Value.
+// If one does, its documentation states the conditions explicitly.
+func (v Value) IsValid() bool {
+ return v.flag != 0
+}
+
+// IsZero reports whether v is the zero value for its type.
+// It panics if the argument is invalid.
+func (v Value) IsZero() bool {
+ switch v.kind() {
+ case Bool:
+ return !v.Bool()
+ case Int, Int8, Int16, Int32, Int64:
+ return v.Int() == 0
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return v.Uint() == 0
+ case Float32, Float64:
+ return math.Float64bits(v.Float()) == 0
+ case Complex64, Complex128:
+ c := v.Complex()
+ return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
+ case Array:
+ // If the type is comparable, then compare directly with zero.
+ if v.typ.equal != nil && v.typ.size <= maxZero {
+ if v.flag&flagIndir == 0 {
+ return v.ptr == nil
+ }
+ return v.typ.equal(v.ptr, unsafe.Pointer(&zeroVal[0]))
+ }
+
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ if !v.Index(i).IsZero() {
+ return false
+ }
+ }
+ return true
+ case Chan, Func, Interface, Map, Pointer, Slice, UnsafePointer:
+ return v.IsNil()
+ case String:
+ return v.Len() == 0
+ case Struct:
+ // If the type is comparable, then compare directly with zero.
+ if v.typ.equal != nil && v.typ.size <= maxZero {
+ if v.flag&flagIndir == 0 {
+ return v.ptr == nil
+ }
+ return v.typ.equal(v.ptr, unsafe.Pointer(&zeroVal[0]))
+ }
+
+ n := v.NumField()
+ for i := 0; i < n; i++ {
+ if !v.Field(i).IsZero() {
+ return false
+ }
+ }
+ return true
+ default:
+ // This should never happen, but will act as a safeguard for later,
+ // as a default value doesn't makes sense here.
+ panic(&ValueError{"reflect.Value.IsZero", v.Kind()})
+ }
+}
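+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// IsZero follows the language's notion of the zero value, assuming a
+// hypothetical struct type P:
+//
+//	type P struct{ X, Y int }
+//	_ = reflect.ValueOf(P{}).IsZero()        // true
+//	_ = reflect.ValueOf(P{X: 1}).IsZero()    // false
+//	_ = reflect.ValueOf([]int(nil)).IsZero() // true (nil slice)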
+
+// SetZero sets v to be the zero value of v's type.
+// It panics if CanSet returns false.
+func (v Value) SetZero() {
+ v.mustBeAssignable()
+ switch v.kind() {
+ case Bool:
+ *(*bool)(v.ptr) = false
+ case Int:
+ *(*int)(v.ptr) = 0
+ case Int8:
+ *(*int8)(v.ptr) = 0
+ case Int16:
+ *(*int16)(v.ptr) = 0
+ case Int32:
+ *(*int32)(v.ptr) = 0
+ case Int64:
+ *(*int64)(v.ptr) = 0
+ case Uint:
+ *(*uint)(v.ptr) = 0
+ case Uint8:
+ *(*uint8)(v.ptr) = 0
+ case Uint16:
+ *(*uint16)(v.ptr) = 0
+ case Uint32:
+ *(*uint32)(v.ptr) = 0
+ case Uint64:
+ *(*uint64)(v.ptr) = 0
+ case Uintptr:
+ *(*uintptr)(v.ptr) = 0
+ case Float32:
+ *(*float32)(v.ptr) = 0
+ case Float64:
+ *(*float64)(v.ptr) = 0
+ case Complex64:
+ *(*complex64)(v.ptr) = 0
+ case Complex128:
+ *(*complex128)(v.ptr) = 0
+ case String:
+ *(*string)(v.ptr) = ""
+ case Slice:
+ *(*unsafeheader.Slice)(v.ptr) = unsafeheader.Slice{}
+ case Interface:
+ *(*[2]unsafe.Pointer)(v.ptr) = [2]unsafe.Pointer{}
+ case Chan, Func, Map, Pointer, UnsafePointer:
+ *(*unsafe.Pointer)(v.ptr) = nil
+ case Array, Struct:
+ typedmemclr(v.typ, v.ptr)
+ default:
+ // This should never happen, but will act as a safeguard for later,
+ // as a default value doesn't makes sense here.
+ panic(&ValueError{"reflect.Value.SetZero", v.Kind()})
+ }
+}
+
+// Kind returns v's Kind.
+// If v is the zero Value (IsValid returns false), Kind returns Invalid.
+func (v Value) Kind() Kind {
+ return v.kind()
+}
+
+// Len returns v's length.
+// It panics if v's Kind is not Array, Chan, Map, Slice, String, or pointer to Array.
+func (v Value) Len() int {
+ // lenNonSlice is split out to keep Len inlineable for slice kinds.
+ if v.kind() == Slice {
+ return (*unsafeheader.Slice)(v.ptr).Len
+ }
+ return v.lenNonSlice()
+}
+
+func (v Value) lenNonSlice() int {
+ switch k := v.kind(); k {
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ return int(tt.len)
+ case Chan:
+ return chanlen(v.pointer())
+ case Map:
+ return maplen(v.pointer())
+ case String:
+ // String is bigger than a word; assume flagIndir.
+ return (*unsafeheader.String)(v.ptr).Len
+ case Ptr:
+ if v.typ.Elem().Kind() == Array {
+ return v.typ.Elem().Len()
+ }
+ panic("reflect: call of reflect.Value.Len on ptr to non-array Value")
+ }
+ panic(&ValueError{"reflect.Value.Len", v.kind()})
+}
+
+var stringType = rtypeOf("")
+
+// MapIndex returns the value associated with key in the map v.
+// It panics if v's Kind is not Map.
+// It returns the zero Value if key is not found in the map or if v represents a nil map.
+// As in Go, the key's value must be assignable to the map's key type.
+func (v Value) MapIndex(key Value) Value {
+ v.mustBe(Map)
+ tt := (*mapType)(unsafe.Pointer(v.typ))
+
+ // Do not require key to be exported, so that DeepEqual
+ // and other programs can use all the keys returned by
+ // MapKeys as arguments to MapIndex. If either the map
+ // or the key is unexported, though, the result will be
+ // considered unexported. This is consistent with the
+ // behavior for structs, which allow read but not write
+ // of unexported fields.
+
+ var e unsafe.Pointer
+ if (tt.key == stringType || key.kind() == String) && tt.key == key.typ && tt.elem.size <= maxValSize {
+ k := *(*string)(key.ptr)
+ e = mapaccess_faststr(v.typ, v.pointer(), k)
+ } else {
+ key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
+ var k unsafe.Pointer
+ if key.flag&flagIndir != 0 {
+ k = key.ptr
+ } else {
+ k = unsafe.Pointer(&key.ptr)
+ }
+ e = mapaccess(v.typ, v.pointer(), k)
+ }
+ if e == nil {
+ return Value{}
+ }
+ typ := tt.elem
+ fl := (v.flag | key.flag).ro()
+ fl |= flag(typ.Kind())
+ return copyVal(typ, fl, e)
+}
+
+// MapKeys returns a slice containing all the keys present in the map,
+// in unspecified order.
+// It panics if v's Kind is not Map.
+// It returns an empty slice if v represents a nil map.
+func (v Value) MapKeys() []Value {
+ v.mustBe(Map)
+ tt := (*mapType)(unsafe.Pointer(v.typ))
+ keyType := tt.key
+
+ fl := v.flag.ro() | flag(keyType.Kind())
+
+ m := v.pointer()
+ mlen := int(0)
+ if m != nil {
+ mlen = maplen(m)
+ }
+ var it hiter
+ mapiterinit(v.typ, m, &it)
+ a := make([]Value, mlen)
+ var i int
+ for i = 0; i < len(a); i++ {
+ key := mapiterkey(&it)
+ if key == nil {
+ // Someone deleted an entry from the map since we
+ // called maplen above. It's a data race, but nothing
+ // we can do about it.
+ break
+ }
+ a[i] = copyVal(keyType, fl, key)
+ mapiternext(&it)
+ }
+ return a[:i]
+}
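+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// MapIndex and MapKeys together, assuming a local map m:
+//
+//	m := map[string]int{"a": 1, "b": 2}
+//	mv := reflect.ValueOf(m)
+//	_ = mv.MapIndex(reflect.ValueOf("a")).Int() // 1
+//	for _, k := range mv.MapKeys() {
+//		_ = mv.MapIndex(k)
+//	}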
+
+// hiter's structure matches runtime.hiter's structure.
+// Having a clone here allows us to embed a map iterator
+// inside type MapIter so that MapIters can be re-used
+// without doing any allocations.
+type hiter struct {
+ key unsafe.Pointer
+ elem unsafe.Pointer
+ t unsafe.Pointer
+ h unsafe.Pointer
+ buckets unsafe.Pointer
+ bptr unsafe.Pointer
+ overflow *[]unsafe.Pointer
+ oldoverflow *[]unsafe.Pointer
+ startBucket uintptr
+ offset uint8
+ wrapped bool
+ B uint8
+ i uint8
+ bucket uintptr
+ checkBucket uintptr
+}
+
+func (h *hiter) initialized() bool {
+ return h.t != nil
+}
+
+// A MapIter is an iterator for ranging over a map.
+// See Value.MapRange.
+type MapIter struct {
+ m Value
+ hiter hiter
+}
+
+// Key returns the key of iter's current map entry.
+func (iter *MapIter) Key() Value {
+ if !iter.hiter.initialized() {
+ panic("MapIter.Key called before Next")
+ }
+ iterkey := mapiterkey(&iter.hiter)
+ if iterkey == nil {
+ panic("MapIter.Key called on exhausted iterator")
+ }
+
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
+ ktype := t.key
+ return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
+}
+
+// SetIterKey assigns to v the key of iter's current map entry.
+// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value.
+// As in Go, the key must be assignable to v's type and
+// must not be derived from an unexported field.
+func (v Value) SetIterKey(iter *MapIter) {
+ if !iter.hiter.initialized() {
+ panic("reflect: Value.SetIterKey called before Next")
+ }
+ iterkey := mapiterkey(&iter.hiter)
+ if iterkey == nil {
+ panic("reflect: Value.SetIterKey called on exhausted iterator")
+ }
+
+ v.mustBeAssignable()
+ var target unsafe.Pointer
+ if v.kind() == Interface {
+ target = v.ptr
+ }
+
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
+ ktype := t.key
+
+ iter.m.mustBeExported() // do not let unexported m leak
+ key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir}
+ key = key.assignTo("reflect.MapIter.SetKey", v.typ, target)
+ typedmemmove(v.typ, v.ptr, key.ptr)
+}
+
+// Value returns the value of iter's current map entry.
+func (iter *MapIter) Value() Value {
+ if !iter.hiter.initialized() {
+ panic("MapIter.Value called before Next")
+ }
+ iterelem := mapiterelem(&iter.hiter)
+ if iterelem == nil {
+ panic("MapIter.Value called on exhausted iterator")
+ }
+
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
+ vtype := t.elem
+ return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
+}
+
+// SetIterValue assigns to v the value of iter's current map entry.
+// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value.
+// As in Go, the value must be assignable to v's type and
+// must not be derived from an unexported field.
+func (v Value) SetIterValue(iter *MapIter) {
+ if !iter.hiter.initialized() {
+ panic("reflect: Value.SetIterValue called before Next")
+ }
+ iterelem := mapiterelem(&iter.hiter)
+ if iterelem == nil {
+ panic("reflect: Value.SetIterValue called on exhausted iterator")
+ }
+
+ v.mustBeAssignable()
+ var target unsafe.Pointer
+ if v.kind() == Interface {
+ target = v.ptr
+ }
+
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
+ vtype := t.elem
+
+ iter.m.mustBeExported() // do not let unexported m leak
+ elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir}
+ elem = elem.assignTo("reflect.MapIter.SetValue", v.typ, target)
+ typedmemmove(v.typ, v.ptr, elem.ptr)
+}
+
+// Next advances the map iterator and reports whether there is another
+// entry. It returns false when iter is exhausted; subsequent
+// calls to Key, Value, or Next will panic.
+func (iter *MapIter) Next() bool {
+ if !iter.m.IsValid() {
+ panic("MapIter.Next called on an iterator that does not have an associated map Value")
+ }
+ if !iter.hiter.initialized() {
+ mapiterinit(iter.m.typ, iter.m.pointer(), &iter.hiter)
+ } else {
+ if mapiterkey(&iter.hiter) == nil {
+ panic("MapIter.Next called on exhausted iterator")
+ }
+ mapiternext(&iter.hiter)
+ }
+ return mapiterkey(&iter.hiter) != nil
+}
+
+// Reset modifies iter to iterate over v.
+// It panics if v's Kind is not Map and v is not the zero Value.
+// Reset(Value{}) causes iter not to refer to any map,
+// which may allow the previously iterated-over map to be garbage collected.
+func (iter *MapIter) Reset(v Value) {
+ if v.IsValid() {
+ v.mustBe(Map)
+ }
+ iter.m = v
+ iter.hiter = hiter{}
+}
+
+// MapRange returns a range iterator for a map.
+// It panics if v's Kind is not Map.
+//
+// Call Next to advance the iterator, and Key/Value to access each entry.
+// Next returns false when the iterator is exhausted.
+// MapRange follows the same iteration semantics as a range statement.
+//
+// Example:
+//
+// iter := reflect.ValueOf(m).MapRange()
+// for iter.Next() {
+// k := iter.Key()
+// v := iter.Value()
+// ...
+// }
+func (v Value) MapRange() *MapIter {
+ // This is inlinable to take advantage of "function outlining".
+ // The allocation of MapIter can be stack allocated if the caller
+ // does not allow it to escape.
+ // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/
+ if v.kind() != Map {
+ v.panicNotMap()
+ }
+ return &MapIter{m: v}
+}
+
+func (f flag) panicNotMap() {
+ f.mustBe(Map)
+}
+
+// copyVal returns a Value containing the map key or value at ptr,
+// allocating a new variable as needed.
+func copyVal(typ *rtype, fl flag, ptr unsafe.Pointer) Value {
+ if ifaceIndir(typ) {
+ // Copy result so future changes to the map
+ // won't change the underlying value.
+ c := unsafe_New(typ)
+ typedmemmove(typ, c, ptr)
+ return Value{typ, c, fl | flagIndir}
+ }
+ return Value{typ, *(*unsafe.Pointer)(ptr), fl}
+}
+
+// Method returns a function value corresponding to v's i'th method.
+// The arguments to a Call on the returned function should not include
+// a receiver; the returned function will always use v as the receiver.
+// Method panics if i is out of range or if v is a nil interface value.
+func (v Value) Method(i int) Value {
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.Method", Invalid})
+ }
+ if v.flag&flagMethod != 0 || uint(i) >= uint(v.typ.NumMethod()) {
+ panic("reflect: Method index out of range")
+ }
+ if v.typ.Kind() == Interface && v.IsNil() {
+ panic("reflect: Method on nil interface value")
+ }
+ fl := v.flag.ro() | (v.flag & flagIndir)
+ fl |= flag(Func)
+ fl |= flag(i)<<flagMethodShift | flagMethod
+ return Value{v.typ, v.ptr, fl}
+}
+
+// NumMethod returns the number of methods in the value's method set.
+//
+// For a non-interface type, it returns the number of exported methods.
+//
+// For an interface type, it returns the number of exported and unexported methods.
+func (v Value) NumMethod() int {
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.NumMethod", Invalid})
+ }
+ if v.flag&flagMethod != 0 {
+ return 0
+ }
+ return v.typ.NumMethod()
+}
+
+// MethodByName returns a function value corresponding to the method
+// of v with the given name.
+// The arguments to a Call on the returned function should not include
+// a receiver; the returned function will always use v as the receiver.
+// It returns the zero Value if no method was found.
+func (v Value) MethodByName(name string) Value {
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.MethodByName", Invalid})
+ }
+ if v.flag&flagMethod != 0 {
+ return Value{}
+ }
+ m, ok := v.typ.MethodByName(name)
+ if !ok {
+ return Value{}
+ }
+ return v.Method(m.Index)
+}
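+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// A method Value obtained here is called without an explicit receiver,
+// e.g. for any value t with an exported niladic method Report:
+//
+//	m := reflect.ValueOf(t).MethodByName("Report")
+//	if m.IsValid() {
+//		out := m.Call(nil) // t is used as the receiver
+//		_ = out
+//	}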
+
+// NumField returns the number of fields in the struct v.
+// It panics if v's Kind is not Struct.
+func (v Value) NumField() int {
+ v.mustBe(Struct)
+ tt := (*structType)(unsafe.Pointer(v.typ))
+ return len(tt.fields)
+}
+
+// OverflowComplex reports whether the complex128 x cannot be represented by v's type.
+// It panics if v's Kind is not Complex64 or Complex128.
+func (v Value) OverflowComplex(x complex128) bool {
+ k := v.kind()
+ switch k {
+ case Complex64:
+ return overflowFloat32(real(x)) || overflowFloat32(imag(x))
+ case Complex128:
+ return false
+ }
+ panic(&ValueError{"reflect.Value.OverflowComplex", v.kind()})
+}
+
+// OverflowFloat reports whether the float64 x cannot be represented by v's type.
+// It panics if v's Kind is not Float32 or Float64.
+func (v Value) OverflowFloat(x float64) bool {
+ k := v.kind()
+ switch k {
+ case Float32:
+ return overflowFloat32(x)
+ case Float64:
+ return false
+ }
+ panic(&ValueError{"reflect.Value.OverflowFloat", v.kind()})
+}
+
+func overflowFloat32(x float64) bool {
+ if x < 0 {
+ x = -x
+ }
+ return math.MaxFloat32 < x && x <= math.MaxFloat64
+}
+
+// OverflowInt reports whether the int64 x cannot be represented by v's type.
+// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
+func (v Value) OverflowInt(x int64) bool {
+ k := v.kind()
+ switch k {
+ case Int, Int8, Int16, Int32, Int64:
+ bitSize := v.typ.size * 8
+ trunc := (x << (64 - bitSize)) >> (64 - bitSize)
+ return x != trunc
+ }
+ panic(&ValueError{"reflect.Value.OverflowInt", v.kind()})
+}
+
+// OverflowUint reports whether the uint64 x cannot be represented by v's type.
+// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
+func (v Value) OverflowUint(x uint64) bool {
+ k := v.kind()
+ switch k {
+ case Uint, Uintptr, Uint8, Uint16, Uint32, Uint64:
+ bitSize := v.typ.size * 8
+ trunc := (x << (64 - bitSize)) >> (64 - bitSize)
+ return x != trunc
+ }
+ panic(&ValueError{"reflect.Value.OverflowUint", v.kind()})
+}
+
+//go:nocheckptr
+// This prevents inlining Value.Pointer when -d=checkptr is enabled,
+// which ensures cmd/compile can recognize unsafe.Pointer(v.Pointer())
+// and make an exception.
+
+// Pointer returns v's value as a uintptr.
+// It panics if v's Kind is not Chan, Func, Map, Pointer, Slice, or UnsafePointer.
+//
+// If v's Kind is Func, the returned pointer is an underlying
+// code pointer, but not necessarily enough to identify a
+// single function uniquely. The only guarantee is that the
+// result is zero if and only if v is a nil func Value.
+//
+// If v's Kind is Slice, the returned pointer is to the first
+// element of the slice. If the slice is nil the returned value
+// is 0. If the slice is empty but non-nil the return value is non-zero.
+//
+// It's preferred to use uintptr(Value.UnsafePointer()) to get the equivalent result.
+func (v Value) Pointer() uintptr {
+ k := v.kind()
+ switch k {
+ case Pointer:
+ if v.typ.ptrdata == 0 {
+ val := *(*uintptr)(v.ptr)
+ // Since it is a not-in-heap pointer, all pointers to the heap are
+ // forbidden! See comment in Value.Elem and issue #48399.
+ if !verifyNotInHeapPtr(val) {
+ panic("reflect: reflect.Value.Pointer on an invalid notinheap pointer")
+ }
+ return val
+ }
+ fallthrough
+ case Chan, Map, UnsafePointer:
+ return uintptr(v.pointer())
+ case Func:
+ if v.flag&flagMethod != 0 {
+ // As the doc comment says, the returned pointer is an
+ // underlying code pointer but not necessarily enough to
+ // identify a single function uniquely. All method expressions
+ // created via reflect have the same underlying code pointer,
+ // so their Pointers are equal. The function used here must
+ // match the one used in makeMethodValue.
+ return methodValueCallCodePtr()
+ }
+ p := v.pointer()
+ // Non-nil func value points at data block.
+ // First word of data block is actual code.
+ if p != nil {
+ p = *(*unsafe.Pointer)(p)
+ }
+ return uintptr(p)
+
+ case Slice:
+ return uintptr((*unsafeheader.Slice)(v.ptr).Data)
+ }
+ panic(&ValueError{"reflect.Value.Pointer", v.kind()})
+}
+
+// Recv receives and returns a value from the channel v.
+// It panics if v's Kind is not Chan.
+// The receive blocks until a value is ready.
+// The boolean value ok is true if the value x corresponds to a send
+// on the channel, false if it is a zero value received because the channel is closed.
+func (v Value) Recv() (x Value, ok bool) {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ return v.recv(false)
+}
+
+// internal recv, possibly non-blocking (nb).
+// v is known to be a channel.
+func (v Value) recv(nb bool) (val Value, ok bool) {
+ tt := (*chanType)(unsafe.Pointer(v.typ))
+ if ChanDir(tt.dir)&RecvDir == 0 {
+ panic("reflect: recv on send-only channel")
+ }
+ t := tt.elem
+ val = Value{t, nil, flag(t.Kind())}
+ var p unsafe.Pointer
+ if ifaceIndir(t) {
+ p = unsafe_New(t)
+ val.ptr = p
+ val.flag |= flagIndir
+ } else {
+ p = unsafe.Pointer(&val.ptr)
+ }
+ selected, ok := chanrecv(v.pointer(), nb, p)
+ if !selected {
+ val = Value{}
+ }
+ return
+}
+
+// Send sends x on the channel v.
+// It panics if v's kind is not Chan or if x's type is not the same type as v's element type.
+// As in Go, x's value must be assignable to the channel's element type.
+func (v Value) Send(x Value) {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ v.send(x, false)
+}
+
+// internal send, possibly non-blocking.
+// v is known to be a channel.
+func (v Value) send(x Value, nb bool) (selected bool) {
+ tt := (*chanType)(unsafe.Pointer(v.typ))
+ if ChanDir(tt.dir)&SendDir == 0 {
+ panic("reflect: send on recv-only channel")
+ }
+ x.mustBeExported()
+ x = x.assignTo("reflect.Value.Send", tt.elem, nil)
+ var p unsafe.Pointer
+ if x.flag&flagIndir != 0 {
+ p = x.ptr
+ } else {
+ p = unsafe.Pointer(&x.ptr)
+ }
+ return chansend(v.pointer(), p, nb)
+}
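+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// Send and Recv mirror the channel operators, assuming a local channel ch:
+//
+//	ch := make(chan int, 1)
+//	cv := reflect.ValueOf(ch)
+//	cv.Send(reflect.ValueOf(10)) // ch <- 10
+//	x, ok := cv.Recv()           // x.Int() == 10, ok == true
+//	_, _ = x, ok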
+
+// Set assigns x to the value v.
+// It panics if CanSet returns false.
+// As in Go, x's value must be assignable to v's type and
+// must not be derived from an unexported field.
+func (v Value) Set(x Value) {
+ v.mustBeAssignable()
+ x.mustBeExported() // do not let unexported x leak
+ var target unsafe.Pointer
+ if v.kind() == Interface {
+ target = v.ptr
+ }
+ x = x.assignTo("reflect.Set", v.typ, target)
+ if x.flag&flagIndir != 0 {
+ if x.ptr == unsafe.Pointer(&zeroVal[0]) {
+ typedmemclr(v.typ, v.ptr)
+ } else {
+ typedmemmove(v.typ, v.ptr, x.ptr)
+ }
+ } else {
+ *(*unsafe.Pointer)(v.ptr) = x.ptr
+ }
+}
+
+// SetBool sets v's underlying value.
+// It panics if v's Kind is not Bool or if CanSet() is false.
+func (v Value) SetBool(x bool) {
+ v.mustBeAssignable()
+ v.mustBe(Bool)
+ *(*bool)(v.ptr) = x
+}
+
+// SetBytes sets v's underlying value.
+// It panics if v's underlying value is not a slice of bytes.
+func (v Value) SetBytes(x []byte) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Uint8 {
+ panic("reflect.Value.SetBytes of non-byte slice")
+ }
+ *(*[]byte)(v.ptr) = x
+}
+
+// setRunes sets v's underlying value.
+// It panics if v's underlying value is not a slice of runes (int32s).
+func (v Value) setRunes(x []rune) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Int32 {
+ panic("reflect.Value.setRunes of non-rune slice")
+ }
+ *(*[]rune)(v.ptr) = x
+}
+
+// SetComplex sets v's underlying value to x.
+// It panics if v's Kind is not Complex64 or Complex128, or if CanSet() is false.
+func (v Value) SetComplex(x complex128) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetComplex", v.kind()})
+ case Complex64:
+ *(*complex64)(v.ptr) = complex64(x)
+ case Complex128:
+ *(*complex128)(v.ptr) = x
+ }
+}
+
+// SetFloat sets v's underlying value to x.
+// It panics if v's Kind is not Float32 or Float64, or if CanSet() is false.
+func (v Value) SetFloat(x float64) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetFloat", v.kind()})
+ case Float32:
+ *(*float32)(v.ptr) = float32(x)
+ case Float64:
+ *(*float64)(v.ptr) = x
+ }
+}
+
+// SetInt sets v's underlying value to x.
+// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64, or if CanSet() is false.
+func (v Value) SetInt(x int64) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetInt", v.kind()})
+ case Int:
+ *(*int)(v.ptr) = int(x)
+ case Int8:
+ *(*int8)(v.ptr) = int8(x)
+ case Int16:
+ *(*int16)(v.ptr) = int16(x)
+ case Int32:
+ *(*int32)(v.ptr) = int32(x)
+ case Int64:
+ *(*int64)(v.ptr) = x
+ }
+}
+
+// SetLen sets v's length to n.
+// It panics if v's Kind is not Slice or if n is negative or
+// greater than the capacity of the slice.
+func (v Value) SetLen(n int) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ s := (*unsafeheader.Slice)(v.ptr)
+ if uint(n) > uint(s.Cap) {
+ panic("reflect: slice length out of range in SetLen")
+ }
+ s.Len = n
+}
+
+// SetCap sets v's capacity to n.
+// It panics if v's Kind is not Slice or if n is smaller than the length or
+// greater than the capacity of the slice.
+func (v Value) SetCap(n int) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ s := (*unsafeheader.Slice)(v.ptr)
+ if n < s.Len || n > s.Cap {
+ panic("reflect: slice capacity out of range in SetCap")
+ }
+ s.Cap = n
+}
+
+// SetMapIndex sets the element associated with key in the map v to elem.
+// It panics if v's Kind is not Map.
+// If elem is the zero Value, SetMapIndex deletes the key from the map.
+// Otherwise if v holds a nil map, SetMapIndex will panic.
+// As in Go, key's value must be assignable to the map's key type,
+// and elem's value must be assignable to the map's elem type.
+func (v Value) SetMapIndex(key, elem Value) {
+ v.mustBe(Map)
+ v.mustBeExported()
+ key.mustBeExported()
+ tt := (*mapType)(unsafe.Pointer(v.typ))
+
+ if (tt.key == stringType || key.kind() == String) && tt.key == key.typ && tt.elem.size <= maxValSize {
+ k := *(*string)(key.ptr)
+ if elem.typ == nil {
+ mapdelete_faststr(v.typ, v.pointer(), k)
+ return
+ }
+ elem.mustBeExported()
+ elem = elem.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
+ var e unsafe.Pointer
+ if elem.flag&flagIndir != 0 {
+ e = elem.ptr
+ } else {
+ e = unsafe.Pointer(&elem.ptr)
+ }
+ mapassign_faststr(v.typ, v.pointer(), k, e)
+ return
+ }
+
+ key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
+ var k unsafe.Pointer
+ if key.flag&flagIndir != 0 {
+ k = key.ptr
+ } else {
+ k = unsafe.Pointer(&key.ptr)
+ }
+ if elem.typ == nil {
+ mapdelete(v.typ, v.pointer(), k)
+ return
+ }
+ elem.mustBeExported()
+ elem = elem.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
+ var e unsafe.Pointer
+ if elem.flag&flagIndir != 0 {
+ e = elem.ptr
+ } else {
+ e = unsafe.Pointer(&elem.ptr)
+ }
+ mapassign(v.typ, v.pointer(), k, e)
+}
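+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// SetMapIndex covers both assignment and deletion, assuming a local map m:
+//
+//	m := map[string]int{"a": 1}
+//	mv := reflect.ValueOf(m)
+//	mv.SetMapIndex(reflect.ValueOf("b"), reflect.ValueOf(2)) // m["b"] = 2
+//	mv.SetMapIndex(reflect.ValueOf("a"), reflect.Value{})    // delete(m, "a")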
+
+// SetUint sets v's underlying value to x.
+// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64, or if CanSet() is false.
+func (v Value) SetUint(x uint64) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetUint", v.kind()})
+ case Uint:
+ *(*uint)(v.ptr) = uint(x)
+ case Uint8:
+ *(*uint8)(v.ptr) = uint8(x)
+ case Uint16:
+ *(*uint16)(v.ptr) = uint16(x)
+ case Uint32:
+ *(*uint32)(v.ptr) = uint32(x)
+ case Uint64:
+ *(*uint64)(v.ptr) = x
+ case Uintptr:
+ *(*uintptr)(v.ptr) = uintptr(x)
+ }
+}
+
+// SetPointer sets the [unsafe.Pointer] value v to x.
+// It panics if v's Kind is not UnsafePointer.
+func (v Value) SetPointer(x unsafe.Pointer) {
+ v.mustBeAssignable()
+ v.mustBe(UnsafePointer)
+ *(*unsafe.Pointer)(v.ptr) = x
+}
+
+// SetString sets v's underlying value to x.
+// It panics if v's Kind is not String or if CanSet() is false.
+func (v Value) SetString(x string) {
+ v.mustBeAssignable()
+ v.mustBe(String)
+ *(*string)(v.ptr) = x
+}
+
+// Slice returns v[i:j].
+// It panics if v's Kind is not Array, Slice or String, or if v is an unaddressable array,
+// or if the indexes are out of bounds.
+func (v Value) Slice(i, j int) Value {
+ var (
+ cap int
+ typ *sliceType
+ base unsafe.Pointer
+ )
+ switch kind := v.kind(); kind {
+ default:
+ panic(&ValueError{"reflect.Value.Slice", v.kind()})
+
+ case Array:
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.Slice: slice of unaddressable array")
+ }
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ cap = int(tt.len)
+ typ = (*sliceType)(unsafe.Pointer(tt.slice))
+ base = v.ptr
+
+ case Slice:
+ typ = (*sliceType)(unsafe.Pointer(v.typ))
+ s := (*unsafeheader.Slice)(v.ptr)
+ base = s.Data
+ cap = s.Cap
+
+ case String:
+ s := (*unsafeheader.String)(v.ptr)
+ if i < 0 || j < i || j > s.Len {
+ panic("reflect.Value.Slice: string slice index out of bounds")
+ }
+ var t unsafeheader.String
+ if i < s.Len {
+ t = unsafeheader.String{Data: arrayAt(s.Data, i, 1, "i < s.Len"), Len: j - i}
+ }
+ return Value{v.typ, unsafe.Pointer(&t), v.flag}
+ }
+
+ if i < 0 || j < i || j > cap {
+ panic("reflect.Value.Slice: slice index out of bounds")
+ }
+
+ // Declare slice so that gc can see the base pointer in it.
+ var x []unsafe.Pointer
+
+ // Reinterpret as *unsafeheader.Slice to edit.
+ s := (*unsafeheader.Slice)(unsafe.Pointer(&x))
+ s.Len = j - i
+ s.Cap = cap - i
+ if cap-i > 0 {
+ s.Data = arrayAt(base, i, typ.elem.Size(), "i < cap")
+ } else {
+ // do not advance pointer, to avoid pointing beyond end of slice
+ s.Data = base
+ }
+
+ fl := v.flag.ro() | flagIndir | flag(Slice)
+ return Value{typ.common(), unsafe.Pointer(&x), fl}
+}
+
+// Slice3 is the 3-index form of the slice operation: it returns v[i:j:k].
+// It panics if v's Kind is not Array or Slice, or if v is an unaddressable array,
+// or if the indexes are out of bounds.
+func (v Value) Slice3(i, j, k int) Value {
+ var (
+ cap int
+ typ *sliceType
+ base unsafe.Pointer
+ )
+ switch kind := v.kind(); kind {
+ default:
+ panic(&ValueError{"reflect.Value.Slice3", v.kind()})
+
+ case Array:
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.Slice3: slice of unaddressable array")
+ }
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ cap = int(tt.len)
+ typ = (*sliceType)(unsafe.Pointer(tt.slice))
+ base = v.ptr
+
+ case Slice:
+ typ = (*sliceType)(unsafe.Pointer(v.typ))
+ s := (*unsafeheader.Slice)(v.ptr)
+ base = s.Data
+ cap = s.Cap
+ }
+
+ if i < 0 || j < i || k < j || k > cap {
+ panic("reflect.Value.Slice3: slice index out of bounds")
+ }
+
+ // Declare slice so that the garbage collector
+ // can see the base pointer in it.
+ var x []unsafe.Pointer
+
+ // Reinterpret as *unsafeheader.Slice to edit.
+ s := (*unsafeheader.Slice)(unsafe.Pointer(&x))
+ s.Len = j - i
+ s.Cap = k - i
+ if k-i > 0 {
+ s.Data = arrayAt(base, i, typ.elem.Size(), "i < k <= cap")
+ } else {
+ // do not advance pointer, to avoid pointing beyond end of slice
+ s.Data = base
+ }
+
+ fl := v.flag.ro() | flagIndir | flag(Slice)
+ return Value{typ.common(), unsafe.Pointer(&x), fl}
+}
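+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// Slice and Slice3 behave like the corresponding expressions, assuming a
+// local slice s:
+//
+//	s := []int{0, 1, 2, 3, 4}
+//	v := reflect.ValueOf(s)
+//	_ = v.Slice(1, 3)     // s[1:3]
+//	_ = v.Slice3(1, 3, 4) // s[1:3:4]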
+
+// String returns the string v's underlying value, as a string.
+// String is a special case because of Go's String method convention.
+// Unlike the other getters, it does not panic if v's Kind is not String.
+// Instead, it returns a string of the form "<T value>" where T is v's type.
+// The fmt package treats Values specially. It does not call their String
+// method implicitly but instead prints the concrete values they hold.
+func (v Value) String() string {
+ // stringNonString is split out to keep String inlineable for string kinds.
+ if v.kind() == String {
+ return *(*string)(v.ptr)
+ }
+ return v.stringNonString()
+}
+
+func (v Value) stringNonString() string {
+ if v.kind() == Invalid {
+ return "<invalid Value>"
+ }
+ // If you call String on a reflect.Value of other type, it's better to
+ // print something than to panic. Useful in debugging.
+ return "<" + v.Type().String() + " Value>"
+}
+
+// TryRecv attempts to receive a value from the channel v but will not block.
+// It panics if v's Kind is not Chan.
+// If the receive delivers a value, x is the transferred value and ok is true.
+// If the receive cannot finish without blocking, x is the zero Value and ok is false.
+// If the channel is closed, x is the zero value for the channel's element type and ok is false.
+func (v Value) TryRecv() (x Value, ok bool) {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ return v.recv(true)
+}
+
+// TrySend attempts to send x on the channel v but will not block.
+// It panics if v's Kind is not Chan.
+// It reports whether the value was sent.
+// As in Go, x's value must be assignable to the channel's element type.
+func (v Value) TrySend(x Value) bool {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ return v.send(x, true)
+}
+
+// Type returns v's type.
+func (v Value) Type() Type {
+ if v.flag != 0 && v.flag&flagMethod == 0 {
+ return v.typ
+ }
+ return v.typeSlow()
+}
+
+func (v Value) typeSlow() Type {
+ if v.flag == 0 {
+ panic(&ValueError{"reflect.Value.Type", Invalid})
+ }
+ if v.flag&flagMethod == 0 {
+ return v.typ
+ }
+
+ // Method value.
+ // v.typ describes the receiver, not the method type.
+ i := int(v.flag) >> flagMethodShift
+ if v.typ.Kind() == Interface {
+ // Method on interface.
+ tt := (*interfaceType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(len(tt.methods)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := &tt.methods[i]
+ return v.typ.typeOff(m.typ)
+ }
+ // Method on concrete type.
+ ms := v.typ.exportedMethods()
+ if uint(i) >= uint(len(ms)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := ms[i]
+ return v.typ.typeOff(m.mtyp)
+}
+
+// CanUint reports whether Uint can be used without panicking.
+func (v Value) CanUint() bool {
+ switch v.kind() {
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return true
+ default:
+ return false
+ }
+}
+
+// Uint returns v's underlying value, as a uint64.
+// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
+func (v Value) Uint() uint64 {
+ k := v.kind()
+ p := v.ptr
+ switch k {
+ case Uint:
+ return uint64(*(*uint)(p))
+ case Uint8:
+ return uint64(*(*uint8)(p))
+ case Uint16:
+ return uint64(*(*uint16)(p))
+ case Uint32:
+ return uint64(*(*uint32)(p))
+ case Uint64:
+ return *(*uint64)(p)
+ case Uintptr:
+ return uint64(*(*uintptr)(p))
+ }
+ panic(&ValueError{"reflect.Value.Uint", v.kind()})
+}
+
+//go:nocheckptr
+// This prevents inlining Value.UnsafeAddr when -d=checkptr is enabled,
+// which ensures cmd/compile can recognize unsafe.Pointer(v.UnsafeAddr())
+// and make an exception.
+
+// UnsafeAddr returns a pointer to v's data, as a uintptr.
+// It panics if v is not addressable.
+//
+// It's preferred to use uintptr(Value.Addr().UnsafePointer()) to get the equivalent result.
+func (v Value) UnsafeAddr() uintptr {
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.UnsafeAddr", Invalid})
+ }
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.UnsafeAddr of unaddressable value")
+ }
+ return uintptr(v.ptr)
+}
+
+// UnsafePointer returns v's value as a [unsafe.Pointer].
+// It panics if v's Kind is not Chan, Func, Map, Pointer, Slice, or UnsafePointer.
+//
+// If v's Kind is Func, the returned pointer is an underlying
+// code pointer, but not necessarily enough to identify a
+// single function uniquely. The only guarantee is that the
+// result is zero if and only if v is a nil func Value.
+//
+// If v's Kind is Slice, the returned pointer is to the first
+// element of the slice. If the slice is nil the returned value
+// is nil. If the slice is empty but non-nil the return value is non-nil.
+func (v Value) UnsafePointer() unsafe.Pointer {
+ k := v.kind()
+ switch k {
+ case Pointer:
+ if v.typ.ptrdata == 0 {
+ // Since it is a not-in-heap pointer, all pointers to the heap are
+ // forbidden! See comment in Value.Elem and issue #48399.
+ if !verifyNotInHeapPtr(*(*uintptr)(v.ptr)) {
+ panic("reflect: reflect.Value.UnsafePointer on an invalid notinheap pointer")
+ }
+ return *(*unsafe.Pointer)(v.ptr)
+ }
+ fallthrough
+ case Chan, Map, UnsafePointer:
+ return v.pointer()
+ case Func:
+ if v.flag&flagMethod != 0 {
+ // As the doc comment says, the returned pointer is an
+ // underlying code pointer but not necessarily enough to
+ // identify a single function uniquely. All method expressions
+ // created via reflect have the same underlying code pointer,
+ // so their Pointers are equal. The function used here must
+ // match the one used in makeMethodValue.
+ code := methodValueCallCodePtr()
+ return *(*unsafe.Pointer)(unsafe.Pointer(&code))
+ }
+ p := v.pointer()
+ // Non-nil func value points at data block.
+ // First word of data block is actual code.
+ if p != nil {
+ p = *(*unsafe.Pointer)(p)
+ }
+ return p
+
+ case Slice:
+ return (*unsafeheader.Slice)(v.ptr).Data
+ }
+ panic(&ValueError{"reflect.Value.UnsafePointer", v.kind()})
+}
+
+// StringHeader is the runtime representation of a string.
+// It cannot be used safely or portably and its representation may
+// change in a later release.
+// Moreover, the Data field is not sufficient to guarantee the data
+// it references will not be garbage collected, so programs must keep
+// a separate, correctly typed pointer to the underlying data.
+//
+// In new code, use unsafe.String or unsafe.StringData instead.
+type StringHeader struct {
+ Data uintptr
+ Len int
+}
+
+// SliceHeader is the runtime representation of a slice.
+// It cannot be used safely or portably and its representation may
+// change in a later release.
+// Moreover, the Data field is not sufficient to guarantee the data
+// it references will not be garbage collected, so programs must keep
+// a separate, correctly typed pointer to the underlying data.
+//
+// In new code, use unsafe.Slice or unsafe.SliceData instead.
+type SliceHeader struct {
+ Data uintptr
+ Len int
+ Cap int
+}
+
+func typesMustMatch(what string, t1, t2 Type) {
+ if t1 != t2 {
+ panic(what + ": " + t1.String() + " != " + t2.String())
+ }
+}
+
+// arrayAt returns the i-th element of p,
+// an array whose elements are eltSize bytes wide.
+// The array pointed at by p must have at least i+1 elements:
+// it is invalid (but impossible to check here) to pass i >= len,
+// because then the result will point outside the array.
+// whySafe must explain why i < len. (Passing "i < len" is fine;
+// the benefit is to surface this assumption at the call site.)
+func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
+ return add(p, uintptr(i)*eltSize, "i < len")
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation.
+//
+// It panics if v's Kind is not a Slice or if n is negative or too large to
+// allocate the memory.
+func (v Value) Grow(n int) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ v.grow(n)
+}
+
+// grow is identical to Grow but does not check for assignability.
+func (v Value) grow(n int) {
+ p := (*unsafeheader.Slice)(v.ptr)
+ switch {
+ case n < 0:
+ panic("reflect.Value.Grow: negative len")
+ case p.Len+n < 0:
+ panic("reflect.Value.Grow: slice overflow")
+ case p.Len+n > p.Cap:
+ t := v.typ.Elem().(*rtype)
+ *p = growslice(t, *p, n)
+ }
+}
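+
+// Illustrative sketch (editorial addition, not part of the upstream source):
+// Grow reserves capacity ahead of appends, assuming an addressable slice:
+//
+//	s := []byte{}
+//	v := reflect.ValueOf(&s).Elem()
+//	v.Grow(64) // cap(s) >= 64, len(s) unchanged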
+
+// extendSlice extends a slice by n elements.
+//
+// Unlike Value.grow, which modifies the slice in place and
+// does not change its length,
+// extendSlice returns a new slice value with the length
+// incremented by the specified number of elements.
+func (v Value) extendSlice(n int) Value {
+ v.mustBeExported()
+ v.mustBe(Slice)
+
+ // Shallow copy the slice header to avoid mutating the source slice.
+ sh := *(*unsafeheader.Slice)(v.ptr)
+ s := &sh
+ v.ptr = unsafe.Pointer(s)
+ v.flag = flagIndir | flag(Slice) // equivalent flag to MakeSlice
+
+ v.grow(n) // fine to treat as assignable since we allocate a new slice header
+ s.Len += n
+ return v
+}
+
+// Append appends the values x to a slice s and returns the resulting slice.
+// As in Go, each x's value must be assignable to the slice's element type.
+func Append(s Value, x ...Value) Value {
+ s.mustBe(Slice)
+ n := s.Len()
+ s = s.extendSlice(len(x))
+ for i, v := range x {
+ s.Index(n + i).Set(v)
+ }
+ return s
+}
+
+// AppendSlice appends a slice t to a slice s and returns the resulting slice.
+// The slices s and t must have the same element type.
+func AppendSlice(s, t Value) Value {
+ s.mustBe(Slice)
+ t.mustBe(Slice)
+ typesMustMatch("reflect.AppendSlice", s.Type().Elem(), t.Type().Elem())
+ ns := s.Len()
+ nt := t.Len()
+ s = s.extendSlice(nt)
+ Copy(s.Slice(ns, ns+nt), t)
+ return s
+}
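+
+ // A short sketch of Append and AppendSlice, assuming a caller importing
+ // "reflect" and "fmt"; the values shown are illustrative only.
+ //
+ //	s := reflect.ValueOf([]int{1, 2})
+ //	s = reflect.Append(s, reflect.ValueOf(3))                // []int{1, 2, 3}
+ //	s = reflect.AppendSlice(s, reflect.ValueOf([]int{4, 5})) // []int{1, 2, 3, 4, 5}
+ //	fmt.Println(s.Interface())                               // [1 2 3 4 5]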
+
+// Copy copies the contents of src into dst until either
+// dst has been filled or src has been exhausted.
+// It returns the number of elements copied.
+// Dst and src each must have kind Slice or Array, and
+// dst and src must have the same element type.
+//
+// As a special case, src can have kind String if the element type of dst is kind Uint8.
+func Copy(dst, src Value) int {
+ dk := dst.kind()
+ if dk != Array && dk != Slice {
+ panic(&ValueError{"reflect.Copy", dk})
+ }
+ if dk == Array {
+ dst.mustBeAssignable()
+ }
+ dst.mustBeExported()
+
+ sk := src.kind()
+ var stringCopy bool
+ if sk != Array && sk != Slice {
+ stringCopy = sk == String && dst.typ.Elem().Kind() == Uint8
+ if !stringCopy {
+ panic(&ValueError{"reflect.Copy", sk})
+ }
+ }
+ src.mustBeExported()
+
+ de := dst.typ.Elem()
+ if !stringCopy {
+ se := src.typ.Elem()
+ typesMustMatch("reflect.Copy", de, se)
+ }
+
+ var ds, ss unsafeheader.Slice
+ if dk == Array {
+ ds.Data = dst.ptr
+ ds.Len = dst.Len()
+ ds.Cap = ds.Len
+ } else {
+ ds = *(*unsafeheader.Slice)(dst.ptr)
+ }
+ if sk == Array {
+ ss.Data = src.ptr
+ ss.Len = src.Len()
+ ss.Cap = ss.Len
+ } else if sk == Slice {
+ ss = *(*unsafeheader.Slice)(src.ptr)
+ } else {
+ sh := *(*unsafeheader.String)(src.ptr)
+ ss.Data = sh.Data
+ ss.Len = sh.Len
+ ss.Cap = sh.Len
+ }
+
+ return typedslicecopy(de.common(), ds, ss)
+}
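+
+ // A sketch of the string special case described above, with hypothetical
+ // caller-side values (assumes "reflect" and "fmt" are imported):
+ //
+ //	dst := make([]byte, 3)
+ //	n := reflect.Copy(reflect.ValueOf(dst), reflect.ValueOf("hey"))
+ //	fmt.Println(n, string(dst)) // 3 hey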
+
+// A runtimeSelect is a single case passed to rselect.
+// This must match ../runtime/select.go:/runtimeSelect
+type runtimeSelect struct {
+ dir SelectDir // SelectSend, SelectRecv or SelectDefault
+ typ *rtype // channel type
+ ch unsafe.Pointer // channel
+ val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
+}
+
+// rselect runs a select. It returns the index of the chosen case.
+// If the case was a receive, val is filled in with the received value.
+// The conventional OK bool indicates whether the receive corresponds
+// to a sent value.
+//
+//go:noescape
+func rselect([]runtimeSelect) (chosen int, recvOK bool)
+
+// A SelectDir describes the communication direction of a select case.
+type SelectDir int
+
+// NOTE: These values must match ../runtime/select.go:/selectDir.
+
+const (
+ _ SelectDir = iota
+ SelectSend // case Chan <- Send
+ SelectRecv // case <-Chan:
+ SelectDefault // default
+)
+
+// A SelectCase describes a single case in a select operation.
+// The kind of case depends on Dir, the communication direction.
+//
+// If Dir is SelectDefault, the case represents a default case.
+// Chan and Send must be zero Values.
+//
+// If Dir is SelectSend, the case represents a send operation.
+// Normally Chan's underlying value must be a channel, and Send's underlying value must be
+// assignable to the channel's element type. As a special case, if Chan is a zero Value,
+// then the case is ignored, and the field Send will also be ignored and may be either zero
+// or non-zero.
+//
+// If Dir is SelectRecv, the case represents a receive operation.
+// Normally Chan's underlying value must be a channel and Send must be a zero Value.
+// If Chan is a zero Value, then the case is ignored, but Send must still be a zero Value.
+// When a receive operation is selected, the received Value is returned by Select.
+type SelectCase struct {
+ Dir SelectDir // direction of case
+ Chan Value // channel to use (for send or receive)
+ Send Value // value to send (for send)
+}
+
+// Select executes a select operation described by the list of cases.
+// Like the Go select statement, it blocks until at least one of the cases
+// can proceed, makes a uniform pseudo-random choice,
+// and then executes that case. It returns the index of the chosen case
+// and, if that case was a receive operation, the value received and a
+// boolean indicating whether the value corresponds to a send on the channel
+// (as opposed to a zero value received because the channel is closed).
+// Select supports a maximum of 65536 cases.
+func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
+ if len(cases) > 65536 {
+ panic("reflect.Select: too many cases (max 65536)")
+ }
+ // NOTE: Do not trust that caller is not modifying cases data underfoot.
+ // The range is safe because the caller cannot modify our copy of the len
+ // and each iteration makes its own copy of the value c.
+ var runcases []runtimeSelect
+ if len(cases) > 4 {
+ // Slice is heap allocated due to runtime dependent capacity.
+ runcases = make([]runtimeSelect, len(cases))
+ } else {
+ // Slice can be stack allocated due to constant capacity.
+ runcases = make([]runtimeSelect, len(cases), 4)
+ }
+
+ haveDefault := false
+ for i, c := range cases {
+ rc := &runcases[i]
+ rc.dir = c.Dir
+ switch c.Dir {
+ default:
+ panic("reflect.Select: invalid Dir")
+
+ case SelectDefault: // default
+ if haveDefault {
+ panic("reflect.Select: multiple default cases")
+ }
+ haveDefault = true
+ if c.Chan.IsValid() {
+ panic("reflect.Select: default case has Chan value")
+ }
+ if c.Send.IsValid() {
+ panic("reflect.Select: default case has Send value")
+ }
+
+ case SelectSend:
+ ch := c.Chan
+ if !ch.IsValid() {
+ break
+ }
+ ch.mustBe(Chan)
+ ch.mustBeExported()
+ tt := (*chanType)(unsafe.Pointer(ch.typ))
+ if ChanDir(tt.dir)&SendDir == 0 {
+ panic("reflect.Select: SendDir case using recv-only channel")
+ }
+ rc.ch = ch.pointer()
+ rc.typ = &tt.rtype
+ v := c.Send
+ if !v.IsValid() {
+ panic("reflect.Select: SendDir case missing Send value")
+ }
+ v.mustBeExported()
+ v = v.assignTo("reflect.Select", tt.elem, nil)
+ if v.flag&flagIndir != 0 {
+ rc.val = v.ptr
+ } else {
+ rc.val = unsafe.Pointer(&v.ptr)
+ }
+
+ case SelectRecv:
+ if c.Send.IsValid() {
+ panic("reflect.Select: RecvDir case has Send value")
+ }
+ ch := c.Chan
+ if !ch.IsValid() {
+ break
+ }
+ ch.mustBe(Chan)
+ ch.mustBeExported()
+ tt := (*chanType)(unsafe.Pointer(ch.typ))
+ if ChanDir(tt.dir)&RecvDir == 0 {
+ panic("reflect.Select: RecvDir case using send-only channel")
+ }
+ rc.ch = ch.pointer()
+ rc.typ = &tt.rtype
+ rc.val = unsafe_New(tt.elem)
+ }
+ }
+
+ chosen, recvOK = rselect(runcases)
+ if runcases[chosen].dir == SelectRecv {
+ tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ))
+ t := tt.elem
+ p := runcases[chosen].val
+ fl := flag(t.Kind())
+ if ifaceIndir(t) {
+ recv = Value{t, p, fl | flagIndir}
+ } else {
+ recv = Value{t, *(*unsafe.Pointer)(p), fl}
+ }
+ }
+ return chosen, recv, recvOK
+}
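+
+ // A minimal Select sketch, assuming a caller that imports "reflect"; the
+ // channel and case layout are illustrative. With a value already buffered,
+ // the receive case is ready, so the default case is not chosen.
+ //
+ //	ch := make(chan int, 1)
+ //	ch <- 42
+ //	cases := []reflect.SelectCase{
+ //		{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)},
+ //		{Dir: reflect.SelectDefault},
+ //	}
+ //	chosen, recv, recvOK := reflect.Select(cases) // chosen == 0, recv.Int() == 42, recvOK == true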
+
+/*
+ * constructors
+ */
+
+// implemented in package runtime
+func unsafe_New(*rtype) unsafe.Pointer
+func unsafe_NewArray(*rtype, int) unsafe.Pointer
+
+// MakeSlice creates a new zero-initialized slice value
+// for the specified slice type, length, and capacity.
+func MakeSlice(typ Type, len, cap int) Value {
+ if typ.Kind() != Slice {
+ panic("reflect.MakeSlice of non-slice type")
+ }
+ if len < 0 {
+ panic("reflect.MakeSlice: negative len")
+ }
+ if cap < 0 {
+ panic("reflect.MakeSlice: negative cap")
+ }
+ if len > cap {
+ panic("reflect.MakeSlice: len > cap")
+ }
+
+ s := unsafeheader.Slice{Data: unsafe_NewArray(typ.Elem().(*rtype), cap), Len: len, Cap: cap}
+ return Value{typ.(*rtype), unsafe.Pointer(&s), flagIndir | flag(Slice)}
+}
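+
+ // A usage sketch with illustrative values (caller imports "reflect"):
+ // elements of the new slice are addressable and start at their zero value.
+ //
+ //	t := reflect.TypeOf([]string(nil))
+ //	s := reflect.MakeSlice(t, 2, 4) // len 2, cap 4
+ //	s.Index(0).SetString("a")
+ //	_ = s.Interface()               // []string{"a", ""}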
+
+// MakeChan creates a new channel with the specified type and buffer size.
+func MakeChan(typ Type, buffer int) Value {
+ if typ.Kind() != Chan {
+ panic("reflect.MakeChan of non-chan type")
+ }
+ if buffer < 0 {
+ panic("reflect.MakeChan: negative buffer size")
+ }
+ if typ.ChanDir() != BothDir {
+ panic("reflect.MakeChan: unidirectional channel type")
+ }
+ t := typ.(*rtype)
+ ch := makechan(t, buffer)
+ return Value{t, ch, flag(Chan)}
+}
+
+// MakeMap creates a new map with the specified type.
+func MakeMap(typ Type) Value {
+ return MakeMapWithSize(typ, 0)
+}
+
+// MakeMapWithSize creates a new map with the specified type
+// and initial space for approximately n elements.
+func MakeMapWithSize(typ Type, n int) Value {
+ if typ.Kind() != Map {
+ panic("reflect.MakeMapWithSize of non-map type")
+ }
+ t := typ.(*rtype)
+ m := makemap(t, n)
+ return Value{t, m, flag(Map)}
+}
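+
+ // A usage sketch for the map constructors, assuming a caller importing
+ // "reflect"; the key and value shown are illustrative.
+ //
+ //	mt := reflect.TypeOf(map[string]int(nil))
+ //	m := reflect.MakeMapWithSize(mt, 10)
+ //	m.SetMapIndex(reflect.ValueOf("a"), reflect.ValueOf(1))
+ //	_ = m.Interface() // map[string]int{"a": 1}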
+
+// Indirect returns the value that v points to.
+// If v is a nil pointer, Indirect returns a zero Value.
+// If v is not a pointer, Indirect returns v.
+func Indirect(v Value) Value {
+ if v.Kind() != Pointer {
+ return v
+ }
+ return v.Elem()
+}
+
+// ValueOf returns a new Value initialized to the concrete value
+// stored in the interface i. ValueOf(nil) returns the zero Value.
+func ValueOf(i any) Value {
+ if i == nil {
+ return Value{}
+ }
+
+ // TODO: Maybe allow contents of a Value to live on the stack.
+ // For now we make the contents always escape to the heap. It
+ // makes life easier in a few places (see chanrecv/mapassign
+ // comment below).
+ escapes(i)
+
+ return unpackEface(i)
+}
+
+// Zero returns a Value representing the zero value for the specified type.
+// The result is different from the zero value of the Value struct,
+// which represents no value at all.
+// For example, Zero(TypeOf(42)) returns a Value with Kind Int and value 0.
+// The returned value is neither addressable nor settable.
+func Zero(typ Type) Value {
+ if typ == nil {
+ panic("reflect: Zero(nil)")
+ }
+ t := typ.(*rtype)
+ fl := flag(t.Kind())
+ if ifaceIndir(t) {
+ var p unsafe.Pointer
+ if t.size <= maxZero {
+ p = unsafe.Pointer(&zeroVal[0])
+ } else {
+ p = unsafe_New(t)
+ }
+ return Value{t, p, fl | flagIndir}
+ }
+ return Value{t, nil, fl}
+}
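+
+ // A brief sketch (illustrative, caller imports "reflect"): the result holds
+ // the type's zero value but is not settable.
+ //
+ //	z := reflect.Zero(reflect.TypeOf(42))
+ //	_ = z.Int()    // 0
+ //	_ = z.CanSet() // false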
+
+// must match declarations in runtime/map.go.
+const maxZero = 1024
+
+//go:linkname zeroVal runtime.zeroVal
+var zeroVal [maxZero]byte
+
+// New returns a Value representing a pointer to a new zero value
+// for the specified type. That is, the returned Value's Type is PointerTo(typ).
+func New(typ Type) Value {
+ if typ == nil {
+ panic("reflect: New(nil)")
+ }
+ t := typ.(*rtype)
+ pt := t.ptrTo()
+ if ifaceIndir(pt) {
+ // This is a pointer to a not-in-heap type.
+ panic("reflect: New of type that may not be allocated in heap (possibly undefined cgo C type)")
+ }
+ ptr := unsafe_New(t)
+ fl := flag(Pointer)
+ return Value{pt, ptr, fl}
+}
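+
+ // A brief sketch (illustrative, caller imports "reflect"): New yields a
+ // pointer Value whose Elem is settable.
+ //
+ //	p := reflect.New(reflect.TypeOf(0)) // Value of type *int
+ //	p.Elem().SetInt(7)
+ //	_ = *p.Interface().(*int) // 7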
+
+// NewAt returns a Value representing a pointer to a value of the
+// specified type, using p as that pointer.
+func NewAt(typ Type, p unsafe.Pointer) Value {
+ fl := flag(Pointer)
+ t := typ.(*rtype)
+ return Value{t.ptrTo(), p, fl}
+}
+
+// assignTo returns a value v that can be assigned directly to dst.
+// It panics if v is not assignable to dst.
+// For a conversion to an interface type, target, if not nil,
+// is a suggested scratch space to use.
+// target must be initialized memory (or nil).
+func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value {
+ if v.flag&flagMethod != 0 {
+ v = makeMethodValue(context, v)
+ }
+
+ switch {
+ case directlyAssignable(dst, v.typ):
+ // Overwrite type so that they match.
+ // Same memory layout, so no harm done.
+ fl := v.flag&(flagAddr|flagIndir) | v.flag.ro()
+ fl |= flag(dst.Kind())
+ return Value{dst, v.ptr, fl}
+
+ case implements(dst, v.typ):
+ if v.Kind() == Interface && v.IsNil() {
+ // A nil ReadWriter passed to nil Reader is OK,
+ // but using ifaceE2I below will panic.
+ // Avoid the panic by returning a nil dst (e.g., Reader) explicitly.
+ return Value{dst, nil, flag(Interface)}
+ }
+ x := valueInterface(v, false)
+ if target == nil {
+ target = unsafe_New(dst)
+ }
+ if dst.NumMethod() == 0 {
+ *(*any)(target) = x
+ } else {
+ ifaceE2I(dst, x, target)
+ }
+ return Value{dst, target, flagIndir | flag(Interface)}
+ }
+
+ // Failed.
+ panic(context + ": value of type " + v.typ.String() + " is not assignable to type " + dst.String())
+}
+
+// Convert returns the value v converted to type t.
+// If the usual Go conversion rules do not allow conversion
+// of the value v to type t, or if converting v to type t panics, Convert panics.
+func (v Value) Convert(t Type) Value {
+ if v.flag&flagMethod != 0 {
+ v = makeMethodValue("Convert", v)
+ }
+ op := convertOp(t.common(), v.typ)
+ if op == nil {
+ panic("reflect.Value.Convert: value of type " + v.typ.String() + " cannot be converted to type " + t.String())
+ }
+ return op(v, t)
+}
+
+// CanConvert reports whether the value v can be converted to type t.
+// If v.CanConvert(t) returns true then v.Convert(t) will not panic.
+func (v Value) CanConvert(t Type) bool {
+ vt := v.Type()
+ if !vt.ConvertibleTo(t) {
+ return false
+ }
+ // Converting from slice to array or to pointer-to-array can panic
+ // depending on the value.
+ switch {
+ case vt.Kind() == Slice && t.Kind() == Array:
+ if t.Len() > v.Len() {
+ return false
+ }
+ case vt.Kind() == Slice && t.Kind() == Pointer && t.Elem().Kind() == Array:
+ n := t.Elem().Len()
+ if n > v.Len() {
+ return false
+ }
+ }
+ return true
+}
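+
+ // A sketch of guarding Convert with CanConvert for the slice-to-array case
+ // mentioned above (caller imports "reflect"; values are illustrative):
+ //
+ //	s := reflect.ValueOf([]byte{1, 2, 3})
+ //	t := reflect.TypeOf([4]byte{})
+ //	if s.CanConvert(t) {
+ //		_ = s.Convert(t)
+ //	} // not taken: converting a 3-element slice to [4]byte would panic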
+
+// Comparable reports whether the value v is comparable.
+// If the type of v is an interface, this checks the dynamic type.
+// If this reports true then v.Interface() == x will not panic for any x,
+// nor will v.Equal(u) for any Value u.
+func (v Value) Comparable() bool {
+ k := v.Kind()
+ switch k {
+ case Invalid:
+ return false
+
+ case Array:
+ switch v.Type().Elem().Kind() {
+ case Interface, Array, Struct:
+ for i := 0; i < v.Type().Len(); i++ {
+ if !v.Index(i).Comparable() {
+ return false
+ }
+ }
+ return true
+ }
+ return v.Type().Comparable()
+
+ case Interface:
+ return v.Elem().Comparable()
+
+ case Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if !v.Field(i).Comparable() {
+ return false
+ }
+ }
+ return true
+
+ default:
+ return v.Type().Comparable()
+ }
+}
+
+// Equal reports true if v is equal to u.
+// For two invalid values, Equal will report true.
+// For an interface value, Equal will compare the value within the interface.
+ // Otherwise, if the values have different types, Equal will report false.
+// Otherwise, for arrays and structs Equal will compare each element in order,
+// and report false if it finds non-equal elements.
+// During all comparisons, if values of the same type are compared,
+// and the type is not comparable, Equal will panic.
+func (v Value) Equal(u Value) bool {
+ if v.Kind() == Interface {
+ v = v.Elem()
+ }
+ if u.Kind() == Interface {
+ u = u.Elem()
+ }
+
+ if !v.IsValid() || !u.IsValid() {
+ return v.IsValid() == u.IsValid()
+ }
+
+ if v.Kind() != u.Kind() || v.Type() != u.Type() {
+ return false
+ }
+
+ // Handle each Kind directly rather than calling valueInterface
+ // to avoid allocating.
+ switch v.Kind() {
+ default:
+ panic("reflect.Value.Equal: invalid Kind")
+ case Bool:
+ return v.Bool() == u.Bool()
+ case Int, Int8, Int16, Int32, Int64:
+ return v.Int() == u.Int()
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return v.Uint() == u.Uint()
+ case Float32, Float64:
+ return v.Float() == u.Float()
+ case Complex64, Complex128:
+ return v.Complex() == u.Complex()
+ case String:
+ return v.String() == u.String()
+ case Chan, Pointer, UnsafePointer:
+ return v.Pointer() == u.Pointer()
+ case Array:
+ // u and v have the same type so they have the same length
+ vl := v.Len()
+ if vl == 0 {
+ // panic on [0]func()
+ if !v.Type().Elem().Comparable() {
+ break
+ }
+ return true
+ }
+ for i := 0; i < vl; i++ {
+ if !v.Index(i).Equal(u.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case Struct:
+ // u and v have the same type so they have the same fields
+ nf := v.NumField()
+ for i := 0; i < nf; i++ {
+ if !v.Field(i).Equal(u.Field(i)) {
+ return false
+ }
+ }
+ return true
+ case Func, Map, Slice:
+ break
+ }
+ panic("reflect.Value.Equal: values of type " + v.Type().String() + " are not comparable")
+}
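+
+ // A short sketch contrasting Comparable and Equal, with illustrative values
+ // (caller imports "reflect"):
+ //
+ //	a := reflect.ValueOf([2]int{1, 2})
+ //	b := reflect.ValueOf([2]int{1, 2})
+ //	_ = a.Comparable() // true
+ //	_ = a.Equal(b)     // true
+ //
+ //	f := reflect.ValueOf(func() {})
+ //	_ = f.Comparable() // false; f.Equal(f) would panic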
+
+// convertOp returns the function to convert a value of type src
+// to a value of type dst. If the conversion is illegal, convertOp returns nil.
+func convertOp(dst, src *rtype) func(Value, Type) Value {
+ switch src.Kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtInt
+ case Float32, Float64:
+ return cvtIntFloat
+ case String:
+ return cvtIntString
+ }
+
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtUint
+ case Float32, Float64:
+ return cvtUintFloat
+ case String:
+ return cvtUintString
+ }
+
+ case Float32, Float64:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ return cvtFloatInt
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtFloatUint
+ case Float32, Float64:
+ return cvtFloat
+ }
+
+ case Complex64, Complex128:
+ switch dst.Kind() {
+ case Complex64, Complex128:
+ return cvtComplex
+ }
+
+ case String:
+ if dst.Kind() == Slice && dst.Elem().PkgPath() == "" {
+ switch dst.Elem().Kind() {
+ case Uint8:
+ return cvtStringBytes
+ case Int32:
+ return cvtStringRunes
+ }
+ }
+
+ case Slice:
+ if dst.Kind() == String && src.Elem().PkgPath() == "" {
+ switch src.Elem().Kind() {
+ case Uint8:
+ return cvtBytesString
+ case Int32:
+ return cvtRunesString
+ }
+ }
+ // "x is a slice, T is a pointer-to-array type,
+ // and the slice and array types have identical element types."
+ if dst.Kind() == Pointer && dst.Elem().Kind() == Array && src.Elem() == dst.Elem().Elem() {
+ return cvtSliceArrayPtr
+ }
+ // "x is a slice, T is a array type,
+ // and the slice and array types have identical element types."
+ if dst.Kind() == Array && src.Elem() == dst.Elem() {
+ return cvtSliceArray
+ }
+
+ case Chan:
+ if dst.Kind() == Chan && specialChannelAssignability(dst, src) {
+ return cvtDirect
+ }
+ }
+
+ // dst and src have same underlying type.
+ if haveIdenticalUnderlyingType(dst, src, false) {
+ return cvtDirect
+ }
+
+ // dst and src are non-defined pointer types with same underlying base type.
+ if dst.Kind() == Pointer && dst.Name() == "" &&
+ src.Kind() == Pointer && src.Name() == "" &&
+ haveIdenticalUnderlyingType(dst.Elem().common(), src.Elem().common(), false) {
+ return cvtDirect
+ }
+
+ if implements(dst, src) {
+ if src.Kind() == Interface {
+ return cvtI2I
+ }
+ return cvtT2I
+ }
+
+ return nil
+}
+
+// makeInt returns a Value of type t equal to bits (possibly truncated),
+// where t is a signed or unsigned int type.
+func makeInt(f flag, bits uint64, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ switch typ.size {
+ case 1:
+ *(*uint8)(ptr) = uint8(bits)
+ case 2:
+ *(*uint16)(ptr) = uint16(bits)
+ case 4:
+ *(*uint32)(ptr) = uint32(bits)
+ case 8:
+ *(*uint64)(ptr) = bits
+ }
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+// makeFloat returns a Value of type t equal to v (possibly truncated to float32),
+// where t is a float32 or float64 type.
+func makeFloat(f flag, v float64, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ switch typ.size {
+ case 4:
+ *(*float32)(ptr) = float32(v)
+ case 8:
+ *(*float64)(ptr) = v
+ }
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+// makeFloat32 returns a Value of type t equal to v, where t is a float32 type.
+func makeFloat32(f flag, v float32, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ *(*float32)(ptr) = v
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+// makeComplex returns a Value of type t equal to v (possibly truncated to complex64),
+// where t is a complex64 or complex128 type.
+func makeComplex(f flag, v complex128, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ switch typ.size {
+ case 8:
+ *(*complex64)(ptr) = complex64(v)
+ case 16:
+ *(*complex128)(ptr) = v
+ }
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+func makeString(f flag, v string, t Type) Value {
+ ret := New(t).Elem()
+ ret.SetString(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+func makeBytes(f flag, v []byte, t Type) Value {
+ ret := New(t).Elem()
+ ret.SetBytes(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+func makeRunes(f flag, v []rune, t Type) Value {
+ ret := New(t).Elem()
+ ret.setRunes(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+// These conversion functions are returned by convertOp
+// for classes of conversions. For example, the first function, cvtInt,
+// takes any value v of signed int type and returns the value converted
+// to type t, where t is any signed or unsigned int type.
+
+// convertOp: intXX -> [u]intXX
+func cvtInt(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), uint64(v.Int()), t)
+}
+
+// convertOp: uintXX -> [u]intXX
+func cvtUint(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), v.Uint(), t)
+}
+
+// convertOp: floatXX -> intXX
+func cvtFloatInt(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), uint64(int64(v.Float())), t)
+}
+
+// convertOp: floatXX -> uintXX
+func cvtFloatUint(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), uint64(v.Float()), t)
+}
+
+// convertOp: intXX -> floatXX
+func cvtIntFloat(v Value, t Type) Value {
+ return makeFloat(v.flag.ro(), float64(v.Int()), t)
+}
+
+// convertOp: uintXX -> floatXX
+func cvtUintFloat(v Value, t Type) Value {
+ return makeFloat(v.flag.ro(), float64(v.Uint()), t)
+}
+
+// convertOp: floatXX -> floatXX
+func cvtFloat(v Value, t Type) Value {
+ if v.Type().Kind() == Float32 && t.Kind() == Float32 {
+ // Don't do any conversion if both types have underlying type float32.
+ // This avoids converting to float64 and back, which will
+ // convert a signaling NaN to a quiet NaN. See issue 36400.
+ return makeFloat32(v.flag.ro(), *(*float32)(v.ptr), t)
+ }
+ return makeFloat(v.flag.ro(), v.Float(), t)
+}
+
+// convertOp: complexXX -> complexXX
+func cvtComplex(v Value, t Type) Value {
+ return makeComplex(v.flag.ro(), v.Complex(), t)
+}
+
+// convertOp: intXX -> string
+func cvtIntString(v Value, t Type) Value {
+ s := "\uFFFD"
+ if x := v.Int(); int64(rune(x)) == x {
+ s = string(rune(x))
+ }
+ return makeString(v.flag.ro(), s, t)
+}
+
+// convertOp: uintXX -> string
+func cvtUintString(v Value, t Type) Value {
+ s := "\uFFFD"
+ if x := v.Uint(); uint64(rune(x)) == x {
+ s = string(rune(x))
+ }
+ return makeString(v.flag.ro(), s, t)
+}
+
+// convertOp: []byte -> string
+func cvtBytesString(v Value, t Type) Value {
+ return makeString(v.flag.ro(), string(v.Bytes()), t)
+}
+
+// convertOp: string -> []byte
+func cvtStringBytes(v Value, t Type) Value {
+ return makeBytes(v.flag.ro(), []byte(v.String()), t)
+}
+
+// convertOp: []rune -> string
+func cvtRunesString(v Value, t Type) Value {
+ return makeString(v.flag.ro(), string(v.runes()), t)
+}
+
+// convertOp: string -> []rune
+func cvtStringRunes(v Value, t Type) Value {
+ return makeRunes(v.flag.ro(), []rune(v.String()), t)
+}
+
+// convertOp: []T -> *[N]T
+func cvtSliceArrayPtr(v Value, t Type) Value {
+ n := t.Elem().Len()
+ if n > v.Len() {
+ panic("reflect: cannot convert slice with length " + itoa.Itoa(v.Len()) + " to pointer to array with length " + itoa.Itoa(n))
+ }
+ h := (*unsafeheader.Slice)(v.ptr)
+ return Value{t.common(), h.Data, v.flag&^(flagIndir|flagAddr|flagKindMask) | flag(Pointer)}
+}
+
+// convertOp: []T -> [N]T
+func cvtSliceArray(v Value, t Type) Value {
+ n := t.Len()
+ if n > v.Len() {
+ panic("reflect: cannot convert slice with length " + itoa.Itoa(v.Len()) + " to array with length " + itoa.Itoa(n))
+ }
+ h := (*unsafeheader.Slice)(v.ptr)
+ typ := t.common()
+ ptr := h.Data
+ c := unsafe_New(typ)
+ typedmemmove(typ, c, ptr)
+ ptr = c
+
+ return Value{typ, ptr, v.flag&^(flagAddr|flagKindMask) | flag(Array)}
+}
+
+// convertOp: direct copy
+func cvtDirect(v Value, typ Type) Value {
+ f := v.flag
+ t := typ.common()
+ ptr := v.ptr
+ if f&flagAddr != 0 {
+ // indirect, mutable word - make a copy
+ c := unsafe_New(t)
+ typedmemmove(t, c, ptr)
+ ptr = c
+ f &^= flagAddr
+ }
+ return Value{t, ptr, v.flag.ro() | f} // v.flag.ro()|f == f?
+}
+
+// convertOp: concrete -> interface
+func cvtT2I(v Value, typ Type) Value {
+ target := unsafe_New(typ.common())
+ x := valueInterface(v, false)
+ if typ.NumMethod() == 0 {
+ *(*any)(target) = x
+ } else {
+ ifaceE2I(typ.(*rtype), x, target)
+ }
+ return Value{typ.common(), target, v.flag.ro() | flagIndir | flag(Interface)}
+}
+
+// convertOp: interface -> interface
+func cvtI2I(v Value, typ Type) Value {
+ if v.IsNil() {
+ ret := Zero(typ)
+ ret.flag |= v.flag.ro()
+ return ret
+ }
+ return cvtT2I(v.Elem(), typ)
+}
+
+// implemented in ../runtime
+func chancap(ch unsafe.Pointer) int
+func chanclose(ch unsafe.Pointer)
+func chanlen(ch unsafe.Pointer) int
+
+// Note: some of the noescape annotations below are technically a lie,
+// but safe in the context of this package. Functions like chansend
+// and mapassign don't escape the referent, but may escape anything
+// the referent points to (they do shallow copies of the referent).
+// It is safe in this package because the referent may only point
+// to something a Value may point to, and that is always in the heap
+// (due to the escapes() call in ValueOf).
+
+//go:noescape
+func chanrecv(ch unsafe.Pointer, nb bool, val unsafe.Pointer) (selected, received bool)
+
+//go:noescape
+func chansend(ch unsafe.Pointer, val unsafe.Pointer, nb bool) bool
+
+func makechan(typ *rtype, size int) (ch unsafe.Pointer)
+func makemap(t *rtype, cap int) (m unsafe.Pointer)
+
+//go:noescape
+func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
+
+//go:noescape
+func mapaccess_faststr(t *rtype, m unsafe.Pointer, key string) (val unsafe.Pointer)
+
+//go:noescape
+func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer)
+
+//go:noescape
+func mapassign_faststr(t *rtype, m unsafe.Pointer, key string, val unsafe.Pointer)
+
+//go:noescape
+func mapdelete(t *rtype, m unsafe.Pointer, key unsafe.Pointer)
+
+//go:noescape
+func mapdelete_faststr(t *rtype, m unsafe.Pointer, key string)
+
+//go:noescape
+func mapiterinit(t *rtype, m unsafe.Pointer, it *hiter)
+
+//go:noescape
+func mapiterkey(it *hiter) (key unsafe.Pointer)
+
+//go:noescape
+func mapiterelem(it *hiter) (elem unsafe.Pointer)
+
+//go:noescape
+func mapiternext(it *hiter)
+
+//go:noescape
+func maplen(m unsafe.Pointer) int
+
+// call calls fn with "stackArgsSize" bytes of stack arguments laid out
+// at stackArgs and register arguments laid out in regArgs. frameSize is
+// the total amount of stack space that will be reserved by call, so this
+// should include enough space to spill register arguments to the stack in
+// case of preemption.
+//
+// After fn returns, call copies stackArgsSize-stackRetOffset result bytes
+// back into stackArgs+stackRetOffset before returning, for any return
+// values passed on the stack. Register-based return values will be found
+// in the same regArgs structure.
+//
+// regArgs must also be prepared with an appropriate ReturnIsPtr bitmap
+// indicating which registers will contain pointer-valued return values. The
+// purpose of this bitmap is to keep pointers visible to the GC between
+// returning from reflectcall and actually using them.
+//
+// If copying result bytes back from the stack, the caller must pass the
+// argument frame type as stackArgsType, so that call can execute appropriate
+// write barriers during the copy.
+//
+// Arguments passed through to call do not escape. The type is used only in a
+// very limited callee of call, the stackArgs are copied, and regArgs is only
+// used in the call frame.
+//
+//go:noescape
+//go:linkname call runtime.reflectcall
+func call(stackArgsType *rtype, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+
+func ifaceE2I(t *rtype, src any, dst unsafe.Pointer)
+
+// memmove copies size bytes to dst from src. No write barriers are used.
+//
+//go:noescape
+func memmove(dst, src unsafe.Pointer, size uintptr)
+
+// typedmemmove copies a value of type t to dst from src.
+//
+//go:noescape
+func typedmemmove(t *rtype, dst, src unsafe.Pointer)
+
+// typedmemmovepartial is like typedmemmove but assumes that
+// dst and src point off bytes into the value and only copies size bytes.
+//
+//go:noescape
+func typedmemmovepartial(t *rtype, dst, src unsafe.Pointer, off, size uintptr)
+
+// typedmemclr zeros the value at ptr of type t.
+//
+//go:noescape
+func typedmemclr(t *rtype, ptr unsafe.Pointer)
+
+// typedmemclrpartial is like typedmemclr but assumes that
+// dst points off bytes into the value and only clears size bytes.
+//
+//go:noescape
+func typedmemclrpartial(t *rtype, ptr unsafe.Pointer, off, size uintptr)
+
+// typedslicecopy copies a slice of elemType values from src to dst,
+// returning the number of elements copied.
+//
+//go:noescape
+func typedslicecopy(elemType *rtype, dst, src unsafeheader.Slice) int
+
+//go:noescape
+func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr
+
+func verifyNotInHeapPtr(p uintptr) bool
+
+//go:noescape
+func growslice(t *rtype, old unsafeheader.Slice, num int) unsafeheader.Slice
+
+// Dummy annotation marking that the value x escapes,
+// for use in cases where the reflect code is so clever that
+// the compiler cannot follow.
+func escapes(x any) {
+ if dummy.b {
+ dummy.x = x
+ }
+}
+
+var dummy struct {
+ b bool
+ x any
+}
diff --git a/src/reflect/visiblefields.go b/src/reflect/visiblefields.go
new file mode 100644
index 0000000..9375faa
--- /dev/null
+++ b/src/reflect/visiblefields.go
@@ -0,0 +1,105 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+// VisibleFields returns all the visible fields in t, which must be a
+// struct type. A field is defined as visible if it's accessible
+// directly with a FieldByName call. The returned fields include fields
+// inside anonymous struct members and unexported fields. They follow
+// the same order found in the struct, with anonymous fields followed
+// immediately by their promoted fields.
+//
+// For each element e of the returned slice, the corresponding field
+// can be retrieved from a value v of type t by calling v.FieldByIndex(e.Index).
+func VisibleFields(t Type) []StructField {
+ if t == nil {
+ panic("reflect: VisibleFields(nil)")
+ }
+ if t.Kind() != Struct {
+ panic("reflect.VisibleFields of non-struct type")
+ }
+ w := &visibleFieldsWalker{
+ byName: make(map[string]int),
+ visiting: make(map[Type]bool),
+ fields: make([]StructField, 0, t.NumField()),
+ index: make([]int, 0, 2),
+ }
+ w.walk(t)
+ // Remove all the fields that have been hidden.
+ // Use an in-place removal that avoids copying in
+ // the common case that there are no hidden fields.
+ j := 0
+ for i := range w.fields {
+ f := &w.fields[i]
+ if f.Name == "" {
+ continue
+ }
+ if i != j {
+ // A field has been removed. We need to shuffle
+ // all the subsequent elements up.
+ w.fields[j] = *f
+ }
+ j++
+ }
+ return w.fields[:j]
+}
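+
+ // A usage sketch with hypothetical types (caller imports "reflect"):
+ // promoted fields appear immediately after their anonymous parent.
+ //
+ //	type Inner struct{ X int }
+ //	type Outer struct {
+ //		Inner
+ //		Y string
+ //	}
+ //	for _, f := range reflect.VisibleFields(reflect.TypeOf(Outer{})) {
+ //		_ = f.Index // visits Inner [0], X [0 0], Y [1]
+ //	}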
+
+type visibleFieldsWalker struct {
+ byName map[string]int
+ visiting map[Type]bool
+ fields []StructField
+ index []int
+}
+
+// walk walks all the fields in the struct type t, visiting
+// fields in index preorder and appending them to w.fields
+// (this maintains the required ordering).
+// Fields that have been overridden have their
+// Name field cleared.
+func (w *visibleFieldsWalker) walk(t Type) {
+ if w.visiting[t] {
+ return
+ }
+ w.visiting[t] = true
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ w.index = append(w.index, i)
+ add := true
+ if oldIndex, ok := w.byName[f.Name]; ok {
+ old := &w.fields[oldIndex]
+ if len(w.index) == len(old.Index) {
+ // Fields with the same name at the same depth
+ // cancel one another out. Set the field name
+ // to empty to signify that this has happened, and
+ // there's no need to add this field.
+ old.Name = ""
+ add = false
+ } else if len(w.index) < len(old.Index) {
+ // The old field loses because it's deeper than the new one.
+ old.Name = ""
+ } else {
+ // The old field wins because it's shallower than the new one.
+ add = false
+ }
+ }
+ if add {
+ // Copy the index so that it's not overwritten
+ // by the other appends.
+ f.Index = append([]int(nil), w.index...)
+ w.byName[f.Name] = len(w.fields)
+ w.fields = append(w.fields, f)
+ }
+ if f.Anonymous {
+ if f.Type.Kind() == Pointer {
+ f.Type = f.Type.Elem()
+ }
+ if f.Type.Kind() == Struct {
+ w.walk(f.Type)
+ }
+ }
+ w.index = w.index[:len(w.index)-1]
+ }
+ delete(w.visiting, t)
+}
diff --git a/src/reflect/visiblefields_test.go b/src/reflect/visiblefields_test.go
new file mode 100644
index 0000000..66d545d
--- /dev/null
+++ b/src/reflect/visiblefields_test.go
@@ -0,0 +1,349 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect_test
+
+import (
+ . "reflect"
+ "strings"
+ "testing"
+)
+
+type structField struct {
+ name string
+ index []int
+}
+
+var fieldsTests = []struct {
+ testName string
+ val any
+ expect []structField
+}{{
+ testName: "SimpleStruct",
+ val: struct {
+ A int
+ B string
+ C bool
+ }{},
+ expect: []structField{{
+ name: "A",
+ index: []int{0},
+ }, {
+ name: "B",
+ index: []int{1},
+ }, {
+ name: "C",
+ index: []int{2},
+ }},
+}, {
+ testName: "NonEmbeddedStructMember",
+ val: struct {
+ A struct {
+ X int
+ }
+ }{},
+ expect: []structField{{
+ name: "A",
+ index: []int{0},
+ }},
+}, {
+ testName: "EmbeddedExportedStruct",
+ val: struct {
+ SFG
+ }{},
+ expect: []structField{{
+ name: "SFG",
+ index: []int{0},
+ }, {
+ name: "F",
+ index: []int{0, 0},
+ }, {
+ name: "G",
+ index: []int{0, 1},
+ }},
+}, {
+ testName: "EmbeddedUnexportedStruct",
+ val: struct {
+ sFG
+ }{},
+ expect: []structField{{
+ name: "sFG",
+ index: []int{0},
+ }, {
+ name: "F",
+ index: []int{0, 0},
+ }, {
+ name: "G",
+ index: []int{0, 1},
+ }},
+}, {
+ testName: "TwoEmbeddedStructsWithCancelingMembers",
+ val: struct {
+ SFG
+ SF
+ }{},
+ expect: []structField{{
+ name: "SFG",
+ index: []int{0},
+ }, {
+ name: "G",
+ index: []int{0, 1},
+ }, {
+ name: "SF",
+ index: []int{1},
+ }},
+}, {
+ testName: "EmbeddedStructsWithSameFieldsAtDifferentDepths",
+ val: struct {
+ SFGH3
+ SG1
+ SFG2
+ SF2
+ L int
+ }{},
+ expect: []structField{{
+ name: "SFGH3",
+ index: []int{0},
+ }, {
+ name: "SFGH2",
+ index: []int{0, 0},
+ }, {
+ name: "SFGH1",
+ index: []int{0, 0, 0},
+ }, {
+ name: "SFGH",
+ index: []int{0, 0, 0, 0},
+ }, {
+ name: "H",
+ index: []int{0, 0, 0, 0, 2},
+ }, {
+ name: "SG1",
+ index: []int{1},
+ }, {
+ name: "SG",
+ index: []int{1, 0},
+ }, {
+ name: "G",
+ index: []int{1, 0, 0},
+ }, {
+ name: "SFG2",
+ index: []int{2},
+ }, {
+ name: "SFG1",
+ index: []int{2, 0},
+ }, {
+ name: "SFG",
+ index: []int{2, 0, 0},
+ }, {
+ name: "SF2",
+ index: []int{3},
+ }, {
+ name: "SF1",
+ index: []int{3, 0},
+ }, {
+ name: "SF",
+ index: []int{3, 0, 0},
+ }, {
+ name: "L",
+ index: []int{4},
+ }},
+}, {
+ testName: "EmbeddedPointerStruct",
+ val: struct {
+ *SF
+ }{},
+ expect: []structField{{
+ name: "SF",
+ index: []int{0},
+ }, {
+ name: "F",
+ index: []int{0, 0},
+ }},
+}, {
+ testName: "EmbeddedNotAPointer",
+ val: struct {
+ M
+ }{},
+ expect: []structField{{
+ name: "M",
+ index: []int{0},
+ }},
+}, {
+ testName: "RecursiveEmbedding",
+ val: Rec1{},
+ expect: []structField{{
+ name: "Rec2",
+ index: []int{0},
+ }, {
+ name: "F",
+ index: []int{0, 0},
+ }, {
+ name: "Rec1",
+ index: []int{0, 1},
+ }},
+}, {
+ testName: "RecursiveEmbedding2",
+ val: Rec2{},
+ expect: []structField{{
+ name: "F",
+ index: []int{0},
+ }, {
+ name: "Rec1",
+ index: []int{1},
+ }, {
+ name: "Rec2",
+ index: []int{1, 0},
+ }},
+}, {
+ testName: "RecursiveEmbedding3",
+ val: RS3{},
+ expect: []structField{{
+ name: "RS2",
+ index: []int{0},
+ }, {
+ name: "RS1",
+ index: []int{1},
+ }, {
+ name: "i",
+ index: []int{1, 0},
+ }},
+}}
+
+type SFG struct {
+ F int
+ G int
+}
+
+type SFG1 struct {
+ SFG
+}
+
+type SFG2 struct {
+ SFG1
+}
+
+type SFGH struct {
+ F int
+ G int
+ H int
+}
+
+type SFGH1 struct {
+ SFGH
+}
+
+type SFGH2 struct {
+ SFGH1
+}
+
+type SFGH3 struct {
+ SFGH2
+}
+
+type SF struct {
+ F int
+}
+
+type SF1 struct {
+ SF
+}
+
+type SF2 struct {
+ SF1
+}
+
+type SG struct {
+ G int
+}
+
+type SG1 struct {
+ SG
+}
+
+type sFG struct {
+ F int
+ G int
+}
+
+type RS1 struct {
+ i int
+}
+
+type RS2 struct {
+ RS1
+}
+
+type RS3 struct {
+ RS2
+ RS1
+}
+
+type M map[string]any
+
+type Rec1 struct {
+ *Rec2
+}
+
+type Rec2 struct {
+ F string
+ *Rec1
+}
+
+func TestFields(t *testing.T) {
+ for _, test := range fieldsTests {
+ test := test
+ t.Run(test.testName, func(t *testing.T) {
+ typ := TypeOf(test.val)
+ fields := VisibleFields(typ)
+ if got, want := len(fields), len(test.expect); got != want {
+ t.Fatalf("unexpected field count; got %d want %d", got, want)
+ }
+
+ for j, field := range fields {
+ expect := test.expect[j]
+ t.Logf("field %d: %s", j, expect.name)
+ gotField := typ.FieldByIndex(field.Index)
+ // Unfortunately, FieldByIndex does not return
+ // a field with the same index that we passed in,
+ // so we set it to the expected value so that
+ // it can be compared later with the result of FieldByName.
+ gotField.Index = field.Index
+ expectField := typ.FieldByIndex(expect.index)
+ // ditto.
+ expectField.Index = expect.index
+ if !DeepEqual(gotField, expectField) {
+ t.Fatalf("unexpected field result\ngot %#v\nwant %#v", gotField, expectField)
+ }
+
+ // Sanity check that we can actually access the field by the
+ // expected name.
+ gotField1, ok := typ.FieldByName(expect.name)
+ if !ok {
+ t.Fatalf("field %q not accessible by name", expect.name)
+ }
+ if !DeepEqual(gotField1, expectField) {
+ t.Fatalf("unexpected FieldByName result; got %#v want %#v", gotField1, expectField)
+ }
+ }
+ })
+ }
+}
+
+// Must not panic with nil embedded pointer.
+func TestFieldByIndexErr(t *testing.T) {
+ type A struct {
+ S string
+ }
+ type B struct {
+ *A
+ }
+ v := ValueOf(B{})
+ _, err := v.FieldByIndexErr([]int{0, 0})
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if !strings.Contains(err.Error(), "embedded struct field A") {
+ t.Fatal(err)
+ }
+}