author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 13:14:23 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 13:14:23 +0000
commit    73df946d56c74384511a194dd01dbe099584fd1a (patch)
tree      fd0bcea490dd81327ddfbb31e215439672c9a068 /src/reflect
parent    Initial commit. (diff)
download  golang-1.16-73df946d56c74384511a194dd01dbe099584fd1a.tar.xz
          golang-1.16-73df946d56c74384511a194dd01dbe099584fd1a.zip

Adding upstream version 1.16.10. (upstream/1.16.10, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/reflect')
-rw-r--r--  src/reflect/all_test.go       7181
-rw-r--r--  src/reflect/asm_386.s           36
-rw-r--r--  src/reflect/asm_amd64.s         40
-rw-r--r--  src/reflect/asm_arm.s           38
-rw-r--r--  src/reflect/asm_arm64.s         36
-rw-r--r--  src/reflect/asm_mips64x.s       40
-rw-r--r--  src/reflect/asm_mipsx.s         40
-rw-r--r--  src/reflect/asm_ppc64x.s        39
-rw-r--r--  src/reflect/asm_riscv64.s       36
-rw-r--r--  src/reflect/asm_s390x.s         36
-rw-r--r--  src/reflect/asm_wasm.s          50
-rw-r--r--  src/reflect/deepequal.go       219
-rw-r--r--  src/reflect/example_test.go    168
-rw-r--r--  src/reflect/export_test.go     122
-rw-r--r--  src/reflect/makefunc.go        138
-rw-r--r--  src/reflect/set_test.go        226
-rw-r--r--  src/reflect/swapper.go          77
-rw-r--r--  src/reflect/tostring_test.go    95
-rw-r--r--  src/reflect/type.go           3132
-rw-r--r--  src/reflect/value.go          2858
20 files changed, 14607 insertions, 0 deletions
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
new file mode 100644
index 0000000..1225d61
--- /dev/null
+++ b/src/reflect/all_test.go
@@ -0,0 +1,7181 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect_test
+
+import (
+ "bytes"
+ "encoding/base64"
+ "flag"
+ "fmt"
+ "go/token"
+ "io"
+ "math"
+ "math/rand"
+ "os"
+ . "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+ "unsafe"
+)
+
+var sink interface{}
+
+func TestBool(t *testing.T) {
+ v := ValueOf(true)
+ if v.Bool() != true {
+ t.Fatal("ValueOf(true).Bool() = false")
+ }
+}
+
+type integer int
+type T struct {
+ a int
+ b float64
+ c string
+ d *int
+}
+
+type pair struct {
+ i interface{}
+ s string
+}
+
+func assert(t *testing.T, s, want string) {
+ if s != want {
+ t.Errorf("have %#q want %#q", s, want)
+ }
+}
+
+var typeTests = []pair{
+ {struct{ x int }{}, "int"},
+ {struct{ x int8 }{}, "int8"},
+ {struct{ x int16 }{}, "int16"},
+ {struct{ x int32 }{}, "int32"},
+ {struct{ x int64 }{}, "int64"},
+ {struct{ x uint }{}, "uint"},
+ {struct{ x uint8 }{}, "uint8"},
+ {struct{ x uint16 }{}, "uint16"},
+ {struct{ x uint32 }{}, "uint32"},
+ {struct{ x uint64 }{}, "uint64"},
+ {struct{ x float32 }{}, "float32"},
+ {struct{ x float64 }{}, "float64"},
+ {struct{ x int8 }{}, "int8"},
+ {struct{ x (**int8) }{}, "**int8"},
+ {struct{ x (**integer) }{}, "**reflect_test.integer"},
+ {struct{ x ([32]int32) }{}, "[32]int32"},
+ {struct{ x ([]int8) }{}, "[]int8"},
+ {struct{ x (map[string]int32) }{}, "map[string]int32"},
+ {struct{ x (chan<- string) }{}, "chan<- string"},
+ {struct{ x (chan<- chan string) }{}, "chan<- chan string"},
+ {struct{ x (chan<- <-chan string) }{}, "chan<- <-chan string"},
+ {struct{ x (<-chan <-chan string) }{}, "<-chan <-chan string"},
+ {struct{ x (chan (<-chan string)) }{}, "chan (<-chan string)"},
+ {struct {
+ x struct {
+ c chan *int32
+ d float32
+ }
+ }{},
+ "struct { c chan *int32; d float32 }",
+ },
+ {struct{ x (func(a int8, b int32)) }{}, "func(int8, int32)"},
+ {struct {
+ x struct {
+ c func(chan *integer, *int8)
+ }
+ }{},
+ "struct { c func(chan *reflect_test.integer, *int8) }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int32
+ }
+ }{},
+ "struct { a int8; b int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int32
+ }
+ }{},
+ "struct { a int8; b int8; c int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int8
+ d int32
+ }
+ }{},
+ "struct { a int8; b int8; c int8; d int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int8
+ d int8
+ e int32
+ }
+ }{},
+ "struct { a int8; b int8; c int8; d int8; e int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int8
+ d int8
+ e int8
+ f int32
+ }
+ }{},
+ "struct { a int8; b int8; c int8; d int8; e int8; f int32 }",
+ },
+ {struct {
+ x struct {
+ a int8 `reflect:"hi there"`
+ }
+ }{},
+ `struct { a int8 "reflect:\"hi there\"" }`,
+ },
+ {struct {
+ x struct {
+ a int8 `reflect:"hi \x00there\t\n\"\\"`
+ }
+ }{},
+ `struct { a int8 "reflect:\"hi \\x00there\\t\\n\\\"\\\\\"" }`,
+ },
+ {struct {
+ x struct {
+ f func(args ...int)
+ }
+ }{},
+ "struct { f func(...int) }",
+ },
+ {struct {
+ x (interface {
+ a(func(func(int) int) func(func(int)) int)
+ b()
+ })
+ }{},
+ "interface { reflect_test.a(func(func(int) int) func(func(int)) int); reflect_test.b() }",
+ },
+ {struct {
+ x struct {
+ int32
+ int64
+ }
+ }{},
+ "struct { int32; int64 }",
+ },
+}
+
+var valueTests = []pair{
+ {new(int), "132"},
+ {new(int8), "8"},
+ {new(int16), "16"},
+ {new(int32), "32"},
+ {new(int64), "64"},
+ {new(uint), "132"},
+ {new(uint8), "8"},
+ {new(uint16), "16"},
+ {new(uint32), "32"},
+ {new(uint64), "64"},
+ {new(float32), "256.25"},
+ {new(float64), "512.125"},
+ {new(complex64), "532.125+10i"},
+ {new(complex128), "564.25+1i"},
+ {new(string), "stringy cheese"},
+ {new(bool), "true"},
+ {new(*int8), "*int8(0)"},
+ {new(**int8), "**int8(0)"},
+ {new([5]int32), "[5]int32{0, 0, 0, 0, 0}"},
+ {new(**integer), "**reflect_test.integer(0)"},
+ {new(map[string]int32), "map[string]int32{<can't iterate on maps>}"},
+ {new(chan<- string), "chan<- string"},
+ {new(func(a int8, b int32)), "func(int8, int32)(0)"},
+ {new(struct {
+ c chan *int32
+ d float32
+ }),
+ "struct { c chan *int32; d float32 }{chan *int32, 0}",
+ },
+ {new(struct{ c func(chan *integer, *int8) }),
+ "struct { c func(chan *reflect_test.integer, *int8) }{func(chan *reflect_test.integer, *int8)(0)}",
+ },
+ {new(struct {
+ a int8
+ b int32
+ }),
+ "struct { a int8; b int32 }{0, 0}",
+ },
+ {new(struct {
+ a int8
+ b int8
+ c int32
+ }),
+ "struct { a int8; b int8; c int32 }{0, 0, 0}",
+ },
+}
+
+func testType(t *testing.T, i int, typ Type, want string) {
+ s := typ.String()
+ if s != want {
+ t.Errorf("#%d: have %#q, want %#q", i, s, want)
+ }
+}
+
+func TestTypes(t *testing.T) {
+ for i, tt := range typeTests {
+ testType(t, i, ValueOf(tt.i).Field(0).Type(), tt.s)
+ }
+}
+
+func TestSet(t *testing.T) {
+ for i, tt := range valueTests {
+ v := ValueOf(tt.i)
+ v = v.Elem()
+ switch v.Kind() {
+ case Int:
+ v.SetInt(132)
+ case Int8:
+ v.SetInt(8)
+ case Int16:
+ v.SetInt(16)
+ case Int32:
+ v.SetInt(32)
+ case Int64:
+ v.SetInt(64)
+ case Uint:
+ v.SetUint(132)
+ case Uint8:
+ v.SetUint(8)
+ case Uint16:
+ v.SetUint(16)
+ case Uint32:
+ v.SetUint(32)
+ case Uint64:
+ v.SetUint(64)
+ case Float32:
+ v.SetFloat(256.25)
+ case Float64:
+ v.SetFloat(512.125)
+ case Complex64:
+ v.SetComplex(532.125 + 10i)
+ case Complex128:
+ v.SetComplex(564.25 + 1i)
+ case String:
+ v.SetString("stringy cheese")
+ case Bool:
+ v.SetBool(true)
+ }
+ s := valueToString(v)
+ if s != tt.s {
+ t.Errorf("#%d: have %#q, want %#q", i, s, tt.s)
+ }
+ }
+}
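+
+// A minimal sketch (illustrative, not part of the test file) of the
+// settability rule the loop above relies on: SetInt and friends require an
+// addressable Value, which is why each case goes through ValueOf(ptr).Elem():
+//
+// x := 7
+// ValueOf(x).CanSet()     // false: the Value holds a copy of x
+// v := ValueOf(&x).Elem() // addressable: v refers to x itself
+// v.SetInt(8)             // x is now 8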
+
+func TestSetValue(t *testing.T) {
+ for i, tt := range valueTests {
+ v := ValueOf(tt.i).Elem()
+ switch v.Kind() {
+ case Int:
+ v.Set(ValueOf(int(132)))
+ case Int8:
+ v.Set(ValueOf(int8(8)))
+ case Int16:
+ v.Set(ValueOf(int16(16)))
+ case Int32:
+ v.Set(ValueOf(int32(32)))
+ case Int64:
+ v.Set(ValueOf(int64(64)))
+ case Uint:
+ v.Set(ValueOf(uint(132)))
+ case Uint8:
+ v.Set(ValueOf(uint8(8)))
+ case Uint16:
+ v.Set(ValueOf(uint16(16)))
+ case Uint32:
+ v.Set(ValueOf(uint32(32)))
+ case Uint64:
+ v.Set(ValueOf(uint64(64)))
+ case Float32:
+ v.Set(ValueOf(float32(256.25)))
+ case Float64:
+ v.Set(ValueOf(512.125))
+ case Complex64:
+ v.Set(ValueOf(complex64(532.125 + 10i)))
+ case Complex128:
+ v.Set(ValueOf(complex128(564.25 + 1i)))
+ case String:
+ v.Set(ValueOf("stringy cheese"))
+ case Bool:
+ v.Set(ValueOf(true))
+ }
+ s := valueToString(v)
+ if s != tt.s {
+ t.Errorf("#%d: have %#q, want %#q", i, s, tt.s)
+ }
+ }
+}
+
+func TestCanSetField(t *testing.T) {
+ type embed struct{ x, X int }
+ type Embed struct{ x, X int }
+ type S1 struct {
+ embed
+ x, X int
+ }
+ type S2 struct {
+ *embed
+ x, X int
+ }
+ type S3 struct {
+ Embed
+ x, X int
+ }
+ type S4 struct {
+ *Embed
+ x, X int
+ }
+
+ type testCase struct {
+ // -1 means Addr().Elem() of current value
+ index []int
+ canSet bool
+ }
+ tests := []struct {
+ val Value
+ cases []testCase
+ }{{
+ val: ValueOf(&S1{}),
+ cases: []testCase{
+ {[]int{0}, false},
+ {[]int{0, -1}, false},
+ {[]int{0, 0}, false},
+ {[]int{0, 0, -1}, false},
+ {[]int{0, -1, 0}, false},
+ {[]int{0, -1, 0, -1}, false},
+ {[]int{0, 1}, true},
+ {[]int{0, 1, -1}, true},
+ {[]int{0, -1, 1}, true},
+ {[]int{0, -1, 1, -1}, true},
+ {[]int{1}, false},
+ {[]int{1, -1}, false},
+ {[]int{2}, true},
+ {[]int{2, -1}, true},
+ },
+ }, {
+ val: ValueOf(&S2{embed: &embed{}}),
+ cases: []testCase{
+ {[]int{0}, false},
+ {[]int{0, -1}, false},
+ {[]int{0, 0}, false},
+ {[]int{0, 0, -1}, false},
+ {[]int{0, -1, 0}, false},
+ {[]int{0, -1, 0, -1}, false},
+ {[]int{0, 1}, true},
+ {[]int{0, 1, -1}, true},
+ {[]int{0, -1, 1}, true},
+ {[]int{0, -1, 1, -1}, true},
+ {[]int{1}, false},
+ {[]int{2}, true},
+ },
+ }, {
+ val: ValueOf(&S3{}),
+ cases: []testCase{
+ {[]int{0}, true},
+ {[]int{0, -1}, true},
+ {[]int{0, 0}, false},
+ {[]int{0, 0, -1}, false},
+ {[]int{0, -1, 0}, false},
+ {[]int{0, -1, 0, -1}, false},
+ {[]int{0, 1}, true},
+ {[]int{0, 1, -1}, true},
+ {[]int{0, -1, 1}, true},
+ {[]int{0, -1, 1, -1}, true},
+ {[]int{1}, false},
+ {[]int{2}, true},
+ },
+ }, {
+ val: ValueOf(&S4{Embed: &Embed{}}),
+ cases: []testCase{
+ {[]int{0}, true},
+ {[]int{0, -1}, true},
+ {[]int{0, 0}, false},
+ {[]int{0, 0, -1}, false},
+ {[]int{0, -1, 0}, false},
+ {[]int{0, -1, 0, -1}, false},
+ {[]int{0, 1}, true},
+ {[]int{0, 1, -1}, true},
+ {[]int{0, -1, 1}, true},
+ {[]int{0, -1, 1, -1}, true},
+ {[]int{1}, false},
+ {[]int{2}, true},
+ },
+ }}
+
+ for _, tt := range tests {
+ t.Run(tt.val.Type().Name(), func(t *testing.T) {
+ for _, tc := range tt.cases {
+ f := tt.val
+ for _, i := range tc.index {
+ if f.Kind() == Ptr {
+ f = f.Elem()
+ }
+ if i == -1 {
+ f = f.Addr().Elem()
+ } else {
+ f = f.Field(i)
+ }
+ }
+ if got := f.CanSet(); got != tc.canSet {
+ t.Errorf("CanSet() = %v, want %v", got, tc.canSet)
+ }
+ }
+ })
+ }
+}
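+
+// An illustrative reading of the tables above (a sketch, not upstream
+// commentary): an index path is settable only when it crosses no unexported
+// non-embedded field, and exported fields promoted through an embedded
+// unexported struct remain settable:
+//
+// s := &S1{}
+// ValueOf(s).Elem().Field(0).CanSet()          // false: embedded "embed" is unexported
+// ValueOf(s).Elem().Field(0).Field(1).CanSet() // true: its promoted exported X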
+
+var _i = 7
+
+var valueToStringTests = []pair{
+ {123, "123"},
+ {123.5, "123.5"},
+ {byte(123), "123"},
+ {"abc", "abc"},
+ {T{123, 456.75, "hello", &_i}, "reflect_test.T{123, 456.75, hello, *int(&7)}"},
+ {new(chan *T), "*chan *reflect_test.T(&chan *reflect_test.T)"},
+ {[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}"},
+ {&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "*[10]int(&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})"},
+ {[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}"},
+ {&[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "*[]int(&[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})"},
+}
+
+func TestValueToString(t *testing.T) {
+ for i, test := range valueToStringTests {
+ s := valueToString(ValueOf(test.i))
+ if s != test.s {
+ t.Errorf("#%d: have %#q, want %#q", i, s, test.s)
+ }
+ }
+}
+
+func TestArrayElemSet(t *testing.T) {
+ v := ValueOf(&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}).Elem()
+ v.Index(4).SetInt(123)
+ s := valueToString(v)
+ const want = "[10]int{1, 2, 3, 4, 123, 6, 7, 8, 9, 10}"
+ if s != want {
+ t.Errorf("[10]int: have %#q want %#q", s, want)
+ }
+
+ v = ValueOf([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
+ v.Index(4).SetInt(123)
+ s = valueToString(v)
+ const want1 = "[]int{1, 2, 3, 4, 123, 6, 7, 8, 9, 10}"
+ if s != want1 {
+ t.Errorf("[]int: have %#q want %#q", s, want1)
+ }
+}
+
+func TestPtrPointTo(t *testing.T) {
+ var ip *int32
+ var i int32 = 1234
+ vip := ValueOf(&ip)
+ vi := ValueOf(&i).Elem()
+ vip.Elem().Set(vi.Addr())
+ if *ip != 1234 {
+ t.Errorf("got %d, want 1234", *ip)
+ }
+
+ ip = nil
+ vp := ValueOf(&ip).Elem()
+ vp.Set(Zero(vp.Type()))
+ if ip != nil {
+ t.Errorf("got non-nil (%p), want nil", ip)
+ }
+}
+
+func TestPtrSetNil(t *testing.T) {
+ var i int32 = 1234
+ ip := &i
+ vip := ValueOf(&ip)
+ vip.Elem().Set(Zero(vip.Elem().Type()))
+ if ip != nil {
+ t.Errorf("got non-nil (%d), want nil", *ip)
+ }
+}
+
+func TestMapSetNil(t *testing.T) {
+ m := make(map[string]int)
+ vm := ValueOf(&m)
+ vm.Elem().Set(Zero(vm.Elem().Type()))
+ if m != nil {
+ t.Errorf("got non-nil (%p), want nil", m)
+ }
+}
+
+func TestAll(t *testing.T) {
+ testType(t, 1, TypeOf((int8)(0)), "int8")
+ testType(t, 2, TypeOf((*int8)(nil)).Elem(), "int8")
+
+ typ := TypeOf((*struct {
+ c chan *int32
+ d float32
+ })(nil))
+ testType(t, 3, typ, "*struct { c chan *int32; d float32 }")
+ etyp := typ.Elem()
+ testType(t, 4, etyp, "struct { c chan *int32; d float32 }")
+ styp := etyp
+ f := styp.Field(0)
+ testType(t, 5, f.Type, "chan *int32")
+
+ f, present := styp.FieldByName("d")
+ if !present {
+ t.Errorf("FieldByName says present field is absent")
+ }
+ testType(t, 6, f.Type, "float32")
+
+ f, present = styp.FieldByName("absent")
+ if present {
+ t.Errorf("FieldByName says absent field is present")
+ }
+
+ typ = TypeOf([32]int32{})
+ testType(t, 7, typ, "[32]int32")
+ testType(t, 8, typ.Elem(), "int32")
+
+ typ = TypeOf((map[string]*int32)(nil))
+ testType(t, 9, typ, "map[string]*int32")
+ mtyp := typ
+ testType(t, 10, mtyp.Key(), "string")
+ testType(t, 11, mtyp.Elem(), "*int32")
+
+ typ = TypeOf((chan<- string)(nil))
+ testType(t, 12, typ, "chan<- string")
+ testType(t, 13, typ.Elem(), "string")
+
+ // make sure tag strings are not part of element type
+ typ = TypeOf(struct {
+ d []uint32 `reflect:"TAG"`
+ }{}).Field(0).Type
+ testType(t, 14, typ, "[]uint32")
+}
+
+func TestInterfaceGet(t *testing.T) {
+ var inter struct {
+ E interface{}
+ }
+ inter.E = 123.456
+ v1 := ValueOf(&inter)
+ v2 := v1.Elem().Field(0)
+ assert(t, v2.Type().String(), "interface {}")
+ i2 := v2.Interface()
+ v3 := ValueOf(i2)
+ assert(t, v3.Type().String(), "float64")
+}
+
+func TestInterfaceValue(t *testing.T) {
+ var inter struct {
+ E interface{}
+ }
+ inter.E = 123.456
+ v1 := ValueOf(&inter)
+ v2 := v1.Elem().Field(0)
+ assert(t, v2.Type().String(), "interface {}")
+ v3 := v2.Elem()
+ assert(t, v3.Type().String(), "float64")
+
+ i3 := v2.Interface()
+ if _, ok := i3.(float64); !ok {
+ t.Error("v2.Interface() did not return float64, got ", TypeOf(i3))
+ }
+}
+
+func TestFunctionValue(t *testing.T) {
+ var x interface{} = func() {}
+ v := ValueOf(x)
+ if fmt.Sprint(v.Interface()) != fmt.Sprint(x) {
+ t.Fatalf("TestFunction returned wrong pointer")
+ }
+ assert(t, v.Type().String(), "func()")
+}
+
+var appendTests = []struct {
+ orig, extra []int
+}{
+ {make([]int, 2, 4), []int{22}},
+ {make([]int, 2, 4), []int{22, 33, 44}},
+}
+
+func sameInts(x, y []int) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ for i, xx := range x {
+ if xx != y[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func TestAppend(t *testing.T) {
+ for i, test := range appendTests {
+ origLen, extraLen := len(test.orig), len(test.extra)
+ want := append(test.orig, test.extra...)
+ // Convert extra from []int to []Value.
+ e0 := make([]Value, len(test.extra))
+ for j, e := range test.extra {
+ e0[j] = ValueOf(e)
+ }
+ // Convert extra from []int to *SliceValue.
+ e1 := ValueOf(test.extra)
+ // Test Append.
+ a0 := ValueOf(test.orig)
+ have0 := Append(a0, e0...).Interface().([]int)
+ if !sameInts(have0, want) {
+ t.Errorf("Append #%d: have %v, want %v (%p %p)", i, have0, want, test.orig, have0)
+ }
+ // Check that the orig and extra slices were not modified.
+ if len(test.orig) != origLen {
+ t.Errorf("Append #%d origLen: have %v, want %v", i, len(test.orig), origLen)
+ }
+ if len(test.extra) != extraLen {
+ t.Errorf("Append #%d extraLen: have %v, want %v", i, len(test.extra), extraLen)
+ }
+ // Test AppendSlice.
+ a1 := ValueOf(test.orig)
+ have1 := AppendSlice(a1, e1).Interface().([]int)
+ if !sameInts(have1, want) {
+ t.Errorf("AppendSlice #%d: have %v, want %v", i, have1, want)
+ }
+ // Check that the orig and extra slices were not modified.
+ if len(test.orig) != origLen {
+ t.Errorf("AppendSlice #%d origLen: have %v, want %v", i, len(test.orig), origLen)
+ }
+ if len(test.extra) != extraLen {
+ t.Errorf("AppendSlice #%d extraLen: have %v, want %v", i, len(test.extra), extraLen)
+ }
+ }
+}
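+
+// The two calls exercised above mirror the built-in append (a sketch,
+// assuming a []int named s and extra):
+//
+// Append(ValueOf(s), ValueOf(22))         // like append(s, 22): one Value per element
+// AppendSlice(ValueOf(s), ValueOf(extra)) // like append(s, extra...): a whole slice at once
+//
+// Both return the result as a new Value and, like append, leave the lengths
+// of their arguments unchanged, which is what the origLen/extraLen checks verify.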
+
+func TestCopy(t *testing.T) {
+ a := []int{1, 2, 3, 4, 10, 9, 8, 7}
+ b := []int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44}
+ c := []int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44}
+ for i := 0; i < len(b); i++ {
+ if b[i] != c[i] {
+ t.Fatalf("b != c before test")
+ }
+ }
+ a1 := a
+ b1 := b
+ aa := ValueOf(&a1).Elem()
+ ab := ValueOf(&b1).Elem()
+ for tocopy := 1; tocopy <= 7; tocopy++ {
+ aa.SetLen(tocopy)
+ Copy(ab, aa)
+ aa.SetLen(8)
+ for i := 0; i < tocopy; i++ {
+ if a[i] != b[i] {
+ t.Errorf("(i) tocopy=%d a[%d]=%d, b[%d]=%d",
+ tocopy, i, a[i], i, b[i])
+ }
+ }
+ for i := tocopy; i < len(b); i++ {
+ if b[i] != c[i] {
+ if i < len(a) {
+ t.Errorf("(ii) tocopy=%d a[%d]=%d, b[%d]=%d, c[%d]=%d",
+ tocopy, i, a[i], i, b[i], i, c[i])
+ } else {
+ t.Errorf("(iii) tocopy=%d b[%d]=%d, c[%d]=%d",
+ tocopy, i, b[i], i, c[i])
+ }
+ } else {
+ t.Logf("tocopy=%d elem %d is okay\n", tocopy, i)
+ }
+ }
+ }
+}
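+
+// Copy follows the built-in copy (an illustrative note): it copies
+// min(dst.Len(), src.Len()) elements and returns that count, so shrinking aa
+// with SetLen(tocopy) above bounds how much of b is overwritten:
+//
+// n := Copy(ab, aa) // n == min(ab.Len(), aa.Len())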
+
+func TestCopyString(t *testing.T) {
+ t.Run("Slice", func(t *testing.T) {
+ s := bytes.Repeat([]byte{'_'}, 8)
+ val := ValueOf(s)
+
+ n := Copy(val, ValueOf(""))
+ if expecting := []byte("________"); n != 0 || !bytes.Equal(s, expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 0, s = %s", n, s, expecting)
+ }
+
+ n = Copy(val, ValueOf("hello"))
+ if expecting := []byte("hello___"); n != 5 || !bytes.Equal(s, expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 5, s = %s", n, s, expecting)
+ }
+
+ n = Copy(val, ValueOf("helloworld"))
+ if expecting := []byte("hellowor"); n != 8 || !bytes.Equal(s, expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 8, s = %s", n, s, expecting)
+ }
+ })
+ t.Run("Array", func(t *testing.T) {
+ s := [...]byte{'_', '_', '_', '_', '_', '_', '_', '_'}
+ val := ValueOf(&s).Elem()
+
+ n := Copy(val, ValueOf(""))
+ if expecting := []byte("________"); n != 0 || !bytes.Equal(s[:], expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 0, s = %s", n, s[:], expecting)
+ }
+
+ n = Copy(val, ValueOf("hello"))
+ if expecting := []byte("hello___"); n != 5 || !bytes.Equal(s[:], expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 5, s = %s", n, s[:], expecting)
+ }
+
+ n = Copy(val, ValueOf("helloworld"))
+ if expecting := []byte("hellowor"); n != 8 || !bytes.Equal(s[:], expecting) {
+ t.Errorf("got n = %d, s = %s, expecting n = 8, s = %s", n, s[:], expecting)
+ }
+ })
+}
+
+func TestCopyArray(t *testing.T) {
+ a := [8]int{1, 2, 3, 4, 10, 9, 8, 7}
+ b := [11]int{11, 22, 33, 44, 1010, 99, 88, 77, 66, 55, 44}
+ c := b
+ aa := ValueOf(&a).Elem()
+ ab := ValueOf(&b).Elem()
+ Copy(ab, aa)
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ t.Errorf("(i) a[%d]=%d, b[%d]=%d", i, a[i], i, b[i])
+ }
+ }
+ for i := len(a); i < len(b); i++ {
+ if b[i] != c[i] {
+ t.Errorf("(ii) b[%d]=%d, c[%d]=%d", i, b[i], i, c[i])
+ } else {
+ t.Logf("elem %d is okay\n", i)
+ }
+ }
+}
+
+func TestBigUnnamedStruct(t *testing.T) {
+ b := struct{ a, b, c, d int64 }{1, 2, 3, 4}
+ v := ValueOf(b)
+ b1 := v.Interface().(struct {
+ a, b, c, d int64
+ })
+ if b1.a != b.a || b1.b != b.b || b1.c != b.c || b1.d != b.d {
+ t.Errorf("ValueOf(%v).Interface().(*Big) = %v", b, b1)
+ }
+}
+
+type big struct {
+ a, b, c, d, e int64
+}
+
+func TestBigStruct(t *testing.T) {
+ b := big{1, 2, 3, 4, 5}
+ v := ValueOf(b)
+ b1 := v.Interface().(big)
+ if b1.a != b.a || b1.b != b.b || b1.c != b.c || b1.d != b.d || b1.e != b.e {
+ t.Errorf("ValueOf(%v).Interface().(big) = %v", b, b1)
+ }
+}
+
+type Basic struct {
+ x int
+ y float32
+}
+
+type NotBasic Basic
+
+type DeepEqualTest struct {
+ a, b interface{}
+ eq bool
+}
+
+// Simple functions for DeepEqual tests.
+var (
+ fn1 func() // nil.
+ fn2 func() // nil.
+ fn3 = func() { fn1() } // Not nil.
+)
+
+type self struct{}
+
+type Loop *Loop
+type Loopy interface{}
+
+var loop1, loop2 Loop
+var loopy1, loopy2 Loopy
+var cycleMap1, cycleMap2, cycleMap3 map[string]interface{}
+
+type structWithSelfPtr struct {
+ p *structWithSelfPtr
+ s string
+}
+
+func init() {
+ loop1 = &loop2
+ loop2 = &loop1
+
+ loopy1 = &loopy2
+ loopy2 = &loopy1
+
+ cycleMap1 = map[string]interface{}{}
+ cycleMap1["cycle"] = cycleMap1
+ cycleMap2 = map[string]interface{}{}
+ cycleMap2["cycle"] = cycleMap2
+ cycleMap3 = map[string]interface{}{}
+ cycleMap3["different"] = cycleMap3
+}
+
+var deepEqualTests = []DeepEqualTest{
+ // Equalities
+ {nil, nil, true},
+ {1, 1, true},
+ {int32(1), int32(1), true},
+ {0.5, 0.5, true},
+ {float32(0.5), float32(0.5), true},
+ {"hello", "hello", true},
+ {make([]int, 10), make([]int, 10), true},
+ {&[3]int{1, 2, 3}, &[3]int{1, 2, 3}, true},
+ {Basic{1, 0.5}, Basic{1, 0.5}, true},
+ {error(nil), error(nil), true},
+ {map[int]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, true},
+ {fn1, fn2, true},
+
+ // Inequalities
+ {1, 2, false},
+ {int32(1), int32(2), false},
+ {0.5, 0.6, false},
+ {float32(0.5), float32(0.6), false},
+ {"hello", "hey", false},
+ {make([]int, 10), make([]int, 11), false},
+ {&[3]int{1, 2, 3}, &[3]int{1, 2, 4}, false},
+ {Basic{1, 0.5}, Basic{1, 0.6}, false},
+ {Basic{1, 0}, Basic{2, 0}, false},
+ {map[int]string{1: "one", 3: "two"}, map[int]string{2: "two", 1: "one"}, false},
+ {map[int]string{1: "one", 2: "txo"}, map[int]string{2: "two", 1: "one"}, false},
+ {map[int]string{1: "one"}, map[int]string{2: "two", 1: "one"}, false},
+ {map[int]string{2: "two", 1: "one"}, map[int]string{1: "one"}, false},
+ {nil, 1, false},
+ {1, nil, false},
+ {fn1, fn3, false},
+ {fn3, fn3, false},
+ {[][]int{{1}}, [][]int{{2}}, false},
+ {math.NaN(), math.NaN(), false},
+ {&[1]float64{math.NaN()}, &[1]float64{math.NaN()}, false},
+ {&[1]float64{math.NaN()}, self{}, true},
+ {[]float64{math.NaN()}, []float64{math.NaN()}, false},
+ {[]float64{math.NaN()}, self{}, true},
+ {map[float64]float64{math.NaN(): 1}, map[float64]float64{1: 2}, false},
+ {map[float64]float64{math.NaN(): 1}, self{}, true},
+ {&structWithSelfPtr{p: &structWithSelfPtr{s: "a"}}, &structWithSelfPtr{p: &structWithSelfPtr{s: "b"}}, false},
+
+ // Nil vs empty: not the same.
+ {[]int{}, []int(nil), false},
+ {[]int{}, []int{}, true},
+ {[]int(nil), []int(nil), true},
+ {map[int]int{}, map[int]int(nil), false},
+ {map[int]int{}, map[int]int{}, true},
+ {map[int]int(nil), map[int]int(nil), true},
+
+ // Mismatched types
+ {1, 1.0, false},
+ {int32(1), int64(1), false},
+ {0.5, "hello", false},
+ {[]int{1, 2, 3}, [3]int{1, 2, 3}, false},
+ {&[3]interface{}{1, 2, 4}, &[3]interface{}{1, 2, "s"}, false},
+ {Basic{1, 0.5}, NotBasic{1, 0.5}, false},
+ {map[uint]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, false},
+
+ // Possible loops.
+ {&loop1, &loop1, true},
+ {&loop1, &loop2, true},
+ {&loopy1, &loopy1, true},
+ {&loopy1, &loopy2, true},
+ {&cycleMap1, &cycleMap2, true},
+ {&cycleMap1, &cycleMap3, false},
+}
+
+func TestDeepEqual(t *testing.T) {
+ for _, test := range deepEqualTests {
+ if test.b == (self{}) {
+ test.b = test.a
+ }
+ if r := DeepEqual(test.a, test.b); r != test.eq {
+ t.Errorf("DeepEqual(%#v, %#v) = %v, want %v", test.a, test.b, r, test.eq)
+ }
+ }
+}
+
+func TestTypeOf(t *testing.T) {
+ // Special case for nil
+ if typ := TypeOf(nil); typ != nil {
+ t.Errorf("expected nil type for nil value; got %v", typ)
+ }
+ for _, test := range deepEqualTests {
+ v := ValueOf(test.a)
+ if !v.IsValid() {
+ continue
+ }
+ typ := TypeOf(test.a)
+ if typ != v.Type() {
+ t.Errorf("TypeOf(%v) = %v, but ValueOf(%v).Type() = %v", test.a, typ, test.a, v.Type())
+ }
+ }
+}
+
+type Recursive struct {
+ x int
+ r *Recursive
+}
+
+func TestDeepEqualRecursiveStruct(t *testing.T) {
+ a, b := new(Recursive), new(Recursive)
+ *a = Recursive{12, a}
+ *b = Recursive{12, b}
+ if !DeepEqual(a, b) {
+ t.Error("DeepEqual(recursive same) = false, want true")
+ }
+}
+
+type _Complex struct {
+ a int
+ b [3]*_Complex
+ c *string
+ d map[float64]float64
+}
+
+func TestDeepEqualComplexStruct(t *testing.T) {
+ m := make(map[float64]float64)
+ stra, strb := "hello", "hello"
+ a, b := new(_Complex), new(_Complex)
+ *a = _Complex{5, [3]*_Complex{a, b, a}, &stra, m}
+ *b = _Complex{5, [3]*_Complex{b, a, a}, &strb, m}
+ if !DeepEqual(a, b) {
+ t.Error("DeepEqual(complex same) = false, want true")
+ }
+}
+
+func TestDeepEqualComplexStructInequality(t *testing.T) {
+ m := make(map[float64]float64)
+ stra, strb := "hello", "helloo" // Difference is here
+ a, b := new(_Complex), new(_Complex)
+ *a = _Complex{5, [3]*_Complex{a, b, a}, &stra, m}
+ *b = _Complex{5, [3]*_Complex{b, a, a}, &strb, m}
+ if DeepEqual(a, b) {
+ t.Error("DeepEqual(complex different) = true, want false")
+ }
+}
+
+type UnexpT struct {
+ m map[int]int
+}
+
+func TestDeepEqualUnexportedMap(t *testing.T) {
+ // Check that DeepEqual can look at unexported fields.
+ x1 := UnexpT{map[int]int{1: 2}}
+ x2 := UnexpT{map[int]int{1: 2}}
+ if !DeepEqual(&x1, &x2) {
+ t.Error("DeepEqual(x1, x2) = false, want true")
+ }
+
+ y1 := UnexpT{map[int]int{2: 3}}
+ if DeepEqual(&x1, &y1) {
+ t.Error("DeepEqual(x1, y1) = true, want false")
+ }
+}
+
+func check2ndField(x interface{}, offs uintptr, t *testing.T) {
+ s := ValueOf(x)
+ f := s.Type().Field(1)
+ if f.Offset != offs {
+ t.Error("mismatched offsets in structure alignment:", f.Offset, offs)
+ }
+}
+
+// Check that structure alignment & offsets viewed through reflect agree with those
+// from the compiler itself.
+func TestAlignment(t *testing.T) {
+ type T1inner struct {
+ a int
+ }
+ type T1 struct {
+ T1inner
+ f int
+ }
+ type T2inner struct {
+ a, b int
+ }
+ type T2 struct {
+ T2inner
+ f int
+ }
+
+ x := T1{T1inner{2}, 17}
+ check2ndField(x, uintptr(unsafe.Pointer(&x.f))-uintptr(unsafe.Pointer(&x)), t)
+
+ x1 := T2{T2inner{2, 3}, 17}
+ check2ndField(x1, uintptr(unsafe.Pointer(&x1.f))-uintptr(unsafe.Pointer(&x1)), t)
+}
+
+func Nil(a interface{}, t *testing.T) {
+ n := ValueOf(a).Field(0)
+ if !n.IsNil() {
+ t.Errorf("%v should be nil", a)
+ }
+}
+
+func NotNil(a interface{}, t *testing.T) {
+ n := ValueOf(a).Field(0)
+ if n.IsNil() {
+ t.Errorf("value of type %v should not be nil", ValueOf(a).Type().String())
+ }
+}
+
+func TestIsNil(t *testing.T) {
+ // These implement IsNil.
+ // Wrap in extra struct to hide interface type.
+ doNil := []interface{}{
+ struct{ x *int }{},
+ struct{ x interface{} }{},
+ struct{ x map[string]int }{},
+ struct{ x func() bool }{},
+ struct{ x chan int }{},
+ struct{ x []string }{},
+ struct{ x unsafe.Pointer }{},
+ }
+ for _, ts := range doNil {
+ ty := TypeOf(ts).Field(0).Type
+ v := Zero(ty)
+ v.IsNil() // panics if not okay to call
+ }
+
+ // Check the implementations
+ var pi struct {
+ x *int
+ }
+ Nil(pi, t)
+ pi.x = new(int)
+ NotNil(pi, t)
+
+ var si struct {
+ x []int
+ }
+ Nil(si, t)
+ si.x = make([]int, 10)
+ NotNil(si, t)
+
+ var ci struct {
+ x chan int
+ }
+ Nil(ci, t)
+ ci.x = make(chan int)
+ NotNil(ci, t)
+
+ var mi struct {
+ x map[int]int
+ }
+ Nil(mi, t)
+ mi.x = make(map[int]int)
+ NotNil(mi, t)
+
+ var ii struct {
+ x interface{}
+ }
+ Nil(ii, t)
+ ii.x = 2
+ NotNil(ii, t)
+
+ var fi struct {
+ x func(t *testing.T)
+ }
+ Nil(fi, t)
+ fi.x = TestIsNil
+ NotNil(fi, t)
+}
+
+func TestIsZero(t *testing.T) {
+ for i, tt := range []struct {
+ x interface{}
+ want bool
+ }{
+ // Booleans
+ {true, false},
+ {false, true},
+ // Numeric types
+ {int(0), true},
+ {int(1), false},
+ {int8(0), true},
+ {int8(1), false},
+ {int16(0), true},
+ {int16(1), false},
+ {int32(0), true},
+ {int32(1), false},
+ {int64(0), true},
+ {int64(1), false},
+ {uint(0), true},
+ {uint(1), false},
+ {uint8(0), true},
+ {uint8(1), false},
+ {uint16(0), true},
+ {uint16(1), false},
+ {uint32(0), true},
+ {uint32(1), false},
+ {uint64(0), true},
+ {uint64(1), false},
+ {float32(0), true},
+ {float32(1.2), false},
+ {float64(0), true},
+ {float64(1.2), false},
+ {math.Copysign(0, -1), false},
+ {complex64(0), true},
+ {complex64(1.2), false},
+ {complex128(0), true},
+ {complex128(1.2), false},
+ {complex(math.Copysign(0, -1), 0), false},
+ {complex(0, math.Copysign(0, -1)), false},
+ {complex(math.Copysign(0, -1), math.Copysign(0, -1)), false},
+ {uintptr(0), true},
+ {uintptr(128), false},
+ // Array
+ {Zero(TypeOf([5]string{})).Interface(), true},
+ {[5]string{"", "", "", "", ""}, true},
+ {[5]string{}, true},
+ {[5]string{"", "", "", "a", ""}, false},
+ // Chan
+ {(chan string)(nil), true},
+ {make(chan string), false},
+ {time.After(1), false},
+ // Func
+ {(func())(nil), true},
+ {New, false},
+ // Interface
+ {New(TypeOf(new(error)).Elem()).Elem(), true},
+ {(io.Reader)(strings.NewReader("")), false},
+ // Map
+ {(map[string]string)(nil), true},
+ {map[string]string{}, false},
+ {make(map[string]string), false},
+ // Ptr
+ {(*func())(nil), true},
+ {(*int)(nil), true},
+ {new(int), false},
+ // Slice
+ {[]string{}, false},
+ {([]string)(nil), true},
+ {make([]string, 0), false},
+ // Strings
+ {"", true},
+ {"not-zero", false},
+ // Structs
+ {T{}, true},
+ {T{123, 456.75, "hello", &_i}, false},
+ // UnsafePointer
+ {(unsafe.Pointer)(nil), true},
+ {(unsafe.Pointer)(new(int)), false},
+ } {
+ var x Value
+ if v, ok := tt.x.(Value); ok {
+ x = v
+ } else {
+ x = ValueOf(tt.x)
+ }
+
+ b := x.IsZero()
+ if b != tt.want {
+ t.Errorf("%d: IsZero((%s)(%+v)) = %t, want %t", i, x.Kind(), tt.x, b, tt.want)
+ }
+
+ if !Zero(TypeOf(tt.x)).IsZero() {
+ t.Errorf("%d: IsZero(Zero(TypeOf((%s)(%+v)))) is false", i, x.Kind(), tt.x)
+ }
+ }
+
+ func() {
+ defer func() {
+ if r := recover(); r == nil {
+ t.Error("should panic for invalid value")
+ }
+ }()
+ (Value{}).IsZero()
+ }()
+}
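+
+// Why the math.Copysign cases above want false (a sketch of the rule):
+// IsZero compares floating-point values by bit pattern, so negative zero is
+// not the zero value even though it compares equal to 0 numerically:
+//
+// ValueOf(math.Copysign(0, -1)).IsZero() // false: the sign bit is set
+// math.Copysign(0, -1) == 0              // true: numeric comparison ignores the sign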
+
+func TestInterfaceExtraction(t *testing.T) {
+ var s struct {
+ W io.Writer
+ }
+
+ s.W = os.Stdout
+ v := Indirect(ValueOf(&s)).Field(0).Interface()
+ if v != s.W.(interface{}) {
+ t.Error("Interface() on interface: ", v, s.W)
+ }
+}
+
+func TestNilPtrValueSub(t *testing.T) {
+ var pi *int
+ if pv := ValueOf(pi); pv.Elem().IsValid() {
+ t.Error("ValueOf((*int)(nil)).Elem().IsValid()")
+ }
+}
+
+func TestMap(t *testing.T) {
+ m := map[string]int{"a": 1, "b": 2}
+ mv := ValueOf(m)
+ if n := mv.Len(); n != len(m) {
+ t.Errorf("Len = %d, want %d", n, len(m))
+ }
+ keys := mv.MapKeys()
+ newmap := MakeMap(mv.Type())
+ for k, v := range m {
+ // Check that returned Keys match keys in range.
+ // These aren't required to be in the same order.
+ seen := false
+ for _, kv := range keys {
+ if kv.String() == k {
+ seen = true
+ break
+ }
+ }
+ if !seen {
+ t.Errorf("Missing key %q", k)
+ }
+
+ // Check that value lookup is correct.
+ vv := mv.MapIndex(ValueOf(k))
+ if vi := vv.Int(); vi != int64(v) {
+ t.Errorf("Key %q: have value %d, want %d", k, vi, v)
+ }
+
+ // Copy into new map.
+ newmap.SetMapIndex(ValueOf(k), ValueOf(v))
+ }
+ vv := mv.MapIndex(ValueOf("not-present"))
+ if vv.IsValid() {
+ t.Errorf("Invalid key: got non-nil value %s", valueToString(vv))
+ }
+
+ newm := newmap.Interface().(map[string]int)
+ if len(newm) != len(m) {
+ t.Errorf("length after copy: newm=%d, m=%d", len(newm), len(m))
+ }
+
+ for k, v := range newm {
+ mv, ok := m[k]
+ if mv != v {
+ t.Errorf("newm[%q] = %d, but m[%q] = %d, %v", k, v, k, mv, ok)
+ }
+ }
+
+ newmap.SetMapIndex(ValueOf("a"), Value{})
+ v, ok := newm["a"]
+ if ok {
+ t.Errorf("newm[\"a\"] = %d after delete", v)
+ }
+
+ mv = ValueOf(&m).Elem()
+ mv.Set(Zero(mv.Type()))
+ if m != nil {
+ t.Errorf("mv.Set(nil) failed")
+ }
+}
+
+func TestNilMap(t *testing.T) {
+ var m map[string]int
+ mv := ValueOf(m)
+ keys := mv.MapKeys()
+ if len(keys) != 0 {
+ t.Errorf(">0 keys for nil map: %v", keys)
+ }
+
+ // Check that value for missing key is zero.
+ x := mv.MapIndex(ValueOf("hello"))
+ if x.Kind() != Invalid {
+ t.Errorf("m.MapIndex(\"hello\") for nil map = %v, want Invalid Value", x)
+ }
+
+ // Check big value too.
+ var mbig map[string][10 << 20]byte
+ x = ValueOf(mbig).MapIndex(ValueOf("hello"))
+ if x.Kind() != Invalid {
+ t.Errorf("mbig.MapIndex(\"hello\") for nil map = %v, want Invalid Value", x)
+ }
+
+ // Test that deletes from a nil map succeed.
+ mv.SetMapIndex(ValueOf("hi"), Value{})
+}
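+
+// SetMapIndex with the zero Value deletes a key, mirroring the built-in
+// delete (an illustrative sketch):
+//
+// mv.SetMapIndex(ValueOf("hi"), Value{}) // like delete(m, "hi")
+//
+// As with delete, this succeeds even when the map is nil, which is what the
+// final statement above checks; storing a real value into a nil map panics.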
+
+func TestChan(t *testing.T) {
+ for loop := 0; loop < 2; loop++ {
+ var c chan int
+ var cv Value
+
+ // check both ways to allocate channels
+ switch loop {
+ case 1:
+ c = make(chan int, 1)
+ cv = ValueOf(c)
+ case 0:
+ cv = MakeChan(TypeOf(c), 1)
+ c = cv.Interface().(chan int)
+ }
+
+ // Send
+ cv.Send(ValueOf(2))
+ if i := <-c; i != 2 {
+ t.Errorf("reflect Send 2, native recv %d", i)
+ }
+
+ // Recv
+ c <- 3
+ if i, ok := cv.Recv(); i.Int() != 3 || !ok {
+ t.Errorf("native send 3, reflect Recv %d, %t", i.Int(), ok)
+ }
+
+ // TryRecv fail
+ val, ok := cv.TryRecv()
+ if val.IsValid() || ok {
+ t.Errorf("TryRecv on empty chan: %s, %t", valueToString(val), ok)
+ }
+
+ // TryRecv success
+ c <- 4
+ val, ok = cv.TryRecv()
+ if !val.IsValid() {
+ t.Errorf("TryRecv on ready chan got nil")
+ } else if i := val.Int(); i != 4 || !ok {
+ t.Errorf("native send 4, TryRecv %d, %t", i, ok)
+ }
+
+ // TrySend fail
+ c <- 100
+ ok = cv.TrySend(ValueOf(5))
+ i := <-c
+ if ok {
+ t.Errorf("TrySend on full chan succeeded: value %d", i)
+ }
+
+ // TrySend success
+ ok = cv.TrySend(ValueOf(6))
+ if !ok {
+ t.Errorf("TrySend on empty chan failed")
+ select {
+ case x := <-c:
+ t.Errorf("TrySend failed but it did send %d", x)
+ default:
+ }
+ } else {
+ if i = <-c; i != 6 {
+ t.Errorf("TrySend 6, recv %d", i)
+ }
+ }
+
+ // Close
+ c <- 123
+ cv.Close()
+ if i, ok := cv.Recv(); i.Int() != 123 || !ok {
+ t.Errorf("send 123 then close; Recv %d, %t", i.Int(), ok)
+ }
+ if i, ok := cv.Recv(); i.Int() != 0 || ok {
+ t.Errorf("after close Recv %d, %t", i.Int(), ok)
+ }
+ }
+
+ // check creation of unbuffered channel
+ var c chan int
+ cv := MakeChan(TypeOf(c), 0)
+ c = cv.Interface().(chan int)
+ if cv.TrySend(ValueOf(7)) {
+ t.Errorf("TrySend on sync chan succeeded")
+ }
+ if v, ok := cv.TryRecv(); v.IsValid() || ok {
+ t.Errorf("TryRecv on sync chan succeeded: isvalid=%v ok=%v", v.IsValid(), ok)
+ }
+
+ // len/cap
+ cv = MakeChan(TypeOf(c), 10)
+ c = cv.Interface().(chan int)
+ for i := 0; i < 3; i++ {
+ c <- i
+ }
+ if l, m := cv.Len(), cv.Cap(); l != len(c) || m != cap(c) {
+ t.Errorf("Len/Cap = %d/%d want %d/%d", l, m, len(c), cap(c))
+ }
+}
+
+// caseInfo describes a single case in a select test.
+type caseInfo struct {
+ desc string
+ canSelect bool
+ recv Value
+ closed bool
+ helper func()
+ panic bool
+}
+
+var allselect = flag.Bool("allselect", false, "exhaustive select test")
+
+func TestSelect(t *testing.T) {
+ selectWatch.once.Do(func() { go selectWatcher() })
+
+ var x exhaustive
+ nch := 0
+ newop := func(n int, cap int) (ch, val Value) {
+ nch++
+ if nch%101%2 == 1 {
+ c := make(chan int, cap)
+ ch = ValueOf(c)
+ val = ValueOf(n)
+ } else {
+ c := make(chan string, cap)
+ ch = ValueOf(c)
+ val = ValueOf(fmt.Sprint(n))
+ }
+ return
+ }
+
+ for n := 0; x.Next(); n++ {
+ if testing.Short() && n >= 1000 {
+ break
+ }
+ if n >= 100000 && !*allselect {
+ break
+ }
+ if n%100000 == 0 && testing.Verbose() {
+ println("TestSelect", n)
+ }
+ var cases []SelectCase
+ var info []caseInfo
+
+ // Ready send.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 1)
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ch,
+ Send: val,
+ })
+ info = append(info, caseInfo{desc: "ready send", canSelect: true})
+ }
+
+ // Ready recv.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 1)
+ ch.Send(val)
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ch,
+ })
+ info = append(info, caseInfo{desc: "ready recv", canSelect: true, recv: val})
+ }
+
+ // Blocking send.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 0)
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ch,
+ Send: val,
+ })
+ // Let it execute?
+ if x.Maybe() {
+ f := func() { ch.Recv() }
+ info = append(info, caseInfo{desc: "blocking send", helper: f})
+ } else {
+ info = append(info, caseInfo{desc: "blocking send"})
+ }
+ }
+
+ // Blocking recv.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 0)
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ch,
+ })
+ // Let it execute?
+ if x.Maybe() {
+ f := func() { ch.Send(val) }
+ info = append(info, caseInfo{desc: "blocking recv", recv: val, helper: f})
+ } else {
+ info = append(info, caseInfo{desc: "blocking recv"})
+ }
+ }
+
+ // Zero Chan send.
+ if x.Maybe() {
+ // Maybe include value to send.
+ var val Value
+ if x.Maybe() {
+ val = ValueOf(100)
+ }
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Send: val,
+ })
+ info = append(info, caseInfo{desc: "zero Chan send"})
+ }
+
+ // Zero Chan receive.
+ if x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ })
+ info = append(info, caseInfo{desc: "zero Chan recv"})
+ }
+
+ // nil Chan send.
+ if x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ValueOf((chan int)(nil)),
+ Send: ValueOf(101),
+ })
+ info = append(info, caseInfo{desc: "nil Chan send"})
+ }
+
+ // nil Chan recv.
+ if x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ValueOf((chan int)(nil)),
+ })
+ info = append(info, caseInfo{desc: "nil Chan recv"})
+ }
+
+ // closed Chan send.
+ if x.Maybe() {
+ ch := make(chan int)
+ close(ch)
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ValueOf(ch),
+ Send: ValueOf(101),
+ })
+ info = append(info, caseInfo{desc: "closed Chan send", canSelect: true, panic: true})
+ }
+
+ // closed Chan recv.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 0)
+ ch.Close()
+ val = Zero(val.Type())
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ch,
+ })
+ info = append(info, caseInfo{desc: "closed Chan recv", canSelect: true, closed: true, recv: val})
+ }
+
+ var helper func() // goroutine to help the select complete
+
+ // Add default? Must be last case here, but will permute.
+ // Add the default if the select would otherwise
+ // block forever, and maybe add it anyway.
+ numCanSelect := 0
+ canProceed := false
+ canBlock := true
+ canPanic := false
+ helpers := []int{}
+ for i, c := range info {
+ if c.canSelect {
+ canProceed = true
+ canBlock = false
+ numCanSelect++
+ if c.panic {
+ canPanic = true
+ }
+ } else if c.helper != nil {
+ canProceed = true
+ helpers = append(helpers, i)
+ }
+ }
+ if !canProceed || x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectDefault,
+ })
+ info = append(info, caseInfo{desc: "default", canSelect: canBlock})
+ numCanSelect++
+ } else if canBlock {
+ // Select needs to communicate with another goroutine.
+ cas := &info[helpers[x.Choose(len(helpers))]]
+ helper = cas.helper
+ cas.canSelect = true
+ numCanSelect++
+ }
+
+ // Permute cases and case info.
+ // Doing too much here makes the exhaustive loop
+ // too exhausting, so just do two swaps.
+ for loop := 0; loop < 2; loop++ {
+ i := x.Choose(len(cases))
+ j := x.Choose(len(cases))
+ cases[i], cases[j] = cases[j], cases[i]
+ info[i], info[j] = info[j], info[i]
+ }
+
+ if helper != nil {
+ // We wait before kicking off a goroutine to satisfy a blocked select.
+ // The pause needs to be big enough to let the select block before
+ // we run the helper, but if we lose that race once in a while it's okay: the
+ // select will just proceed immediately. Not a big deal.
+ // For short tests we can grow the timeout a bit without fear of taking too long
+ pause := 10 * time.Microsecond
+ if testing.Short() {
+ pause = 100 * time.Microsecond
+ }
+ time.AfterFunc(pause, helper)
+ }
+
+ // Run select.
+ i, recv, recvOK, panicErr := runSelect(cases, info)
+ if panicErr != nil && !canPanic {
+ t.Fatalf("%s\npanicked unexpectedly: %v", fmtSelect(info), panicErr)
+ }
+ if panicErr == nil && canPanic && numCanSelect == 1 {
+ t.Fatalf("%s\nselected #%d incorrectly (should panic)", fmtSelect(info), i)
+ }
+ if panicErr != nil {
+ continue
+ }
+
+ cas := info[i]
+ if !cas.canSelect {
+ recvStr := ""
+ if recv.IsValid() {
+ recvStr = fmt.Sprintf(", received %v, %v", recv.Interface(), recvOK)
+ }
+ t.Fatalf("%s\nselected #%d incorrectly%s", fmtSelect(info), i, recvStr)
+ continue
+ }
+ if cas.panic {
+ t.Fatalf("%s\nselected #%d incorrectly (case should panic)", fmtSelect(info), i)
+ continue
+ }
+
+ if cases[i].Dir == SelectRecv {
+ if !recv.IsValid() {
+ t.Fatalf("%s\nselected #%d but got %v, %v, want %v, %v", fmtSelect(info), i, recv, recvOK, cas.recv.Interface(), !cas.closed)
+ }
+ if !cas.recv.IsValid() {
+ t.Fatalf("%s\nselected #%d but internal error: missing recv value", fmtSelect(info), i)
+ }
+ if recv.Interface() != cas.recv.Interface() || recvOK != !cas.closed {
+ if recv.Interface() == cas.recv.Interface() && recvOK == !cas.closed {
+ t.Fatalf("%s\nselected #%d, got %#v, %v, and DeepEqual is broken on %T", fmtSelect(info), i, recv.Interface(), recvOK, recv.Interface())
+ }
+ t.Fatalf("%s\nselected #%d but got %#v, %v, want %#v, %v", fmtSelect(info), i, recv.Interface(), recvOK, cas.recv.Interface(), !cas.closed)
+ }
+ } else {
+ if recv.IsValid() || recvOK {
+ t.Fatalf("%s\nselected #%d but got %v, %v, want %v, %v", fmtSelect(info), i, recv, recvOK, Value{}, false)
+ }
+ }
+ }
+}
+
+func TestSelectMaxCases(t *testing.T) {
+ var sCases []SelectCase
+ channel := make(chan int)
+ close(channel)
+ for i := 0; i < 65536; i++ {
+ sCases = append(sCases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ValueOf(channel),
+ })
+ }
+ // Should not panic
+ _, _, _ = Select(sCases)
+ sCases = append(sCases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ValueOf(channel),
+ })
+ defer func() {
+ if err := recover(); err != nil {
+ if err.(string) != "reflect.Select: too many cases (max 65536)" {
+ t.Fatalf("unexpected error from select call with greater than max supported cases")
+ }
+ } else {
+ t.Fatalf("expected select call to panic with greater than max supported cases")
+ }
+ }()
+ // Should panic
+ _, _, _ = Select(sCases)
+}
+
+func TestSelectNop(t *testing.T) {
+ // "select { default: }" should always return the default case.
+ chosen, _, _ := Select([]SelectCase{{Dir: SelectDefault}})
+ if chosen != 0 {
+ t.Fatalf("expected Select to return 0, but got %#v", chosen)
+ }
+}
+
+func BenchmarkSelect(b *testing.B) {
+ channel := make(chan int)
+ close(channel)
+ var cases []SelectCase
+ for i := 0; i < 8; i++ {
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ValueOf(channel),
+ })
+ }
+ for _, numCases := range []int{1, 4, 8} {
+ b.Run(strconv.Itoa(numCases), func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ _, _, _ = Select(cases[:numCases])
+ }
+ })
+ }
+}
+
+// selectWatch and the selectWatcher are a watchdog mechanism for running Select.
+// If the selectWatcher notices that a select has been blocked for more than
+// 10 seconds, it prints an error describing the select and panics the entire
+// test binary.
+var selectWatch struct {
+ sync.Mutex
+ once sync.Once
+ now time.Time
+ info []caseInfo
+}
+
+func selectWatcher() {
+ for {
+ time.Sleep(1 * time.Second)
+ selectWatch.Lock()
+ if selectWatch.info != nil && time.Since(selectWatch.now) > 10*time.Second {
+ fmt.Fprintf(os.Stderr, "TestSelect:\n%s blocked indefinitely\n", fmtSelect(selectWatch.info))
+ panic("select stuck")
+ }
+ selectWatch.Unlock()
+ }
+}
+
+// runSelect runs a single select test.
+// It returns the values returned by Select but also returns
+// a panic value if the Select panics.
+func runSelect(cases []SelectCase, info []caseInfo) (chosen int, recv Value, recvOK bool, panicErr interface{}) {
+ defer func() {
+ panicErr = recover()
+
+ selectWatch.Lock()
+ selectWatch.info = nil
+ selectWatch.Unlock()
+ }()
+
+ selectWatch.Lock()
+ selectWatch.now = time.Now()
+ selectWatch.info = info
+ selectWatch.Unlock()
+
+ chosen, recv, recvOK = Select(cases)
+ return
+}
+
+// fmtSelect formats the information about a single select test.
+func fmtSelect(info []caseInfo) string {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "\nselect {\n")
+ for i, cas := range info {
+ fmt.Fprintf(&buf, "%d: %s", i, cas.desc)
+ if cas.recv.IsValid() {
+ fmt.Fprintf(&buf, " val=%#v", cas.recv.Interface())
+ }
+ if cas.canSelect {
+ fmt.Fprintf(&buf, " canselect")
+ }
+ if cas.panic {
+ fmt.Fprintf(&buf, " panic")
+ }
+ fmt.Fprintf(&buf, "\n")
+ }
+ fmt.Fprintf(&buf, "}")
+ return buf.String()
+}
+
+type two [2]uintptr
+
+// Difficult test for function call because of
+// implicit padding between arguments.
+func dummy(b byte, c int, d byte, e two, f byte, g float32, h byte) (i byte, j int, k byte, l two, m byte, n float32, o byte) {
+ return b, c, d, e, f, g, h
+}
+
+func TestFunc(t *testing.T) {
+ ret := ValueOf(dummy).Call([]Value{
+ ValueOf(byte(10)),
+ ValueOf(20),
+ ValueOf(byte(30)),
+ ValueOf(two{40, 50}),
+ ValueOf(byte(60)),
+ ValueOf(float32(70)),
+ ValueOf(byte(80)),
+ })
+ if len(ret) != 7 {
+ t.Fatalf("Call returned %d values, want 7", len(ret))
+ }
+
+ i := byte(ret[0].Uint())
+ j := int(ret[1].Int())
+ k := byte(ret[2].Uint())
+ l := ret[3].Interface().(two)
+ m := byte(ret[4].Uint())
+ n := float32(ret[5].Float())
+ o := byte(ret[6].Uint())
+
+ if i != 10 || j != 20 || k != 30 || l != (two{40, 50}) || m != 60 || n != 70 || o != 80 {
+ t.Errorf("Call returned %d, %d, %d, %v, %d, %g, %d; want 10, 20, 30, [40, 50], 60, 70, 80", i, j, k, l, m, n, o)
+ }
+
+ for i, v := range ret {
+ if v.CanAddr() {
+ t.Errorf("result %d is addressable", i)
+ }
+ }
+}
+
+func TestCallConvert(t *testing.T) {
+ v := ValueOf(new(io.ReadWriter)).Elem()
+ f := ValueOf(func(r io.Reader) io.Reader { return r })
+ out := f.Call([]Value{v})
+ if len(out) != 1 || out[0].Type() != TypeOf(new(io.Reader)).Elem() || !out[0].IsNil() {
+ t.Errorf("expected [nil], got %v", out)
+ }
+}
+
+type emptyStruct struct{}
+
+type nonEmptyStruct struct {
+ member int
+}
+
+func returnEmpty() emptyStruct {
+ return emptyStruct{}
+}
+
+func takesEmpty(e emptyStruct) {
+}
+
+func returnNonEmpty(i int) nonEmptyStruct {
+ return nonEmptyStruct{member: i}
+}
+
+func takesNonEmpty(n nonEmptyStruct) int {
+ return n.member
+}
+
+func TestCallWithStruct(t *testing.T) {
+ r := ValueOf(returnEmpty).Call(nil)
+ if len(r) != 1 || r[0].Type() != TypeOf(emptyStruct{}) {
+ t.Errorf("returning empty struct returned %#v instead", r)
+ }
+ r = ValueOf(takesEmpty).Call([]Value{ValueOf(emptyStruct{})})
+ if len(r) != 0 {
+ t.Errorf("takesEmpty returned values: %#v", r)
+ }
+ r = ValueOf(returnNonEmpty).Call([]Value{ValueOf(42)})
+ if len(r) != 1 || r[0].Type() != TypeOf(nonEmptyStruct{}) || r[0].Field(0).Int() != 42 {
+ t.Errorf("returnNonEmpty returned %#v", r)
+ }
+ r = ValueOf(takesNonEmpty).Call([]Value{ValueOf(nonEmptyStruct{member: 42})})
+ if len(r) != 1 || r[0].Type() != TypeOf(1) || r[0].Int() != 42 {
+ t.Errorf("takesNonEmpty returned %#v", r)
+ }
+}
+
+func TestCallReturnsEmpty(t *testing.T) {
+ // Issue 21717: past-the-end pointer write in Call with
+ // nonzero-sized frame and zero-sized return value.
+ runtime.GC()
+ var finalized uint32
+ f := func() (emptyStruct, *[2]int64) {
+ i := new([2]int64) // big enough to not be tinyalloc'd, so finalizer always runs when i dies
+ runtime.SetFinalizer(i, func(*[2]int64) { atomic.StoreUint32(&finalized, 1) })
+ return emptyStruct{}, i
+ }
+ v := ValueOf(f).Call(nil)[0] // out[0] should not alias out[1]'s memory, so the finalizer should run.
+ timeout := time.After(5 * time.Second)
+ for atomic.LoadUint32(&finalized) == 0 {
+ select {
+ case <-timeout:
+ t.Fatal("finalizer did not run")
+ default:
+ }
+ runtime.Gosched()
+ runtime.GC()
+ }
+ runtime.KeepAlive(v)
+}
+
+func BenchmarkCall(b *testing.B) {
+ fv := ValueOf(func(a, b string) {})
+ b.ReportAllocs()
+ b.RunParallel(func(pb *testing.PB) {
+ args := []Value{ValueOf("a"), ValueOf("b")}
+ for pb.Next() {
+ fv.Call(args)
+ }
+ })
+}
+
+func BenchmarkCallArgCopy(b *testing.B) {
+ byteArray := func(n int) Value {
+ return Zero(ArrayOf(n, TypeOf(byte(0))))
+ }
+ sizes := [...]struct {
+ fv Value
+ arg Value
+ }{
+ {ValueOf(func(a [128]byte) {}), byteArray(128)},
+ {ValueOf(func(a [256]byte) {}), byteArray(256)},
+ {ValueOf(func(a [1024]byte) {}), byteArray(1024)},
+ {ValueOf(func(a [4096]byte) {}), byteArray(4096)},
+ {ValueOf(func(a [65536]byte) {}), byteArray(65536)},
+ }
+ for _, size := range sizes {
+ bench := func(b *testing.B) {
+ args := []Value{size.arg}
+ b.SetBytes(int64(size.arg.Len()))
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ size.fv.Call(args)
+ }
+ })
+ }
+ name := fmt.Sprintf("size=%v", size.arg.Len())
+ b.Run(name, bench)
+ }
+}
+
+func TestMakeFunc(t *testing.T) {
+ f := dummy
+ fv := MakeFunc(TypeOf(f), func(in []Value) []Value { return in })
+ ValueOf(&f).Elem().Set(fv)
+
+ // Call g with small arguments so that there is
+ // something predictable (and different from the
+ // correct results) in those positions on the stack.
+ g := dummy
+ g(1, 2, 3, two{4, 5}, 6, 7, 8)
+
+ // Call constructed function f.
+ i, j, k, l, m, n, o := f(10, 20, 30, two{40, 50}, 60, 70, 80)
+ if i != 10 || j != 20 || k != 30 || l != (two{40, 50}) || m != 60 || n != 70 || o != 80 {
+ t.Errorf("Call returned %d, %d, %d, %v, %d, %g, %d; want 10, 20, 30, [40, 50], 60, 70, 80", i, j, k, l, m, n, o)
+ }
+}
+
+func TestMakeFuncInterface(t *testing.T) {
+ fn := func(i int) int { return i }
+ incr := func(in []Value) []Value {
+ return []Value{ValueOf(int(in[0].Int() + 1))}
+ }
+ fv := MakeFunc(TypeOf(fn), incr)
+ ValueOf(&fn).Elem().Set(fv)
+ if r := fn(2); r != 3 {
+ t.Errorf("Call returned %d, want 3", r)
+ }
+ if r := fv.Call([]Value{ValueOf(14)})[0].Int(); r != 15 {
+ t.Errorf("Call returned %d, want 15", r)
+ }
+ if r := fv.Interface().(func(int) int)(26); r != 27 {
+ t.Errorf("Call returned %d, want 27", r)
+ }
+}
+
+func TestMakeFuncVariadic(t *testing.T) {
+ // Test that variadic arguments are packed into a slice and passed as last arg
+ fn := func(_ int, is ...int) []int { return nil }
+ fv := MakeFunc(TypeOf(fn), func(in []Value) []Value { return in[1:2] })
+ ValueOf(&fn).Elem().Set(fv)
+
+ r := fn(1, 2, 3)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+
+ r = fn(1, []int{2, 3}...)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+
+ r = fv.Call([]Value{ValueOf(1), ValueOf(2), ValueOf(3)})[0].Interface().([]int)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+
+ r = fv.CallSlice([]Value{ValueOf(1), ValueOf([]int{2, 3})})[0].Interface().([]int)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+
+ f := fv.Interface().(func(int, ...int) []int)
+
+ r = f(1, 2, 3)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+ r = f(1, []int{2, 3}...)
+ if r[0] != 2 || r[1] != 3 {
+ t.Errorf("Call returned [%v, %v]; want 2, 3", r[0], r[1])
+ }
+}
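+
+// Call versus CallSlice for a variadic function (a sketch of the convention
+// the test exercises): Call packs the trailing arguments into the variadic
+// slice, while CallSlice expects the final argument to be that slice:
+//
+// fv.Call([]Value{ValueOf(1), ValueOf(2), ValueOf(3)})    // like fn(1, 2, 3)
+// fv.CallSlice([]Value{ValueOf(1), ValueOf([]int{2, 3})}) // like fn(1, []int{2, 3}...)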
+
+// Dummy type that implements io.WriteCloser
+type WC struct {
+}
+
+func (w *WC) Write(p []byte) (n int, err error) {
+ return 0, nil
+}
+func (w *WC) Close() error {
+ return nil
+}
+
+func TestMakeFuncValidReturnAssignments(t *testing.T) {
+ // reflect.Values returned from the wrapped function should be assignment-converted
+ // to the types returned by the result of MakeFunc.
+
+ // Concrete types should be promotable to interfaces they implement.
+ var f func() error
+ f = MakeFunc(TypeOf(f), func([]Value) []Value {
+ return []Value{ValueOf(io.EOF)}
+ }).Interface().(func() error)
+ f()
+
+ // Super-interfaces should be promotable to simpler interfaces.
+ var g func() io.Writer
+ g = MakeFunc(TypeOf(g), func([]Value) []Value {
+ var w io.WriteCloser = &WC{}
+ return []Value{ValueOf(&w).Elem()}
+ }).Interface().(func() io.Writer)
+ g()
+
+ // Channels should be promotable to directional channels.
+ var h func() <-chan int
+ h = MakeFunc(TypeOf(h), func([]Value) []Value {
+ return []Value{ValueOf(make(chan int))}
+ }).Interface().(func() <-chan int)
+ h()
+
+ // Unnamed types should be promotable to named types.
+ type T struct{ a, b, c int }
+ var i func() T
+ i = MakeFunc(TypeOf(i), func([]Value) []Value {
+ return []Value{ValueOf(struct{ a, b, c int }{a: 1, b: 2, c: 3})}
+ }).Interface().(func() T)
+ i()
+}
+
+func TestMakeFuncInvalidReturnAssignments(t *testing.T) {
+ // Type doesn't implement the required interface.
+ shouldPanic("", func() {
+ var f func() error
+ f = MakeFunc(TypeOf(f), func([]Value) []Value {
+ return []Value{ValueOf(int(7))}
+ }).Interface().(func() error)
+ f()
+ })
+ // Assigning to an interface with additional methods.
+ shouldPanic("", func() {
+ var f func() io.ReadWriteCloser
+ f = MakeFunc(TypeOf(f), func([]Value) []Value {
+ var w io.WriteCloser = &WC{}
+ return []Value{ValueOf(&w).Elem()}
+ }).Interface().(func() io.ReadWriteCloser)
+ f()
+ })
+ // Directional channels can't be assigned to bidirectional ones.
+ shouldPanic("", func() {
+ var f func() chan int
+ f = MakeFunc(TypeOf(f), func([]Value) []Value {
+ var c <-chan int = make(chan int)
+ return []Value{ValueOf(c)}
+ }).Interface().(func() chan int)
+ f()
+ })
+ // Two named types which are otherwise identical.
+ shouldPanic("", func() {
+ type T struct{ a, b, c int }
+ type U struct{ a, b, c int }
+ var f func() T
+ f = MakeFunc(TypeOf(f), func([]Value) []Value {
+ return []Value{ValueOf(U{a: 1, b: 2, c: 3})}
+ }).Interface().(func() T)
+ f()
+ })
+}
+
+type Point struct {
+ x, y int
+}
+
+// This will be index 0.
+func (p Point) AnotherMethod(scale int) int {
+ return -1
+}
+
+// This will be index 1.
+func (p Point) Dist(scale int) int {
+ //println("Point.Dist", p.x, p.y, scale)
+ return p.x*p.x*scale + p.y*p.y*scale
+}
+
+// This will be index 2.
+func (p Point) GCMethod(k int) int {
+ runtime.GC()
+ return k + p.x
+}
+
+// This will be index 3.
+func (p Point) NoArgs() {
+ // Exercise no-argument/no-result paths.
+}
+
+// This will be index 4.
+func (p Point) TotalDist(points ...Point) int {
+ tot := 0
+ for _, q := range points {
+ dx := q.x - p.x
+ dy := q.y - p.y
+ tot += dx*dx + dy*dy // Should call Sqrt, but it's just a test.
+
+ }
+ return tot
+}
+
+// This will be index 5.
+func (p *Point) Int64Method(x int64) int64 {
+ return x
+}
+
+// This will be index 6.
+func (p *Point) Int32Method(x int32) int32 {
+ return x
+}
+
+func TestMethod(t *testing.T) {
+ // Non-curried method of type.
+ p := Point{3, 4}
+ i := TypeOf(p).Method(1).Func.Call([]Value{ValueOf(p), ValueOf(10)})[0].Int()
+ if i != 250 {
+ t.Errorf("Type Method returned %d; want 250", i)
+ }
+
+ m, ok := TypeOf(p).MethodByName("Dist")
+ if !ok {
+ t.Fatalf("method by name failed")
+ }
+ i = m.Func.Call([]Value{ValueOf(p), ValueOf(11)})[0].Int()
+ if i != 275 {
+ t.Errorf("Type MethodByName returned %d; want 275", i)
+ }
+
+ m, ok = TypeOf(p).MethodByName("NoArgs")
+ if !ok {
+ t.Fatalf("method by name failed")
+ }
+ n := len(m.Func.Call([]Value{ValueOf(p)}))
+ if n != 0 {
+ t.Errorf("NoArgs returned %d values; want 0", n)
+ }
+
+ i = TypeOf(&p).Method(1).Func.Call([]Value{ValueOf(&p), ValueOf(12)})[0].Int()
+ if i != 300 {
+ t.Errorf("Pointer Type Method returned %d; want 300", i)
+ }
+
+ m, ok = TypeOf(&p).MethodByName("Dist")
+ if !ok {
+ t.Fatalf("ptr method by name failed")
+ }
+ i = m.Func.Call([]Value{ValueOf(&p), ValueOf(13)})[0].Int()
+ if i != 325 {
+ t.Errorf("Pointer Type MethodByName returned %d; want 325", i)
+ }
+
+ m, ok = TypeOf(&p).MethodByName("NoArgs")
+ if !ok {
+ t.Fatalf("method by name failed")
+ }
+ n = len(m.Func.Call([]Value{ValueOf(&p)}))
+ if n != 0 {
+ t.Errorf("NoArgs returned %d values; want 0", n)
+ }
+
+ // Curried method of value.
+ tfunc := TypeOf((func(int) int)(nil))
+ v := ValueOf(p).Method(1)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Value Method Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(14)})[0].Int()
+ if i != 350 {
+ t.Errorf("Value Method returned %d; want 350", i)
+ }
+ v = ValueOf(p).MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Value MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(15)})[0].Int()
+ if i != 375 {
+ t.Errorf("Value MethodByName returned %d; want 375", i)
+ }
+ v = ValueOf(p).MethodByName("NoArgs")
+ v.Call(nil)
+
+ // Curried method of pointer.
+ v = ValueOf(&p).Method(1)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Value Method Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(16)})[0].Int()
+ if i != 400 {
+ t.Errorf("Pointer Value Method returned %d; want 400", i)
+ }
+ v = ValueOf(&p).MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Value MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(17)})[0].Int()
+ if i != 425 {
+ t.Errorf("Pointer Value MethodByName returned %d; want 425", i)
+ }
+ v = ValueOf(&p).MethodByName("NoArgs")
+ v.Call(nil)
+
+ // Curried method of interface value.
+ // Have to wrap interface value in a struct to get at it.
+ // Passing it to ValueOf directly would
+ // access the underlying Point, not the interface.
+ var x interface {
+ Dist(int) int
+ } = p
+ pv := ValueOf(&x).Elem()
+ v = pv.Method(0)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Interface Method Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(18)})[0].Int()
+ if i != 450 {
+ t.Errorf("Interface Method returned %d; want 450", i)
+ }
+ v = pv.MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Interface MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = v.Call([]Value{ValueOf(19)})[0].Int()
+ if i != 475 {
+ t.Errorf("Interface MethodByName returned %d; want 475", i)
+ }
+}
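+
+// Illustrative sketch, not exercised by the checks above: ValueOf applied to
+// an interface variable inspects the stored concrete value, so the result has
+// Point's kind; only going through a pointer and Elem, as above, keeps the
+// interface type itself.
+func methodViaInterfaceSketch() (Kind, Kind) {
+ var x interface{ Dist(int) int } = Point{1, 2}
+ direct := ValueOf(x) // Struct: the dynamic Point value
+ viaElem := ValueOf(&x).Elem() // Interface: the interface itself
+ return direct.Kind(), viaElem.Kind()
+}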
+
+func TestMethodValue(t *testing.T) {
+ p := Point{3, 4}
+ var i int64
+
+ // Curried method of value.
+ tfunc := TypeOf((func(int) int)(nil))
+ v := ValueOf(p).Method(1)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Value Method Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(10)})[0].Int()
+ if i != 250 {
+ t.Errorf("Value Method returned %d; want 250", i)
+ }
+ v = ValueOf(p).MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Value MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(11)})[0].Int()
+ if i != 275 {
+ t.Errorf("Value MethodByName returned %d; want 275", i)
+ }
+ v = ValueOf(p).MethodByName("NoArgs")
+ ValueOf(v.Interface()).Call(nil)
+ v.Interface().(func())()
+
+ // Curried method of pointer.
+ v = ValueOf(&p).Method(1)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Value Method Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(12)})[0].Int()
+ if i != 300 {
+ t.Errorf("Pointer Value Method returned %d; want 300", i)
+ }
+ v = ValueOf(&p).MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Value MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(13)})[0].Int()
+ if i != 325 {
+ t.Errorf("Pointer Value MethodByName returned %d; want 325", i)
+ }
+ v = ValueOf(&p).MethodByName("NoArgs")
+ ValueOf(v.Interface()).Call(nil)
+ v.Interface().(func())()
+
+ // Curried method of pointer to pointer.
+ pp := &p
+ v = ValueOf(&pp).Elem().Method(1)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Pointer Value Method Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(14)})[0].Int()
+ if i != 350 {
+ t.Errorf("Pointer Pointer Value Method returned %d; want 350", i)
+ }
+ v = ValueOf(&pp).Elem().MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Pointer Pointer Value MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(15)})[0].Int()
+ if i != 375 {
+ t.Errorf("Pointer Pointer Value MethodByName returned %d; want 375", i)
+ }
+
+ // Curried method of interface value.
+ // Have to wrap interface value in a struct to get at it.
+ // Passing it to ValueOf directly would
+ // access the underlying Point, not the interface.
+ var s = struct {
+ X interface {
+ Dist(int) int
+ }
+ }{p}
+ pv := ValueOf(s).Field(0)
+ v = pv.Method(0)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Interface Method Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(16)})[0].Int()
+ if i != 400 {
+ t.Errorf("Interface Method returned %d; want 400", i)
+ }
+ v = pv.MethodByName("Dist")
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Interface MethodByName Type is %s; want %s", tt, tfunc)
+ }
+ i = ValueOf(v.Interface()).Call([]Value{ValueOf(17)})[0].Int()
+ if i != 425 {
+ t.Errorf("Interface MethodByName returned %d; want 425", i)
+ }
+
+ // For issue #33628: method args are not stored at the right offset
+ // on amd64p32.
+ m64 := ValueOf(&p).MethodByName("Int64Method").Interface().(func(int64) int64)
+ if x := m64(123); x != 123 {
+ t.Errorf("Int64Method returned %d; want 123", x)
+ }
+ m32 := ValueOf(&p).MethodByName("Int32Method").Interface().(func(int32) int32)
+ if x := m32(456); x != 456 {
+ t.Errorf("Int32Method returned %d; want 456", x)
+ }
+}
+
+func TestVariadicMethodValue(t *testing.T) {
+ p := Point{3, 4}
+ points := []Point{{20, 21}, {22, 23}, {24, 25}}
+ want := int64(p.TotalDist(points[0], points[1], points[2]))
+
+ // Variadic method of type.
+ tfunc := TypeOf((func(Point, ...Point) int)(nil))
+ if tt := TypeOf(p).Method(4).Type; tt != tfunc {
+ t.Errorf("Variadic Method Type from TypeOf is %s; want %s", tt, tfunc)
+ }
+
+ // Curried method of value.
+ tfunc = TypeOf((func(...Point) int)(nil))
+ v := ValueOf(p).Method(4)
+ if tt := v.Type(); tt != tfunc {
+ t.Errorf("Variadic Method Type is %s; want %s", tt, tfunc)
+ }
+ i := ValueOf(v.Interface()).Call([]Value{ValueOf(points[0]), ValueOf(points[1]), ValueOf(points[2])})[0].Int()
+ if i != want {
+ t.Errorf("Variadic Method returned %d; want %d", i, want)
+ }
+ i = ValueOf(v.Interface()).CallSlice([]Value{ValueOf(points)})[0].Int()
+ if i != want {
+ t.Errorf("Variadic Method CallSlice returned %d; want %d", i, want)
+ }
+
+ f := v.Interface().(func(...Point) int)
+ i = int64(f(points[0], points[1], points[2]))
+ if i != want {
+ t.Errorf("Variadic Method Interface returned %d; want %d", i, want)
+ }
+ i = int64(f(points...))
+ if i != want {
+ t.Errorf("Variadic Method Interface Slice returned %d; want %d", i, want)
+ }
+}
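+
+// Hedged sketch of the Call/CallSlice distinction above, assuming at least
+// two points are supplied: Call spreads individual Values into the variadic
+// parameter, while CallSlice passes a single Value that must already hold
+// the []Point.
+func variadicCallSketch(p Point, pts []Point) (int64, int64) {
+ v := ValueOf(p).MethodByName("TotalDist")
+ spread := v.Call([]Value{ValueOf(pts[0]), ValueOf(pts[1])})[0].Int()
+ asSlice := v.CallSlice([]Value{ValueOf(pts)})[0].Int()
+ return spread, asSlice
+}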
+
+type DirectIfaceT struct {
+ p *int
+}
+
+func (d DirectIfaceT) M() int { return *d.p }
+
+func TestDirectIfaceMethod(t *testing.T) {
+ x := 42
+ v := DirectIfaceT{&x}
+ typ := TypeOf(v)
+ m, ok := typ.MethodByName("M")
+ if !ok {
+ t.Fatalf("cannot find method M")
+ }
+ in := []Value{ValueOf(v)}
+ out := m.Func.Call(in)
+ if got := out[0].Int(); got != 42 {
+ t.Errorf("Call with value receiver got %d, want 42", got)
+ }
+
+ pv := &v
+ typ = TypeOf(pv)
+ m, ok = typ.MethodByName("M")
+ if !ok {
+ t.Fatalf("cannot find method M")
+ }
+ in = []Value{ValueOf(pv)}
+ out = m.Func.Call(in)
+ if got := out[0].Int(); got != 42 {
+ t.Errorf("Call with pointer receiver got %d, want 42", got)
+ }
+}
+
+// Reflect version of $GOROOT/test/method5.go
+
+// Concrete types implementing M method.
+// Smaller than a word, word-sized, larger than a word.
+// Value and pointer receivers.
+
+type Tinter interface {
+ M(int, byte) (byte, int)
+}
+
+type Tsmallv byte
+
+func (v Tsmallv) M(x int, b byte) (byte, int) { return b, x + int(v) }
+
+type Tsmallp byte
+
+func (p *Tsmallp) M(x int, b byte) (byte, int) { return b, x + int(*p) }
+
+type Twordv uintptr
+
+func (v Twordv) M(x int, b byte) (byte, int) { return b, x + int(v) }
+
+type Twordp uintptr
+
+func (p *Twordp) M(x int, b byte) (byte, int) { return b, x + int(*p) }
+
+type Tbigv [2]uintptr
+
+func (v Tbigv) M(x int, b byte) (byte, int) { return b, x + int(v[0]) + int(v[1]) }
+
+type Tbigp [2]uintptr
+
+func (p *Tbigp) M(x int, b byte) (byte, int) { return b, x + int(p[0]) + int(p[1]) }
+
+type tinter interface {
+ m(int, byte) (byte, int)
+}
+
+// Embedding via pointer.
+
+type Tm1 struct {
+ Tm2
+}
+
+type Tm2 struct {
+ *Tm3
+}
+
+type Tm3 struct {
+ *Tm4
+}
+
+type Tm4 struct {
+}
+
+func (t4 Tm4) M(x int, b byte) (byte, int) { return b, x + 40 }
+
+func TestMethod5(t *testing.T) {
+ CheckF := func(name string, f func(int, byte) (byte, int), inc int) {
+ b, x := f(1000, 99)
+ if b != 99 || x != 1000+inc {
+ t.Errorf("%s(1000, 99) = %v, %v, want 99, %v", name, b, x, 1000+inc)
+ }
+ }
+
+ CheckV := func(name string, i Value, inc int) {
+ bx := i.Method(0).Call([]Value{ValueOf(1000), ValueOf(byte(99))})
+ b := bx[0].Interface()
+ x := bx[1].Interface()
+ if b != byte(99) || x != 1000+inc {
+ t.Errorf("direct %s.M(1000, 99) = %v, %v, want 99, %v", name, b, x, 1000+inc)
+ }
+
+ CheckF(name+".M", i.Method(0).Interface().(func(int, byte) (byte, int)), inc)
+ }
+
+ var TinterType = TypeOf(new(Tinter)).Elem()
+
+ CheckI := func(name string, i interface{}, inc int) {
+ v := ValueOf(i)
+ CheckV(name, v, inc)
+ CheckV("(i="+name+")", v.Convert(TinterType), inc)
+ }
+
+ sv := Tsmallv(1)
+ CheckI("sv", sv, 1)
+ CheckI("&sv", &sv, 1)
+
+ sp := Tsmallp(2)
+ CheckI("&sp", &sp, 2)
+
+ wv := Twordv(3)
+ CheckI("wv", wv, 3)
+ CheckI("&wv", &wv, 3)
+
+ wp := Twordp(4)
+ CheckI("&wp", &wp, 4)
+
+ bv := Tbigv([2]uintptr{5, 6})
+ CheckI("bv", bv, 11)
+ CheckI("&bv", &bv, 11)
+
+ bp := Tbigp([2]uintptr{7, 8})
+ CheckI("&bp", &bp, 15)
+
+ t4 := Tm4{}
+ t3 := Tm3{&t4}
+ t2 := Tm2{&t3}
+ t1 := Tm1{t2}
+ CheckI("t4", t4, 40)
+ CheckI("&t4", &t4, 40)
+ CheckI("t3", t3, 40)
+ CheckI("&t3", &t3, 40)
+ CheckI("t2", t2, 40)
+ CheckI("&t2", &t2, 40)
+ CheckI("t1", t1, 40)
+ CheckI("&t1", &t1, 40)
+
+ var tnil Tinter
+ vnil := ValueOf(&tnil).Elem()
+ shouldPanic("Method", func() { vnil.Method(0) })
+}
+
+func TestInterfaceSet(t *testing.T) {
+ p := &Point{3, 4}
+
+ var s struct {
+ I interface{}
+ P interface {
+ Dist(int) int
+ }
+ }
+ sv := ValueOf(&s).Elem()
+ sv.Field(0).Set(ValueOf(p))
+ if q := s.I.(*Point); q != p {
+ t.Errorf("i: have %p want %p", q, p)
+ }
+
+ pv := sv.Field(1)
+ pv.Set(ValueOf(p))
+ if q := s.P.(*Point); q != p {
+ t.Errorf("i: have %p want %p", q, p)
+ }
+
+ i := pv.Method(0).Call([]Value{ValueOf(10)})[0].Int()
+ if i != 250 {
+ t.Errorf("Interface Method returned %d; want 250", i)
+ }
+}
+
+type T1 struct {
+ a string
+ int
+}
+
+func TestAnonymousFields(t *testing.T) {
+ var field StructField
+ var ok bool
+ var t1 T1
+ type1 := TypeOf(t1)
+ if field, ok = type1.FieldByName("int"); !ok {
+ t.Fatal("no field 'int'")
+ }
+ if field.Index[0] != 1 {
+ t.Error("field index should be 1; is", field.Index)
+ }
+}
+
+type FTest struct {
+ s interface{}
+ name string
+ index []int
+ value int
+}
+
+type D1 struct {
+ d int
+}
+type D2 struct {
+ d int
+}
+
+type S0 struct {
+ A, B, C int
+ D1
+ D2
+}
+
+type S1 struct {
+ B int
+ S0
+}
+
+type S2 struct {
+ A int
+ *S1
+}
+
+type S1x struct {
+ S1
+}
+
+type S1y struct {
+ S1
+}
+
+type S3 struct {
+ S1x
+ S2
+ D, E int
+ *S1y
+}
+
+type S4 struct {
+ *S4
+ A int
+}
+
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+ S6
+ S7
+ S8
+}
+
+type S6 struct {
+ X int
+}
+
+type S7 S6
+
+type S8 struct {
+ S9
+}
+
+type S9 struct {
+ X int
+ Y int
+}
+
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+ S11
+ S12
+ S13
+}
+
+type S11 struct {
+ S6
+}
+
+type S12 struct {
+ S6
+}
+
+type S13 struct {
+ S8
+}
+
+// The X in S15.S11.S1 and S16.S11.S1 annihilate.
+type S14 struct {
+ S15
+ S16
+}
+
+type S15 struct {
+ S11
+}
+
+type S16 struct {
+ S11
+}
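+
+// Minimal sketch of the annihilation rule described in the comments above:
+// two fields named X at the same shallowest depth cancel each other and also
+// hide any deeper X, so FieldByName reports no match at all.
+func ambiguousFieldSketch() bool {
+ _, ok := TypeOf(S5{}).FieldByName("X") // false: S6.X and S7.X collide
+ return ok
+}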
+
+var fieldTests = []FTest{
+ {struct{}{}, "", nil, 0},
+ {struct{}{}, "Foo", nil, 0},
+ {S0{A: 'a'}, "A", []int{0}, 'a'},
+ {S0{}, "D", nil, 0},
+ {S1{S0: S0{A: 'a'}}, "A", []int{1, 0}, 'a'},
+ {S1{B: 'b'}, "B", []int{0}, 'b'},
+ {S1{}, "S0", []int{1}, 0},
+ {S1{S0: S0{C: 'c'}}, "C", []int{1, 2}, 'c'},
+ {S2{A: 'a'}, "A", []int{0}, 'a'},
+ {S2{}, "S1", []int{1}, 0},
+ {S2{S1: &S1{B: 'b'}}, "B", []int{1, 0}, 'b'},
+ {S2{S1: &S1{S0: S0{C: 'c'}}}, "C", []int{1, 1, 2}, 'c'},
+ {S2{}, "D", nil, 0},
+ {S3{}, "S1", nil, 0},
+ {S3{S2: S2{A: 'a'}}, "A", []int{1, 0}, 'a'},
+ {S3{}, "B", nil, 0},
+ {S3{D: 'd'}, "D", []int{2}, 0},
+ {S3{E: 'e'}, "E", []int{3}, 'e'},
+ {S4{A: 'a'}, "A", []int{1}, 'a'},
+ {S4{}, "B", nil, 0},
+ {S5{}, "X", nil, 0},
+ {S5{}, "Y", []int{2, 0, 1}, 0},
+ {S10{}, "X", nil, 0},
+ {S10{}, "Y", []int{2, 0, 0, 1}, 0},
+ {S14{}, "X", nil, 0},
+}
+
+func TestFieldByIndex(t *testing.T) {
+ for _, test := range fieldTests {
+ s := TypeOf(test.s)
+ f := s.FieldByIndex(test.index)
+ if f.Name != "" {
+ if test.index != nil {
+ if f.Name != test.name {
+ t.Errorf("%s.%s found; want %s", s.Name(), f.Name, test.name)
+ }
+ } else {
+ t.Errorf("%s.%s found", s.Name(), f.Name)
+ }
+ } else if len(test.index) > 0 {
+ t.Errorf("%s.%s not found", s.Name(), test.name)
+ }
+
+ if test.value != 0 {
+ v := ValueOf(test.s).FieldByIndex(test.index)
+ if v.IsValid() {
+ if x, ok := v.Interface().(int); ok {
+ if x != test.value {
+ t.Errorf("%s%v is %d; want %d", s.Name(), test.index, x, test.value)
+ }
+ } else {
+ t.Errorf("%s%v value not an int", s.Name(), test.index)
+ }
+ } else {
+ t.Errorf("%s%v value not found", s.Name(), test.index)
+ }
+ }
+ }
+}
+
+func TestFieldByName(t *testing.T) {
+ for _, test := range fieldTests {
+ s := TypeOf(test.s)
+ f, found := s.FieldByName(test.name)
+ if found {
+ if test.index != nil {
+ // Verify field depth and index.
+ if len(f.Index) != len(test.index) {
+ t.Errorf("%s.%s depth %d; want %d: %v vs %v", s.Name(), test.name, len(f.Index), len(test.index), f.Index, test.index)
+ } else {
+ for i, x := range f.Index {
+ if x != test.index[i] {
+ t.Errorf("%s.%s.Index[%d] is %d; want %d", s.Name(), test.name, i, x, test.index[i])
+ }
+ }
+ }
+ } else {
+ t.Errorf("%s.%s found", s.Name(), f.Name)
+ }
+ } else if len(test.index) > 0 {
+ t.Errorf("%s.%s not found", s.Name(), test.name)
+ }
+
+ if test.value != 0 {
+ v := ValueOf(test.s).FieldByName(test.name)
+ if v.IsValid() {
+ if x, ok := v.Interface().(int); ok {
+ if x != test.value {
+ t.Errorf("%s.%s is %d; want %d", s.Name(), test.name, x, test.value)
+ }
+ } else {
+ t.Errorf("%s.%s value not an int", s.Name(), test.name)
+ }
+ } else {
+ t.Errorf("%s.%s value not found", s.Name(), test.name)
+ }
+ }
+ }
+}
+
+func TestImportPath(t *testing.T) {
+ tests := []struct {
+ t Type
+ path string
+ }{
+ {TypeOf(&base64.Encoding{}).Elem(), "encoding/base64"},
+ {TypeOf(int(0)), ""},
+ {TypeOf(int8(0)), ""},
+ {TypeOf(int16(0)), ""},
+ {TypeOf(int32(0)), ""},
+ {TypeOf(int64(0)), ""},
+ {TypeOf(uint(0)), ""},
+ {TypeOf(uint8(0)), ""},
+ {TypeOf(uint16(0)), ""},
+ {TypeOf(uint32(0)), ""},
+ {TypeOf(uint64(0)), ""},
+ {TypeOf(uintptr(0)), ""},
+ {TypeOf(float32(0)), ""},
+ {TypeOf(float64(0)), ""},
+ {TypeOf(complex64(0)), ""},
+ {TypeOf(complex128(0)), ""},
+ {TypeOf(byte(0)), ""},
+ {TypeOf(rune(0)), ""},
+ {TypeOf([]byte(nil)), ""},
+ {TypeOf([]rune(nil)), ""},
+ {TypeOf(string("")), ""},
+ {TypeOf((*interface{})(nil)).Elem(), ""},
+ {TypeOf((*byte)(nil)), ""},
+ {TypeOf((*rune)(nil)), ""},
+ {TypeOf((*int64)(nil)), ""},
+ {TypeOf(map[string]int{}), ""},
+ {TypeOf((*error)(nil)).Elem(), ""},
+ {TypeOf((*Point)(nil)), ""},
+ {TypeOf((*Point)(nil)).Elem(), "reflect_test"},
+ }
+ for _, test := range tests {
+ if path := test.t.PkgPath(); path != test.path {
+ t.Errorf("%v.PkgPath() = %q, want %q", test.t, path, test.path)
+ }
+ }
+}
+
+func TestFieldPkgPath(t *testing.T) {
+ type x int
+ typ := TypeOf(struct {
+ Exported string
+ unexported string
+ OtherPkgFields
+ int // issue 21702
+ *x // issue 21122
+ }{})
+
+ type pkgpathTest struct {
+ index []int
+ pkgPath string
+ embedded bool
+ }
+
+ checkPkgPath := func(name string, s []pkgpathTest) {
+ for _, test := range s {
+ f := typ.FieldByIndex(test.index)
+ if got, want := f.PkgPath, test.pkgPath; got != want {
+ t.Errorf("%s: Field(%d).PkgPath = %q, want %q", name, test.index, got, want)
+ }
+ if got, want := f.Anonymous, test.embedded; got != want {
+ t.Errorf("%s: Field(%d).Anonymous = %v, want %v", name, test.index, got, want)
+ }
+ }
+ }
+
+ checkPkgPath("testStruct", []pkgpathTest{
+ {[]int{0}, "", false}, // Exported
+ {[]int{1}, "reflect_test", false}, // unexported
+ {[]int{2}, "", true}, // OtherPkgFields
+ {[]int{2, 0}, "", false}, // OtherExported
+ {[]int{2, 1}, "reflect", false}, // otherUnexported
+ {[]int{3}, "reflect_test", true}, // int
+ {[]int{4}, "reflect_test", true}, // *x
+ })
+
+ type localOtherPkgFields OtherPkgFields
+ typ = TypeOf(localOtherPkgFields{})
+ checkPkgPath("localOtherPkgFields", []pkgpathTest{
+ {[]int{0}, "", false}, // OtherExported
+ {[]int{1}, "reflect", false}, // otherUnexported
+ })
+}
+
+func TestVariadicType(t *testing.T) {
+ // Test example from Type documentation.
+ var f func(x int, y ...float64)
+ typ := TypeOf(f)
+ if typ.NumIn() == 2 && typ.In(0) == TypeOf(int(0)) {
+ sl := typ.In(1)
+ if sl.Kind() == Slice {
+ if sl.Elem() == TypeOf(0.0) {
+ // ok
+ return
+ }
+ }
+ }
+
+ // Failed
+ t.Errorf("want NumIn() = 2, In(0) = int, In(1) = []float64")
+ s := fmt.Sprintf("have NumIn() = %d", typ.NumIn())
+ for i := 0; i < typ.NumIn(); i++ {
+ s += fmt.Sprintf(", In(%d) = %s", i, typ.In(i))
+ }
+ t.Error(s)
+}
+
+type inner struct {
+ x int
+}
+
+type outer struct {
+ y int
+ inner
+}
+
+func (*inner) M() {}
+func (*outer) M() {}
+
+func TestNestedMethods(t *testing.T) {
+ typ := TypeOf((*outer)(nil))
+ if typ.NumMethod() != 1 || typ.Method(0).Func.Pointer() != ValueOf((*outer).M).Pointer() {
+ t.Errorf("Wrong method table for outer: (M=%p)", (*outer).M)
+ for i := 0; i < typ.NumMethod(); i++ {
+ m := typ.Method(i)
+ t.Errorf("\t%d: %s %#x\n", i, m.Name, m.Func.Pointer())
+ }
+ }
+}
+
+type unexp struct{}
+
+func (*unexp) f() (int32, int8) { return 7, 7 }
+func (*unexp) g() (int64, int8) { return 8, 8 }
+
+type unexpI interface {
+ f() (int32, int8)
+}
+
+var unexpi unexpI = new(unexp)
+
+func TestUnexportedMethods(t *testing.T) {
+ typ := TypeOf(unexpi)
+
+ if got := typ.NumMethod(); got != 0 {
+ t.Errorf("NumMethod=%d, want 0 satisfied methods", got)
+ }
+}
+
+type InnerInt struct {
+ X int
+}
+
+type OuterInt struct {
+ Y int
+ InnerInt
+}
+
+func (i *InnerInt) M() int {
+ return i.X
+}
+
+func TestEmbeddedMethods(t *testing.T) {
+ typ := TypeOf((*OuterInt)(nil))
+ if typ.NumMethod() != 1 || typ.Method(0).Func.Pointer() != ValueOf((*OuterInt).M).Pointer() {
+ t.Errorf("Wrong method table for OuterInt: (m=%p)", (*OuterInt).M)
+ for i := 0; i < typ.NumMethod(); i++ {
+ m := typ.Method(i)
+ t.Errorf("\t%d: %s %#x\n", i, m.Name, m.Func.Pointer())
+ }
+ }
+
+ i := &InnerInt{3}
+ if v := ValueOf(i).Method(0).Call(nil)[0].Int(); v != 3 {
+ t.Errorf("i.M() = %d, want 3", v)
+ }
+
+ o := &OuterInt{1, InnerInt{2}}
+ if v := ValueOf(o).Method(0).Call(nil)[0].Int(); v != 2 {
+ t.Errorf("i.M() = %d, want 2", v)
+ }
+
+ f := (*OuterInt).M
+ if v := f(o); v != 2 {
+ t.Errorf("f(o) = %d, want 2", v)
+ }
+}
+
+type FuncDDD func(...interface{}) error
+
+func (f FuncDDD) M() {}
+
+func TestNumMethodOnDDD(t *testing.T) {
+ rv := ValueOf((FuncDDD)(nil))
+ if n := rv.NumMethod(); n != 1 {
+ t.Fatalf("NumMethod()=%d, want 1", n)
+ }
+}
+
+func TestPtrTo(t *testing.T) {
+ // This block of code ensures that the ptrToThis field of the
+ // reflect data for *unsafe.Pointer is nonzero; see
+ // https://golang.org/issue/19003
+ var x unsafe.Pointer
+ var y = &x
+ var z = &y
+
+ var i int
+
+ typ := TypeOf(z)
+ for i = 0; i < 100; i++ {
+ typ = PtrTo(typ)
+ }
+ for i = 0; i < 100; i++ {
+ typ = typ.Elem()
+ }
+ if typ != TypeOf(z) {
+ t.Errorf("after 100 PtrTo and Elem, have %s, want %s", typ, TypeOf(z))
+ }
+}
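+
+// Compact restatement of the invariant the loop above exercises: PtrTo and
+// Elem are inverses for pointer types, so 100 applications of each cancel.
+func ptrToSketch() bool {
+ t := TypeOf(0)
+ return PtrTo(t).Elem() == t // true
+}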
+
+func TestPtrToGC(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ pt := PtrTo(tt)
+ const n = 100
+ var x []interface{}
+ for i := 0; i < n; i++ {
+ v := New(pt)
+ p := new(*uintptr)
+ *p = new(uintptr)
+ **p = uintptr(i)
+ v.Elem().Set(ValueOf(p).Convert(pt))
+ x = append(x, v.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ k := ValueOf(xi).Elem().Elem().Elem().Interface().(uintptr)
+ if k != uintptr(i) {
+ t.Errorf("lost x[%d] = %d, want %d", i, k, i)
+ }
+ }
+}
+
+func BenchmarkPtrTo(b *testing.B) {
+ // Construct a type with a zero ptrToThis.
+ type T struct{ int }
+ t := SliceOf(TypeOf(T{}))
+ ptrToThis := ValueOf(t).Elem().FieldByName("ptrToThis")
+ if !ptrToThis.IsValid() {
+ b.Fatalf("%v has no ptrToThis field; was it removed from rtype?", t)
+ }
+ if ptrToThis.Int() != 0 {
+ b.Fatalf("%v.ptrToThis unexpectedly nonzero", t)
+ }
+ b.ResetTimer()
+
+ // Now benchmark calling PtrTo on it: we'll have to hit the ptrMap cache on
+ // every call.
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ PtrTo(t)
+ }
+ })
+}
+
+func TestAddr(t *testing.T) {
+ var p struct {
+ X, Y int
+ }
+
+ v := ValueOf(&p)
+ v = v.Elem()
+ v = v.Addr()
+ v = v.Elem()
+ v = v.Field(0)
+ v.SetInt(2)
+ if p.X != 2 {
+ t.Errorf("Addr.Elem.Set failed to set value")
+ }
+
+ // Again but take address of the ValueOf value.
+ // Exercises generation of PtrTypes not present in the binary.
+ q := &p
+ v = ValueOf(&q).Elem()
+ v = v.Addr()
+ v = v.Elem()
+ v = v.Elem()
+ v = v.Addr()
+ v = v.Elem()
+ v = v.Field(0)
+ v.SetInt(3)
+ if p.X != 3 {
+ t.Errorf("Addr.Elem.Set failed to set value")
+ }
+
+ // Starting without a pointer we should get the changed value
+ // in the interface.
+ qq := p
+ v = ValueOf(&qq).Elem()
+ v0 := v
+ v = v.Addr()
+ v = v.Elem()
+ v = v.Field(0)
+ v.SetInt(4)
+ if p.X != 3 { // should be unchanged from last time
+ t.Errorf("somehow value Set changed original p")
+ }
+ p = v0.Interface().(struct {
+ X, Y int
+ })
+ if p.X != 4 {
+ t.Errorf("Addr.Elem.Set valued to set value in top value")
+ }
+
+ // Verify that taking the address of a type gives us a pointer
+ // which we can convert back using the usual interface
+ // notation.
+ var s struct {
+ B *bool
+ }
+ ps := ValueOf(&s).Elem().Field(0).Addr().Interface()
+ *(ps.(**bool)) = new(bool)
+ if s.B == nil {
+ t.Errorf("Addr.Interface direct assignment failed")
+ }
+}
+
+func noAlloc(t *testing.T, n int, f func(int)) {
+ if testing.Short() {
+ t.Skip("skipping malloc count in short mode")
+ }
+ if runtime.GOMAXPROCS(0) > 1 {
+ t.Skip("skipping; GOMAXPROCS>1")
+ }
+ i := -1
+ allocs := testing.AllocsPerRun(n, func() {
+ f(i)
+ i++
+ })
+ if allocs > 0 {
+ t.Errorf("%d iterations: got %v mallocs, want 0", n, allocs)
+ }
+}
+
+func TestAllocations(t *testing.T) {
+ noAlloc(t, 100, func(j int) {
+ var i interface{}
+ var v Value
+
+ // We can uncomment this when compiler escape analysis
+ // is good enough to see that the integer assigned to i
+ // does not escape and therefore need not be allocated.
+ //
+ // i = 42 + j
+ // v = ValueOf(i)
+ // if int(v.Int()) != 42+j {
+ // panic("wrong int")
+ // }
+
+ i = func(j int) int { return j }
+ v = ValueOf(i)
+ if v.Interface().(func(int) int)(j) != j {
+ panic("wrong result")
+ }
+ })
+}
+
+func TestSmallNegativeInt(t *testing.T) {
+ i := int16(-1)
+ v := ValueOf(i)
+ if v.Int() != -1 {
+ t.Errorf("int16(-1).Int() returned %v", v.Int())
+ }
+}
+
+func TestIndex(t *testing.T) {
+ xs := []byte{1, 2, 3, 4, 5, 6, 7, 8}
+ v := ValueOf(xs).Index(3).Interface().(byte)
+ if v != xs[3] {
+ t.Errorf("xs.Index(3) = %v; expected %v", v, xs[3])
+ }
+ xa := [8]byte{10, 20, 30, 40, 50, 60, 70, 80}
+ v = ValueOf(xa).Index(2).Interface().(byte)
+ if v != xa[2] {
+ t.Errorf("xa.Index(2) = %v; expected %v", v, xa[2])
+ }
+ s := "0123456789"
+ v = ValueOf(s).Index(3).Interface().(byte)
+ if v != s[3] {
+ t.Errorf("s.Index(3) = %v; expected %v", v, s[3])
+ }
+}
+
+func TestSlice(t *testing.T) {
+ xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
+ v := ValueOf(xs).Slice(3, 5).Interface().([]int)
+ if len(v) != 2 {
+ t.Errorf("len(xs.Slice(3, 5)) = %d", len(v))
+ }
+ if cap(v) != 5 {
+ t.Errorf("cap(xs.Slice(3, 5)) = %d", cap(v))
+ }
+ if !DeepEqual(v[0:5], xs[3:]) {
+ t.Errorf("xs.Slice(3, 5)[0:5] = %v", v[0:5])
+ }
+ xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80}
+ v = ValueOf(&xa).Elem().Slice(2, 5).Interface().([]int)
+ if len(v) != 3 {
+ t.Errorf("len(xa.Slice(2, 5)) = %d", len(v))
+ }
+ if cap(v) != 6 {
+ t.Errorf("cap(xa.Slice(2, 5)) = %d", cap(v))
+ }
+ if !DeepEqual(v[0:6], xa[2:]) {
+ t.Errorf("xs.Slice(2, 5)[0:6] = %v", v[0:6])
+ }
+ s := "0123456789"
+ vs := ValueOf(s).Slice(3, 5).Interface().(string)
+ if vs != s[3:5] {
+ t.Errorf("s.Slice(3, 5) = %q; expected %q", vs, s[3:5])
+ }
+
+ rv := ValueOf(&xs).Elem()
+ rv = rv.Slice(3, 4)
+ ptr2 := rv.Pointer()
+ rv = rv.Slice(5, 5)
+ ptr3 := rv.Pointer()
+ if ptr3 != ptr2 {
+ t.Errorf("xs.Slice(3,4).Slice3(5,5).Pointer() = %#x, want %#x", ptr3, ptr2)
+ }
+}
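+
+// Worked sketch of the len/cap arithmetic checked above: Slice(i, j) on a
+// value with capacity c yields length j-i and capacity c-i, and shares the
+// original backing array.
+func sliceCapSketch() (int, int) {
+ xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
+ v := ValueOf(xs).Slice(3, 5)
+ return v.Len(), v.Cap() // 2, 5
+}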
+
+func TestSlice3(t *testing.T) {
+ xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
+ v := ValueOf(xs).Slice3(3, 5, 7).Interface().([]int)
+ if len(v) != 2 {
+ t.Errorf("len(xs.Slice3(3, 5, 7)) = %d", len(v))
+ }
+ if cap(v) != 4 {
+ t.Errorf("cap(xs.Slice3(3, 5, 7)) = %d", cap(v))
+ }
+ if !DeepEqual(v[0:4], xs[3:7:7]) {
+ t.Errorf("xs.Slice3(3, 5, 7)[0:4] = %v", v[0:4])
+ }
+ rv := ValueOf(&xs).Elem()
+ shouldPanic("Slice3", func() { rv.Slice3(1, 2, 1) })
+ shouldPanic("Slice3", func() { rv.Slice3(1, 1, 11) })
+ shouldPanic("Slice3", func() { rv.Slice3(2, 2, 1) })
+
+ xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80}
+ v = ValueOf(&xa).Elem().Slice3(2, 5, 6).Interface().([]int)
+ if len(v) != 3 {
+ t.Errorf("len(xa.Slice(2, 5, 6)) = %d", len(v))
+ }
+ if cap(v) != 4 {
+ t.Errorf("cap(xa.Slice(2, 5, 6)) = %d", cap(v))
+ }
+ if !DeepEqual(v[0:4], xa[2:6:6]) {
+ t.Errorf("xs.Slice(2, 5, 6)[0:4] = %v", v[0:4])
+ }
+ rv = ValueOf(&xa).Elem()
+ shouldPanic("Slice3", func() { rv.Slice3(1, 2, 1) })
+ shouldPanic("Slice3", func() { rv.Slice3(1, 1, 11) })
+ shouldPanic("Slice3", func() { rv.Slice3(2, 2, 1) })
+
+ s := "hello world"
+ rv = ValueOf(&s).Elem()
+ shouldPanic("Slice3", func() { rv.Slice3(1, 2, 3) })
+
+ rv = ValueOf(&xs).Elem()
+ rv = rv.Slice3(3, 5, 7)
+ ptr2 := rv.Pointer()
+ rv = rv.Slice3(4, 4, 4)
+ ptr3 := rv.Pointer()
+ if ptr3 != ptr2 {
+ t.Errorf("xs.Slice3(3,5,7).Slice3(4,4,4).Pointer() = %#x, want %#x", ptr3, ptr2)
+ }
+}
+
+func TestSetLenCap(t *testing.T) {
+ xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
+ xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80}
+
+ vs := ValueOf(&xs).Elem()
+ shouldPanic("SetLen", func() { vs.SetLen(10) })
+ shouldPanic("SetCap", func() { vs.SetCap(10) })
+ shouldPanic("SetLen", func() { vs.SetLen(-1) })
+ shouldPanic("SetCap", func() { vs.SetCap(-1) })
+ shouldPanic("SetCap", func() { vs.SetCap(6) }) // smaller than len
+ vs.SetLen(5)
+ if len(xs) != 5 || cap(xs) != 8 {
+ t.Errorf("after SetLen(5), len, cap = %d, %d, want 5, 8", len(xs), cap(xs))
+ }
+ vs.SetCap(6)
+ if len(xs) != 5 || cap(xs) != 6 {
+ t.Errorf("after SetCap(6), len, cap = %d, %d, want 5, 6", len(xs), cap(xs))
+ }
+ vs.SetCap(5)
+ if len(xs) != 5 || cap(xs) != 5 {
+ t.Errorf("after SetCap(5), len, cap = %d, %d, want 5, 5", len(xs), cap(xs))
+ }
+ shouldPanic("SetCap", func() { vs.SetCap(4) }) // smaller than len
+ shouldPanic("SetLen", func() { vs.SetLen(6) }) // bigger than cap
+
+ va := ValueOf(&xa).Elem()
+ shouldPanic("SetLen", func() { va.SetLen(8) })
+ shouldPanic("SetCap", func() { va.SetCap(8) })
+}
+
+func TestVariadic(t *testing.T) {
+ var b bytes.Buffer
+ V := ValueOf
+
+ b.Reset()
+ V(fmt.Fprintf).Call([]Value{V(&b), V("%s, %d world"), V("hello"), V(42)})
+ if b.String() != "hello, 42 world" {
+ t.Errorf("after Fprintf Call: %q != %q", b.String(), "hello 42 world")
+ }
+
+ b.Reset()
+ V(fmt.Fprintf).CallSlice([]Value{V(&b), V("%s, %d world"), V([]interface{}{"hello", 42})})
+ if b.String() != "hello, 42 world" {
+ t.Errorf("after Fprintf CallSlice: %q != %q", b.String(), "hello 42 world")
+ }
+}
+
+func TestFuncArg(t *testing.T) {
+ f1 := func(i int, f func(int) int) int { return f(i) }
+ f2 := func(i int) int { return i + 1 }
+ r := ValueOf(f1).Call([]Value{ValueOf(100), ValueOf(f2)})
+ if r[0].Int() != 101 {
+ t.Errorf("function returned %d, want 101", r[0].Int())
+ }
+}
+
+func TestStructArg(t *testing.T) {
+ type padded struct {
+ B string
+ C int32
+ }
+ var (
+ gotA padded
+ gotB uint32
+ wantA = padded{"3", 4}
+ wantB = uint32(5)
+ )
+ f := func(a padded, b uint32) {
+ gotA, gotB = a, b
+ }
+ ValueOf(f).Call([]Value{ValueOf(wantA), ValueOf(wantB)})
+ if gotA != wantA || gotB != wantB {
+ t.Errorf("function called with (%v, %v), want (%v, %v)", gotA, gotB, wantA, wantB)
+ }
+}
+
+var tagGetTests = []struct {
+ Tag StructTag
+ Key string
+ Value string
+}{
+ {`protobuf:"PB(1,2)"`, `protobuf`, `PB(1,2)`},
+ {`protobuf:"PB(1,2)"`, `foo`, ``},
+ {`protobuf:"PB(1,2)"`, `rotobuf`, ``},
+ {`protobuf:"PB(1,2)" json:"name"`, `json`, `name`},
+ {`protobuf:"PB(1,2)" json:"name"`, `protobuf`, `PB(1,2)`},
+ {`k0:"values contain spaces" k1:"and\ttabs"`, "k0", "values contain spaces"},
+ {`k0:"values contain spaces" k1:"and\ttabs"`, "k1", "and\ttabs"},
+}
+
+func TestTagGet(t *testing.T) {
+ for _, tt := range tagGetTests {
+ if v := tt.Tag.Get(tt.Key); v != tt.Value {
+ t.Errorf("StructTag(%#q).Get(%#q) = %#q, want %#q", tt.Tag, tt.Key, v, tt.Value)
+ }
+ }
+}
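+
+// Small sketch of the tag convention the table above relies on: Get matches
+// keys exactly (no substrings) and returns the unquoted value, preserving
+// escapes such as \t.
+func tagGetSketch() (string, string, string) {
+ tag := StructTag(`json:"name,omitempty" db:"id"`)
+ return tag.Get("json"), tag.Get("db"), tag.Get("son") // "name,omitempty", "id", ""
+}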
+
+func TestBytes(t *testing.T) {
+ type B []byte
+ x := B{1, 2, 3, 4}
+ y := ValueOf(x).Bytes()
+ if !bytes.Equal(x, y) {
+ t.Fatalf("ValueOf(%v).Bytes() = %v", x, y)
+ }
+ if &x[0] != &y[0] {
+ t.Errorf("ValueOf(%p).Bytes() = %p", &x[0], &y[0])
+ }
+}
+
+func TestSetBytes(t *testing.T) {
+ type B []byte
+ var x B
+ y := []byte{1, 2, 3, 4}
+ ValueOf(&x).Elem().SetBytes(y)
+ if !bytes.Equal(x, y) {
+ t.Fatalf("ValueOf(%v).Bytes() = %v", x, y)
+ }
+ if &x[0] != &y[0] {
+ t.Errorf("ValueOf(%p).Bytes() = %p", &x[0], &y[0])
+ }
+}
+
+type Private struct {
+ x int
+ y **int
+ Z int
+}
+
+func (p *Private) m() {
+}
+
+type private struct {
+ Z int
+ z int
+ S string
+ A [1]Private
+ T []Private
+}
+
+func (p *private) P() {
+}
+
+type Public struct {
+ X int
+ Y **int
+ private
+}
+
+func (p *Public) M() {
+}
+
+func TestUnexported(t *testing.T) {
+ var pub Public
+ pub.S = "S"
+ pub.T = pub.A[:]
+ v := ValueOf(&pub)
+ isValid(v.Elem().Field(0))
+ isValid(v.Elem().Field(1))
+ isValid(v.Elem().Field(2))
+ isValid(v.Elem().FieldByName("X"))
+ isValid(v.Elem().FieldByName("Y"))
+ isValid(v.Elem().FieldByName("Z"))
+ isValid(v.Type().Method(0).Func)
+ m, _ := v.Type().MethodByName("M")
+ isValid(m.Func)
+ m, _ = v.Type().MethodByName("P")
+ isValid(m.Func)
+ isNonNil(v.Elem().Field(0).Interface())
+ isNonNil(v.Elem().Field(1).Interface())
+ isNonNil(v.Elem().Field(2).Field(2).Index(0))
+ isNonNil(v.Elem().FieldByName("X").Interface())
+ isNonNil(v.Elem().FieldByName("Y").Interface())
+ isNonNil(v.Elem().FieldByName("Z").Interface())
+ isNonNil(v.Elem().FieldByName("S").Index(0).Interface())
+ isNonNil(v.Type().Method(0).Func.Interface())
+ m, _ = v.Type().MethodByName("P")
+ isNonNil(m.Func.Interface())
+
+ var priv Private
+ v = ValueOf(&priv)
+ isValid(v.Elem().Field(0))
+ isValid(v.Elem().Field(1))
+ isValid(v.Elem().FieldByName("x"))
+ isValid(v.Elem().FieldByName("y"))
+ shouldPanic("Interface", func() { v.Elem().Field(0).Interface() })
+ shouldPanic("Interface", func() { v.Elem().Field(1).Interface() })
+ shouldPanic("Interface", func() { v.Elem().FieldByName("x").Interface() })
+ shouldPanic("Interface", func() { v.Elem().FieldByName("y").Interface() })
+ shouldPanic("Method", func() { v.Type().Method(0) })
+}
+
+func TestSetPanic(t *testing.T) {
+ ok := func(f func()) { f() }
+ bad := func(f func()) { shouldPanic("Set", f) }
+ clear := func(v Value) { v.Set(Zero(v.Type())) }
+
+ type t0 struct {
+ W int
+ }
+
+ type t1 struct {
+ Y int
+ t0
+ }
+
+ type T2 struct {
+ Z int
+ namedT0 t0
+ }
+
+ type T struct {
+ X int
+ t1
+ T2
+ NamedT1 t1
+ NamedT2 T2
+ namedT1 t1
+ namedT2 T2
+ }
+
+ // not addressable
+ v := ValueOf(T{})
+ bad(func() { clear(v.Field(0)) }) // .X
+ bad(func() { clear(v.Field(1)) }) // .t1
+ bad(func() { clear(v.Field(1).Field(0)) }) // .t1.Y
+ bad(func() { clear(v.Field(1).Field(1)) }) // .t1.t0
+ bad(func() { clear(v.Field(1).Field(1).Field(0)) }) // .t1.t0.W
+ bad(func() { clear(v.Field(2)) }) // .T2
+ bad(func() { clear(v.Field(2).Field(0)) }) // .T2.Z
+ bad(func() { clear(v.Field(2).Field(1)) }) // .T2.namedT0
+ bad(func() { clear(v.Field(2).Field(1).Field(0)) }) // .T2.namedT0.W
+ bad(func() { clear(v.Field(3)) }) // .NamedT1
+ bad(func() { clear(v.Field(3).Field(0)) }) // .NamedT1.Y
+ bad(func() { clear(v.Field(3).Field(1)) }) // .NamedT1.t0
+ bad(func() { clear(v.Field(3).Field(1).Field(0)) }) // .NamedT1.t0.W
+ bad(func() { clear(v.Field(4)) }) // .NamedT2
+ bad(func() { clear(v.Field(4).Field(0)) }) // .NamedT2.Z
+ bad(func() { clear(v.Field(4).Field(1)) }) // .NamedT2.namedT0
+ bad(func() { clear(v.Field(4).Field(1).Field(0)) }) // .NamedT2.namedT0.W
+ bad(func() { clear(v.Field(5)) }) // .namedT1
+ bad(func() { clear(v.Field(5).Field(0)) }) // .namedT1.Y
+ bad(func() { clear(v.Field(5).Field(1)) }) // .namedT1.t0
+ bad(func() { clear(v.Field(5).Field(1).Field(0)) }) // .namedT1.t0.W
+ bad(func() { clear(v.Field(6)) }) // .namedT2
+ bad(func() { clear(v.Field(6).Field(0)) }) // .namedT2.Z
+ bad(func() { clear(v.Field(6).Field(1)) }) // .namedT2.namedT0
+ bad(func() { clear(v.Field(6).Field(1).Field(0)) }) // .namedT2.namedT0.W
+
+ // addressable
+ v = ValueOf(&T{}).Elem()
+ ok(func() { clear(v.Field(0)) }) // .X
+ bad(func() { clear(v.Field(1)) }) // .t1
+ ok(func() { clear(v.Field(1).Field(0)) }) // .t1.Y
+ bad(func() { clear(v.Field(1).Field(1)) }) // .t1.t0
+ ok(func() { clear(v.Field(1).Field(1).Field(0)) }) // .t1.t0.W
+ ok(func() { clear(v.Field(2)) }) // .T2
+ ok(func() { clear(v.Field(2).Field(0)) }) // .T2.Z
+ bad(func() { clear(v.Field(2).Field(1)) }) // .T2.namedT0
+ bad(func() { clear(v.Field(2).Field(1).Field(0)) }) // .T2.namedT0.W
+ ok(func() { clear(v.Field(3)) }) // .NamedT1
+ ok(func() { clear(v.Field(3).Field(0)) }) // .NamedT1.Y
+ bad(func() { clear(v.Field(3).Field(1)) }) // .NamedT1.t0
+ ok(func() { clear(v.Field(3).Field(1).Field(0)) }) // .NamedT1.t0.W
+ ok(func() { clear(v.Field(4)) }) // .NamedT2
+ ok(func() { clear(v.Field(4).Field(0)) }) // .NamedT2.Z
+ bad(func() { clear(v.Field(4).Field(1)) }) // .NamedT2.namedT0
+ bad(func() { clear(v.Field(4).Field(1).Field(0)) }) // .NamedT2.namedT0.W
+ bad(func() { clear(v.Field(5)) }) // .namedT1
+ bad(func() { clear(v.Field(5).Field(0)) }) // .namedT1.Y
+ bad(func() { clear(v.Field(5).Field(1)) }) // .namedT1.t0
+ bad(func() { clear(v.Field(5).Field(1).Field(0)) }) // .namedT1.t0.W
+ bad(func() { clear(v.Field(6)) }) // .namedT2
+ bad(func() { clear(v.Field(6).Field(0)) }) // .namedT2.Z
+ bad(func() { clear(v.Field(6).Field(1)) }) // .namedT2.namedT0
+ bad(func() { clear(v.Field(6).Field(1).Field(0)) }) // .namedT2.namedT0.W
+}
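+
+// One-line restatement of the rule the table above walks through: a field is
+// settable only if the Value is addressable and neither the field nor any
+// embedded step on the way to it is unexported.
+func canSetSketch() (bool, bool) {
+ type S struct {
+ X int
+ y int
+ }
+ v := ValueOf(&S{}).Elem()
+ return v.Field(0).CanSet(), v.Field(1).CanSet() // true, false
+}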
+
+type timp int
+
+func (t timp) W() {}
+func (t timp) Y() {}
+func (t timp) w() {}
+func (t timp) y() {}
+
+func TestCallPanic(t *testing.T) {
+ type t0 interface {
+ W()
+ w()
+ }
+ type T1 interface {
+ Y()
+ y()
+ }
+ type T2 struct {
+ T1
+ t0
+ }
+ type T struct {
+ t0 // 0
+ T1 // 1
+
+ NamedT0 t0 // 2
+ NamedT1 T1 // 3
+ NamedT2 T2 // 4
+
+ namedT0 t0 // 5
+ namedT1 T1 // 6
+ namedT2 T2 // 7
+ }
+ ok := func(f func()) { f() }
+ badCall := func(f func()) { shouldPanic("Call", f) }
+ badMethod := func(f func()) { shouldPanic("Method", f) }
+ call := func(v Value) { v.Call(nil) }
+
+ i := timp(0)
+ v := ValueOf(T{i, i, i, i, T2{i, i}, i, i, T2{i, i}})
+ badCall(func() { call(v.Field(0).Method(0)) }) // .t0.W
+ badCall(func() { call(v.Field(0).Elem().Method(0)) }) // .t0.W
+ badCall(func() { call(v.Field(0).Method(1)) }) // .t0.w
+ badMethod(func() { call(v.Field(0).Elem().Method(2)) }) // .t0.w
+ ok(func() { call(v.Field(1).Method(0)) }) // .T1.Y
+ ok(func() { call(v.Field(1).Elem().Method(0)) }) // .T1.Y
+ badCall(func() { call(v.Field(1).Method(1)) }) // .T1.y
+ badMethod(func() { call(v.Field(1).Elem().Method(2)) }) // .T1.y
+
+ ok(func() { call(v.Field(2).Method(0)) }) // .NamedT0.W
+ ok(func() { call(v.Field(2).Elem().Method(0)) }) // .NamedT0.W
+ badCall(func() { call(v.Field(2).Method(1)) }) // .NamedT0.w
+ badMethod(func() { call(v.Field(2).Elem().Method(2)) }) // .NamedT0.w
+
+ ok(func() { call(v.Field(3).Method(0)) }) // .NamedT1.Y
+ ok(func() { call(v.Field(3).Elem().Method(0)) }) // .NamedT1.Y
+ badCall(func() { call(v.Field(3).Method(1)) }) // .NamedT1.y
+ badMethod(func() { call(v.Field(3).Elem().Method(3)) }) // .NamedT1.y
+
+ ok(func() { call(v.Field(4).Field(0).Method(0)) }) // .NamedT2.T1.Y
+ ok(func() { call(v.Field(4).Field(0).Elem().Method(0)) }) // .NamedT2.T1.W
+ badCall(func() { call(v.Field(4).Field(1).Method(0)) }) // .NamedT2.t0.W
+ badCall(func() { call(v.Field(4).Field(1).Elem().Method(0)) }) // .NamedT2.t0.W
+
+ badCall(func() { call(v.Field(5).Method(0)) }) // .namedT0.W
+ badCall(func() { call(v.Field(5).Elem().Method(0)) }) // .namedT0.W
+ badCall(func() { call(v.Field(5).Method(1)) }) // .namedT0.w
+ badMethod(func() { call(v.Field(5).Elem().Method(2)) }) // .namedT0.w
+
+ badCall(func() { call(v.Field(6).Method(0)) }) // .namedT1.Y
+ badCall(func() { call(v.Field(6).Elem().Method(0)) }) // .namedT1.Y
+ badCall(func() { call(v.Field(6).Method(0)) }) // .namedT1.y
+ badCall(func() { call(v.Field(6).Elem().Method(0)) }) // .namedT1.y
+
+ badCall(func() { call(v.Field(7).Field(0).Method(0)) }) // .namedT2.T1.Y
+ badCall(func() { call(v.Field(7).Field(0).Elem().Method(0)) }) // .namedT2.T1.W
+ badCall(func() { call(v.Field(7).Field(1).Method(0)) }) // .namedT2.t0.W
+ badCall(func() { call(v.Field(7).Field(1).Elem().Method(0)) }) // .namedT2.t0.W
+}
+
+func shouldPanic(expect string, f func()) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ panic("did not panic")
+ }
+ if expect != "" {
+ var s string
+ switch r := r.(type) {
+ case string:
+ s = r
+ case *ValueError:
+ s = r.Error()
+ default:
+ panic(fmt.Sprintf("panicked with unexpected type %T", r))
+ }
+ if !strings.HasPrefix(s, "reflect") {
+ panic(`panic string does not start with "reflect": ` + s)
+ }
+ if !strings.Contains(s, expect) {
+ panic(`panic string does not contain "` + expect + `": ` + s)
+ }
+ }
+ }()
+ f()
+}
+
+func isNonNil(x interface{}) {
+ if x == nil {
+ panic("nil interface")
+ }
+}
+
+func isValid(v Value) {
+ if !v.IsValid() {
+ panic("zero Value")
+ }
+}
+
+func TestAlias(t *testing.T) {
+ x := string("hello")
+ v := ValueOf(&x).Elem()
+ oldvalue := v.Interface()
+ v.SetString("world")
+ newvalue := v.Interface()
+
+ if oldvalue != "hello" || newvalue != "world" {
+ t.Errorf("aliasing: old=%q new=%q, want hello, world", oldvalue, newvalue)
+ }
+}
+
+var V = ValueOf
+
+func EmptyInterfaceV(x interface{}) Value {
+ return ValueOf(&x).Elem()
+}
+
+func ReaderV(x io.Reader) Value {
+ return ValueOf(&x).Elem()
+}
+
+func ReadWriterV(x io.ReadWriter) Value {
+ return ValueOf(&x).Elem()
+}
+
+type Empty struct{}
+type MyStruct struct {
+ x int `some:"tag"`
+}
+type MyString string
+type MyBytes []byte
+type MyRunes []int32
+type MyFunc func()
+type MyByte byte
+
+type IntChan chan int
+type IntChanRecv <-chan int
+type IntChanSend chan<- int
+type BytesChan chan []byte
+type BytesChanRecv <-chan []byte
+type BytesChanSend chan<- []byte
+
+var convertTests = []struct {
+ in Value
+ out Value
+}{
+ // numbers
+ /*
+ Edit .+1,/\*\//-1>cat >/tmp/x.go && go run /tmp/x.go
+
+ package main
+
+ import "fmt"
+
+ var numbers = []string{
+ "int8", "uint8", "int16", "uint16",
+ "int32", "uint32", "int64", "uint64",
+ "int", "uint", "uintptr",
+ "float32", "float64",
+ }
+
+ func main() {
+ // all pairs but in an unusual order,
+ // to emit all the int8, uint8 cases
+ // before n grows too big.
+ n := 1
+ for i, f := range numbers {
+ for _, g := range numbers[i:] {
+ fmt.Printf("\t{V(%s(%d)), V(%s(%d))},\n", f, n, g, n)
+ n++
+ if f != g {
+ fmt.Printf("\t{V(%s(%d)), V(%s(%d))},\n", g, n, f, n)
+ n++
+ }
+ }
+ }
+ }
+ */
+ {V(int8(1)), V(int8(1))},
+ {V(int8(2)), V(uint8(2))},
+ {V(uint8(3)), V(int8(3))},
+ {V(int8(4)), V(int16(4))},
+ {V(int16(5)), V(int8(5))},
+ {V(int8(6)), V(uint16(6))},
+ {V(uint16(7)), V(int8(7))},
+ {V(int8(8)), V(int32(8))},
+ {V(int32(9)), V(int8(9))},
+ {V(int8(10)), V(uint32(10))},
+ {V(uint32(11)), V(int8(11))},
+ {V(int8(12)), V(int64(12))},
+ {V(int64(13)), V(int8(13))},
+ {V(int8(14)), V(uint64(14))},
+ {V(uint64(15)), V(int8(15))},
+ {V(int8(16)), V(int(16))},
+ {V(int(17)), V(int8(17))},
+ {V(int8(18)), V(uint(18))},
+ {V(uint(19)), V(int8(19))},
+ {V(int8(20)), V(uintptr(20))},
+ {V(uintptr(21)), V(int8(21))},
+ {V(int8(22)), V(float32(22))},
+ {V(float32(23)), V(int8(23))},
+ {V(int8(24)), V(float64(24))},
+ {V(float64(25)), V(int8(25))},
+ {V(uint8(26)), V(uint8(26))},
+ {V(uint8(27)), V(int16(27))},
+ {V(int16(28)), V(uint8(28))},
+ {V(uint8(29)), V(uint16(29))},
+ {V(uint16(30)), V(uint8(30))},
+ {V(uint8(31)), V(int32(31))},
+ {V(int32(32)), V(uint8(32))},
+ {V(uint8(33)), V(uint32(33))},
+ {V(uint32(34)), V(uint8(34))},
+ {V(uint8(35)), V(int64(35))},
+ {V(int64(36)), V(uint8(36))},
+ {V(uint8(37)), V(uint64(37))},
+ {V(uint64(38)), V(uint8(38))},
+ {V(uint8(39)), V(int(39))},
+ {V(int(40)), V(uint8(40))},
+ {V(uint8(41)), V(uint(41))},
+ {V(uint(42)), V(uint8(42))},
+ {V(uint8(43)), V(uintptr(43))},
+ {V(uintptr(44)), V(uint8(44))},
+ {V(uint8(45)), V(float32(45))},
+ {V(float32(46)), V(uint8(46))},
+ {V(uint8(47)), V(float64(47))},
+ {V(float64(48)), V(uint8(48))},
+ {V(int16(49)), V(int16(49))},
+ {V(int16(50)), V(uint16(50))},
+ {V(uint16(51)), V(int16(51))},
+ {V(int16(52)), V(int32(52))},
+ {V(int32(53)), V(int16(53))},
+ {V(int16(54)), V(uint32(54))},
+ {V(uint32(55)), V(int16(55))},
+ {V(int16(56)), V(int64(56))},
+ {V(int64(57)), V(int16(57))},
+ {V(int16(58)), V(uint64(58))},
+ {V(uint64(59)), V(int16(59))},
+ {V(int16(60)), V(int(60))},
+ {V(int(61)), V(int16(61))},
+ {V(int16(62)), V(uint(62))},
+ {V(uint(63)), V(int16(63))},
+ {V(int16(64)), V(uintptr(64))},
+ {V(uintptr(65)), V(int16(65))},
+ {V(int16(66)), V(float32(66))},
+ {V(float32(67)), V(int16(67))},
+ {V(int16(68)), V(float64(68))},
+ {V(float64(69)), V(int16(69))},
+ {V(uint16(70)), V(uint16(70))},
+ {V(uint16(71)), V(int32(71))},
+ {V(int32(72)), V(uint16(72))},
+ {V(uint16(73)), V(uint32(73))},
+ {V(uint32(74)), V(uint16(74))},
+ {V(uint16(75)), V(int64(75))},
+ {V(int64(76)), V(uint16(76))},
+ {V(uint16(77)), V(uint64(77))},
+ {V(uint64(78)), V(uint16(78))},
+ {V(uint16(79)), V(int(79))},
+ {V(int(80)), V(uint16(80))},
+ {V(uint16(81)), V(uint(81))},
+ {V(uint(82)), V(uint16(82))},
+ {V(uint16(83)), V(uintptr(83))},
+ {V(uintptr(84)), V(uint16(84))},
+ {V(uint16(85)), V(float32(85))},
+ {V(float32(86)), V(uint16(86))},
+ {V(uint16(87)), V(float64(87))},
+ {V(float64(88)), V(uint16(88))},
+ {V(int32(89)), V(int32(89))},
+ {V(int32(90)), V(uint32(90))},
+ {V(uint32(91)), V(int32(91))},
+ {V(int32(92)), V(int64(92))},
+ {V(int64(93)), V(int32(93))},
+ {V(int32(94)), V(uint64(94))},
+ {V(uint64(95)), V(int32(95))},
+ {V(int32(96)), V(int(96))},
+ {V(int(97)), V(int32(97))},
+ {V(int32(98)), V(uint(98))},
+ {V(uint(99)), V(int32(99))},
+ {V(int32(100)), V(uintptr(100))},
+ {V(uintptr(101)), V(int32(101))},
+ {V(int32(102)), V(float32(102))},
+ {V(float32(103)), V(int32(103))},
+ {V(int32(104)), V(float64(104))},
+ {V(float64(105)), V(int32(105))},
+ {V(uint32(106)), V(uint32(106))},
+ {V(uint32(107)), V(int64(107))},
+ {V(int64(108)), V(uint32(108))},
+ {V(uint32(109)), V(uint64(109))},
+ {V(uint64(110)), V(uint32(110))},
+ {V(uint32(111)), V(int(111))},
+ {V(int(112)), V(uint32(112))},
+ {V(uint32(113)), V(uint(113))},
+ {V(uint(114)), V(uint32(114))},
+ {V(uint32(115)), V(uintptr(115))},
+ {V(uintptr(116)), V(uint32(116))},
+ {V(uint32(117)), V(float32(117))},
+ {V(float32(118)), V(uint32(118))},
+ {V(uint32(119)), V(float64(119))},
+ {V(float64(120)), V(uint32(120))},
+ {V(int64(121)), V(int64(121))},
+ {V(int64(122)), V(uint64(122))},
+ {V(uint64(123)), V(int64(123))},
+ {V(int64(124)), V(int(124))},
+ {V(int(125)), V(int64(125))},
+ {V(int64(126)), V(uint(126))},
+ {V(uint(127)), V(int64(127))},
+ {V(int64(128)), V(uintptr(128))},
+ {V(uintptr(129)), V(int64(129))},
+ {V(int64(130)), V(float32(130))},
+ {V(float32(131)), V(int64(131))},
+ {V(int64(132)), V(float64(132))},
+ {V(float64(133)), V(int64(133))},
+ {V(uint64(134)), V(uint64(134))},
+ {V(uint64(135)), V(int(135))},
+ {V(int(136)), V(uint64(136))},
+ {V(uint64(137)), V(uint(137))},
+ {V(uint(138)), V(uint64(138))},
+ {V(uint64(139)), V(uintptr(139))},
+ {V(uintptr(140)), V(uint64(140))},
+ {V(uint64(141)), V(float32(141))},
+ {V(float32(142)), V(uint64(142))},
+ {V(uint64(143)), V(float64(143))},
+ {V(float64(144)), V(uint64(144))},
+ {V(int(145)), V(int(145))},
+ {V(int(146)), V(uint(146))},
+ {V(uint(147)), V(int(147))},
+ {V(int(148)), V(uintptr(148))},
+ {V(uintptr(149)), V(int(149))},
+ {V(int(150)), V(float32(150))},
+ {V(float32(151)), V(int(151))},
+ {V(int(152)), V(float64(152))},
+ {V(float64(153)), V(int(153))},
+ {V(uint(154)), V(uint(154))},
+ {V(uint(155)), V(uintptr(155))},
+ {V(uintptr(156)), V(uint(156))},
+ {V(uint(157)), V(float32(157))},
+ {V(float32(158)), V(uint(158))},
+ {V(uint(159)), V(float64(159))},
+ {V(float64(160)), V(uint(160))},
+ {V(uintptr(161)), V(uintptr(161))},
+ {V(uintptr(162)), V(float32(162))},
+ {V(float32(163)), V(uintptr(163))},
+ {V(uintptr(164)), V(float64(164))},
+ {V(float64(165)), V(uintptr(165))},
+ {V(float32(166)), V(float32(166))},
+ {V(float32(167)), V(float64(167))},
+ {V(float64(168)), V(float32(168))},
+ {V(float64(169)), V(float64(169))},
+
+ // truncation
+ {V(float64(1.5)), V(int(1))},
+
+ // complex
+ {V(complex64(1i)), V(complex64(1i))},
+ {V(complex64(2i)), V(complex128(2i))},
+ {V(complex128(3i)), V(complex64(3i))},
+ {V(complex128(4i)), V(complex128(4i))},
+
+ // string
+ {V(string("hello")), V(string("hello"))},
+ {V(string("bytes1")), V([]byte("bytes1"))},
+ {V([]byte("bytes2")), V(string("bytes2"))},
+ {V([]byte("bytes3")), V([]byte("bytes3"))},
+ {V(string("runes♝")), V([]rune("runes♝"))},
+ {V([]rune("runes♕")), V(string("runes♕"))},
+ {V([]rune("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
+ {V(int('a')), V(string("a"))},
+ {V(int8('a')), V(string("a"))},
+ {V(int16('a')), V(string("a"))},
+ {V(int32('a')), V(string("a"))},
+ {V(int64('a')), V(string("a"))},
+ {V(uint('a')), V(string("a"))},
+ {V(uint8('a')), V(string("a"))},
+ {V(uint16('a')), V(string("a"))},
+ {V(uint32('a')), V(string("a"))},
+ {V(uint64('a')), V(string("a"))},
+ {V(uintptr('a')), V(string("a"))},
+ {V(int(-1)), V(string("\uFFFD"))},
+ {V(int8(-2)), V(string("\uFFFD"))},
+ {V(int16(-3)), V(string("\uFFFD"))},
+ {V(int32(-4)), V(string("\uFFFD"))},
+ {V(int64(-5)), V(string("\uFFFD"))},
+ {V(int64(-1 << 32)), V(string("\uFFFD"))},
+ {V(int64(1 << 32)), V(string("\uFFFD"))},
+ {V(uint(0x110001)), V(string("\uFFFD"))},
+ {V(uint32(0x110002)), V(string("\uFFFD"))},
+ {V(uint64(0x110003)), V(string("\uFFFD"))},
+ {V(uint64(1 << 32)), V(string("\uFFFD"))},
+ {V(uintptr(0x110004)), V(string("\uFFFD"))},
+
+ // named string
+ {V(MyString("hello")), V(string("hello"))},
+ {V(string("hello")), V(MyString("hello"))},
+ {V(string("hello")), V(string("hello"))},
+ {V(MyString("hello")), V(MyString("hello"))},
+ {V(MyString("bytes1")), V([]byte("bytes1"))},
+ {V([]byte("bytes2")), V(MyString("bytes2"))},
+ {V([]byte("bytes3")), V([]byte("bytes3"))},
+ {V(MyString("runes♝")), V([]rune("runes♝"))},
+ {V([]rune("runes♕")), V(MyString("runes♕"))},
+ {V([]rune("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
+ {V([]rune("runes🙈🙉🙊")), V(MyRunes("runes🙈🙉🙊"))},
+ {V(MyRunes("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
+ {V(int('a')), V(MyString("a"))},
+ {V(int8('a')), V(MyString("a"))},
+ {V(int16('a')), V(MyString("a"))},
+ {V(int32('a')), V(MyString("a"))},
+ {V(int64('a')), V(MyString("a"))},
+ {V(uint('a')), V(MyString("a"))},
+ {V(uint8('a')), V(MyString("a"))},
+ {V(uint16('a')), V(MyString("a"))},
+ {V(uint32('a')), V(MyString("a"))},
+ {V(uint64('a')), V(MyString("a"))},
+ {V(uintptr('a')), V(MyString("a"))},
+ {V(int(-1)), V(MyString("\uFFFD"))},
+ {V(int8(-2)), V(MyString("\uFFFD"))},
+ {V(int16(-3)), V(MyString("\uFFFD"))},
+ {V(int32(-4)), V(MyString("\uFFFD"))},
+ {V(int64(-5)), V(MyString("\uFFFD"))},
+ {V(uint(0x110001)), V(MyString("\uFFFD"))},
+ {V(uint32(0x110002)), V(MyString("\uFFFD"))},
+ {V(uint64(0x110003)), V(MyString("\uFFFD"))},
+ {V(uintptr(0x110004)), V(MyString("\uFFFD"))},
+
+ // named []byte
+ {V(string("bytes1")), V(MyBytes("bytes1"))},
+ {V(MyBytes("bytes2")), V(string("bytes2"))},
+ {V(MyBytes("bytes3")), V(MyBytes("bytes3"))},
+ {V(MyString("bytes1")), V(MyBytes("bytes1"))},
+ {V(MyBytes("bytes2")), V(MyString("bytes2"))},
+
+ // named []rune
+ {V(string("runes♝")), V(MyRunes("runes♝"))},
+ {V(MyRunes("runes♕")), V(string("runes♕"))},
+ {V(MyRunes("runes🙈🙉🙊")), V(MyRunes("runes🙈🙉🙊"))},
+ {V(MyString("runes♝")), V(MyRunes("runes♝"))},
+ {V(MyRunes("runes♕")), V(MyString("runes♕"))},
+
+ // named types and equal underlying types
+ {V(new(int)), V(new(integer))},
+ {V(new(integer)), V(new(int))},
+ {V(Empty{}), V(struct{}{})},
+ {V(new(Empty)), V(new(struct{}))},
+ {V(struct{}{}), V(Empty{})},
+ {V(new(struct{})), V(new(Empty))},
+ {V(Empty{}), V(Empty{})},
+ {V(MyBytes{}), V([]byte{})},
+ {V([]byte{}), V(MyBytes{})},
+ {V((func())(nil)), V(MyFunc(nil))},
+ {V((MyFunc)(nil)), V((func())(nil))},
+
+ // structs with different tags
+ {V(struct {
+ x int `some:"foo"`
+ }{}), V(struct {
+ x int `some:"bar"`
+ }{})},
+
+ {V(struct {
+ x int `some:"bar"`
+ }{}), V(struct {
+ x int `some:"foo"`
+ }{})},
+
+ {V(MyStruct{}), V(struct {
+ x int `some:"foo"`
+ }{})},
+
+ {V(struct {
+ x int `some:"foo"`
+ }{}), V(MyStruct{})},
+
+ {V(MyStruct{}), V(struct {
+ x int `some:"bar"`
+ }{})},
+
+ {V(struct {
+ x int `some:"bar"`
+ }{}), V(MyStruct{})},
+
+ // can convert *byte and *MyByte
+ {V((*byte)(nil)), V((*MyByte)(nil))},
+ {V((*MyByte)(nil)), V((*byte)(nil))},
+
+ // cannot convert mismatched array sizes
+ {V([2]byte{}), V([2]byte{})},
+ {V([3]byte{}), V([3]byte{})},
+
+ // cannot convert other instances
+ {V((**byte)(nil)), V((**byte)(nil))},
+ {V((**MyByte)(nil)), V((**MyByte)(nil))},
+ {V((chan byte)(nil)), V((chan byte)(nil))},
+ {V((chan MyByte)(nil)), V((chan MyByte)(nil))},
+ {V(([]byte)(nil)), V(([]byte)(nil))},
+ {V(([]MyByte)(nil)), V(([]MyByte)(nil))},
+ {V((map[int]byte)(nil)), V((map[int]byte)(nil))},
+ {V((map[int]MyByte)(nil)), V((map[int]MyByte)(nil))},
+ {V((map[byte]int)(nil)), V((map[byte]int)(nil))},
+ {V((map[MyByte]int)(nil)), V((map[MyByte]int)(nil))},
+ {V([2]byte{}), V([2]byte{})},
+ {V([2]MyByte{}), V([2]MyByte{})},
+
+ // other
+ {V((***int)(nil)), V((***int)(nil))},
+ {V((***byte)(nil)), V((***byte)(nil))},
+ {V((***int32)(nil)), V((***int32)(nil))},
+ {V((***int64)(nil)), V((***int64)(nil))},
+ {V((chan byte)(nil)), V((chan byte)(nil))},
+ {V((chan MyByte)(nil)), V((chan MyByte)(nil))},
+ {V((map[int]bool)(nil)), V((map[int]bool)(nil))},
+ {V((map[int]byte)(nil)), V((map[int]byte)(nil))},
+ {V((map[uint]bool)(nil)), V((map[uint]bool)(nil))},
+ {V([]uint(nil)), V([]uint(nil))},
+ {V([]int(nil)), V([]int(nil))},
+ {V(new(interface{})), V(new(interface{}))},
+ {V(new(io.Reader)), V(new(io.Reader))},
+ {V(new(io.Writer)), V(new(io.Writer))},
+
+ // channels
+ {V(IntChan(nil)), V((chan<- int)(nil))},
+ {V(IntChan(nil)), V((<-chan int)(nil))},
+ {V((chan int)(nil)), V(IntChanRecv(nil))},
+ {V((chan int)(nil)), V(IntChanSend(nil))},
+ {V(IntChanRecv(nil)), V((<-chan int)(nil))},
+ {V((<-chan int)(nil)), V(IntChanRecv(nil))},
+ {V(IntChanSend(nil)), V((chan<- int)(nil))},
+ {V((chan<- int)(nil)), V(IntChanSend(nil))},
+ {V(IntChan(nil)), V((chan int)(nil))},
+ {V((chan int)(nil)), V(IntChan(nil))},
+ {V((chan int)(nil)), V((<-chan int)(nil))},
+ {V((chan int)(nil)), V((chan<- int)(nil))},
+ {V(BytesChan(nil)), V((chan<- []byte)(nil))},
+ {V(BytesChan(nil)), V((<-chan []byte)(nil))},
+ {V((chan []byte)(nil)), V(BytesChanRecv(nil))},
+ {V((chan []byte)(nil)), V(BytesChanSend(nil))},
+ {V(BytesChanRecv(nil)), V((<-chan []byte)(nil))},
+ {V((<-chan []byte)(nil)), V(BytesChanRecv(nil))},
+ {V(BytesChanSend(nil)), V((chan<- []byte)(nil))},
+ {V((chan<- []byte)(nil)), V(BytesChanSend(nil))},
+ {V(BytesChan(nil)), V((chan []byte)(nil))},
+ {V((chan []byte)(nil)), V(BytesChan(nil))},
+ {V((chan []byte)(nil)), V((<-chan []byte)(nil))},
+ {V((chan []byte)(nil)), V((chan<- []byte)(nil))},
+
+ // cannot convert other instances (channels)
+ {V(IntChan(nil)), V(IntChan(nil))},
+ {V(IntChanRecv(nil)), V(IntChanRecv(nil))},
+ {V(IntChanSend(nil)), V(IntChanSend(nil))},
+ {V(BytesChan(nil)), V(BytesChan(nil))},
+ {V(BytesChanRecv(nil)), V(BytesChanRecv(nil))},
+ {V(BytesChanSend(nil)), V(BytesChanSend(nil))},
+
+ // interfaces
+ {V(int(1)), EmptyInterfaceV(int(1))},
+ {V(string("hello")), EmptyInterfaceV(string("hello"))},
+ {V(new(bytes.Buffer)), ReaderV(new(bytes.Buffer))},
+ {ReadWriterV(new(bytes.Buffer)), ReaderV(new(bytes.Buffer))},
+ {V(new(bytes.Buffer)), ReadWriterV(new(bytes.Buffer))},
+}
+
+func TestConvert(t *testing.T) {
+ canConvert := map[[2]Type]bool{}
+ all := map[Type]bool{}
+
+ for _, tt := range convertTests {
+ t1 := tt.in.Type()
+ if !t1.ConvertibleTo(t1) {
+ t.Errorf("(%s).ConvertibleTo(%s) = false, want true", t1, t1)
+ continue
+ }
+
+ t2 := tt.out.Type()
+ if !t1.ConvertibleTo(t2) {
+ t.Errorf("(%s).ConvertibleTo(%s) = false, want true", t1, t2)
+ continue
+ }
+
+ all[t1] = true
+ all[t2] = true
+ canConvert[[2]Type{t1, t2}] = true
+
+ // vout1 represents the in value converted to the in type.
+ v1 := tt.in
+ vout1 := v1.Convert(t1)
+ out1 := vout1.Interface()
+ if vout1.Type() != tt.in.Type() || !DeepEqual(out1, tt.in.Interface()) {
+ t.Errorf("ValueOf(%T(%[1]v)).Convert(%s) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t1, out1, tt.in.Interface())
+ }
+
+ // vout2 represents the in value converted to the out type.
+ vout2 := v1.Convert(t2)
+ out2 := vout2.Interface()
+ if vout2.Type() != tt.out.Type() || !DeepEqual(out2, tt.out.Interface()) {
+ t.Errorf("ValueOf(%T(%[1]v)).Convert(%s) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t2, out2, tt.out.Interface())
+ }
+
+ // vout3 represents a new value of the out type, set to vout2. This makes
+ // sure the converted value vout2 is really usable as a regular value.
+ vout3 := New(t2).Elem()
+ vout3.Set(vout2)
+ out3 := vout3.Interface()
+ if vout3.Type() != tt.out.Type() || !DeepEqual(out3, tt.out.Interface()) {
+ t.Errorf("Set(ValueOf(%T(%[1]v)).Convert(%s)) = %T(%[3]v), want %T(%[4]v)", tt.in.Interface(), t2, out3, tt.out.Interface())
+ }
+
+ if IsRO(v1) {
+ t.Errorf("table entry %v is RO, should not be", v1)
+ }
+ if IsRO(vout1) {
+ t.Errorf("self-conversion output %v is RO, should not be", vout1)
+ }
+ if IsRO(vout2) {
+ t.Errorf("conversion output %v is RO, should not be", vout2)
+ }
+ if IsRO(vout3) {
+ t.Errorf("set(conversion output) %v is RO, should not be", vout3)
+ }
+ if !IsRO(MakeRO(v1).Convert(t1)) {
+ t.Errorf("RO self-conversion output %v is not RO, should be", v1)
+ }
+ if !IsRO(MakeRO(v1).Convert(t2)) {
+ t.Errorf("RO conversion output %v is not RO, should be", v1)
+ }
+ }
+
+ // Assume that of all the types we saw during the tests,
+ // if there wasn't an explicit entry for a conversion between
+ // a pair of types, then it's not to be allowed. This checks for
+ // things like 'int64' converting to '*int'.
+ for t1 := range all {
+ for t2 := range all {
+ expectOK := t1 == t2 || canConvert[[2]Type{t1, t2}] || t2.Kind() == Interface && t2.NumMethod() == 0
+ if ok := t1.ConvertibleTo(t2); ok != expectOK {
+ t.Errorf("(%s).ConvertibleTo(%s) = %v, want %v", t1, t2, ok, expectOK)
+ }
+ }
+ }
+}
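+
+// Sketch of a pair the exhaustive cross-check above expects to reject, plus
+// the interface escape hatch it allows: int64 does not convert to *int,
+// while every type converts to the empty interface.
+func convertibleSketch() (bool, bool) {
+ i64 := TypeOf(int64(0))
+ return i64.ConvertibleTo(TypeOf((*int)(nil))), // false
+ i64.ConvertibleTo(TypeOf((*interface{})(nil)).Elem()) // true
+}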
+
+var gFloat32 float32
+
+func TestConvertNaNs(t *testing.T) {
+ const snan uint32 = 0x7f800001
+ type myFloat32 float32
+ x := V(myFloat32(math.Float32frombits(snan)))
+ y := x.Convert(TypeOf(float32(0)))
+ z := y.Interface().(float32)
+ if got := math.Float32bits(z); got != snan {
+ t.Errorf("signaling nan conversion got %x, want %x", got, snan)
+ }
+}
+
+type ComparableStruct struct {
+ X int
+}
+
+type NonComparableStruct struct {
+ X int
+ Y map[string]int
+}
+
+var comparableTests = []struct {
+ typ Type
+ ok bool
+}{
+ {TypeOf(1), true},
+ {TypeOf("hello"), true},
+ {TypeOf(new(byte)), true},
+ {TypeOf((func())(nil)), false},
+ {TypeOf([]byte{}), false},
+ {TypeOf(map[string]int{}), false},
+ {TypeOf(make(chan int)), true},
+ {TypeOf(1.5), true},
+ {TypeOf(false), true},
+ {TypeOf(1i), true},
+ {TypeOf(ComparableStruct{}), true},
+ {TypeOf(NonComparableStruct{}), false},
+ {TypeOf([10]map[string]int{}), false},
+ {TypeOf([10]string{}), true},
+ {TypeOf(new(interface{})).Elem(), true},
+}
+
+func TestComparable(t *testing.T) {
+ for _, tt := range comparableTests {
+ if ok := tt.typ.Comparable(); ok != tt.ok {
+ t.Errorf("TypeOf(%v).Comparable() = %v, want %v", tt.typ, ok, tt.ok)
+ }
+ }
+}
+
+func TestOverflow(t *testing.T) {
+ if ovf := V(float64(0)).OverflowFloat(1e300); ovf {
+ t.Errorf("%v wrongly overflows float64", 1e300)
+ }
+
+ maxFloat32 := float64((1<<24 - 1) << (127 - 23))
+ if ovf := V(float32(0)).OverflowFloat(maxFloat32); ovf {
+ t.Errorf("%v wrongly overflows float32", maxFloat32)
+ }
+ ovfFloat32 := float64((1<<24-1)<<(127-23) + 1<<(127-52))
+ if ovf := V(float32(0)).OverflowFloat(ovfFloat32); !ovf {
+ t.Errorf("%v should overflow float32", ovfFloat32)
+ }
+ if ovf := V(float32(0)).OverflowFloat(-ovfFloat32); !ovf {
+ t.Errorf("%v should overflow float32", -ovfFloat32)
+ }
+
+ maxInt32 := int64(0x7fffffff)
+ if ovf := V(int32(0)).OverflowInt(maxInt32); ovf {
+ t.Errorf("%v wrongly overflows int32", maxInt32)
+ }
+ if ovf := V(int32(0)).OverflowInt(-1 << 31); ovf {
+ t.Errorf("%v wrongly overflows int32", -int64(1)<<31)
+ }
+ ovfInt32 := int64(1 << 31)
+ if ovf := V(int32(0)).OverflowInt(ovfInt32); !ovf {
+ t.Errorf("%v should overflow int32", ovfInt32)
+ }
+
+ maxUint32 := uint64(0xffffffff)
+ if ovf := V(uint32(0)).OverflowUint(maxUint32); ovf {
+ t.Errorf("%v wrongly overflows uint32", maxUint32)
+ }
+ ovfUint32 := uint64(1 << 32)
+ if ovf := V(uint32(0)).OverflowUint(ovfUint32); !ovf {
+ t.Errorf("%v should overflow uint32", ovfUint32)
+ }
+}
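+
+// sketchOverflow is an illustrative helper, not used by any test here,
+// showing the contract TestOverflow exercises above: the receiver's type
+// supplies the range, and the argument is checked against it.
+func sketchOverflow() (bool, bool) {
+ v := ValueOf(int8(0))
+ return v.OverflowInt(127), v.OverflowInt(128) // false (fits), true (does not)
+}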
+
+func checkSameType(t *testing.T, x Type, y interface{}) {
+ if x != TypeOf(y) || TypeOf(Zero(x).Interface()) != TypeOf(y) {
+ t.Errorf("did not find preexisting type for %s (vs %s)", TypeOf(x), TypeOf(y))
+ }
+}
+
+func TestArrayOf(t *testing.T) {
+ // check construction and use of type not in binary
+ tests := []struct {
+ n int
+ value func(i int) interface{}
+ comparable bool
+ want string
+ }{
+ {
+ n: 0,
+ value: func(i int) interface{} { type Tint int; return Tint(i) },
+ comparable: true,
+ want: "[]",
+ },
+ {
+ n: 10,
+ value: func(i int) interface{} { type Tint int; return Tint(i) },
+ comparable: true,
+ want: "[0 1 2 3 4 5 6 7 8 9]",
+ },
+ {
+ n: 10,
+ value: func(i int) interface{} { type Tfloat float64; return Tfloat(i) },
+ comparable: true,
+ want: "[0 1 2 3 4 5 6 7 8 9]",
+ },
+ {
+ n: 10,
+ value: func(i int) interface{} { type Tstring string; return Tstring(strconv.Itoa(i)) },
+ comparable: true,
+ want: "[0 1 2 3 4 5 6 7 8 9]",
+ },
+ {
+ n: 10,
+ value: func(i int) interface{} { type Tstruct struct{ V int }; return Tstruct{i} },
+ comparable: true,
+ want: "[{0} {1} {2} {3} {4} {5} {6} {7} {8} {9}]",
+ },
+ {
+ n: 10,
+ value: func(i int) interface{} { type Tint int; return []Tint{Tint(i)} },
+ comparable: false,
+ want: "[[0] [1] [2] [3] [4] [5] [6] [7] [8] [9]]",
+ },
+ {
+ n: 10,
+ value: func(i int) interface{} { type Tint int; return [1]Tint{Tint(i)} },
+ comparable: true,
+ want: "[[0] [1] [2] [3] [4] [5] [6] [7] [8] [9]]",
+ },
+ {
+ n: 10,
+ value: func(i int) interface{} { type Tstruct struct{ V [1]int }; return Tstruct{[1]int{i}} },
+ comparable: true,
+ want: "[{[0]} {[1]} {[2]} {[3]} {[4]} {[5]} {[6]} {[7]} {[8]} {[9]}]",
+ },
+ {
+ n: 10,
+ value: func(i int) interface{} { type Tstruct struct{ V []int }; return Tstruct{[]int{i}} },
+ comparable: false,
+ want: "[{[0]} {[1]} {[2]} {[3]} {[4]} {[5]} {[6]} {[7]} {[8]} {[9]}]",
+ },
+ {
+ n: 10,
+ value: func(i int) interface{} { type TstructUV struct{ U, V int }; return TstructUV{i, i} },
+ comparable: true,
+ want: "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]",
+ },
+ {
+ n: 10,
+ value: func(i int) interface{} {
+ type TstructUV struct {
+ U int
+ V float64
+ }
+ return TstructUV{i, float64(i)}
+ },
+ comparable: true,
+ want: "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]",
+ },
+ }
+
+ for _, table := range tests {
+ at := ArrayOf(table.n, TypeOf(table.value(0)))
+ v := New(at).Elem()
+ vok := New(at).Elem()
+ vnot := New(at).Elem()
+ for i := 0; i < v.Len(); i++ {
+ v.Index(i).Set(ValueOf(table.value(i)))
+ vok.Index(i).Set(ValueOf(table.value(i)))
+ j := i
+ if i+1 == v.Len() {
+ j = i + 1
+ }
+ vnot.Index(i).Set(ValueOf(table.value(j))) // make it differ only by last element
+ }
+ s := fmt.Sprint(v.Interface())
+ if s != table.want {
+ t.Errorf("constructed array = %s, want %s", s, table.want)
+ }
+
+ if table.comparable != at.Comparable() {
+ t.Errorf("constructed array (%#v) is comparable=%v, want=%v", v.Interface(), at.Comparable(), table.comparable)
+ }
+ if table.comparable {
+ if table.n > 0 {
+ if DeepEqual(vnot.Interface(), v.Interface()) {
+ t.Errorf(
+ "arrays (%#v) compare ok (but should not)",
+ v.Interface(),
+ )
+ }
+ }
+ if !DeepEqual(vok.Interface(), v.Interface()) {
+ t.Errorf(
+ "arrays (%#v) compare NOT-ok (but should)",
+ v.Interface(),
+ )
+ }
+ }
+ }
+
+ // check that type already in binary is found
+ type T int
+ checkSameType(t, ArrayOf(5, TypeOf(T(1))), [5]T{})
+}
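+
+// sketchArrayOf is an illustrative helper, not used by any test here,
+// showing the ArrayOf pattern used above: construct the array type,
+// allocate a value with New, and fill it element by element.
+func sketchArrayOf() interface{} {
+ at := ArrayOf(3, TypeOf(0)) // [3]int
+ v := New(at).Elem()
+ for i := 0; i < v.Len(); i++ {
+ v.Index(i).Set(ValueOf(i * i))
+ }
+ return v.Interface() // [3]int{0, 1, 4}
+}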
+
+func TestArrayOfGC(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ const n = 100
+ var x []interface{}
+ for i := 0; i < n; i++ {
+ v := New(ArrayOf(n, tt)).Elem()
+ for j := 0; j < v.Len(); j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.Index(j).Set(ValueOf(p).Convert(tt))
+ }
+ x = append(x, v.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi)
+ for j := 0; j < v.Len(); j++ {
+ k := v.Index(j).Elem().Interface()
+ if k != uintptr(i*n+j) {
+ t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestArrayOfAlg(t *testing.T) {
+ at := ArrayOf(6, TypeOf(byte(0)))
+ v1 := New(at).Elem()
+ v2 := New(at).Elem()
+ if v1.Interface() != v1.Interface() {
+ t.Errorf("constructed array %v not equal to itself", v1.Interface())
+ }
+ v1.Index(5).Set(ValueOf(byte(1)))
+ if i1, i2 := v1.Interface(), v2.Interface(); i1 == i2 {
+ t.Errorf("constructed arrays %v and %v should not be equal", i1, i2)
+ }
+
+ at = ArrayOf(6, TypeOf([]int(nil)))
+ v1 = New(at).Elem()
+ shouldPanic("", func() { _ = v1.Interface() == v1.Interface() })
+}
+
+func TestArrayOfGenericAlg(t *testing.T) {
+ at1 := ArrayOf(5, TypeOf(string("")))
+ at := ArrayOf(6, at1)
+ v1 := New(at).Elem()
+ v2 := New(at).Elem()
+ if v1.Interface() != v1.Interface() {
+ t.Errorf("constructed array %v not equal to itself", v1.Interface())
+ }
+
+ v1.Index(0).Index(0).Set(ValueOf("abc"))
+ v2.Index(0).Index(0).Set(ValueOf("efg"))
+ if i1, i2 := v1.Interface(), v2.Interface(); i1 == i2 {
+ t.Errorf("constructed arrays %v and %v should not be equal", i1, i2)
+ }
+
+ v1.Index(0).Index(0).Set(ValueOf("abc"))
+ v2.Index(0).Index(0).Set(ValueOf((v1.Index(0).Index(0).String() + " ")[:3]))
+ if i1, i2 := v1.Interface(), v2.Interface(); i1 != i2 {
+ t.Errorf("constructed arrays %v and %v should be equal", i1, i2)
+ }
+
+ // Test hash
+ m := MakeMap(MapOf(at, TypeOf(int(0))))
+ m.SetMapIndex(v1, ValueOf(1))
+ if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() {
+ t.Errorf("constructed arrays %v and %v have different hashes", i1, i2)
+ }
+}
+
+func TestArrayOfDirectIface(t *testing.T) {
+ {
+ type T [1]*byte
+ i1 := Zero(TypeOf(T{})).Interface()
+ v1 := ValueOf(&i1).Elem()
+ p1 := v1.InterfaceData()[1]
+
+ i2 := Zero(ArrayOf(1, PtrTo(TypeOf(int8(0))))).Interface()
+ v2 := ValueOf(&i2).Elem()
+ p2 := v2.InterfaceData()[1]
+
+ if p1 != 0 {
+ t.Errorf("got p1=%v. want=%v", p1, nil)
+ }
+
+ if p2 != 0 {
+ t.Errorf("got p2=%v. want=%v", p2, nil)
+ }
+ }
+ {
+ type T [0]*byte
+ i1 := Zero(TypeOf(T{})).Interface()
+ v1 := ValueOf(&i1).Elem()
+ p1 := v1.InterfaceData()[1]
+
+ i2 := Zero(ArrayOf(0, PtrTo(TypeOf(int8(0))))).Interface()
+ v2 := ValueOf(&i2).Elem()
+ p2 := v2.InterfaceData()[1]
+
+ if p1 == 0 {
+ t.Errorf("got p1=%v. want=not-%v", p1, nil)
+ }
+
+ if p2 == 0 {
+ t.Errorf("got p2=%v. want=not-%v", p2, nil)
+ }
+ }
+}
+
+func TestSliceOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type T int
+ st := SliceOf(TypeOf(T(1)))
+ if got, want := st.String(), "[]reflect_test.T"; got != want {
+ t.Errorf("SliceOf(T(1)).String()=%q, want %q", got, want)
+ }
+ v := MakeSlice(st, 10, 10)
+ runtime.GC()
+ for i := 0; i < v.Len(); i++ {
+ v.Index(i).Set(ValueOf(T(i)))
+ runtime.GC()
+ }
+ s := fmt.Sprint(v.Interface())
+ want := "[0 1 2 3 4 5 6 7 8 9]"
+ if s != want {
+ t.Errorf("constructed slice = %s, want %s", s, want)
+ }
+
+ // check that type already in binary is found
+ type T1 int
+ checkSameType(t, SliceOf(TypeOf(T1(1))), []T1{})
+}
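+
+// sketchSliceOf is an illustrative helper, not used by any test here: the
+// slice analogue of the ArrayOf pattern, pairing SliceOf with MakeSlice.
+func sketchSliceOf() interface{} {
+ st := SliceOf(TypeOf("")) // []string
+ v := MakeSlice(st, 2, 2)
+ v.Index(0).SetString("a")
+ v.Index(1).SetString("b")
+ return v.Interface() // []string{"a", "b"}
+}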
+
+func TestSliceOverflow(t *testing.T) {
+ // check that MakeSlice panics when size of slice overflows uint
+ const S = 1e6
+ s := uint(S)
+ l := (1<<(unsafe.Sizeof((*byte)(nil))*8)-1)/s + 1
+ if l*s >= s {
+ t.Fatal("slice size does not overflow")
+ }
+ var x [S]byte
+ st := SliceOf(TypeOf(x))
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Fatal("slice overflow does not panic")
+ }
+ }()
+ MakeSlice(st, int(l), int(l))
+}
+
+func TestSliceOfGC(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ st := SliceOf(tt)
+ const n = 100
+ var x []interface{}
+ for i := 0; i < n; i++ {
+ v := MakeSlice(st, n, n)
+ for j := 0; j < v.Len(); j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.Index(j).Set(ValueOf(p).Convert(tt))
+ }
+ x = append(x, v.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi)
+ for j := 0; j < v.Len(); j++ {
+ k := v.Index(j).Elem().Interface()
+ if k != uintptr(i*n+j) {
+ t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestStructOfFieldName(t *testing.T) {
+ // invalid field name "1nvalid"
+ shouldPanic("has invalid name", func() {
+ StructOf([]StructField{
+ {Name: "Valid", Type: TypeOf("")},
+ {Name: "1nvalid", Type: TypeOf("")},
+ })
+ })
+
+ // invalid field name "+"
+ shouldPanic("has invalid name", func() {
+ StructOf([]StructField{
+ {Name: "Val1d", Type: TypeOf("")},
+ {Name: "+", Type: TypeOf("")},
+ })
+ })
+
+ // no field name
+ shouldPanic("has no name", func() {
+ StructOf([]StructField{
+ {Name: "", Type: TypeOf("")},
+ })
+ })
+
+ // verify creation of a struct with valid struct fields
+ validFields := []StructField{
+ {
+ Name: "φ",
+ Type: TypeOf(""),
+ },
+ {
+ Name: "ValidName",
+ Type: TypeOf(""),
+ },
+ {
+ Name: "Val1dNam5",
+ Type: TypeOf(""),
+ },
+ }
+
+ validStruct := StructOf(validFields)
+
+ const structStr = `struct { φ string; ValidName string; Val1dNam5 string }`
+ if got, want := validStruct.String(), structStr; got != want {
+ t.Errorf("StructOf(validFields).String()=%q, want %q", got, want)
+ }
+}
+
+func TestStructOf(t *testing.T) {
+ // check construction and use of type not in binary
+ fields := []StructField{
+ {
+ Name: "S",
+ Tag: "s",
+ Type: TypeOf(""),
+ },
+ {
+ Name: "X",
+ Tag: "x",
+ Type: TypeOf(byte(0)),
+ },
+ {
+ Name: "Y",
+ Type: TypeOf(uint64(0)),
+ },
+ {
+ Name: "Z",
+ Type: TypeOf([3]uint16{}),
+ },
+ }
+
+ st := StructOf(fields)
+ v := New(st).Elem()
+ runtime.GC()
+ v.FieldByName("X").Set(ValueOf(byte(2)))
+ v.FieldByIndex([]int{1}).Set(ValueOf(byte(1)))
+ runtime.GC()
+
+ s := fmt.Sprint(v.Interface())
+ want := `{ 1 0 [0 0 0]}`
+ if s != want {
+ t.Errorf("constructed struct = %s, want %s", s, want)
+ }
+ const stStr = `struct { S string "s"; X uint8 "x"; Y uint64; Z [3]uint16 }`
+ if got, want := st.String(), stStr; got != want {
+ t.Errorf("StructOf(fields).String()=%q, want %q", got, want)
+ }
+
+ // check the size, alignment and field offsets
+ stt := TypeOf(struct {
+ String string
+ X byte
+ Y uint64
+ Z [3]uint16
+ }{})
+ if st.Size() != stt.Size() {
+ t.Errorf("constructed struct size = %v, want %v", st.Size(), stt.Size())
+ }
+ if st.Align() != stt.Align() {
+ t.Errorf("constructed struct align = %v, want %v", st.Align(), stt.Align())
+ }
+ if st.FieldAlign() != stt.FieldAlign() {
+ t.Errorf("constructed struct field align = %v, want %v", st.FieldAlign(), stt.FieldAlign())
+ }
+ for i := 0; i < st.NumField(); i++ {
+ o1 := st.Field(i).Offset
+ o2 := stt.Field(i).Offset
+ if o1 != o2 {
+ t.Errorf("constructed struct field %v offset = %v, want %v", i, o1, o2)
+ }
+ }
+
+ // Check size and alignment with a trailing zero-sized field.
+ st = StructOf([]StructField{
+ {
+ Name: "F1",
+ Type: TypeOf(byte(0)),
+ },
+ {
+ Name: "F2",
+ Type: TypeOf([0]*byte{}),
+ },
+ })
+ stt = TypeOf(struct {
+ G1 byte
+ G2 [0]*byte
+ }{})
+ if st.Size() != stt.Size() {
+ t.Errorf("constructed zero-padded struct size = %v, want %v", st.Size(), stt.Size())
+ }
+ if st.Align() != stt.Align() {
+ t.Errorf("constructed zero-padded struct align = %v, want %v", st.Align(), stt.Align())
+ }
+ if st.FieldAlign() != stt.FieldAlign() {
+ t.Errorf("constructed zero-padded struct field align = %v, want %v", st.FieldAlign(), stt.FieldAlign())
+ }
+ for i := 0; i < st.NumField(); i++ {
+ o1 := st.Field(i).Offset
+ o2 := stt.Field(i).Offset
+ if o1 != o2 {
+ t.Errorf("constructed zero-padded struct field %v offset = %v, want %v", i, o1, o2)
+ }
+ }
+
+ // check duplicate names
+ shouldPanic("duplicate field", func() {
+ StructOf([]StructField{
+ {Name: "string", PkgPath: "p", Type: TypeOf("")},
+ {Name: "string", PkgPath: "p", Type: TypeOf("")},
+ })
+ })
+ shouldPanic("has no name", func() {
+ StructOf([]StructField{
+ {Type: TypeOf("")},
+ {Name: "string", PkgPath: "p", Type: TypeOf("")},
+ })
+ })
+ shouldPanic("has no name", func() {
+ StructOf([]StructField{
+ {Type: TypeOf("")},
+ {Type: TypeOf("")},
+ })
+ })
+ // check that type already in binary is found
+ checkSameType(t, StructOf(fields[2:3]), struct{ Y uint64 }{})
+
+ // gccgo used to fail this test.
+ type structFieldType interface{}
+ checkSameType(t,
+ StructOf([]StructField{
+ {
+ Name: "F",
+ Type: TypeOf((*structFieldType)(nil)).Elem(),
+ },
+ }),
+ struct{ F structFieldType }{})
+}
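+
+// sketchStructOf is an illustrative helper, not used by any test here,
+// showing the StructOf pattern verified above: the constructed type's field
+// order, tags, and offsets match an equivalent source-level struct.
+func sketchStructOf() interface{} {
+ st := StructOf([]StructField{
+ {Name: "Name", Type: TypeOf(""), Tag: `json:"name"`},
+ {Name: "Age", Type: TypeOf(0)},
+ })
+ v := New(st).Elem()
+ v.Field(0).SetString("gopher")
+ v.Field(1).SetInt(12)
+ return v.Interface() // like struct{ Name string; Age int } with the tag
+}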
+
+func TestStructOfExportRules(t *testing.T) {
+ type S1 struct{}
+ type s2 struct{}
+ type ΦType struct{}
+ type φType struct{}
+
+ testPanic := func(i int, mustPanic bool, f func()) {
+ defer func() {
+ err := recover()
+ if err == nil && mustPanic {
+ t.Errorf("test-%d did not panic", i)
+ }
+ if err != nil && !mustPanic {
+ t.Errorf("test-%d panicked: %v\n", i, err)
+ }
+ }()
+ f()
+ }
+
+ tests := []struct {
+ field StructField
+ mustPanic bool
+ exported bool
+ }{
+ {
+ field: StructField{Name: "S1", Anonymous: true, Type: TypeOf(S1{})},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "S1", Anonymous: true, Type: TypeOf((*S1)(nil))},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "s2", Anonymous: true, Type: TypeOf(s2{})},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s2", Anonymous: true, Type: TypeOf((*s2)(nil))},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "Name", Type: nil, PkgPath: ""},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "", Type: TypeOf(S1{}), PkgPath: ""},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "S1", Anonymous: true, Type: TypeOf(S1{}), PkgPath: "other/pkg"},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "S1", Anonymous: true, Type: TypeOf((*S1)(nil)), PkgPath: "other/pkg"},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s2", Anonymous: true, Type: TypeOf(s2{}), PkgPath: "other/pkg"},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s2", Anonymous: true, Type: TypeOf((*s2)(nil)), PkgPath: "other/pkg"},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s2", Type: TypeOf(int(0)), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "s2", Type: TypeOf(int(0)), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "S", Type: TypeOf(S1{})},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "S", Type: TypeOf((*S1)(nil))},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "S", Type: TypeOf(s2{})},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "S", Type: TypeOf((*s2)(nil))},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf(S1{})},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf((*S1)(nil))},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf(s2{})},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf((*s2)(nil))},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf(S1{}), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf((*S1)(nil)), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf(s2{}), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "s", Type: TypeOf((*s2)(nil)), PkgPath: "other/pkg"},
+ },
+ {
+ field: StructField{Name: "", Type: TypeOf(ΦType{})},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "", Type: TypeOf(φType{})},
+ mustPanic: true,
+ },
+ {
+ field: StructField{Name: "Φ", Type: TypeOf(0)},
+ exported: true,
+ },
+ {
+ field: StructField{Name: "φ", Type: TypeOf(0)},
+ exported: false,
+ },
+ }
+
+ for i, test := range tests {
+ testPanic(i, test.mustPanic, func() {
+ typ := StructOf([]StructField{test.field})
+ if typ == nil {
+ t.Errorf("test-%d: error creating struct type", i)
+ return
+ }
+ field := typ.Field(0)
+ n := field.Name
+ if n == "" {
+ panic("field.Name must not be empty")
+ }
+ exported := token.IsExported(n)
+ if exported != test.exported {
+ t.Errorf("test-%d: got exported=%v want exported=%v", i, exported, test.exported)
+ }
+ if field.PkgPath != test.field.PkgPath {
+ t.Errorf("test-%d: got PkgPath=%q want pkgPath=%q", i, field.PkgPath, test.field.PkgPath)
+ }
+ })
+ }
+}
+
+func TestStructOfGC(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ fields := []StructField{
+ {Name: "X", Type: tt},
+ {Name: "Y", Type: tt},
+ }
+ st := StructOf(fields)
+
+ const n = 10000
+ var x []interface{}
+ for i := 0; i < n; i++ {
+ v := New(st).Elem()
+ for j := 0; j < v.NumField(); j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.Field(j).Set(ValueOf(p).Convert(tt))
+ }
+ x = append(x, v.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi)
+ for j := 0; j < v.NumField(); j++ {
+ k := v.Field(j).Elem().Interface()
+ if k != uintptr(i*n+j) {
+ t.Errorf("lost x[%d].%c = %d, want %d", i, "XY"[j], k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestStructOfAlg(t *testing.T) {
+ st := StructOf([]StructField{{Name: "X", Tag: "x", Type: TypeOf(int(0))}})
+ v1 := New(st).Elem()
+ v2 := New(st).Elem()
+ if !DeepEqual(v1.Interface(), v1.Interface()) {
+ t.Errorf("constructed struct %v not equal to itself", v1.Interface())
+ }
+ v1.FieldByName("X").Set(ValueOf(int(1)))
+ if i1, i2 := v1.Interface(), v2.Interface(); DeepEqual(i1, i2) {
+ t.Errorf("constructed structs %v and %v should not be equal", i1, i2)
+ }
+
+ st = StructOf([]StructField{{Name: "X", Tag: "x", Type: TypeOf([]int(nil))}})
+ v1 = New(st).Elem()
+ shouldPanic("", func() { _ = v1.Interface() == v1.Interface() })
+}
+
+func TestStructOfGenericAlg(t *testing.T) {
+ st1 := StructOf([]StructField{
+ {Name: "X", Tag: "x", Type: TypeOf(int64(0))},
+ {Name: "Y", Type: TypeOf(string(""))},
+ })
+ st := StructOf([]StructField{
+ {Name: "S0", Type: st1},
+ {Name: "S1", Type: st1},
+ })
+
+ tests := []struct {
+ rt Type
+ idx []int
+ }{
+ {
+ rt: st,
+ idx: []int{0, 1},
+ },
+ {
+ rt: st1,
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf([0]int{})},
+ {Name: "YY", Type: TypeOf("")},
+ },
+ ),
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf([0]int{})},
+ {Name: "YY", Type: TypeOf("")},
+ {Name: "ZZ", Type: TypeOf([2]int{})},
+ },
+ ),
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf([1]int{})},
+ {Name: "YY", Type: TypeOf("")},
+ },
+ ),
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf([1]int{})},
+ {Name: "YY", Type: TypeOf("")},
+ {Name: "ZZ", Type: TypeOf([1]int{})},
+ },
+ ),
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf([2]int{})},
+ {Name: "YY", Type: TypeOf("")},
+ {Name: "ZZ", Type: TypeOf([2]int{})},
+ },
+ ),
+ idx: []int{1},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf(int64(0))},
+ {Name: "YY", Type: TypeOf(byte(0))},
+ {Name: "ZZ", Type: TypeOf("")},
+ },
+ ),
+ idx: []int{2},
+ },
+ {
+ rt: StructOf(
+ []StructField{
+ {Name: "XX", Type: TypeOf(int64(0))},
+ {Name: "YY", Type: TypeOf(int64(0))},
+ {Name: "ZZ", Type: TypeOf("")},
+ {Name: "AA", Type: TypeOf([1]int64{})},
+ },
+ ),
+ idx: []int{2},
+ },
+ }
+
+ for _, table := range tests {
+ v1 := New(table.rt).Elem()
+ v2 := New(table.rt).Elem()
+
+ if !DeepEqual(v1.Interface(), v1.Interface()) {
+ t.Errorf("constructed struct %v not equal to itself", v1.Interface())
+ }
+
+ v1.FieldByIndex(table.idx).Set(ValueOf("abc"))
+ v2.FieldByIndex(table.idx).Set(ValueOf("def"))
+ if i1, i2 := v1.Interface(), v2.Interface(); DeepEqual(i1, i2) {
+ t.Errorf("constructed structs %v and %v should not be equal", i1, i2)
+ }
+
+ abc := "abc"
+ v1.FieldByIndex(table.idx).Set(ValueOf(abc))
+ val := "+" + abc + "-"
+ v2.FieldByIndex(table.idx).Set(ValueOf(val[1:4]))
+ if i1, i2 := v1.Interface(), v2.Interface(); !DeepEqual(i1, i2) {
+ t.Errorf("constructed structs %v and %v should be equal", i1, i2)
+ }
+
+ // Test hash
+ m := MakeMap(MapOf(table.rt, TypeOf(int(0))))
+ m.SetMapIndex(v1, ValueOf(1))
+ if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() {
+ t.Errorf("constructed structs %#v and %#v have different hashes", i1, i2)
+ }
+
+ v2.FieldByIndex(table.idx).Set(ValueOf("abc"))
+ if i1, i2 := v1.Interface(), v2.Interface(); !DeepEqual(i1, i2) {
+ t.Errorf("constructed structs %v and %v should be equal", i1, i2)
+ }
+
+ if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() {
+ t.Errorf("constructed structs %v and %v have different hashes", i1, i2)
+ }
+ }
+}
+
+func TestStructOfDirectIface(t *testing.T) {
+ {
+ type T struct{ X [1]*byte }
+ i1 := Zero(TypeOf(T{})).Interface()
+ v1 := ValueOf(&i1).Elem()
+ p1 := v1.InterfaceData()[1]
+
+ i2 := Zero(StructOf([]StructField{
+ {
+ Name: "X",
+ Type: ArrayOf(1, TypeOf((*int8)(nil))),
+ },
+ })).Interface()
+ v2 := ValueOf(&i2).Elem()
+ p2 := v2.InterfaceData()[1]
+
+ if p1 != 0 {
+ t.Errorf("got p1=%v. want=%v", p1, nil)
+ }
+
+ if p2 != 0 {
+ t.Errorf("got p2=%v. want=%v", p2, nil)
+ }
+ }
+ {
+ type T struct{ X [0]*byte }
+ i1 := Zero(TypeOf(T{})).Interface()
+ v1 := ValueOf(&i1).Elem()
+ p1 := v1.InterfaceData()[1]
+
+ i2 := Zero(StructOf([]StructField{
+ {
+ Name: "X",
+ Type: ArrayOf(0, TypeOf((*int8)(nil))),
+ },
+ })).Interface()
+ v2 := ValueOf(&i2).Elem()
+ p2 := v2.InterfaceData()[1]
+
+ if p1 == 0 {
+ t.Errorf("got p1=%v. want=not-%v", p1, nil)
+ }
+
+ if p2 == 0 {
+ t.Errorf("got p2=%v. want=not-%v", p2, nil)
+ }
+ }
+}
+
+type StructI int
+
+func (i StructI) Get() int { return int(i) }
+
+type StructIPtr int
+
+func (i *StructIPtr) Get() int { return int(*i) }
+func (i *StructIPtr) Set(v int) { *(*int)(i) = v }
+
+type SettableStruct struct {
+ SettableField int
+}
+
+func (p *SettableStruct) Set(v int) { p.SettableField = v }
+
+type SettablePointer struct {
+ SettableField *int
+}
+
+func (p *SettablePointer) Set(v int) { *p.SettableField = v }
+
+func TestStructOfWithInterface(t *testing.T) {
+ const want = 42
+ type Iface interface {
+ Get() int
+ }
+ type IfaceSet interface {
+ Set(int)
+ }
+ tests := []struct {
+ name string
+ typ Type
+ val Value
+ impl bool
+ }{
+ {
+ name: "StructI",
+ typ: TypeOf(StructI(want)),
+ val: ValueOf(StructI(want)),
+ impl: true,
+ },
+ {
+ name: "StructI",
+ typ: PtrTo(TypeOf(StructI(want))),
+ val: ValueOf(func() interface{} {
+ v := StructI(want)
+ return &v
+ }()),
+ impl: true,
+ },
+ {
+ name: "StructIPtr",
+ typ: PtrTo(TypeOf(StructIPtr(want))),
+ val: ValueOf(func() interface{} {
+ v := StructIPtr(want)
+ return &v
+ }()),
+ impl: true,
+ },
+ {
+ name: "StructIPtr",
+ typ: TypeOf(StructIPtr(want)),
+ val: ValueOf(StructIPtr(want)),
+ impl: false,
+ },
+ // {
+ // typ: TypeOf((*Iface)(nil)).Elem(), // FIXME(sbinet): fix method.ifn/tfn
+ // val: ValueOf(StructI(want)),
+ // impl: true,
+ // },
+ }
+
+ for i, table := range tests {
+ for j := 0; j < 2; j++ {
+ var fields []StructField
+ if j == 1 {
+ fields = append(fields, StructField{
+ Name: "Dummy",
+ PkgPath: "",
+ Type: TypeOf(int(0)),
+ })
+ }
+ fields = append(fields, StructField{
+ Name: table.name,
+ Anonymous: true,
+ PkgPath: "",
+ Type: table.typ,
+ })
+
+ // We currently do not correctly implement methods
+ // for embedded fields other than the first.
+ // Therefore, for now, we expect those methods
+ // not to exist. See issues 15924 and 20824.
+ // When those issues are fixed, this panic check
+ // should be removed.
+ if j == 1 && table.impl {
+ func() {
+ defer func() {
+ if err := recover(); err == nil {
+ t.Errorf("test-%d-%d did not panic", i, j)
+ }
+ }()
+ _ = StructOf(fields)
+ }()
+ continue
+ }
+
+ rt := StructOf(fields)
+ rv := New(rt).Elem()
+ rv.Field(j).Set(table.val)
+
+ if _, ok := rv.Interface().(Iface); ok != table.impl {
+ if table.impl {
+ t.Errorf("test-%d-%d: type=%v fails to implement Iface.\n", i, j, table.typ)
+ } else {
+ t.Errorf("test-%d-%d: type=%v should NOT implement Iface\n", i, j, table.typ)
+ }
+ continue
+ }
+
+ if !table.impl {
+ continue
+ }
+
+ v := rv.Interface().(Iface).Get()
+ if v != want {
+ t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, v, want)
+ }
+
+ fct := rv.MethodByName("Get")
+ out := fct.Call(nil)
+ if !DeepEqual(out[0].Interface(), want) {
+ t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, out[0].Interface(), want)
+ }
+ }
+ }
+
+ // Test an embedded nil pointer with pointer methods.
+ fields := []StructField{{
+ Name: "StructIPtr",
+ Anonymous: true,
+ Type: PtrTo(TypeOf(StructIPtr(want))),
+ }}
+ rt := StructOf(fields)
+ rv := New(rt).Elem()
+ // This should panic since the pointer is nil.
+ shouldPanic("", func() {
+ rv.Interface().(IfaceSet).Set(want)
+ })
+
+ // Test an embedded nil pointer to a struct with pointer methods.
+
+ fields = []StructField{{
+ Name: "SettableStruct",
+ Anonymous: true,
+ Type: PtrTo(TypeOf(SettableStruct{})),
+ }}
+ rt = StructOf(fields)
+ rv = New(rt).Elem()
+ // This should panic since the pointer is nil.
+ shouldPanic("", func() {
+ rv.Interface().(IfaceSet).Set(want)
+ })
+
+ // The behavior is different if there is a second field,
+ // since now an interface value holds a pointer to the struct
+ // rather than just holding a copy of the struct.
+ fields = []StructField{
+ {
+ Name: "SettableStruct",
+ Anonymous: true,
+ Type: PtrTo(TypeOf(SettableStruct{})),
+ },
+ {
+ Name: "EmptyStruct",
+ Anonymous: true,
+ Type: StructOf(nil),
+ },
+ }
+ // With the current implementation this is expected to panic.
+ // Ideally it should work and we should be able to see a panic
+ // if we call the Set method.
+ shouldPanic("", func() {
+ StructOf(fields)
+ })
+
+ // Embed a field that can be stored directly in an interface,
+ // with a second field.
+ fields = []StructField{
+ {
+ Name: "SettablePointer",
+ Anonymous: true,
+ Type: TypeOf(SettablePointer{}),
+ },
+ {
+ Name: "EmptyStruct",
+ Anonymous: true,
+ Type: StructOf(nil),
+ },
+ }
+ // With the current implementation this is expected to panic.
+ // Ideally it should work and we should be able to call the
+ // Set and Get methods.
+ shouldPanic("", func() {
+ StructOf(fields)
+ })
+}
+
+func TestStructOfTooManyFields(t *testing.T) {
+ // Bug Fix: #25402 - this should not panic
+ tt := StructOf([]StructField{
+ {Name: "Time", Type: TypeOf(time.Time{}), Anonymous: true},
+ })
+
+ if _, present := tt.MethodByName("After"); !present {
+ t.Errorf("Expected method `After` to be found")
+ }
+}
+
+func TestStructOfDifferentPkgPath(t *testing.T) {
+ fields := []StructField{
+ {
+ Name: "f1",
+ PkgPath: "p1",
+ Type: TypeOf(int(0)),
+ },
+ {
+ Name: "f2",
+ PkgPath: "p2",
+ Type: TypeOf(int(0)),
+ },
+ }
+ shouldPanic("different PkgPath", func() {
+ StructOf(fields)
+ })
+}
+
+func TestChanOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type T string
+ ct := ChanOf(BothDir, TypeOf(T("")))
+ v := MakeChan(ct, 2)
+ runtime.GC()
+ v.Send(ValueOf(T("hello")))
+ runtime.GC()
+ v.Send(ValueOf(T("world")))
+ runtime.GC()
+
+ sv1, _ := v.Recv()
+ sv2, _ := v.Recv()
+ s1 := sv1.String()
+ s2 := sv2.String()
+ if s1 != "hello" || s2 != "world" {
+ t.Errorf("constructed chan: have %q, %q, want %q, %q", s1, s2, "hello", "world")
+ }
+
+ // check that type already in binary is found
+ type T1 int
+ checkSameType(t, ChanOf(BothDir, TypeOf(T1(1))), (chan T1)(nil))
+
+ // Check arrow token association in undefined chan types.
+ var left chan<- chan T
+ var right chan (<-chan T)
+ tLeft := ChanOf(SendDir, ChanOf(BothDir, TypeOf(T(""))))
+ tRight := ChanOf(BothDir, ChanOf(RecvDir, TypeOf(T(""))))
+ if tLeft != TypeOf(left) {
+ t.Errorf("chan<-chan: have %s, want %T", tLeft, left)
+ }
+ if tRight != TypeOf(right) {
+ t.Errorf("chan<-chan: have %s, want %T", tRight, right)
+ }
+}
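+
+// sketchChanOf is an illustrative helper, not used by any test here,
+// showing the ChanOf/MakeChan round trip exercised above.
+func sketchChanOf() (string, bool) {
+ ct := ChanOf(BothDir, TypeOf("")) // chan string
+ ch := MakeChan(ct, 1)
+ ch.Send(ValueOf("ping"))
+ v, ok := ch.Recv()
+ return v.String(), ok // "ping", true
+}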
+
+func TestChanOfDir(t *testing.T) {
+ // check construction and use of type not in binary
+ type T string
+ crt := ChanOf(RecvDir, TypeOf(T("")))
+ cst := ChanOf(SendDir, TypeOf(T("")))
+
+ // check that type already in binary is found
+ type T1 int
+ checkSameType(t, ChanOf(RecvDir, TypeOf(T1(1))), (<-chan T1)(nil))
+ checkSameType(t, ChanOf(SendDir, TypeOf(T1(1))), (chan<- T1)(nil))
+
+ // check String form of ChanDir
+ if crt.ChanDir().String() != "<-chan" {
+ t.Errorf("chan dir: have %q, want %q", crt.ChanDir().String(), "<-chan")
+ }
+ if cst.ChanDir().String() != "chan<-" {
+ t.Errorf("chan dir: have %q, want %q", cst.ChanDir().String(), "chan<-")
+ }
+}
+
+func TestChanOfGC(t *testing.T) {
+ done := make(chan bool, 1)
+ go func() {
+ select {
+ case <-done:
+ case <-time.After(5 * time.Second):
+ panic("deadlock in TestChanOfGC")
+ }
+ }()
+
+ defer func() {
+ done <- true
+ }()
+
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ ct := ChanOf(BothDir, tt)
+
+ // NOTE: The garbage collector handles allocated channels specially,
+ // so we have to save pointers to channels in x; the pointer code will
+ // use the gc info in the newly constructed chan type.
+ const n = 100
+ var x []interface{}
+ for i := 0; i < n; i++ {
+ v := MakeChan(ct, n)
+ for j := 0; j < n; j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.Send(ValueOf(p).Convert(tt))
+ }
+ pv := New(ct)
+ pv.Elem().Set(v)
+ x = append(x, pv.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi).Elem()
+ for j := 0; j < n; j++ {
+ pv, _ := v.Recv()
+ k := pv.Elem().Interface()
+ if k != uintptr(i*n+j) {
+ t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestMapOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type K string
+ type V float64
+
+ v := MakeMap(MapOf(TypeOf(K("")), TypeOf(V(0))))
+ runtime.GC()
+ v.SetMapIndex(ValueOf(K("a")), ValueOf(V(1)))
+ runtime.GC()
+
+ s := fmt.Sprint(v.Interface())
+ want := "map[a:1]"
+ if s != want {
+ t.Errorf("constructed map = %s, want %s", s, want)
+ }
+
+ // check that type already in binary is found
+ checkSameType(t, MapOf(TypeOf(V(0)), TypeOf(K(""))), map[V]K(nil))
+
+ // check that invalid key type panics
+ shouldPanic("invalid key type", func() { MapOf(TypeOf((func())(nil)), TypeOf(false)) })
+}
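+
+// sketchMapOf is an illustrative helper, not used by any test here: MapOf
+// plus MakeMap, as above. MapOf panics for key types that are not
+// comparable, which TestMapOf checks explicitly.
+func sketchMapOf() int64 {
+ mt := MapOf(TypeOf(""), TypeOf(0)) // map[string]int
+ m := MakeMap(mt)
+ m.SetMapIndex(ValueOf("answer"), ValueOf(42))
+ return m.MapIndex(ValueOf("answer")).Int() // 42
+}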
+
+func TestMapOfGCKeys(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ mt := MapOf(tt, TypeOf(false))
+
+ // NOTE: The garbage collector handles allocated maps specially,
+ // so we have to save pointers to maps in x; the pointer code will
+ // use the gc info in the newly constructed map type.
+ const n = 100
+ var x []interface{}
+ for i := 0; i < n; i++ {
+ v := MakeMap(mt)
+ for j := 0; j < n; j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.SetMapIndex(ValueOf(p).Convert(tt), ValueOf(true))
+ }
+ pv := New(mt)
+ pv.Elem().Set(v)
+ x = append(x, pv.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi).Elem()
+ var out []int
+ for _, kv := range v.MapKeys() {
+ out = append(out, int(kv.Elem().Interface().(uintptr)))
+ }
+ sort.Ints(out)
+ for j, k := range out {
+ if k != i*n+j {
+ t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestMapOfGCValues(t *testing.T) {
+ type T *uintptr
+ tt := TypeOf(T(nil))
+ mt := MapOf(TypeOf(1), tt)
+
+ // NOTE: The garbage collector handles allocated maps specially,
+ // so we have to save pointers to maps in x; the pointer code will
+ // use the gc info in the newly constructed map type.
+ const n = 100
+ var x []interface{}
+ for i := 0; i < n; i++ {
+ v := MakeMap(mt)
+ for j := 0; j < n; j++ {
+ p := new(uintptr)
+ *p = uintptr(i*n + j)
+ v.SetMapIndex(ValueOf(j), ValueOf(p).Convert(tt))
+ }
+ pv := New(mt)
+ pv.Elem().Set(v)
+ x = append(x, pv.Interface())
+ }
+ runtime.GC()
+
+ for i, xi := range x {
+ v := ValueOf(xi).Elem()
+ for j := 0; j < n; j++ {
+ k := v.MapIndex(ValueOf(j)).Elem().Interface().(uintptr)
+ if k != uintptr(i*n+j) {
+ t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j)
+ }
+ }
+ }
+}
+
+func TestTypelinksSorted(t *testing.T) {
+ var last string
+ for i, n := range TypeLinks() {
+ if n < last {
+ t.Errorf("typelinks not sorted: %q [%d] > %q [%d]", last, i-1, n, i)
+ }
+ last = n
+ }
+}
+
+func TestFuncOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type K string
+ type V float64
+
+ fn := func(args []Value) []Value {
+ if len(args) != 1 {
+ t.Errorf("args == %v, want exactly one arg", args)
+ } else if args[0].Type() != TypeOf(K("")) {
+ t.Errorf("args[0] is type %v, want %v", args[0].Type(), TypeOf(K("")))
+ } else if args[0].String() != "gopher" {
+ t.Errorf("args[0] = %q, want %q", args[0].String(), "gopher")
+ }
+ return []Value{ValueOf(V(3.14))}
+ }
+ v := MakeFunc(FuncOf([]Type{TypeOf(K(""))}, []Type{TypeOf(V(0))}, false), fn)
+
+ outs := v.Call([]Value{ValueOf(K("gopher"))})
+ if len(outs) != 1 {
+ t.Fatalf("v.Call returned %v, want exactly one result", outs)
+ } else if outs[0].Type() != TypeOf(V(0)) {
+ t.Fatalf("c.Call[0] is type %v, want %v", outs[0].Type(), TypeOf(V(0)))
+ }
+ f := outs[0].Float()
+ if f != 3.14 {
+ t.Errorf("constructed func returned %f, want %f", f, 3.14)
+ }
+
+ // check that types already in binary are found
+ type T1 int
+ testCases := []struct {
+ in, out []Type
+ variadic bool
+ want interface{}
+ }{
+ {in: []Type{TypeOf(T1(0))}, want: (func(T1))(nil)},
+ {in: []Type{TypeOf(int(0))}, want: (func(int))(nil)},
+ {in: []Type{SliceOf(TypeOf(int(0)))}, variadic: true, want: (func(...int))(nil)},
+ {in: []Type{TypeOf(int(0))}, out: []Type{TypeOf(false)}, want: (func(int) bool)(nil)},
+ {in: []Type{TypeOf(int(0))}, out: []Type{TypeOf(false), TypeOf("")}, want: (func(int) (bool, string))(nil)},
+ }
+ for _, tt := range testCases {
+ checkSameType(t, FuncOf(tt.in, tt.out, tt.variadic), tt.want)
+ }
+
+ // check that variadic requires last element be a slice.
+ FuncOf([]Type{TypeOf(1), TypeOf(""), SliceOf(TypeOf(false))}, nil, true)
+ shouldPanic("must be slice", func() { FuncOf([]Type{TypeOf(0), TypeOf(""), TypeOf(false)}, nil, true) })
+ shouldPanic("must be slice", func() { FuncOf(nil, nil, true) })
+}
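+
+// sketchFuncOf is an illustrative helper, not used by any test here,
+// pairing FuncOf with MakeFunc as TestFuncOf does: FuncOf builds the
+// signature, MakeFunc supplies the implementation.
+func sketchFuncOf() int64 {
+ ft := FuncOf([]Type{TypeOf(0), TypeOf(0)}, []Type{TypeOf(0)}, false) // func(int, int) int
+ add := MakeFunc(ft, func(args []Value) []Value {
+ return []Value{ValueOf(int(args[0].Int() + args[1].Int()))}
+ })
+ return add.Call([]Value{ValueOf(2), ValueOf(3)})[0].Int() // 5
+}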
+
+type B1 struct {
+ X int
+ Y int
+ Z int
+}
+
+func BenchmarkFieldByName1(b *testing.B) {
+ t := TypeOf(B1{})
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("Z")
+ }
+ })
+}
+
+func BenchmarkFieldByName2(b *testing.B) {
+ t := TypeOf(S3{})
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("B")
+ }
+ })
+}
+
+type R0 struct {
+ *R1
+ *R2
+ *R3
+ *R4
+}
+
+type R1 struct {
+ *R5
+ *R6
+ *R7
+ *R8
+}
+
+type R2 R1
+type R3 R1
+type R4 R1
+
+type R5 struct {
+ *R9
+ *R10
+ *R11
+ *R12
+}
+
+type R6 R5
+type R7 R5
+type R8 R5
+
+type R9 struct {
+ *R13
+ *R14
+ *R15
+ *R16
+}
+
+type R10 R9
+type R11 R9
+type R12 R9
+
+type R13 struct {
+ *R17
+ *R18
+ *R19
+ *R20
+}
+
+type R14 R13
+type R15 R13
+type R16 R13
+
+type R17 struct {
+ *R21
+ *R22
+ *R23
+ *R24
+}
+
+type R18 R17
+type R19 R17
+type R20 R17
+
+type R21 struct {
+ X int
+}
+
+type R22 R21
+type R23 R21
+type R24 R21
+
+func TestEmbed(t *testing.T) {
+ typ := TypeOf(R0{})
+ f, ok := typ.FieldByName("X")
+ if ok {
+ t.Fatalf(`FieldByName("X") should fail, returned %v`, f.Index)
+ }
+}
+
+func BenchmarkFieldByName3(b *testing.B) {
+ t := TypeOf(R0{})
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("X")
+ }
+ })
+}
+
+type S struct {
+ i1 int64
+ i2 int64
+}
+
+func BenchmarkInterfaceBig(b *testing.B) {
+ v := ValueOf(S{})
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ v.Interface()
+ }
+ })
+ b.StopTimer()
+}
+
+func TestAllocsInterfaceBig(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping malloc count in short mode")
+ }
+ v := ValueOf(S{})
+ if allocs := testing.AllocsPerRun(100, func() { v.Interface() }); allocs > 0 {
+ t.Error("allocs:", allocs)
+ }
+}
+
+func BenchmarkInterfaceSmall(b *testing.B) {
+ v := ValueOf(int64(0))
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ v.Interface()
+ }
+ })
+}
+
+func TestAllocsInterfaceSmall(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping malloc count in short mode")
+ }
+ v := ValueOf(int64(0))
+ if allocs := testing.AllocsPerRun(100, func() { v.Interface() }); allocs > 0 {
+ t.Error("allocs:", allocs)
+ }
+}
+
+// An exhaustive is a mechanism for writing exhaustive or stochastic tests.
+// The basic usage is:
+//
+// for x.Next() {
+// ... code using x.Maybe() or x.Choice(n) to create test cases ...
+// }
+//
+// Each iteration of the loop returns a different set of results, until all
+// possible result sets have been explored. It is okay for different code paths
+// to make different method call sequences on x, but there must be no
+// other source of non-determinism in the call sequences.
+//
+// When faced with a new decision, x chooses randomly. Future explorations
+// of that path will choose successive values for the result. Thus, stopping
+// the loop after a fixed number of iterations gives somewhat stochastic
+// testing.
+//
+// Example:
+//
+// for x.Next() {
+// v := make([]bool, x.Choose(4))
+// for i := range v {
+// v[i] = x.Maybe()
+// }
+// fmt.Println(v)
+// }
+//
+// prints (in some order):
+//
+// []
+// [false]
+// [true]
+// [false false]
+// [false true]
+// ...
+// [true true]
+// [false false false]
+// ...
+// [true true true]
+// [false false false false]
+// ...
+// [true true true true]
+//
+type exhaustive struct {
+ r *rand.Rand
+ pos int
+ last []choice
+}
+
+type choice struct {
+ off int
+ n int
+ max int
+}
+
+func (x *exhaustive) Next() bool {
+ if x.r == nil {
+ x.r = rand.New(rand.NewSource(time.Now().UnixNano()))
+ }
+ x.pos = 0
+ if x.last == nil {
+ x.last = []choice{}
+ return true
+ }
+ for i := len(x.last) - 1; i >= 0; i-- {
+ c := &x.last[i]
+ if c.n+1 < c.max {
+ c.n++
+ x.last = x.last[:i+1]
+ return true
+ }
+ }
+ return false
+}
+
+func (x *exhaustive) Choose(max int) int {
+ if x.pos >= len(x.last) {
+ x.last = append(x.last, choice{x.r.Intn(max), 0, max})
+ }
+ c := &x.last[x.pos]
+ x.pos++
+ if c.max != max {
+ panic("inconsistent use of exhaustive tester")
+ }
+ return (c.n + c.off) % max
+}
+
+func (x *exhaustive) Maybe() bool {
+ return x.Choose(2) == 1
+}
+
+func GCFunc(args []Value) []Value {
+ runtime.GC()
+ return []Value{}
+}
+
+func TestReflectFuncTraceback(t *testing.T) {
+ f := MakeFunc(TypeOf(func() {}), GCFunc)
+ f.Call([]Value{})
+}
+
+func TestReflectMethodTraceback(t *testing.T) {
+ p := Point{3, 4}
+ m := ValueOf(p).MethodByName("GCMethod")
+ i := ValueOf(m.Interface()).Call([]Value{ValueOf(5)})[0].Int()
+ if i != 8 {
+ t.Errorf("Call returned %d; want 8", i)
+ }
+}
+
+func TestSmallZero(t *testing.T) {
+ type T [10]byte
+ typ := TypeOf(T{})
+ if allocs := testing.AllocsPerRun(100, func() { Zero(typ) }); allocs > 0 {
+ t.Errorf("Creating small zero values caused %f allocs, want 0", allocs)
+ }
+}
+
+func TestBigZero(t *testing.T) {
+ const size = 1 << 10
+ var v [size]byte
+ z := Zero(ValueOf(v).Type()).Interface().([size]byte)
+ for i := 0; i < size; i++ {
+ if z[i] != 0 {
+ t.Fatalf("Zero object not all zero, index %d", i)
+ }
+ }
+}
+
+func TestZeroSet(t *testing.T) {
+ type T [16]byte
+ type S struct {
+ a uint64
+ T T
+ b uint64
+ }
+ v := S{
+ a: 0xaaaaaaaaaaaaaaaa,
+ T: T{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9},
+ b: 0xbbbbbbbbbbbbbbbb,
+ }
+ ValueOf(&v).Elem().Field(1).Set(Zero(TypeOf(T{})))
+ if v != (S{
+ a: 0xaaaaaaaaaaaaaaaa,
+ b: 0xbbbbbbbbbbbbbbbb,
+ }) {
+ t.Fatalf("Setting a field to a Zero value didn't work")
+ }
+}
+
+func TestFieldByIndexNil(t *testing.T) {
+ type P struct {
+ F int
+ }
+ type T struct {
+ *P
+ }
+ v := ValueOf(T{})
+
+ v.FieldByName("P") // should be fine
+
+ defer func() {
+ if err := recover(); err == nil {
+ t.Fatalf("no error")
+ } else if !strings.Contains(fmt.Sprint(err), "nil pointer to embedded struct") {
+ t.Fatalf(`err=%q, wanted error containing "nil pointer to embedded struct"`, err)
+ }
+ }()
+ v.FieldByName("F") // should panic
+
+ t.Fatalf("did not panic")
+}
+
+// Given
+// type Outer struct {
+// *Inner
+// ...
+// }
+// the compiler generates the implementation of (*Outer).M dispatching to the embedded Inner.
+// The implementation is logically:
+// func (p *Outer) M() {
+// (p.Inner).M()
+// }
+// but since the only change here is the replacement of one pointer receiver with another,
+// the actual generated code overwrites the original receiver with the p.Inner pointer and
+// then jumps to the M method expecting the *Inner receiver.
+//
+// During reflect.Value.Call, we create an argument frame and the associated data structures
+// to describe it to the garbage collector, populate the frame, call reflect.call to
+// run a function call using that frame, and then copy the results back out of the frame.
+// The reflect.call function does a memmove of the frame structure onto the
+// stack (to set up the inputs), runs the call, and then memmoves the stack back to
+// the frame structure (to preserve the outputs).
+//
+// Originally reflect.call did not distinguish inputs from outputs: both memmoves
+// were for the full stack frame. However, in the case where the called function was
+// one of these wrappers, the rewritten receiver is almost certainly a different type
+// than the original receiver. This is not a problem on the stack, where we use the
+// program counter to determine the type information and understand that
+// during (*Outer).M the receiver is an *Outer while during (*Inner).M the receiver in the same
+// memory word is now an *Inner. But in the statically typed argument frame created
+// by reflect, the receiver is always an *Outer. Copying the modified receiver pointer
+// off the stack into the frame will store an *Inner there, and then if a garbage collection
+// happens to scan that argument frame before it is discarded, it will scan the *Inner
+// memory as if it were an *Outer. If the two have different memory layouts, the
+// collection will interpret the memory incorrectly.
+//
+// One such possible incorrect interpretation is to treat two arbitrary memory words
+// (Inner.P1 and Inner.P2 below) as an interface (Outer.R below). Because interpreting
+// an interface requires dereferencing the itab word, the misinterpretation will try to
+// dereference Inner.P1, causing a crash during garbage collection.
+//
+// This came up in a real program in issue 7725.
+
+type Outer struct {
+ *Inner
+ R io.Reader
+}
+
+type Inner struct {
+ X *Outer
+ P1 uintptr
+ P2 uintptr
+}
+
+func (pi *Inner) M() {
+ // Clear references to pi so that the only way the
+ // garbage collector will find the pointer is in the
+ // argument frame, typed as a *Outer.
+ pi.X.Inner = nil
+
+ // Set up an interface value that will cause a crash.
+ // P1 = 1 is non-zero, so the interface looks non-nil.
+ // P2 = pi ensures that the data word points into the
+ // allocated heap; if not, the collector skips the interface
+ // value as irrelevant, without dereferencing P1.
+ pi.P1 = 1
+ pi.P2 = uintptr(unsafe.Pointer(pi))
+}
+
+func TestCallMethodJump(t *testing.T) {
+ // In reflect.Value.Call, trigger a garbage collection after reflect.call
+ // returns but before the args frame has been discarded.
+ // This is a little clumsy but makes the failure repeatable.
+ *CallGC = true
+
+ p := &Outer{Inner: new(Inner)}
+ p.Inner.X = p
+ ValueOf(p).Method(0).Call(nil)
+
+ // Stop garbage collecting during reflect.call.
+ *CallGC = false
+}
+
+func TestMakeFuncStackCopy(t *testing.T) {
+ target := func(in []Value) []Value {
+ runtime.GC()
+ useStack(16)
+ return []Value{ValueOf(9)}
+ }
+
+ var concrete func(*int, int) int
+ fn := MakeFunc(ValueOf(concrete).Type(), target)
+ ValueOf(&concrete).Elem().Set(fn)
+ x := concrete(nil, 7)
+ if x != 9 {
+ t.Errorf("have %#q want 9", x)
+ }
+}
+
+// use about n KB of stack
+func useStack(n int) {
+ if n == 0 {
+ return
+ }
+ var b [1024]byte // makes frame about 1KB
+ useStack(n - 1 + int(b[99]))
+}
+
+type Impl struct{}
+
+func (Impl) F() {}
+
+func TestValueString(t *testing.T) {
+ rv := ValueOf(Impl{})
+ if rv.String() != "<reflect_test.Impl Value>" {
+ t.Errorf("ValueOf(Impl{}).String() = %q, want %q", rv.String(), "<reflect_test.Impl Value>")
+ }
+
+ method := rv.Method(0)
+ if method.String() != "<func() Value>" {
+ t.Errorf("ValueOf(Impl{}).Method(0).String() = %q, want %q", method.String(), "<func() Value>")
+ }
+}
+
+func TestInvalid(t *testing.T) {
+ // Used to have inconsistency between IsValid() and Kind() != Invalid.
+ type T struct{ v interface{} }
+
+ v := ValueOf(T{}).Field(0)
+ if v.IsValid() != true || v.Kind() != Interface {
+ t.Errorf("field: IsValid=%v, Kind=%v, want true, Interface", v.IsValid(), v.Kind())
+ }
+ v = v.Elem()
+ if v.IsValid() != false || v.Kind() != Invalid {
+ t.Errorf("field elem: IsValid=%v, Kind=%v, want false, Invalid", v.IsValid(), v.Kind())
+ }
+}
+
+// Issue 8917.
+func TestLargeGCProg(t *testing.T) {
+ fv := ValueOf(func([256]*byte) {})
+ fv.Call([]Value{ValueOf([256]*byte{})})
+}
+
+func fieldIndexRecover(t Type, i int) (recovered interface{}) {
+ defer func() {
+ recovered = recover()
+ }()
+
+ t.Field(i)
+ return
+}
+
+// Issue 15046.
+func TestTypeFieldOutOfRangePanic(t *testing.T) {
+ typ := TypeOf(struct{ X int }{10})
+ testIndices := [...]struct {
+ i int
+ mustPanic bool
+ }{
+ 0: {-2, true},
+ 1: {0, false},
+ 2: {1, true},
+ 3: {1 << 10, true},
+ }
+ for i, tt := range testIndices {
+ recoveredErr := fieldIndexRecover(typ, tt.i)
+ if tt.mustPanic {
+ if recoveredErr == nil {
+ t.Errorf("#%d: fieldIndex %d expected to panic", i, tt.i)
+ }
+ } else {
+ if recoveredErr != nil {
+ t.Errorf("#%d: got err=%v, expected no panic", i, recoveredErr)
+ }
+ }
+ }
+}
+
+// Issue 9179.
+func TestCallGC(t *testing.T) {
+ f := func(a, b, c, d, e string) {
+ }
+ g := func(in []Value) []Value {
+ runtime.GC()
+ return nil
+ }
+ typ := ValueOf(f).Type()
+ f2 := MakeFunc(typ, g).Interface().(func(string, string, string, string, string))
+ f2("four", "five5", "six666", "seven77", "eight888")
+}
+
+// Issue 18635 (function version).
+func TestKeepFuncLive(t *testing.T) {
+ // Test that we keep makeFuncImpl live as long as it is
+ // referenced on the stack.
+ typ := TypeOf(func(i int) {})
+ var f, g func(in []Value) []Value
+ f = func(in []Value) []Value {
+ clobber()
+ i := int(in[0].Int())
+ if i > 0 {
+ // We can't use Value.Call here because
+ // runtime.call* will keep the makeFuncImpl
+ // alive. However, by converting it to an
+ // interface value and calling that,
+ // reflect.callReflect is the only thing that
+ // can keep the makeFuncImpl live.
+ //
+ // Alternate between f and g so that if we do
+ // reuse the memory prematurely it's more
+ // likely to get obviously corrupted.
+ MakeFunc(typ, g).Interface().(func(i int))(i - 1)
+ }
+ return nil
+ }
+ g = func(in []Value) []Value {
+ clobber()
+ i := int(in[0].Int())
+ MakeFunc(typ, f).Interface().(func(i int))(i)
+ return nil
+ }
+ MakeFunc(typ, f).Call([]Value{ValueOf(10)})
+}
+
+type UnExportedFirst int
+
+func (i UnExportedFirst) ΦExported() {}
+func (i UnExportedFirst) unexported() {}
+
+// Issue 21177
+func TestMethodByNameUnExportedFirst(t *testing.T) {
+ defer func() {
+ if recover() != nil {
+ t.Errorf("should not panic")
+ }
+ }()
+ typ := TypeOf(UnExportedFirst(0))
+ m, _ := typ.MethodByName("ΦExported")
+ if m.Name != "ΦExported" {
+ t.Errorf("got %s, expected ΦExported", m.Name)
+ }
+}
+
+// Issue 18635 (method version).
+type KeepMethodLive struct{}
+
+func (k KeepMethodLive) Method1(i int) {
+ clobber()
+ if i > 0 {
+ ValueOf(k).MethodByName("Method2").Interface().(func(i int))(i - 1)
+ }
+}
+
+func (k KeepMethodLive) Method2(i int) {
+ clobber()
+ ValueOf(k).MethodByName("Method1").Interface().(func(i int))(i)
+}
+
+func TestKeepMethodLive(t *testing.T) {
+ // Test that we keep methodValue live as long as it is
+ // referenced on the stack.
+ KeepMethodLive{}.Method1(10)
+}
+
+// clobber tries to clobber unreachable memory.
+func clobber() {
+ runtime.GC()
+ for i := 1; i < 32; i++ {
+ for j := 0; j < 10; j++ {
+ obj := make([]*byte, i)
+ sink = obj
+ }
+ }
+ runtime.GC()
+}
+
+type funcLayoutTest struct {
+ rcvr, t Type
+ size, argsize, retOffset uintptr
+ stack []byte // pointer bitmap: 1 is pointer, 0 is scalar
+ gc []byte
+}
+
+var funcLayoutTests []funcLayoutTest
+
+func init() {
+ var argAlign uintptr = PtrSize
+ roundup := func(x uintptr, a uintptr) uintptr {
+ return (x + a - 1) / a * a
+ }
+
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ nil,
+ ValueOf(func(a, b string) string { return "" }).Type(),
+ 6 * PtrSize,
+ 4 * PtrSize,
+ 4 * PtrSize,
+ []byte{1, 0, 1, 0, 1},
+ []byte{1, 0, 1, 0, 1},
+ })
+
+ var r []byte
+ if PtrSize == 4 {
+ r = []byte{0, 0, 0, 1}
+ } else {
+ r = []byte{0, 0, 1}
+ }
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ nil,
+ ValueOf(func(a, b, c uint32, p *byte, d uint16) {}).Type(),
+ roundup(roundup(3*4, PtrSize)+PtrSize+2, argAlign),
+ roundup(3*4, PtrSize) + PtrSize + 2,
+ roundup(roundup(3*4, PtrSize)+PtrSize+2, argAlign),
+ r,
+ r,
+ })
+
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ nil,
+ ValueOf(func(a map[int]int, b uintptr, c interface{}) {}).Type(),
+ 4 * PtrSize,
+ 4 * PtrSize,
+ 4 * PtrSize,
+ []byte{1, 0, 1, 1},
+ []byte{1, 0, 1, 1},
+ })
+
+ type S struct {
+ a, b uintptr
+ c, d *byte
+ }
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ nil,
+ ValueOf(func(a S) {}).Type(),
+ 4 * PtrSize,
+ 4 * PtrSize,
+ 4 * PtrSize,
+ []byte{0, 0, 1, 1},
+ []byte{0, 0, 1, 1},
+ })
+
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ ValueOf((*byte)(nil)).Type(),
+ ValueOf(func(a uintptr, b *int) {}).Type(),
+ roundup(3*PtrSize, argAlign),
+ 3 * PtrSize,
+ roundup(3*PtrSize, argAlign),
+ []byte{1, 0, 1},
+ []byte{1, 0, 1},
+ })
+
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ nil,
+ ValueOf(func(a uintptr) {}).Type(),
+ roundup(PtrSize, argAlign),
+ PtrSize,
+ roundup(PtrSize, argAlign),
+ []byte{},
+ []byte{},
+ })
+
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ nil,
+ ValueOf(func() uintptr { return 0 }).Type(),
+ PtrSize,
+ 0,
+ 0,
+ []byte{},
+ []byte{},
+ })
+
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ ValueOf(uintptr(0)).Type(),
+ ValueOf(func(a uintptr) {}).Type(),
+ 2 * PtrSize,
+ 2 * PtrSize,
+ 2 * PtrSize,
+ []byte{1},
+ []byte{1},
+ // Note: this one is tricky, as the receiver is not a pointer. But we
+ // pass the receiver by reference to the autogenerated pointer-receiver
+ // version of the function.
+ })
+}
+
+func TestFuncLayout(t *testing.T) {
+ for _, lt := range funcLayoutTests {
+ typ, argsize, retOffset, stack, gc, ptrs := FuncLayout(lt.t, lt.rcvr)
+ if typ.Size() != lt.size {
+ t.Errorf("funcLayout(%v, %v).size=%d, want %d", lt.t, lt.rcvr, typ.Size(), lt.size)
+ }
+ if argsize != lt.argsize {
+ t.Errorf("funcLayout(%v, %v).argsize=%d, want %d", lt.t, lt.rcvr, argsize, lt.argsize)
+ }
+ if retOffset != lt.retOffset {
+ t.Errorf("funcLayout(%v, %v).retOffset=%d, want %d", lt.t, lt.rcvr, retOffset, lt.retOffset)
+ }
+ if !bytes.Equal(stack, lt.stack) {
+ t.Errorf("funcLayout(%v, %v).stack=%v, want %v", lt.t, lt.rcvr, stack, lt.stack)
+ }
+ if !bytes.Equal(gc, lt.gc) {
+ t.Errorf("funcLayout(%v, %v).gc=%v, want %v", lt.t, lt.rcvr, gc, lt.gc)
+ }
+ if ptrs && len(stack) == 0 || !ptrs && len(stack) > 0 {
+ t.Errorf("funcLayout(%v, %v) pointers flag=%v, want %v", lt.t, lt.rcvr, ptrs, !ptrs)
+ }
+ }
+}
+
+func verifyGCBits(t *testing.T, typ Type, bits []byte) {
+ heapBits := GCBits(New(typ).Interface())
+ if !bytes.Equal(heapBits, bits) {
+ _, _, line, _ := runtime.Caller(1)
+ t.Errorf("line %d: heapBits incorrect for %v\nhave %v\nwant %v", line, typ, heapBits, bits)
+ }
+}
+
+func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) {
+ // Creating a slice causes the runtime to repeat a bitmap,
+ // which exercises a different path from making the compiler
+ // repeat a bitmap for a small array or executing a repeat in
+ // a GC program.
+ val := MakeSlice(typ, 0, cap)
+ data := NewAt(ArrayOf(cap, typ), unsafe.Pointer(val.Pointer()))
+ heapBits := GCBits(data.Interface())
+ // Repeat the bitmap for the slice size, trimming scalars in
+ // the last element.
+ bits = rep(cap, bits)
+ for len(bits) > 0 && bits[len(bits)-1] == 0 {
+ bits = bits[:len(bits)-1]
+ }
+ if !bytes.Equal(heapBits, bits) {
+ t.Errorf("heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", typ, cap, heapBits, bits)
+ }
+}
+
+func TestGCBits(t *testing.T) {
+ verifyGCBits(t, TypeOf((*byte)(nil)), []byte{1})
+
+ // Building blocks for types seen by the compiler (like [2]Xscalar).
+ // The compiler will create the type structures for the derived types,
+ // including their GC metadata.
+ type Xscalar struct{ x uintptr }
+ type Xptr struct{ x *byte }
+ type Xptrscalar struct {
+ *byte
+ uintptr
+ }
+ type Xscalarptr struct {
+ uintptr
+ *byte
+ }
+ type Xbigptrscalar struct {
+ _ [100]*byte
+ _ [100]uintptr
+ }
+
+ var Tscalar, Tint64, Tptr, Tscalarptr, Tptrscalar, Tbigptrscalar Type
+ {
+ // Building blocks for types constructed by reflect.
+ // This code is in a separate block so that code below
+ // cannot accidentally refer to these.
+ // The compiler must NOT see types derived from these
+ // (for example, [2]Scalar must NOT appear in the program),
+ // or else reflect will use it instead of having to construct one.
+ // The goal is to test the construction.
+ type Scalar struct{ x uintptr }
+ type Ptr struct{ x *byte }
+ type Ptrscalar struct {
+ *byte
+ uintptr
+ }
+ type Scalarptr struct {
+ uintptr
+ *byte
+ }
+ type Bigptrscalar struct {
+ _ [100]*byte
+ _ [100]uintptr
+ }
+ type Int64 int64
+ Tscalar = TypeOf(Scalar{})
+ Tint64 = TypeOf(Int64(0))
+ Tptr = TypeOf(Ptr{})
+ Tscalarptr = TypeOf(Scalarptr{})
+ Tptrscalar = TypeOf(Ptrscalar{})
+ Tbigptrscalar = TypeOf(Bigptrscalar{})
+ }
+
+ empty := []byte{}
+
+ verifyGCBits(t, TypeOf(Xscalar{}), empty)
+ verifyGCBits(t, Tscalar, empty)
+ verifyGCBits(t, TypeOf(Xptr{}), lit(1))
+ verifyGCBits(t, Tptr, lit(1))
+ verifyGCBits(t, TypeOf(Xscalarptr{}), lit(0, 1))
+ verifyGCBits(t, Tscalarptr, lit(0, 1))
+ verifyGCBits(t, TypeOf(Xptrscalar{}), lit(1))
+ verifyGCBits(t, Tptrscalar, lit(1))
+
+ verifyGCBits(t, TypeOf([0]Xptr{}), empty)
+ verifyGCBits(t, ArrayOf(0, Tptr), empty)
+ verifyGCBits(t, TypeOf([1]Xptrscalar{}), lit(1))
+ verifyGCBits(t, ArrayOf(1, Tptrscalar), lit(1))
+ verifyGCBits(t, TypeOf([2]Xscalar{}), empty)
+ verifyGCBits(t, ArrayOf(2, Tscalar), empty)
+ verifyGCBits(t, TypeOf([10000]Xscalar{}), empty)
+ verifyGCBits(t, ArrayOf(10000, Tscalar), empty)
+ verifyGCBits(t, TypeOf([2]Xptr{}), lit(1, 1))
+ verifyGCBits(t, ArrayOf(2, Tptr), lit(1, 1))
+ verifyGCBits(t, TypeOf([10000]Xptr{}), rep(10000, lit(1)))
+ verifyGCBits(t, ArrayOf(10000, Tptr), rep(10000, lit(1)))
+ verifyGCBits(t, TypeOf([2]Xscalarptr{}), lit(0, 1, 0, 1))
+ verifyGCBits(t, ArrayOf(2, Tscalarptr), lit(0, 1, 0, 1))
+ verifyGCBits(t, TypeOf([10000]Xscalarptr{}), rep(10000, lit(0, 1)))
+ verifyGCBits(t, ArrayOf(10000, Tscalarptr), rep(10000, lit(0, 1)))
+ verifyGCBits(t, TypeOf([2]Xptrscalar{}), lit(1, 0, 1))
+ verifyGCBits(t, ArrayOf(2, Tptrscalar), lit(1, 0, 1))
+ verifyGCBits(t, TypeOf([10000]Xptrscalar{}), rep(10000, lit(1, 0)))
+ verifyGCBits(t, ArrayOf(10000, Tptrscalar), rep(10000, lit(1, 0)))
+ verifyGCBits(t, TypeOf([1][10000]Xptrscalar{}), rep(10000, lit(1, 0)))
+ verifyGCBits(t, ArrayOf(1, ArrayOf(10000, Tptrscalar)), rep(10000, lit(1, 0)))
+ verifyGCBits(t, TypeOf([2][10000]Xptrscalar{}), rep(2*10000, lit(1, 0)))
+ verifyGCBits(t, ArrayOf(2, ArrayOf(10000, Tptrscalar)), rep(2*10000, lit(1, 0)))
+ verifyGCBits(t, TypeOf([4]Xbigptrscalar{}), join(rep(3, join(rep(100, lit(1)), rep(100, lit(0)))), rep(100, lit(1))))
+ verifyGCBits(t, ArrayOf(4, Tbigptrscalar), join(rep(3, join(rep(100, lit(1)), rep(100, lit(0)))), rep(100, lit(1))))
+
+ verifyGCBitsSlice(t, TypeOf([]Xptr{}), 0, empty)
+ verifyGCBitsSlice(t, SliceOf(Tptr), 0, empty)
+ verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 1, lit(1))
+ verifyGCBitsSlice(t, SliceOf(Tptrscalar), 1, lit(1))
+ verifyGCBitsSlice(t, TypeOf([]Xscalar{}), 2, lit(0))
+ verifyGCBitsSlice(t, SliceOf(Tscalar), 2, lit(0))
+ verifyGCBitsSlice(t, TypeOf([]Xscalar{}), 10000, lit(0))
+ verifyGCBitsSlice(t, SliceOf(Tscalar), 10000, lit(0))
+ verifyGCBitsSlice(t, TypeOf([]Xptr{}), 2, lit(1))
+ verifyGCBitsSlice(t, SliceOf(Tptr), 2, lit(1))
+ verifyGCBitsSlice(t, TypeOf([]Xptr{}), 10000, lit(1))
+ verifyGCBitsSlice(t, SliceOf(Tptr), 10000, lit(1))
+ verifyGCBitsSlice(t, TypeOf([]Xscalarptr{}), 2, lit(0, 1))
+ verifyGCBitsSlice(t, SliceOf(Tscalarptr), 2, lit(0, 1))
+ verifyGCBitsSlice(t, TypeOf([]Xscalarptr{}), 10000, lit(0, 1))
+ verifyGCBitsSlice(t, SliceOf(Tscalarptr), 10000, lit(0, 1))
+ verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 2, lit(1, 0))
+ verifyGCBitsSlice(t, SliceOf(Tptrscalar), 2, lit(1, 0))
+ verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 10000, lit(1, 0))
+ verifyGCBitsSlice(t, SliceOf(Tptrscalar), 10000, lit(1, 0))
+ verifyGCBitsSlice(t, TypeOf([][10000]Xptrscalar{}), 1, rep(10000, lit(1, 0)))
+ verifyGCBitsSlice(t, SliceOf(ArrayOf(10000, Tptrscalar)), 1, rep(10000, lit(1, 0)))
+ verifyGCBitsSlice(t, TypeOf([][10000]Xptrscalar{}), 2, rep(10000, lit(1, 0)))
+ verifyGCBitsSlice(t, SliceOf(ArrayOf(10000, Tptrscalar)), 2, rep(10000, lit(1, 0)))
+ verifyGCBitsSlice(t, TypeOf([]Xbigptrscalar{}), 4, join(rep(100, lit(1)), rep(100, lit(0))))
+ verifyGCBitsSlice(t, SliceOf(Tbigptrscalar), 4, join(rep(100, lit(1)), rep(100, lit(0))))
+
+ verifyGCBits(t, TypeOf((chan [100]Xscalar)(nil)), lit(1))
+ verifyGCBits(t, ChanOf(BothDir, ArrayOf(100, Tscalar)), lit(1))
+
+ verifyGCBits(t, TypeOf((func([10000]Xscalarptr))(nil)), lit(1))
+ verifyGCBits(t, FuncOf([]Type{ArrayOf(10000, Tscalarptr)}, nil, false), lit(1))
+
+ verifyGCBits(t, TypeOf((map[[10000]Xscalarptr]Xscalar)(nil)), lit(1))
+ verifyGCBits(t, MapOf(ArrayOf(10000, Tscalarptr), Tscalar), lit(1))
+
+ verifyGCBits(t, TypeOf((*[10000]Xscalar)(nil)), lit(1))
+ verifyGCBits(t, PtrTo(ArrayOf(10000, Tscalar)), lit(1))
+
+ verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
+ verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
+
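+ // hdr stands in for the map bucket's 8-byte tophash array, expressed
+ // in pointer-sized words; those words hold no pointers, so the
+ // expected masks below all begin with 8/PtrSize zero bytes
+ // (one mask byte per word).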
+ hdr := make([]byte, 8/PtrSize)
+
+ verifyMapBucket := func(t *testing.T, k, e Type, m interface{}, want []byte) {
+ verifyGCBits(t, MapBucketOf(k, e), want)
+ verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
+ }
+ verifyMapBucket(t,
+ Tscalar, Tptr,
+ map[Xscalar]Xptr(nil),
+ join(hdr, rep(8, lit(0)), rep(8, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ Tscalarptr, Tptr,
+ map[Xscalarptr]Xptr(nil),
+ join(hdr, rep(8, lit(0, 1)), rep(8, lit(1)), lit(1)))
+ verifyMapBucket(t, Tint64, Tptr,
+ map[int64]Xptr(nil),
+ join(hdr, rep(8, rep(8/PtrSize, lit(0))), rep(8, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ Tscalar, Tscalar,
+ map[Xscalar]Xscalar(nil),
+ empty)
+ verifyMapBucket(t,
+ ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
+ map[[2]Xscalarptr][3]Xptrscalar(nil),
+ join(hdr, rep(8*2, lit(0, 1)), rep(8*3, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/PtrSize, Tscalarptr), ArrayOf(64/PtrSize, Tptrscalar),
+ map[[64 / PtrSize]Xscalarptr][64 / PtrSize]Xptrscalar(nil),
+ join(hdr, rep(8*64/PtrSize, lit(0, 1)), rep(8*64/PtrSize, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/PtrSize+1, Tscalarptr), ArrayOf(64/PtrSize, Tptrscalar),
+ map[[64/PtrSize + 1]Xscalarptr][64 / PtrSize]Xptrscalar(nil),
+ join(hdr, rep(8, lit(1)), rep(8*64/PtrSize, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/PtrSize, Tscalarptr), ArrayOf(64/PtrSize+1, Tptrscalar),
+ map[[64 / PtrSize]Xscalarptr][64/PtrSize + 1]Xptrscalar(nil),
+ join(hdr, rep(8*64/PtrSize, lit(0, 1)), rep(8, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/PtrSize+1, Tscalarptr), ArrayOf(64/PtrSize+1, Tptrscalar),
+ map[[64/PtrSize + 1]Xscalarptr][64/PtrSize + 1]Xptrscalar(nil),
+ join(hdr, rep(8, lit(1)), rep(8, lit(1)), lit(1)))
+}
+
+func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) }
+func join(b ...[]byte) []byte { return bytes.Join(b, nil) }
+func lit(x ...byte) []byte { return x }
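+
+// Reading the masks above: each byte is the pointer bit for one
+// pointer-sized word of the type. For example, Scalarptr (one scalar
+// word followed by one pointer word) has mask lit(0, 1), so
+//
+//	rep(2, lit(0, 1)) // == []byte{0, 1, 0, 1}
+//
+// matches the expectation verified for [2]Xscalarptr above.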
+
+func TestTypeOfTypeOf(t *testing.T) {
+ // Check that all the type constructors return concrete *rtype implementations.
+ // It's difficult to test directly because the reflect package is only at arm's length.
+ // The easiest thing to do is just call a function that crashes if it doesn't get an *rtype.
+ check := func(name string, typ Type) {
+ if underlying := TypeOf(typ).String(); underlying != "*reflect.rtype" {
+ t.Errorf("%v returned %v, not *reflect.rtype", name, underlying)
+ }
+ }
+
+ type T struct{ int }
+ check("TypeOf", TypeOf(T{}))
+
+ check("ArrayOf", ArrayOf(10, TypeOf(T{})))
+ check("ChanOf", ChanOf(BothDir, TypeOf(T{})))
+ check("FuncOf", FuncOf([]Type{TypeOf(T{})}, nil, false))
+ check("MapOf", MapOf(TypeOf(T{}), TypeOf(T{})))
+ check("PtrTo", PtrTo(TypeOf(T{})))
+ check("SliceOf", SliceOf(TypeOf(T{})))
+}
+
+type XM struct{ _ bool }
+
+func (*XM) String() string { return "" }
+
+func TestPtrToMethods(t *testing.T) {
+ var y struct{ XM }
+ yp := New(TypeOf(y)).Interface()
+ _, ok := yp.(fmt.Stringer)
+ if !ok {
+ t.Fatal("does not implement Stringer, but should")
+ }
+}
+
+func TestMapAlloc(t *testing.T) {
+ m := ValueOf(make(map[int]int, 10))
+ k := ValueOf(5)
+ v := ValueOf(7)
+ allocs := testing.AllocsPerRun(100, func() {
+ m.SetMapIndex(k, v)
+ })
+ if allocs > 0.5 {
+ t.Errorf("allocs per map assignment: want 0 got %f", allocs)
+ }
+
+ const size = 1000
+ tmp := 0
+ val := ValueOf(&tmp).Elem()
+ allocs = testing.AllocsPerRun(100, func() {
+ mv := MakeMapWithSize(TypeOf(map[int]int{}), size)
+ // Only add half of the capacity, to avoid triggering re-allocations due to too many overflowing buckets.
+ for i := 0; i < size/2; i++ {
+ val.SetInt(int64(i))
+ mv.SetMapIndex(val, val)
+ }
+ })
+ if allocs > 10 {
+ t.Errorf("allocs per map assignment: want at most 10 got %f", allocs)
+ }
+ // Empirical testing shows that with a capacity hint a single run triggers 3 allocations, and without
+ // one it triggers 91. The threshold is set to 10 so that the test is not overly brittle if the initial
+ // allocation of the map changes, while still catching a regression where we keep re-allocating in the
+ // hashmap as new entries are added.
+}
+
+func TestChanAlloc(t *testing.T) {
+ // Note: for a chan int, the return Value must be allocated, so we
+ // use a chan *int instead.
+ c := ValueOf(make(chan *int, 1))
+ v := ValueOf(new(int))
+ allocs := testing.AllocsPerRun(100, func() {
+ c.Send(v)
+ _, _ = c.Recv()
+ })
+ if allocs < 0.5 || allocs > 1.5 {
+ t.Errorf("allocs per chan send/recv: want 1 got %f", allocs)
+ }
+ // Note: there is one allocation in reflect.recv which seems to be
+ // a limitation of escape analysis. If that is ever fixed the
+ // allocs < 0.5 condition will trigger and this test should be fixed.
+}
+
+type TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678 int
+
+type nameTest struct {
+ v interface{}
+ want string
+}
+
+var nameTests = []nameTest{
+ {(*int32)(nil), "int32"},
+ {(*D1)(nil), "D1"},
+ {(*[]D1)(nil), ""},
+ {(*chan D1)(nil), ""},
+ {(*func() D1)(nil), ""},
+ {(*<-chan D1)(nil), ""},
+ {(*chan<- D1)(nil), ""},
+ {(*interface{})(nil), ""},
+ {(*interface {
+ F()
+ })(nil), ""},
+ {(*TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678)(nil), "TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678"},
+}
+
+func TestNames(t *testing.T) {
+ for _, test := range nameTests {
+ typ := TypeOf(test.v).Elem()
+ if got := typ.Name(); got != test.want {
+ t.Errorf("%v Name()=%q, want %q", typ, got, test.want)
+ }
+ }
+}
+
+func TestExported(t *testing.T) {
+ type ΦExported struct{}
+ type φUnexported struct{}
+ type BigP *big
+ type P int
+ type p *P
+ type P2 p
+ type p3 p
+
+ type exportTest struct {
+ v interface{}
+ want bool
+ }
+ exportTests := []exportTest{
+ {D1{}, true},
+ {(*D1)(nil), true},
+ {big{}, false},
+ {(*big)(nil), false},
+ {(BigP)(nil), true},
+ {(*BigP)(nil), true},
+ {ΦExported{}, true},
+ {φUnexported{}, false},
+ {P(0), true},
+ {(p)(nil), false},
+ {(P2)(nil), true},
+ {(p3)(nil), false},
+ }
+
+ for i, test := range exportTests {
+ typ := TypeOf(test.v)
+ if got := IsExported(typ); got != test.want {
+ t.Errorf("%d: %s exported=%v, want %v", i, typ.Name(), got, test.want)
+ }
+ }
+}
+
+type embed struct {
+ EmbedWithUnexpMeth
+}
+
+func TestNameBytesAreAligned(t *testing.T) {
+ typ := TypeOf(embed{})
+ b := FirstMethodNameBytes(typ)
+ v := uintptr(unsafe.Pointer(b))
+ if v%unsafe.Alignof((*byte)(nil)) != 0 {
+ t.Errorf("reflect.name.bytes pointer is not aligned: %x", v)
+ }
+}
+
+func TestTypeStrings(t *testing.T) {
+ type stringTest struct {
+ typ Type
+ want string
+ }
+ stringTests := []stringTest{
+ {TypeOf(func(int) {}), "func(int)"},
+ {FuncOf([]Type{TypeOf(int(0))}, nil, false), "func(int)"},
+ {TypeOf(XM{}), "reflect_test.XM"},
+ {TypeOf(new(XM)), "*reflect_test.XM"},
+ {TypeOf(new(XM).String), "func() string"},
+ {TypeOf(new(XM)).Method(0).Type, "func(*reflect_test.XM) string"},
+ {ChanOf(3, TypeOf(XM{})), "chan reflect_test.XM"},
+ {MapOf(TypeOf(int(0)), TypeOf(XM{})), "map[int]reflect_test.XM"},
+ {ArrayOf(3, TypeOf(XM{})), "[3]reflect_test.XM"},
+ {ArrayOf(3, TypeOf(struct{}{})), "[3]struct {}"},
+ }
+
+ for i, test := range stringTests {
+ if got, want := test.typ.String(), test.want; got != want {
+ t.Errorf("type %d String()=%q, want %q", i, got, want)
+ }
+ }
+}
+
+func TestOffsetLock(t *testing.T) {
+ var wg sync.WaitGroup
+ for i := 0; i < 4; i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ for j := 0; j < 50; j++ {
+ ResolveReflectName(fmt.Sprintf("OffsetLockName:%d:%d", i, j))
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func BenchmarkNew(b *testing.B) {
+ v := TypeOf(XM{})
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ New(v)
+ }
+ })
+}
+
+func TestSwapper(t *testing.T) {
+ type I int
+ var a, b, c I
+ type pair struct {
+ x, y int
+ }
+ type pairPtr struct {
+ x, y int
+ p *I
+ }
+ type S string
+
+ tests := []struct {
+ in interface{}
+ i, j int
+ want interface{}
+ }{
+ {
+ in: []int{1, 20, 300},
+ i: 0,
+ j: 2,
+ want: []int{300, 20, 1},
+ },
+ {
+ in: []uintptr{1, 20, 300},
+ i: 0,
+ j: 2,
+ want: []uintptr{300, 20, 1},
+ },
+ {
+ in: []int16{1, 20, 300},
+ i: 0,
+ j: 2,
+ want: []int16{300, 20, 1},
+ },
+ {
+ in: []int8{1, 20, 100},
+ i: 0,
+ j: 2,
+ want: []int8{100, 20, 1},
+ },
+ {
+ in: []*I{&a, &b, &c},
+ i: 0,
+ j: 2,
+ want: []*I{&c, &b, &a},
+ },
+ {
+ in: []string{"eric", "sergey", "larry"},
+ i: 0,
+ j: 2,
+ want: []string{"larry", "sergey", "eric"},
+ },
+ {
+ in: []S{"eric", "sergey", "larry"},
+ i: 0,
+ j: 2,
+ want: []S{"larry", "sergey", "eric"},
+ },
+ {
+ in: []pair{{1, 2}, {3, 4}, {5, 6}},
+ i: 0,
+ j: 2,
+ want: []pair{{5, 6}, {3, 4}, {1, 2}},
+ },
+ {
+ in: []pairPtr{{1, 2, &a}, {3, 4, &b}, {5, 6, &c}},
+ i: 0,
+ j: 2,
+ want: []pairPtr{{5, 6, &c}, {3, 4, &b}, {1, 2, &a}},
+ },
+ }
+
+ for i, tt := range tests {
+ inStr := fmt.Sprint(tt.in)
+ Swapper(tt.in)(tt.i, tt.j)
+ if !DeepEqual(tt.in, tt.want) {
+ t.Errorf("%d. swapping %v and %v of %v = %v; want %v", i, tt.i, tt.j, inStr, tt.in, tt.want)
+ }
+ }
+}
+
+// TestUnaddressableField tests that the reflect package will not allow
+// a value whose type is declared in another package and has an
+// unexported field to be assigned to a structurally identical local type.
+//
+// This ensures that unexported fields cannot be modified by other packages.
+func TestUnaddressableField(t *testing.T) {
+ var b Buffer // type defined in reflect, a different package
+ var localBuffer struct {
+ buf []byte
+ }
+ lv := ValueOf(&localBuffer).Elem()
+ rv := ValueOf(b)
+ shouldPanic("Set", func() {
+ lv.Set(rv)
+ })
+}
+
+type Tint int
+
+type Tint2 = Tint
+
+type Talias1 struct {
+ byte
+ uint8
+ int
+ int32
+ rune
+}
+
+type Talias2 struct {
+ Tint
+ Tint2
+}
+
+func TestAliasNames(t *testing.T) {
+ t1 := Talias1{byte: 1, uint8: 2, int: 3, int32: 4, rune: 5}
+ out := fmt.Sprintf("%#v", t1)
+ want := "reflect_test.Talias1{byte:0x1, uint8:0x2, int:3, int32:4, rune:5}"
+ if out != want {
+ t.Errorf("Talias1 print:\nhave: %s\nwant: %s", out, want)
+ }
+
+ t2 := Talias2{Tint: 1, Tint2: 2}
+ out = fmt.Sprintf("%#v", t2)
+ want = "reflect_test.Talias2{Tint:1, Tint2:2}"
+ if out != want {
+ t.Errorf("Talias2 print:\nhave: %s\nwant: %s", out, want)
+ }
+}
+
+func TestIssue22031(t *testing.T) {
+ type s []struct{ C int }
+
+ type t1 struct{ s }
+ type t2 struct{ f s }
+
+ tests := []Value{
+ ValueOf(t1{s{{}}}).Field(0).Index(0).Field(0),
+ ValueOf(t2{s{{}}}).Field(0).Index(0).Field(0),
+ }
+
+ for i, test := range tests {
+ if test.CanSet() {
+ t.Errorf("%d: CanSet: got true, want false", i)
+ }
+ }
+}
+
+type NonExportedFirst int
+
+func (i NonExportedFirst) ΦExported() {}
+func (i NonExportedFirst) nonexported() int { panic("wrong") }
+
+func TestIssue22073(t *testing.T) {
+ m := ValueOf(NonExportedFirst(0)).Method(0)
+
+ if got := m.Type().NumOut(); got != 0 {
+ t.Errorf("NumOut: got %v, want 0", got)
+ }
+
+ // Shouldn't panic.
+ m.Call(nil)
+}
+
+func TestMapIterNonEmptyMap(t *testing.T) {
+ m := map[string]int{"one": 1, "two": 2, "three": 3}
+ iter := ValueOf(m).MapRange()
+ if got, want := iterateToString(iter), `[one: 1, three: 3, two: 2]`; got != want {
+ t.Errorf("iterator returned %s (after sorting), want %s", got, want)
+ }
+}
+
+func TestMapIterNilMap(t *testing.T) {
+ var m map[string]int
+ iter := ValueOf(m).MapRange()
+ if got, want := iterateToString(iter), `[]`; got != want {
+ t.Errorf("non-empty result iteratoring nil map: %s", got)
+ }
+}
+
+func TestMapIterSafety(t *testing.T) {
+ // Using a zero MapIter causes a panic, but not a crash.
+ func() {
+ defer func() { recover() }()
+ new(MapIter).Key()
+ t.Fatal("Key did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ new(MapIter).Value()
+ t.Fatal("Value did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ new(MapIter).Next()
+ t.Fatal("Next did not panic")
+ }()
+
+ // Calling Key/Value on a MapIter before Next
+ // causes a panic, but not a crash.
+ var m map[string]int
+ iter := ValueOf(m).MapRange()
+
+ func() {
+ defer func() { recover() }()
+ iter.Key()
+ t.Fatal("Key did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ iter.Value()
+ t.Fatal("Value did not panic")
+ }()
+
+ // Calling Next, Key, or Value on an exhausted iterator
+ // causes a panic, but not a crash.
+ iter.Next() // -> false
+ func() {
+ defer func() { recover() }()
+ iter.Key()
+ t.Fatal("Key did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ iter.Value()
+ t.Fatal("Value did not panic")
+ }()
+ func() {
+ defer func() { recover() }()
+ iter.Next()
+ t.Fatal("Next did not panic")
+ }()
+}
+
+func TestMapIterNext(t *testing.T) {
+ // The first call to Next should reflect any
+ // insertions to the map since the iterator was created.
+ m := map[string]int{}
+ iter := ValueOf(m).MapRange()
+ m["one"] = 1
+ if got, want := iterateToString(iter), `[one: 1]`; got != want {
+ t.Errorf("iterator returned deleted elements: got %s, want %s", got, want)
+ }
+}
+
+func TestMapIterDelete0(t *testing.T) {
+ // Delete all elements before first iteration.
+ m := map[string]int{"one": 1, "two": 2, "three": 3}
+ iter := ValueOf(m).MapRange()
+ delete(m, "one")
+ delete(m, "two")
+ delete(m, "three")
+ if got, want := iterateToString(iter), `[]`; got != want {
+ t.Errorf("iterator returned deleted elements: got %s, want %s", got, want)
+ }
+}
+
+func TestMapIterDelete1(t *testing.T) {
+ // Delete all elements after first iteration.
+ m := map[string]int{"one": 1, "two": 2, "three": 3}
+ iter := ValueOf(m).MapRange()
+ var got []string
+ for iter.Next() {
+ got = append(got, fmt.Sprint(iter.Key(), iter.Value()))
+ delete(m, "one")
+ delete(m, "two")
+ delete(m, "three")
+ }
+ if len(got) != 1 {
+ t.Errorf("iterator returned wrong number of elements: got %d, want 1", len(got))
+ }
+}
+
+// iterateToString returns the set of elements
+// returned by an iterator in readable form.
+func iterateToString(it *MapIter) string {
+ var got []string
+ for it.Next() {
+ line := fmt.Sprintf("%v: %v", it.Key(), it.Value())
+ got = append(got, line)
+ }
+ sort.Strings(got)
+ return "[" + strings.Join(got, ", ") + "]"
+}
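+
+// A caller-side sketch of the MapRange API exercised above
+// (hypothetical snippet, not one of this file's tests):
+//
+//	m := map[string]int{"a": 1}
+//	iter := reflect.ValueOf(m).MapRange()
+//	for iter.Next() {
+//		fmt.Println(iter.Key(), iter.Value()) // a 1
+//	}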
diff --git a/src/reflect/asm_386.s b/src/reflect/asm_386.s
new file mode 100644
index 0000000..e79beb6
--- /dev/null
+++ b/src/reflect/asm_386.s
@@ -0,0 +1,36 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No argsize here; gc generates argsize info at call site.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16
+ NO_LOCAL_POINTERS
+ MOVL DX, 0(SP)
+ LEAL argframe+0(FP), CX
+ MOVL CX, 4(SP)
+ MOVB $0, 12(SP)
+ LEAL 12(SP), AX
+ MOVL AX, 8(SP)
+ CALL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No argsize here; gc generates argsize info at call site.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16
+ NO_LOCAL_POINTERS
+ MOVL DX, 0(SP)
+ LEAL argframe+0(FP), CX
+ MOVL CX, 4(SP)
+ MOVB $0, 12(SP)
+ LEAL 12(SP), AX
+ MOVL AX, 8(SP)
+ CALL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_amd64.s b/src/reflect/asm_amd64.s
new file mode 100644
index 0000000..5c8e565
--- /dev/null
+++ b/src/reflect/asm_amd64.s
@@ -0,0 +1,40 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+// makeFuncStub must be ABIInternal because it is placed directly
+// in function values.
+TEXT ·makeFuncStub<ABIInternal>(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOVQ DX, 0(SP)
+ LEAQ argframe+0(FP), CX
+ MOVQ CX, 8(SP)
+ MOVB $0, 24(SP)
+ LEAQ 24(SP), AX
+ MOVQ AX, 16(SP)
+ CALL ·callReflect<ABIInternal>(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+// methodValueCall must be ABIInternal because it is placed directly
+// in function values.
+TEXT ·methodValueCall<ABIInternal>(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOVQ DX, 0(SP)
+ LEAQ argframe+0(FP), CX
+ MOVQ CX, 8(SP)
+ MOVB $0, 24(SP)
+ LEAQ 24(SP), AX
+ MOVQ AX, 16(SP)
+ CALL ·callMethod<ABIInternal>(SB)
+ RET
diff --git a/src/reflect/asm_arm.s b/src/reflect/asm_arm.s
new file mode 100644
index 0000000..cd50d33
--- /dev/null
+++ b/src/reflect/asm_arm.s
@@ -0,0 +1,38 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No argsize here; gc generates argsize info at call site.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16
+ NO_LOCAL_POINTERS
+ MOVW R7, 4(R13)
+ MOVW $argframe+0(FP), R1
+ MOVW R1, 8(R13)
+ MOVW $0, R1
+ MOVB R1, 16(R13)
+ ADD $16, R13, R1
+ MOVW R1, 12(R13)
+ BL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No argsize here; gc generates argsize info at call site.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16
+ NO_LOCAL_POINTERS
+ MOVW R7, 4(R13)
+ MOVW $argframe+0(FP), R1
+ MOVW R1, 8(R13)
+ MOVW $0, R1
+ MOVB R1, 16(R13)
+ ADD $16, R13, R1
+ MOVW R1, 12(R13)
+ BL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_arm64.s b/src/reflect/asm_arm64.s
new file mode 100644
index 0000000..28bb86c
--- /dev/null
+++ b/src/reflect/asm_arm64.s
@@ -0,0 +1,36 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40
+ NO_LOCAL_POINTERS
+ MOVD R26, 8(RSP)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, 16(RSP)
+ MOVB $0, 32(RSP)
+ ADD $32, RSP, R3
+ MOVD R3, 24(RSP)
+ BL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40
+ NO_LOCAL_POINTERS
+ MOVD R26, 8(RSP)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, 16(RSP)
+ MOVB $0, 32(RSP)
+ ADD $32, RSP, R3
+ MOVD R3, 24(RSP)
+ BL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_mips64x.s b/src/reflect/asm_mips64x.s
new file mode 100644
index 0000000..6f76685
--- /dev/null
+++ b/src/reflect/asm_mips64x.s
@@ -0,0 +1,40 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build mips64 mips64le
+
+#include "textflag.h"
+#include "funcdata.h"
+
+#define REGCTXT R22
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOVV REGCTXT, 8(R29)
+ MOVV $argframe+0(FP), R1
+ MOVV R1, 16(R29)
+ MOVB R0, 32(R29)
+ ADDV $32, R29, R1
+ MOVV R1, 24(R29)
+ JAL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOVV REGCTXT, 8(R29)
+ MOVV $argframe+0(FP), R1
+ MOVV R1, 16(R29)
+ MOVB R0, 32(R29)
+ ADDV $32, R29, R1
+ MOVV R1, 24(R29)
+ JAL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_mipsx.s b/src/reflect/asm_mipsx.s
new file mode 100644
index 0000000..5a5c53e
--- /dev/null
+++ b/src/reflect/asm_mipsx.s
@@ -0,0 +1,40 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build mips mipsle
+
+#include "textflag.h"
+#include "funcdata.h"
+
+#define REGCTXT R22
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16
+ NO_LOCAL_POINTERS
+ MOVW REGCTXT, 4(R29)
+ MOVW $argframe+0(FP), R1
+ MOVW R1, 8(R29)
+ MOVB R0, 16(R29)
+ ADD $16, R29, R1
+ MOVW R1, 12(R29)
+ JAL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16
+ NO_LOCAL_POINTERS
+ MOVW REGCTXT, 4(R29)
+ MOVW $argframe+0(FP), R1
+ MOVW R1, 8(R29)
+ MOVB R0, 16(R29)
+ ADD $16, R29, R1
+ MOVW R1, 12(R29)
+ JAL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_ppc64x.s b/src/reflect/asm_ppc64x.s
new file mode 100644
index 0000000..4609f6b
--- /dev/null
+++ b/src/reflect/asm_ppc64x.s
@@ -0,0 +1,39 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ppc64 ppc64le
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "asm_ppc64x.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOVD R11, FIXED_FRAME+0(R1)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, FIXED_FRAME+8(R1)
+ MOVB R0, FIXED_FRAME+24(R1)
+ ADD $FIXED_FRAME+24, R1, R3
+ MOVD R3, FIXED_FRAME+16(R1)
+ BL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOVD R11, FIXED_FRAME+0(R1)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, FIXED_FRAME+8(R1)
+ MOVB R0, FIXED_FRAME+24(R1)
+ ADD $FIXED_FRAME+24, R1, R3
+ MOVD R3, FIXED_FRAME+16(R1)
+ BL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_riscv64.s b/src/reflect/asm_riscv64.s
new file mode 100644
index 0000000..e6fab39
--- /dev/null
+++ b/src/reflect/asm_riscv64.s
@@ -0,0 +1,36 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOV CTXT, 8(SP)
+ MOV $argframe+0(FP), T0
+ MOV T0, 16(SP)
+ ADD $32, SP, T1
+ MOV T1, 24(SP)
+ MOVB ZERO, 32(SP)
+ CALL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOV CTXT, 8(SP)
+ MOV $argframe+0(FP), T0
+ MOV T0, 16(SP)
+ ADD $32, SP, T1
+ MOV T1, 24(SP)
+ MOVB ZERO, 32(SP)
+ CALL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_s390x.s b/src/reflect/asm_s390x.s
new file mode 100644
index 0000000..cb7954c
--- /dev/null
+++ b/src/reflect/asm_s390x.s
@@ -0,0 +1,36 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOVD R12, 8(R15)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, 16(R15)
+ MOVB $0, 32(R15)
+ ADD $32, R15, R3
+ MOVD R3, 24(R15)
+ BL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+ MOVD R12, 8(R15)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, 16(R15)
+ MOVB $0, 32(R15)
+ ADD $32, R15, R3
+ MOVD R3, 24(R15)
+ BL ·callMethod(SB)
+ RET
diff --git a/src/reflect/asm_wasm.s b/src/reflect/asm_wasm.s
new file mode 100644
index 0000000..63b4d94
--- /dev/null
+++ b/src/reflect/asm_wasm.s
@@ -0,0 +1,50 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+
+ MOVD CTXT, 0(SP)
+
+ Get SP
+ Get SP
+ I64ExtendI32U
+ I64Const $argframe+0(FP)
+ I64Add
+ I64Store $8
+
+ MOVB $0, 24(SP)
+ MOVD $24(SP), 16(SP)
+
+ CALL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$32
+ NO_LOCAL_POINTERS
+
+ MOVD CTXT, 0(SP)
+
+ Get SP
+ Get SP
+ I64ExtendI32U
+ I64Const $argframe+0(FP)
+ I64Add
+ I64Store $8
+
+ MOVB $0, 24(SP)
+ MOVD $24(SP), 16(SP)
+
+ CALL ·callMethod(SB)
+ RET
diff --git a/src/reflect/deepequal.go b/src/reflect/deepequal.go
new file mode 100644
index 0000000..d951d8d
--- /dev/null
+++ b/src/reflect/deepequal.go
@@ -0,0 +1,219 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Deep equality test via reflection
+
+package reflect
+
+import "unsafe"
+
+// During deepValueEqual, we must keep track of the checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited comparisons are stored in a map indexed by visit.
+type visit struct {
+ a1 unsafe.Pointer
+ a2 unsafe.Pointer
+ typ Type
+}
+
+// Tests for deep equality using reflected types. The map argument tracks
+// comparisons that have already been seen, which allows short circuiting on
+// recursive types.
+func deepValueEqual(v1, v2 Value, visited map[visit]bool) bool {
+ if !v1.IsValid() || !v2.IsValid() {
+ return v1.IsValid() == v2.IsValid()
+ }
+ if v1.Type() != v2.Type() {
+ return false
+ }
+
+ // We want to avoid putting more in the visited map than we need to.
+ // For any possible reference cycle that might be encountered,
+ // hard(v1, v2) needs to return true for at least one of the types in the cycle,
+ // and it's safe and valid to get Value's internal pointer.
+ hard := func(v1, v2 Value) bool {
+ switch v1.Kind() {
+ case Ptr:
+ if v1.typ.ptrdata == 0 {
+ // go:notinheap pointers can't be cyclic.
+ // At least, all of our current uses of go:notinheap have
+ // that property. The runtime ones aren't cyclic (and we don't use
+ // DeepEqual on them anyway), and the cgo-generated ones are
+ // all empty structs.
+ return false
+ }
+ fallthrough
+ case Map, Slice, Interface:
+ // Nil pointers cannot be cyclic. Avoid putting them in the visited map.
+ return !v1.IsNil() && !v2.IsNil()
+ }
+ return false
+ }
+
+ if hard(v1, v2) {
+ // For a Ptr or Map value, we need to check flagIndir,
+ // which we do by calling the pointer method.
+ // For Slice or Interface, flagIndir is always set,
+ // and using v.ptr suffices.
+ ptrval := func(v Value) unsafe.Pointer {
+ switch v.Kind() {
+ case Ptr, Map:
+ return v.pointer()
+ default:
+ return v.ptr
+ }
+ }
+ addr1 := ptrval(v1)
+ addr2 := ptrval(v2)
+ if uintptr(addr1) > uintptr(addr2) {
+ // Canonicalize order to reduce number of entries in visited.
+ // Assumes non-moving garbage collector.
+ addr1, addr2 = addr2, addr1
+ }
+
+ // Short circuit if references are already seen.
+ typ := v1.Type()
+ v := visit{addr1, addr2, typ}
+ if visited[v] {
+ return true
+ }
+
+ // Remember for later.
+ visited[v] = true
+ }
+
+ switch v1.Kind() {
+ case Array:
+ for i := 0; i < v1.Len(); i++ {
+ if !deepValueEqual(v1.Index(i), v2.Index(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Slice:
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !deepValueEqual(v1.Index(i), v2.Index(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Interface:
+ if v1.IsNil() || v2.IsNil() {
+ return v1.IsNil() == v2.IsNil()
+ }
+ return deepValueEqual(v1.Elem(), v2.Elem(), visited)
+ case Ptr:
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ return deepValueEqual(v1.Elem(), v2.Elem(), visited)
+ case Struct:
+ for i, n := 0, v1.NumField(); i < n; i++ {
+ if !deepValueEqual(v1.Field(i), v2.Field(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Map:
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for _, k := range v1.MapKeys() {
+ val1 := v1.MapIndex(k)
+ val2 := v2.MapIndex(k)
+ if !val1.IsValid() || !val2.IsValid() || !deepValueEqual(val1, val2, visited) {
+ return false
+ }
+ }
+ return true
+ case Func:
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ // Can't do better than this:
+ return false
+ default:
+ // Normal equality suffices
+ return valueInterface(v1, false) == valueInterface(v2, false)
+ }
+}
+
+// DeepEqual reports whether x and y are ``deeply equal,'' defined as follows.
+// Two values of identical type are deeply equal if one of the following cases applies.
+// Values of distinct types are never deeply equal.
+//
+// Array values are deeply equal when their corresponding elements are deeply equal.
+//
+// Struct values are deeply equal if their corresponding fields,
+// both exported and unexported, are deeply equal.
+//
+// Func values are deeply equal if both are nil; otherwise they are not deeply equal.
+//
+// Interface values are deeply equal if they hold deeply equal concrete values.
+//
+// Map values are deeply equal when all of the following are true:
+// they are both nil or both non-nil, they have the same length,
+// and either they are the same map object or their corresponding keys
+// (matched using Go equality) map to deeply equal values.
+//
+// Pointer values are deeply equal if they are equal using Go's == operator
+// or if they point to deeply equal values.
+//
+// Slice values are deeply equal when all of the following are true:
+// they are both nil or both non-nil, they have the same length,
+// and either they point to the same initial entry of the same underlying array
+// (that is, &x[0] == &y[0]) or their corresponding elements (up to length) are deeply equal.
+// Note that a non-nil empty slice and a nil slice (for example, []byte{} and []byte(nil))
+// are not deeply equal.
+//
+// Other values - numbers, bools, strings, and channels - are deeply equal
+// if they are equal using Go's == operator.
+//
+// In general DeepEqual is a recursive relaxation of Go's == operator.
+// However, this idea is impossible to implement without some inconsistency.
+// Specifically, it is possible for a value to be unequal to itself,
+// either because it is of func type (uncomparable in general)
+// or because it is a floating-point NaN value (not equal to itself in floating-point comparison),
+// or because it is an array, struct, or interface containing
+// such a value.
+// On the other hand, pointer values are always equal to themselves,
+// even if they point at or contain such problematic values,
+// because they compare equal using Go's == operator, and that
+// is a sufficient condition to be deeply equal, regardless of content.
+// DeepEqual has been defined so that the same short-cut applies
+// to slices and maps: if x and y are the same slice or the same map,
+// they are deeply equal regardless of content.
+//
+// As DeepEqual traverses the data values it may find a cycle. The
+// second and subsequent times that DeepEqual compares two pointer
+// values that have been compared before, it treats the values as
+// equal rather than examining the values to which they point.
+// This ensures that DeepEqual terminates.
+func DeepEqual(x, y interface{}) bool {
+ if x == nil || y == nil {
+ return x == y
+ }
+ v1 := ValueOf(x)
+ v2 := ValueOf(y)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ return deepValueEqual(v1, v2, make(map[visit]bool))
+}
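+
+// Hypothetical snippets (not part of this file) illustrating the edge
+// cases documented above:
+//
+//	DeepEqual([]byte{}, []byte(nil))  // false: non-nil empty vs nil slice
+//	DeepEqual(math.NaN(), math.NaN()) // false: NaN is not == to itself
+//	f := func() {}
+//	DeepEqual(f, f)                   // false: non-nil funcs are never deeply equal
+//	p := &[]float64{math.NaN()}
+//	DeepEqual(p, p)                   // true: equal pointers short-circuit the contents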
diff --git a/src/reflect/example_test.go b/src/reflect/example_test.go
new file mode 100644
index 0000000..23c08e4
--- /dev/null
+++ b/src/reflect/example_test.go
@@ -0,0 +1,168 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+func ExampleKind() {
+ for _, v := range []interface{}{"hi", 42, func() {}} {
+ switch v := reflect.ValueOf(v); v.Kind() {
+ case reflect.String:
+ fmt.Println(v.String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ fmt.Println(v.Int())
+ default:
+ fmt.Printf("unhandled kind %s", v.Kind())
+ }
+ }
+
+ // Output:
+ // hi
+ // 42
+ // unhandled kind func
+}
+
+func ExampleMakeFunc() {
+ // swap is the implementation passed to MakeFunc.
+ // It must work in terms of reflect.Values so that it is possible
+ // to write code without knowing beforehand what the types
+ // will be.
+ swap := func(in []reflect.Value) []reflect.Value {
+ return []reflect.Value{in[1], in[0]}
+ }
+
+ // makeSwap expects fptr to be a pointer to a nil function.
+ // It sets that pointer to a new function created with MakeFunc.
+ // When the function is invoked, reflect turns the arguments
+ // into Values, calls swap, and then turns swap's result slice
+ // into the values returned by the new function.
+ makeSwap := func(fptr interface{}) {
+ // fptr is a pointer to a function.
+ // Obtain the function value itself (likely nil) as a reflect.Value
+ // so that we can query its type and then set the value.
+ fn := reflect.ValueOf(fptr).Elem()
+
+ // Make a function of the right type.
+ v := reflect.MakeFunc(fn.Type(), swap)
+
+ // Assign it to the value fn represents.
+ fn.Set(v)
+ }
+
+ // Make and call a swap function for ints.
+ var intSwap func(int, int) (int, int)
+ makeSwap(&intSwap)
+ fmt.Println(intSwap(0, 1))
+
+ // Make and call a swap function for float64s.
+ var floatSwap func(float64, float64) (float64, float64)
+ makeSwap(&floatSwap)
+ fmt.Println(floatSwap(2.72, 3.14))
+
+ // Output:
+ // 1 0
+ // 3.14 2.72
+}
+
+func ExampleStructTag() {
+ type S struct {
+ F string `species:"gopher" color:"blue"`
+ }
+
+ s := S{}
+ st := reflect.TypeOf(s)
+ field := st.Field(0)
+ fmt.Println(field.Tag.Get("color"), field.Tag.Get("species"))
+
+ // Output:
+ // blue gopher
+}
+
+func ExampleStructTag_Lookup() {
+ type S struct {
+ F0 string `alias:"field_0"`
+ F1 string `alias:""`
+ F2 string
+ }
+
+ s := S{}
+ st := reflect.TypeOf(s)
+ for i := 0; i < st.NumField(); i++ {
+ field := st.Field(i)
+ if alias, ok := field.Tag.Lookup("alias"); ok {
+ if alias == "" {
+ fmt.Println("(blank)")
+ } else {
+ fmt.Println(alias)
+ }
+ } else {
+ fmt.Println("(not specified)")
+ }
+ }
+
+ // Output:
+ // field_0
+ // (blank)
+ // (not specified)
+}
+
+func ExampleTypeOf() {
+ // As interface types are only used for static typing, a
+ // common idiom to find the reflection Type for an interface
+ // type Foo is to use a *Foo value.
+ writerType := reflect.TypeOf((*io.Writer)(nil)).Elem()
+
+ fileType := reflect.TypeOf((*os.File)(nil))
+ fmt.Println(fileType.Implements(writerType))
+
+ // Output:
+ // true
+}
+
+func ExampleStructOf() {
+ typ := reflect.StructOf([]reflect.StructField{
+ {
+ Name: "Height",
+ Type: reflect.TypeOf(float64(0)),
+ Tag: `json:"height"`,
+ },
+ {
+ Name: "Age",
+ Type: reflect.TypeOf(int(0)),
+ Tag: `json:"age"`,
+ },
+ })
+
+ v := reflect.New(typ).Elem()
+ v.Field(0).SetFloat(0.4)
+ v.Field(1).SetInt(2)
+ s := v.Addr().Interface()
+
+ w := new(bytes.Buffer)
+ if err := json.NewEncoder(w).Encode(s); err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("value: %+v\n", s)
+ fmt.Printf("json: %s", w.Bytes())
+
+ r := bytes.NewReader([]byte(`{"height":1.5,"age":10}`))
+ if err := json.NewDecoder(r).Decode(s); err != nil {
+ panic(err)
+ }
+ fmt.Printf("value: %+v\n", s)
+
+ // Output:
+ // value: &{Height:0.4 Age:2}
+ // json: {"height":0.4,"age":2}
+ // value: &{Height:1.5 Age:10}
+}
diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go
new file mode 100644
index 0000000..de426b5
--- /dev/null
+++ b/src/reflect/export_test.go
@@ -0,0 +1,122 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+import "unsafe"
+
+// MakeRO returns a copy of v with the read-only flag set.
+func MakeRO(v Value) Value {
+ v.flag |= flagStickyRO
+ return v
+}
+
+// IsRO reports whether v's read-only flag is set.
+func IsRO(v Value) bool {
+ return v.flag&flagStickyRO != 0
+}
+
+var CallGC = &callGC
+
+const PtrSize = ptrSize
+
+func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, stack []byte, gc []byte, ptrs bool) {
+ var ft *rtype
+ var s *bitVector
+ if rcvr != nil {
+ ft, argSize, retOffset, s, _ = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), rcvr.(*rtype))
+ } else {
+ ft, argSize, retOffset, s, _ = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), nil)
+ }
+ frametype = ft
+ for i := uint32(0); i < s.n; i++ {
+ stack = append(stack, s.data[i/8]>>(i%8)&1)
+ }
+ if ft.kind&kindGCProg != 0 {
+ panic("can't handle gc programs")
+ }
+ ptrs = ft.ptrdata != 0
+ if ptrs {
+ nptrs := ft.ptrdata / ptrSize
+ gcdata := ft.gcSlice(0, (nptrs+7)/8)
+ for i := uintptr(0); i < nptrs; i++ {
+ gc = append(gc, gcdata[i/8]>>(i%8)&1)
+ }
+ }
+ return
+}
+
+func TypeLinks() []string {
+ var r []string
+ sections, offset := typelinks()
+ for i, offs := range offset {
+ rodata := sections[i]
+ for _, off := range offs {
+ typ := (*rtype)(resolveTypeOff(unsafe.Pointer(rodata), off))
+ r = append(r, typ.String())
+ }
+ }
+ return r
+}
+
+var GCBits = gcbits
+
+func gcbits(interface{}) []byte // provided by runtime
+
+func MapBucketOf(x, y Type) Type {
+ return bucketOf(x.(*rtype), y.(*rtype))
+}
+
+func CachedBucketOf(m Type) Type {
+ t := m.(*rtype)
+ if Kind(t.kind&kindMask) != Map {
+ panic("not map")
+ }
+ tt := (*mapType)(unsafe.Pointer(t))
+ return tt.bucket
+}
+
+type EmbedWithUnexpMeth struct{}
+
+func (EmbedWithUnexpMeth) f() {}
+
+type pinUnexpMeth interface {
+ f()
+}
+
+var pinUnexpMethI = pinUnexpMeth(EmbedWithUnexpMeth{})
+
+func FirstMethodNameBytes(t Type) *byte {
+ _ = pinUnexpMethI
+
+ ut := t.uncommon()
+ if ut == nil {
+ panic("type has no methods")
+ }
+ m := ut.methods()[0]
+ mname := t.(*rtype).nameOff(m.name)
+ if *mname.data(0, "name flag field")&(1<<2) == 0 {
+ panic("method name does not have pkgPath *string")
+ }
+ return mname.bytes
+}
+
+type OtherPkgFields struct {
+ OtherExported int
+ otherUnexported int
+}
+
+func IsExported(t Type) bool {
+ typ := t.(*rtype)
+ n := typ.nameOff(typ.str)
+ return n.isExported()
+}
+
+func ResolveReflectName(s string) {
+ resolveReflectName(newName(s, "", false))
+}
+
+type Buffer struct {
+ buf []byte
+}
diff --git a/src/reflect/makefunc.go b/src/reflect/makefunc.go
new file mode 100644
index 0000000..67dc485
--- /dev/null
+++ b/src/reflect/makefunc.go
@@ -0,0 +1,138 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// MakeFunc implementation.
+
+package reflect
+
+import (
+ "unsafe"
+)
+
+// makeFuncImpl is the closure value implementing the function
+// returned by MakeFunc.
+// The first three words of this type must be kept in sync with
+// methodValue and runtime.reflectMethodValue.
+// Any changes should be reflected in all three.
+type makeFuncImpl struct {
+ code uintptr
+ stack *bitVector // ptrmap for both args and results
+ argLen uintptr // just args
+ ftyp *funcType
+ fn func([]Value) []Value
+}
+
+// MakeFunc returns a new function of the given Type
+// that wraps the function fn. When called, that new function
+// does the following:
+//
+// - converts its arguments to a slice of Values.
+// - runs results := fn(args).
+// - returns the results as a slice of Values, one per formal result.
+//
+// The implementation fn can assume that the argument Value slice
+// has the number and type of arguments given by typ.
+// If typ describes a variadic function, the final Value is itself
+// a slice representing the variadic arguments, as in the
+// body of a variadic function. The result Value slice returned by fn
+// must have the number and type of results given by typ.
+//
+// The Value.Call method allows the caller to invoke a typed function
+// in terms of Values; in contrast, MakeFunc allows the caller to implement
+// a typed function in terms of Values.
+//
+// The Examples section of the documentation includes an illustration
+// of how to use MakeFunc to build a swap function for different types.
+//
+func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
+ if typ.Kind() != Func {
+ panic("reflect: call of MakeFunc with non-Func type")
+ }
+
+ t := typ.common()
+ ftyp := (*funcType)(unsafe.Pointer(t))
+
+ // Indirect Go func value (dummy) to obtain
+ // actual code address. (A Go func value is a pointer
+ // to a C function pointer. https://golang.org/s/go11func.)
+ dummy := makeFuncStub
+ code := **(**uintptr)(unsafe.Pointer(&dummy))
+
+ // makeFuncImpl contains a stack map for use by the runtime
+ _, argLen, _, stack, _ := funcLayout(ftyp, nil)
+
+ impl := &makeFuncImpl{code: code, stack: stack, argLen: argLen, ftyp: ftyp, fn: fn}
+
+ return Value{t, unsafe.Pointer(impl), flag(Func)}
+}
+
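+// The variadic case described above, as a hypothetical caller-side
+// sketch (not code in this package):
+//
+//	sum := reflect.MakeFunc(
+//		reflect.TypeOf(func(...int) int { return 0 }),
+//		func(args []reflect.Value) []reflect.Value {
+//			s := args[0] // the variadic ints arrive as a single []int Value
+//			total := 0
+//			for i := 0; i < s.Len(); i++ {
+//				total += int(s.Index(i).Int())
+//			}
+//			return []reflect.Value{reflect.ValueOf(total)}
+//		},
+//	).Interface().(func(...int) int)
+//	sum(1, 2, 3) // 6
+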
+// makeFuncStub is an assembly function that is the code half of
+// the function returned from MakeFunc. It expects a *makeFuncImpl
+// as its context register, and its job is to invoke callReflect(ctxt, frame)
+// where ctxt is the context register and frame is a pointer to the first
+// word in the passed-in argument frame.
+func makeFuncStub()
+
+// The first 3 words of this type must be kept in sync with
+// makeFuncImpl and runtime.reflectMethodValue.
+// Any changes should be reflected in all three.
+type methodValue struct {
+ fn uintptr
+ stack *bitVector // ptrmap for both args and results
+ argLen uintptr // just args
+ method int
+ rcvr Value
+}
+
+// makeMethodValue converts v from the rcvr+method index representation
+// of a method value (which is basically the receiver value with a
+// special bit set) into a true func value - a value holding an actual
+// func. The output is
+// semantically equivalent to the input as far as the user of package
+// reflect can tell, but the true func representation can be handled
+// by code like Convert and Interface and Assign.
+func makeMethodValue(op string, v Value) Value {
+ if v.flag&flagMethod == 0 {
+ panic("reflect: internal error: invalid use of makeMethodValue")
+ }
+
+ // Ignoring the flagMethod bit, v describes the receiver, not the method type.
+ fl := v.flag & (flagRO | flagAddr | flagIndir)
+ fl |= flag(v.typ.Kind())
+ rcvr := Value{v.typ, v.ptr, fl}
+
+ // v.Type returns the actual type of the method value.
+ ftyp := (*funcType)(unsafe.Pointer(v.Type().(*rtype)))
+
+ // Indirect Go func value (dummy) to obtain
+ // actual code address. (A Go func value is a pointer
+ // to a C function pointer. https://golang.org/s/go11func.)
+ dummy := methodValueCall
+ code := **(**uintptr)(unsafe.Pointer(&dummy))
+
+ // methodValue contains a stack map for use by the runtime
+ _, argLen, _, stack, _ := funcLayout(ftyp, nil)
+
+ fv := &methodValue{
+ fn: code,
+ stack: stack,
+ argLen: argLen,
+ method: int(v.flag) >> flagMethodShift,
+ rcvr: rcvr,
+ }
+
+ // Cause panic if method is not appropriate.
+ // The panic would still happen during the call if we omit this,
+ // but we want Interface() and other operations to fail early.
+ methodReceiver(op, fv.rcvr, fv.method)
+
+ return Value{&ftyp.rtype, unsafe.Pointer(fv), v.flag&flagRO | flag(Func)}
+}
+
+// methodValueCall is an assembly function that is the code half of
+// the function returned from makeMethodValue. It expects a *methodValue
+// as its context register, and its job is to invoke callMethod(ctxt, frame)
+// where ctxt is the context register and frame is a pointer to the first
+// word in the passed-in argument frame.
+func methodValueCall()
diff --git a/src/reflect/set_test.go b/src/reflect/set_test.go
new file mode 100644
index 0000000..a633e6e
--- /dev/null
+++ b/src/reflect/set_test.go
@@ -0,0 +1,226 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect_test
+
+import (
+ "bytes"
+ "go/ast"
+ "go/token"
+ "io"
+ . "reflect"
+ "testing"
+ "unsafe"
+)
+
+func TestImplicitMapConversion(t *testing.T) {
+ // Test implicit conversions in MapIndex and SetMapIndex.
+ {
+ // direct
+ m := make(map[int]int)
+ mv := ValueOf(m)
+ mv.SetMapIndex(ValueOf(1), ValueOf(2))
+ x, ok := m[1]
+ if x != 2 {
+ t.Errorf("#1 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m)
+ }
+ if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 {
+ t.Errorf("#1 MapIndex(1) = %d", n)
+ }
+ }
+ {
+ // convert interface key
+ m := make(map[interface{}]int)
+ mv := ValueOf(m)
+ mv.SetMapIndex(ValueOf(1), ValueOf(2))
+ x, ok := m[1]
+ if x != 2 {
+ t.Errorf("#2 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m)
+ }
+ if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 {
+ t.Errorf("#2 MapIndex(1) = %d", n)
+ }
+ }
+ {
+ // convert interface value
+ m := make(map[int]interface{})
+ mv := ValueOf(m)
+ mv.SetMapIndex(ValueOf(1), ValueOf(2))
+ x, ok := m[1]
+ if x != 2 {
+ t.Errorf("#3 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m)
+ }
+ if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 {
+ t.Errorf("#3 MapIndex(1) = %d", n)
+ }
+ }
+ {
+ // convert both interface key and interface value
+ m := make(map[interface{}]interface{})
+ mv := ValueOf(m)
+ mv.SetMapIndex(ValueOf(1), ValueOf(2))
+ x, ok := m[1]
+ if x != 2 {
+ t.Errorf("#4 after SetMapIndex(1,2): %d, %t (map=%v)", x, ok, m)
+ }
+ if n := mv.MapIndex(ValueOf(1)).Interface().(int); n != 2 {
+ t.Errorf("#4 MapIndex(1) = %d", n)
+ }
+ }
+ {
+ // convert both, with non-empty interfaces
+ m := make(map[io.Reader]io.Writer)
+ mv := ValueOf(m)
+ b1 := new(bytes.Buffer)
+ b2 := new(bytes.Buffer)
+ mv.SetMapIndex(ValueOf(b1), ValueOf(b2))
+ x, ok := m[b1]
+ if x != b2 {
+ t.Errorf("#5 after SetMapIndex(b1, b2): %p (!= %p), %t (map=%v)", x, b2, ok, m)
+ }
+ if p := mv.MapIndex(ValueOf(b1)).Elem().Pointer(); p != uintptr(unsafe.Pointer(b2)) {
+ t.Errorf("#5 MapIndex(b1) = %#x want %p", p, b2)
+ }
+ }
+ {
+ // convert channel direction
+ m := make(map[<-chan int]chan int)
+ mv := ValueOf(m)
+ c1 := make(chan int)
+ c2 := make(chan int)
+ mv.SetMapIndex(ValueOf(c1), ValueOf(c2))
+ x, ok := m[c1]
+ if x != c2 {
+ t.Errorf("#6 after SetMapIndex(c1, c2): %p (!= %p), %t (map=%v)", x, c2, ok, m)
+ }
+ if p := mv.MapIndex(ValueOf(c1)).Pointer(); p != ValueOf(c2).Pointer() {
+ t.Errorf("#6 MapIndex(c1) = %#x want %p", p, c2)
+ }
+ }
+ {
+ // convert identical underlying types
+ type MyBuffer bytes.Buffer
+ m := make(map[*MyBuffer]*bytes.Buffer)
+ mv := ValueOf(m)
+ b1 := new(MyBuffer)
+ b2 := new(bytes.Buffer)
+ mv.SetMapIndex(ValueOf(b1), ValueOf(b2))
+ x, ok := m[b1]
+ if x != b2 {
+ t.Errorf("#7 after SetMapIndex(b1, b2): %p (!= %p), %t (map=%v)", x, b2, ok, m)
+ }
+ if p := mv.MapIndex(ValueOf(b1)).Pointer(); p != uintptr(unsafe.Pointer(b2)) {
+ t.Errorf("#7 MapIndex(b1) = %#x want %p", p, b2)
+ }
+ }
+
+}
+
+func TestImplicitSetConversion(t *testing.T) {
+ // Assume TestImplicitMapConversion covered the basics.
+ // Just make sure conversions are being applied at all.
+ var r io.Reader
+ b := new(bytes.Buffer)
+ rv := ValueOf(&r).Elem()
+ rv.Set(ValueOf(b))
+ if r != b {
+ t.Errorf("after Set: r=%T(%v)", r, r)
+ }
+}
+
+func TestImplicitSendConversion(t *testing.T) {
+ c := make(chan io.Reader, 10)
+ b := new(bytes.Buffer)
+ ValueOf(c).Send(ValueOf(b))
+ if bb := <-c; bb != b {
+ t.Errorf("Received %p != %p", bb, b)
+ }
+}
+
+func TestImplicitCallConversion(t *testing.T) {
+ // Arguments must be assignable to parameter types.
+ fv := ValueOf(io.WriteString)
+ b := new(bytes.Buffer)
+ fv.Call([]Value{ValueOf(b), ValueOf("hello world")})
+ if b.String() != "hello world" {
+ t.Errorf("After call: string=%q want %q", b.String(), "hello world")
+ }
+}
+
+func TestImplicitAppendConversion(t *testing.T) {
+ // Arguments must be assignable to the slice's element type.
+ s := []io.Reader{}
+ sv := ValueOf(&s).Elem()
+ b := new(bytes.Buffer)
+ sv.Set(Append(sv, ValueOf(b)))
+ if len(s) != 1 || s[0] != b {
+ t.Errorf("after append: s=%v want [%p]", s, b)
+ }
+}
+
+var implementsTests = []struct {
+ x interface{}
+ t interface{}
+ b bool
+}{
+ {new(*bytes.Buffer), new(io.Reader), true},
+ {new(bytes.Buffer), new(io.Reader), false},
+ {new(*bytes.Buffer), new(io.ReaderAt), false},
+ {new(*ast.Ident), new(ast.Expr), true},
+ {new(*notAnExpr), new(ast.Expr), false},
+ {new(*ast.Ident), new(notASTExpr), false},
+ {new(notASTExpr), new(ast.Expr), false},
+ {new(ast.Expr), new(notASTExpr), false},
+ {new(*notAnExpr), new(notASTExpr), true},
+}
+
+type notAnExpr struct{}
+
+func (notAnExpr) Pos() token.Pos { return token.NoPos }
+func (notAnExpr) End() token.Pos { return token.NoPos }
+func (notAnExpr) exprNode() {}
+
+type notASTExpr interface {
+ Pos() token.Pos
+ End() token.Pos
+ exprNode()
+}
+
+func TestImplements(t *testing.T) {
+ for _, tt := range implementsTests {
+ xv := TypeOf(tt.x).Elem()
+ xt := TypeOf(tt.t).Elem()
+ if b := xv.Implements(xt); b != tt.b {
+ t.Errorf("(%s).Implements(%s) = %v, want %v", xv.String(), xt.String(), b, tt.b)
+ }
+ }
+}
+
+var assignableTests = []struct {
+ x interface{}
+ t interface{}
+ b bool
+}{
+ {new(chan int), new(<-chan int), true},
+ {new(<-chan int), new(chan int), false},
+ {new(*int), new(IntPtr), true},
+ {new(IntPtr), new(*int), true},
+ {new(IntPtr), new(IntPtr1), false},
+ {new(Ch), new(<-chan interface{}), true},
+ // test runs implementsTests too
+}
+
+type IntPtr *int
+type IntPtr1 *int
+type Ch <-chan interface{}
+
+func TestAssignableTo(t *testing.T) {
+ for _, tt := range append(assignableTests, implementsTests...) {
+ xv := TypeOf(tt.x).Elem()
+ xt := TypeOf(tt.t).Elem()
+ if b := xv.AssignableTo(xt); b != tt.b {
+ t.Errorf("(%s).AssignableTo(%s) = %v, want %v", xv.String(), xt.String(), b, tt.b)
+ }
+ }
+}
diff --git a/src/reflect/swapper.go b/src/reflect/swapper.go
new file mode 100644
index 0000000..0cf4066
--- /dev/null
+++ b/src/reflect/swapper.go
@@ -0,0 +1,77 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+import (
+ "internal/unsafeheader"
+ "unsafe"
+)
+
+// Swapper returns a function that swaps the elements in the provided
+// slice.
+//
+// Swapper panics if the provided interface is not a slice.
+func Swapper(slice interface{}) func(i, j int) {
+ v := ValueOf(slice)
+ if v.Kind() != Slice {
+ panic(&ValueError{Method: "Swapper", Kind: v.Kind()})
+ }
+ // Fast path for slices of size 0 and 1. Nothing to swap.
+ switch v.Len() {
+ case 0:
+ return func(i, j int) { panic("reflect: slice index out of range") }
+ case 1:
+ return func(i, j int) {
+ if i != 0 || j != 0 {
+ panic("reflect: slice index out of range")
+ }
+ }
+ }
+
+ typ := v.Type().Elem().(*rtype)
+ size := typ.Size()
+ hasPtr := typ.ptrdata != 0
+
+ // Some common & small cases, without using memmove:
+ if hasPtr {
+ if size == ptrSize {
+ ps := *(*[]unsafe.Pointer)(v.ptr)
+ return func(i, j int) { ps[i], ps[j] = ps[j], ps[i] }
+ }
+ if typ.Kind() == String {
+ ss := *(*[]string)(v.ptr)
+ return func(i, j int) { ss[i], ss[j] = ss[j], ss[i] }
+ }
+ } else {
+ switch size {
+ case 8:
+ is := *(*[]int64)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ case 4:
+ is := *(*[]int32)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ case 2:
+ is := *(*[]int16)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ case 1:
+ is := *(*[]int8)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ }
+ }
+
+ s := (*unsafeheader.Slice)(v.ptr)
+ tmp := unsafe_New(typ) // swap scratch space
+
+ return func(i, j int) {
+ if uint(i) >= uint(s.Len) || uint(j) >= uint(s.Len) {
+ panic("reflect: slice index out of range")
+ }
+ val1 := arrayAt(s.Data, i, size, "i < s.Len")
+ val2 := arrayAt(s.Data, j, size, "j < s.Len")
+ typedmemmove(typ, tmp, val1)
+ typedmemmove(typ, val1, val2)
+ typedmemmove(typ, val2, tmp)
+ }
+}
diff --git a/src/reflect/tostring_test.go b/src/reflect/tostring_test.go
new file mode 100644
index 0000000..e416fd8
--- /dev/null
+++ b/src/reflect/tostring_test.go
@@ -0,0 +1,95 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Formatting of reflection types and values for debugging.
+// Not defined as methods so they do not need to be linked into most binaries;
+// the functions are not used by the library itself, only in tests.
+
+package reflect_test
+
+import (
+ . "reflect"
+ "strconv"
+)
+
+// valueToString returns a textual representation of the reflection value val.
+// For debugging only.
+func valueToString(val Value) string {
+ var str string
+ if !val.IsValid() {
+ return "<zero Value>"
+ }
+ typ := val.Type()
+ switch val.Kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ return strconv.FormatInt(val.Int(), 10)
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return strconv.FormatUint(val.Uint(), 10)
+ case Float32, Float64:
+ return strconv.FormatFloat(val.Float(), 'g', -1, 64)
+ case Complex64, Complex128:
+ c := val.Complex()
+ return strconv.FormatFloat(real(c), 'g', -1, 64) + "+" + strconv.FormatFloat(imag(c), 'g', -1, 64) + "i"
+ case String:
+ return val.String()
+ case Bool:
+ if val.Bool() {
+ return "true"
+ } else {
+ return "false"
+ }
+ case Ptr:
+ v := val
+ str = typ.String() + "("
+ if v.IsNil() {
+ str += "0"
+ } else {
+ str += "&" + valueToString(v.Elem())
+ }
+ str += ")"
+ return str
+ case Array, Slice:
+ v := val
+ str += typ.String()
+ str += "{"
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ str += ", "
+ }
+ str += valueToString(v.Index(i))
+ }
+ str += "}"
+ return str
+ case Map:
+ t := typ
+ str = t.String()
+ str += "{"
+ str += "<can't iterate on maps>"
+ str += "}"
+ return str
+ case Chan:
+ str = typ.String()
+ return str
+ case Struct:
+ t := typ
+ v := val
+ str += t.String()
+ str += "{"
+ for i, n := 0, v.NumField(); i < n; i++ {
+ if i > 0 {
+ str += ", "
+ }
+ str += valueToString(v.Field(i))
+ }
+ str += "}"
+ return str
+ case Interface:
+ return typ.String() + "(" + valueToString(val.Elem()) + ")"
+ case Func:
+ v := val
+ return typ.String() + "(" + strconv.FormatUint(uint64(v.Pointer()), 10) + ")"
+ default:
+ panic("valueToString: can't print type " + typ.String())
+ }
+}
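+
+// For example (illustrative, not part of the original source):
+// valueToString(ValueOf([]int{1, 2})) returns "[]int{1, 2}".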
diff --git a/src/reflect/type.go b/src/reflect/type.go
new file mode 100644
index 0000000..a1cdf45
--- /dev/null
+++ b/src/reflect/type.go
@@ -0,0 +1,3132 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package reflect implements run-time reflection, allowing a program to
+// manipulate objects with arbitrary types. The typical use is to take a value
+// with static type interface{} and extract its dynamic type information by
+// calling TypeOf, which returns a Type.
+//
+// A call to ValueOf returns a Value representing the run-time data.
+// Zero takes a Type and returns a Value representing a zero value
+// for that type.
+//
+// See "The Laws of Reflection" for an introduction to reflection in Go:
+// https://golang.org/doc/articles/laws_of_reflection.html
+package reflect
+
+import (
+ "internal/unsafeheader"
+ "strconv"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+ "unsafe"
+)
+
+// Type is the representation of a Go type.
+//
+// Not all methods apply to all kinds of types. Restrictions,
+// if any, are noted in the documentation for each method.
+// Use the Kind method to find out the kind of type before
+// calling kind-specific methods. Calling a method
+// inappropriate to the kind of type causes a run-time panic.
+//
+// Type values are comparable, such as with the == operator,
+// so they can be used as map keys.
+// Two Type values are equal if they represent identical types.
+type Type interface {
+ // Methods applicable to all types.
+
+ // Align returns the alignment in bytes of a value of
+ // this type when allocated in memory.
+ Align() int
+
+ // FieldAlign returns the alignment in bytes of a value of
+ // this type when used as a field in a struct.
+ FieldAlign() int
+
+ // Method returns the i'th method in the type's method set.
+ // It panics if i is not in the range [0, NumMethod()).
+ //
+ // For a non-interface type T or *T, the returned Method's Type and Func
+ // fields describe a function whose first argument is the receiver,
+ // and only exported methods are accessible.
+ //
+ // For an interface type, the returned Method's Type field gives the
+ // method signature, without a receiver, and the Func field is nil.
+ //
+ // Methods are sorted in lexicographic order.
+ Method(int) Method
+
+ // MethodByName returns the method with that name in the type's
+ // method set and a boolean indicating if the method was found.
+ //
+ // For a non-interface type T or *T, the returned Method's Type and Func
+ // fields describe a function whose first argument is the receiver.
+ //
+ // For an interface type, the returned Method's Type field gives the
+ // method signature, without a receiver, and the Func field is nil.
+ MethodByName(string) (Method, bool)
+
+ // NumMethod returns the number of methods accessible using Method.
+ //
+ // Note that NumMethod counts unexported methods only for interface types.
+ NumMethod() int
+
+ // Name returns the type's name within its package for a defined type.
+ // For other (non-defined) types it returns the empty string.
+ Name() string
+
+ // PkgPath returns a defined type's package path, that is, the import path
+ // that uniquely identifies the package, such as "encoding/base64".
+ // If the type was predeclared (string, error) or not defined (*T, struct{},
+ // []int, or A where A is an alias for a non-defined type), the package path
+ // will be the empty string.
+ PkgPath() string
+
+ // Size returns the number of bytes needed to store
+ // a value of the given type; it is analogous to unsafe.Sizeof.
+ Size() uintptr
+
+ // String returns a string representation of the type.
+ // The string representation may use shortened package names
+ // (e.g., "base64" instead of "encoding/base64") and is not
+ // guaranteed to be unique among types. To test for type identity,
+ // compare the Types directly.
+ String() string
+
+ // Kind returns the specific kind of this type.
+ Kind() Kind
+
+ // Implements reports whether the type implements the interface type u.
+ Implements(u Type) bool
+
+ // AssignableTo reports whether a value of the type is assignable to type u.
+ AssignableTo(u Type) bool
+
+ // ConvertibleTo reports whether a value of the type is convertible to type u.
+ ConvertibleTo(u Type) bool
+
+ // Comparable reports whether values of this type are comparable.
+ Comparable() bool
+
+ // Methods applicable only to some types, depending on Kind.
+ // The methods allowed for each kind are:
+ //
+ // Int*, Uint*, Float*, Complex*: Bits
+ // Array: Elem, Len
+ // Chan: ChanDir, Elem
+ // Func: In, NumIn, Out, NumOut, IsVariadic
+ // Map: Key, Elem
+ // Ptr: Elem
+ // Slice: Elem
+ // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
+
+ // Bits returns the size of the type in bits.
+ // It panics if the type's Kind is not one of the
+ // sized or unsized Int, Uint, Float, or Complex kinds.
+ Bits() int
+
+ // ChanDir returns a channel type's direction.
+ // It panics if the type's Kind is not Chan.
+ ChanDir() ChanDir
+
+ // IsVariadic reports whether a function type's final input parameter
+ // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
+ // implicit actual type []T.
+ //
+ // For concreteness, if t represents func(x int, y ... float64), then
+ //
+ // t.NumIn() == 2
+ // t.In(0) is the reflect.Type for "int"
+ // t.In(1) is the reflect.Type for "[]float64"
+ // t.IsVariadic() == true
+ //
+ // IsVariadic panics if the type's Kind is not Func.
+ IsVariadic() bool
+
+ // Elem returns a type's element type.
+ // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
+ Elem() Type
+
+ // Field returns a struct type's i'th field.
+ // It panics if the type's Kind is not Struct.
+ // It panics if i is not in the range [0, NumField()).
+ Field(i int) StructField
+
+ // FieldByIndex returns the nested field corresponding
+ // to the index sequence. It is equivalent to calling Field
+ // successively for each index i.
+ // It panics if the type's Kind is not Struct.
+ FieldByIndex(index []int) StructField
+
+ // FieldByName returns the struct field with the given name
+ // and a boolean indicating if the field was found.
+ FieldByName(name string) (StructField, bool)
+
+ // FieldByNameFunc returns the struct field with a name
+ // that satisfies the match function and a boolean indicating if
+ // the field was found.
+ //
+ // FieldByNameFunc considers the fields in the struct itself
+ // and then the fields in any embedded structs, in breadth first order,
+ // stopping at the shallowest nesting depth containing one or more
+ // fields satisfying the match function. If multiple fields at that depth
+ // satisfy the match function, they cancel each other
+ // and FieldByNameFunc returns no match.
+ // This behavior mirrors Go's handling of name lookup in
+ // structs containing embedded fields.
+ FieldByNameFunc(match func(string) bool) (StructField, bool)
+
+ // In returns the type of a function type's i'th input parameter.
+ // It panics if the type's Kind is not Func.
+ // It panics if i is not in the range [0, NumIn()).
+ In(i int) Type
+
+ // Key returns a map type's key type.
+ // It panics if the type's Kind is not Map.
+ Key() Type
+
+ // Len returns an array type's length.
+ // It panics if the type's Kind is not Array.
+ Len() int
+
+ // NumField returns a struct type's field count.
+ // It panics if the type's Kind is not Struct.
+ NumField() int
+
+ // NumIn returns a function type's input parameter count.
+ // It panics if the type's Kind is not Func.
+ NumIn() int
+
+ // NumOut returns a function type's output parameter count.
+ // It panics if the type's Kind is not Func.
+ NumOut() int
+
+ // Out returns the type of a function type's i'th output parameter.
+ // It panics if the type's Kind is not Func.
+ // It panics if i is not in the range [0, NumOut()).
+ Out(i int) Type
+
+ common() *rtype
+ uncommon() *uncommonType
+}
+
+// BUG(rsc): FieldByName and related functions consider struct field names to be equal
+// if the names are equal, even if they are unexported names originating
+// in different packages. The practical effect of this is that the result of
+// t.FieldByName("x") is not well defined if the struct type t contains
+// multiple fields named x (embedded from different packages).
+// FieldByName may return one of the fields named x or may report that there are none.
+// See https://golang.org/issue/4876 for more details.
+
+/*
+ * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
+ * A few are also known to ../runtime/type.go, which conveys them to debuggers.
+ */
+
+// A Kind represents the specific kind of type that a Type represents.
+// The zero Kind is not a valid kind.
+type Kind uint
+
+const (
+ Invalid Kind = iota
+ Bool
+ Int
+ Int8
+ Int16
+ Int32
+ Int64
+ Uint
+ Uint8
+ Uint16
+ Uint32
+ Uint64
+ Uintptr
+ Float32
+ Float64
+ Complex64
+ Complex128
+ Array
+ Chan
+ Func
+ Interface
+ Map
+ Ptr
+ Slice
+ String
+ Struct
+ UnsafePointer
+)
+
+// tflag is used by an rtype to signal what extra type information is
+// available in the memory directly following the rtype value.
+//
+// tflag values must be kept in sync with copies in:
+// cmd/compile/internal/gc/reflect.go
+// cmd/link/internal/ld/decodesym.go
+// runtime/type.go
+type tflag uint8
+
+const (
+ // tflagUncommon means that there is a pointer, *uncommonType,
+ // just beyond the outer type structure.
+ //
+ // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
+ // then t has uncommonType data and it can be accessed as:
+ //
+ // type tUncommon struct {
+ // structType
+ // u uncommonType
+ // }
+ // u := &(*tUncommon)(unsafe.Pointer(t)).u
+ tflagUncommon tflag = 1 << 0
+
+ // tflagExtraStar means the name in the str field has an
+ // extraneous '*' prefix. This is because for most types T in
+ // a program, the type *T also exists and reusing the str data
+ // saves binary size.
+ tflagExtraStar tflag = 1 << 1
+
+ // tflagNamed means the type has a name.
+ tflagNamed tflag = 1 << 2
+
+ // tflagRegularMemory means that equal and hash functions can treat
+ // this type as a single region of t.size bytes.
+ tflagRegularMemory tflag = 1 << 3
+)
+
+// rtype is the common implementation of most values.
+// It is embedded in other struct types.
+//
+// rtype must be kept in sync with ../runtime/type.go:/^type._type.
+type rtype struct {
+ size uintptr
+ ptrdata uintptr // number of bytes in the type that can contain pointers
+ hash uint32 // hash of type; avoids computation in hash tables
+ tflag tflag // extra type information flags
+ align uint8 // alignment of variable with this type
+ fieldAlign uint8 // alignment of struct field with this type
+ kind uint8 // enumeration for C
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
+ gcdata *byte // garbage collection data
+ str nameOff // string form
+ ptrToThis typeOff // type for pointer to this type, may be zero
+}
+
+// Method on non-interface type
+type method struct {
+ name nameOff // name of method
+ mtyp typeOff // method type (without receiver)
+ ifn textOff // fn used in interface call (one-word receiver)
+ tfn textOff // fn used for normal method call
+}
+
+// uncommonType is present only for defined types or types with methods
+// (if T is a defined type, the uncommonTypes for T and *T have methods).
+// Using a pointer to this struct reduces the overall size required
+// to describe a non-defined type with no methods.
+type uncommonType struct {
+ pkgPath nameOff // import path; empty for built-in types like int, string
+ mcount uint16 // number of methods
+ xcount uint16 // number of exported methods
+ moff uint32 // offset from this uncommontype to [mcount]method
+ _ uint32 // unused
+}
+
+// ChanDir represents a channel type's direction.
+type ChanDir int
+
+const (
+ RecvDir ChanDir = 1 << iota // <-chan
+ SendDir // chan<-
+ BothDir = RecvDir | SendDir // chan
+)
+
+// arrayType represents a fixed array type.
+type arrayType struct {
+ rtype
+ elem *rtype // array element type
+ slice *rtype // slice type
+ len uintptr
+}
+
+// chanType represents a channel type.
+type chanType struct {
+ rtype
+ elem *rtype // channel element type
+ dir uintptr // channel direction (ChanDir)
+}
+
+// funcType represents a function type.
+//
+// A *rtype for each in and out parameter is stored in an array that
+// directly follows the funcType (and possibly its uncommonType). So
+// a function type with one method, one input, and one output is:
+//
+// struct {
+// funcType
+// uncommonType
+// [2]*rtype // [0] is in, [1] is out
+// }
+type funcType struct {
+ rtype
+ inCount uint16
+ outCount uint16 // top bit is set if last input parameter is ...
+}
+
+// imethod represents a method on an interface type
+type imethod struct {
+ name nameOff // name of method
+ typ typeOff // .(*FuncType) underneath
+}
+
+// interfaceType represents an interface type.
+type interfaceType struct {
+ rtype
+ pkgPath name // import path
+ methods []imethod // sorted by hash
+}
+
+// mapType represents a map type.
+type mapType struct {
+ rtype
+ key *rtype // map key type
+ elem *rtype // map element (value) type
+ bucket *rtype // internal bucket structure
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
+ keysize uint8 // size of key slot
+ valuesize uint8 // size of value slot
+ bucketsize uint16 // size of bucket
+ flags uint32
+}
+
+// ptrType represents a pointer type.
+type ptrType struct {
+ rtype
+ elem *rtype // pointer element (pointed at) type
+}
+
+// sliceType represents a slice type.
+type sliceType struct {
+ rtype
+ elem *rtype // slice element type
+}
+
+// Struct field
+type structField struct {
+ name name // name is always non-empty
+ typ *rtype // type of field
+ offsetEmbed uintptr // byte offset of field<<1 | isEmbedded
+}
+
+func (f *structField) offset() uintptr {
+ return f.offsetEmbed >> 1
+}
+
+func (f *structField) embedded() bool {
+ return f.offsetEmbed&1 != 0
+}
+
+// structType represents a struct type.
+type structType struct {
+ rtype
+ pkgPath name
+ fields []structField // sorted by offset
+}
+
+// name is an encoded type name with optional extra data.
+//
+// The first byte is a bit field containing:
+//
+// 1<<0 the name is exported
+// 1<<1 tag data follows the name
+// 1<<2 pkgPath nameOff follows the name and tag
+//
+// The next two bytes are the data length:
+//
+// l := uint16(data[1])<<8 | uint16(data[2])
+//
+// Bytes [3:3+l] are the string data.
+//
+// If tag data follows then bytes 3+l and 3+l+1 are the tag length,
+// with the data following.
+//
+// If the import path follows, then 4 bytes at the end of
+// the data form a nameOff. The import path is only set for concrete
+// methods that are defined in a different package than their type.
+//
+// If a name starts with "*", then the exported bit represents
+// whether the pointed to type is exported.
+type name struct {
+ bytes *byte
+}
+
+func (n name) data(off int, whySafe string) *byte {
+ return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
+}
+
+func (n name) isExported() bool {
+ return (*n.bytes)&(1<<0) != 0
+}
+
+func (n name) nameLen() int {
+ return int(uint16(*n.data(1, "name len field"))<<8 | uint16(*n.data(2, "name len field")))
+}
+
+func (n name) tagLen() int {
+ if *n.data(0, "name flag field")&(1<<1) == 0 {
+ return 0
+ }
+ off := 3 + n.nameLen()
+ return int(uint16(*n.data(off, "name taglen field"))<<8 | uint16(*n.data(off+1, "name taglen field")))
+}
+
+func (n name) name() (s string) {
+ if n.bytes == nil {
+ return
+ }
+ b := (*[4]byte)(unsafe.Pointer(n.bytes))
+
+ hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
+ hdr.Data = unsafe.Pointer(&b[3])
+ hdr.Len = int(b[1])<<8 | int(b[2])
+ return s
+}
+
+func (n name) tag() (s string) {
+ tl := n.tagLen()
+ if tl == 0 {
+ return ""
+ }
+ nl := n.nameLen()
+ hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
+ hdr.Data = unsafe.Pointer(n.data(3+nl+2, "non-empty string"))
+ hdr.Len = tl
+ return s
+}
+
+func (n name) pkgPath() string {
+ if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
+ return ""
+ }
+ off := 3 + n.nameLen()
+ if tl := n.tagLen(); tl > 0 {
+ off += 2 + tl
+ }
+ var nameOff int32
+ // Note that this field may not be aligned in memory,
+ // so we cannot use a direct int32 assignment here.
+ copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
+ pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
+ return pkgPathName.name()
+}
+
+func newName(n, tag string, exported bool) name {
+ if len(n) > 1<<16-1 {
+ panic("reflect.newName: name too long: " + n)
+ }
+ if len(tag) > 1<<16-1 {
+ panic("reflect.newName: tag too long: " + tag)
+ }
+
+ var bits byte
+ l := 1 + 2 + len(n)
+ if exported {
+ bits |= 1 << 0
+ }
+ if len(tag) > 0 {
+ l += 2 + len(tag)
+ bits |= 1 << 1
+ }
+
+ b := make([]byte, l)
+ b[0] = bits
+ b[1] = uint8(len(n) >> 8)
+ b[2] = uint8(len(n))
+ copy(b[3:], n)
+ if len(tag) > 0 {
+ tb := b[3+len(n):]
+ tb[0] = uint8(len(tag) >> 8)
+ tb[1] = uint8(len(tag))
+ copy(tb[2:], tag)
+ }
+
+ return name{bytes: &b[0]}
+}
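+
+// For example (an illustrative walkthrough, not part of the original
+// source): newName("x", "", true) produces the four bytes
+// {0x01, 0x00, 0x01, 'x'}: the exported bit, the big-endian length 1,
+// and the name data. Reading it back, name.name() decodes the length
+// from bytes 1 and 2 and returns "x".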
+
+/*
+ * The compiler knows the exact layout of all the data structures above.
+ * The compiler does not know about the data structures and methods below.
+ */
+
+// Method represents a single method.
+type Method struct {
+ // Name is the method name.
+ // PkgPath is the package path that qualifies a lower case (unexported)
+ // method name. It is empty for upper case (exported) method names.
+ // The combination of PkgPath and Name uniquely identifies a method
+ // in a method set.
+ // See https://golang.org/ref/spec#Uniqueness_of_identifiers
+ Name string
+ PkgPath string
+
+ Type Type // method type
+ Func Value // func with receiver as first argument
+ Index int // index for Type.Method
+}
+
+const (
+ kindDirectIface = 1 << 5
+ kindGCProg = 1 << 6 // Type.gc points to GC program
+ kindMask = (1 << 5) - 1
+)
+
+// String returns the name of k.
+func (k Kind) String() string {
+ if int(k) < len(kindNames) {
+ return kindNames[k]
+ }
+ return "kind" + strconv.Itoa(int(k))
+}
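+
+// For example (illustrative): Bool.String() returns "bool", and an
+// out-of-range value such as Kind(127) falls through to "kind127".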
+
+var kindNames = []string{
+ Invalid: "invalid",
+ Bool: "bool",
+ Int: "int",
+ Int8: "int8",
+ Int16: "int16",
+ Int32: "int32",
+ Int64: "int64",
+ Uint: "uint",
+ Uint8: "uint8",
+ Uint16: "uint16",
+ Uint32: "uint32",
+ Uint64: "uint64",
+ Uintptr: "uintptr",
+ Float32: "float32",
+ Float64: "float64",
+ Complex64: "complex64",
+ Complex128: "complex128",
+ Array: "array",
+ Chan: "chan",
+ Func: "func",
+ Interface: "interface",
+ Map: "map",
+ Ptr: "ptr",
+ Slice: "slice",
+ String: "string",
+ Struct: "struct",
+ UnsafePointer: "unsafe.Pointer",
+}
+
+func (t *uncommonType) methods() []method {
+ if t.mcount == 0 {
+ return nil
+ }
+ return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
+}
+
+func (t *uncommonType) exportedMethods() []method {
+ if t.xcount == 0 {
+ return nil
+ }
+ return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount]
+}
+
+// resolveNameOff resolves a name offset from a base pointer.
+// The (*rtype).nameOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
+
+// resolveTypeOff resolves an *rtype offset from a base type.
+// The (*rtype).typeOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// resolveTextOff resolves a function pointer offset from a base type.
+// The (*rtype).textOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// addReflectOff adds a pointer to the reflection lookup map in the runtime.
+// It returns a new ID that can be used as a typeOff or textOff, and will
+// be resolved correctly. Implemented in the runtime package.
+func addReflectOff(ptr unsafe.Pointer) int32
+
+// resolveReflectName adds a name to the reflection lookup map in the runtime.
+// It returns a new nameOff that can be used to refer to the pointer.
+func resolveReflectName(n name) nameOff {
+ return nameOff(addReflectOff(unsafe.Pointer(n.bytes)))
+}
+
+// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
+// It returns a new typeOff that can be used to refer to the pointer.
+func resolveReflectType(t *rtype) typeOff {
+ return typeOff(addReflectOff(unsafe.Pointer(t)))
+}
+
+// resolveReflectText adds a function pointer to the reflection lookup map in
+// the runtime. It returns a new textOff that can be used to refer to the
+// pointer.
+func resolveReflectText(ptr unsafe.Pointer) textOff {
+ return textOff(addReflectOff(ptr))
+}
+
+type nameOff int32 // offset to a name
+type typeOff int32 // offset to an *rtype
+type textOff int32 // offset from top of text section
+
+func (t *rtype) nameOff(off nameOff) name {
+ return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
+}
+
+func (t *rtype) typeOff(off typeOff) *rtype {
+ return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
+}
+
+func (t *rtype) textOff(off textOff) unsafe.Pointer {
+ return resolveTextOff(unsafe.Pointer(t), int32(off))
+}
+
+func (t *rtype) uncommon() *uncommonType {
+ if t.tflag&tflagUncommon == 0 {
+ return nil
+ }
+ switch t.Kind() {
+ case Struct:
+ return &(*structTypeUncommon)(unsafe.Pointer(t)).u
+ case Ptr:
+ type u struct {
+ ptrType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Func:
+ type u struct {
+ funcType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Slice:
+ type u struct {
+ sliceType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Array:
+ type u struct {
+ arrayType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Chan:
+ type u struct {
+ chanType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Map:
+ type u struct {
+ mapType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Interface:
+ type u struct {
+ interfaceType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ default:
+ type u struct {
+ rtype
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ }
+}
+
+func (t *rtype) String() string {
+ s := t.nameOff(t.str).name()
+ if t.tflag&tflagExtraStar != 0 {
+ return s[1:]
+ }
+ return s
+}
+
+func (t *rtype) Size() uintptr { return t.size }
+
+func (t *rtype) Bits() int {
+ if t == nil {
+ panic("reflect: Bits of nil Type")
+ }
+ k := t.Kind()
+ if k < Int || k > Complex128 {
+ panic("reflect: Bits of non-arithmetic Type " + t.String())
+ }
+ return int(t.size) * 8
+}
+
+func (t *rtype) Align() int { return int(t.align) }
+
+func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
+
+func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
+
+func (t *rtype) pointers() bool { return t.ptrdata != 0 }
+
+func (t *rtype) common() *rtype { return t }
+
+func (t *rtype) exportedMethods() []method {
+ ut := t.uncommon()
+ if ut == nil {
+ return nil
+ }
+ return ut.exportedMethods()
+}
+
+func (t *rtype) NumMethod() int {
+ if t.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(t))
+ return tt.NumMethod()
+ }
+ return len(t.exportedMethods())
+}
+
+func (t *rtype) Method(i int) (m Method) {
+ if t.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(t))
+ return tt.Method(i)
+ }
+ methods := t.exportedMethods()
+ if i < 0 || i >= len(methods) {
+ panic("reflect: Method index out of range")
+ }
+ p := methods[i]
+ pname := t.nameOff(p.name)
+ m.Name = pname.name()
+ fl := flag(Func)
+ mtyp := t.typeOff(p.mtyp)
+ ft := (*funcType)(unsafe.Pointer(mtyp))
+ in := make([]Type, 0, 1+len(ft.in()))
+ in = append(in, t)
+ for _, arg := range ft.in() {
+ in = append(in, arg)
+ }
+ out := make([]Type, 0, len(ft.out()))
+ for _, ret := range ft.out() {
+ out = append(out, ret)
+ }
+ mt := FuncOf(in, out, ft.IsVariadic())
+ m.Type = mt
+ tfn := t.textOff(p.tfn)
+ fn := unsafe.Pointer(&tfn)
+ m.Func = Value{mt.(*rtype), fn, fl}
+
+ m.Index = i
+ return m
+}
+
+func (t *rtype) MethodByName(name string) (m Method, ok bool) {
+ if t.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(t))
+ return tt.MethodByName(name)
+ }
+ ut := t.uncommon()
+ if ut == nil {
+ return Method{}, false
+ }
+ // TODO(mdempsky): Binary search.
+ for i, p := range ut.exportedMethods() {
+ if t.nameOff(p.name).name() == name {
+ return t.Method(i), true
+ }
+ }
+ return Method{}, false
+}
+
+func (t *rtype) PkgPath() string {
+ if t.tflag&tflagNamed == 0 {
+ return ""
+ }
+ ut := t.uncommon()
+ if ut == nil {
+ return ""
+ }
+ return t.nameOff(ut.pkgPath).name()
+}
+
+func (t *rtype) hasName() bool {
+ return t.tflag&tflagNamed != 0
+}
+
+func (t *rtype) Name() string {
+ if !t.hasName() {
+ return ""
+ }
+ s := t.String()
+ i := len(s) - 1
+ for i >= 0 && s[i] != '.' {
+ i--
+ }
+ return s[i+1:]
+}
+
+func (t *rtype) ChanDir() ChanDir {
+ if t.Kind() != Chan {
+ panic("reflect: ChanDir of non-chan type " + t.String())
+ }
+ tt := (*chanType)(unsafe.Pointer(t))
+ return ChanDir(tt.dir)
+}
+
+func (t *rtype) IsVariadic() bool {
+ if t.Kind() != Func {
+ panic("reflect: IsVariadic of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return tt.outCount&(1<<15) != 0
+}
+
+func (t *rtype) Elem() Type {
+ switch t.Kind() {
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Chan:
+ tt := (*chanType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Map:
+ tt := (*mapType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Ptr:
+ tt := (*ptrType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Slice:
+ tt := (*sliceType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ }
+ panic("reflect: Elem of invalid type " + t.String())
+}
+
+func (t *rtype) Field(i int) StructField {
+ if t.Kind() != Struct {
+ panic("reflect: Field of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.Field(i)
+}
+
+func (t *rtype) FieldByIndex(index []int) StructField {
+ if t.Kind() != Struct {
+ panic("reflect: FieldByIndex of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.FieldByIndex(index)
+}
+
+func (t *rtype) FieldByName(name string) (StructField, bool) {
+ if t.Kind() != Struct {
+ panic("reflect: FieldByName of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.FieldByName(name)
+}
+
+func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
+ if t.Kind() != Struct {
+ panic("reflect: FieldByNameFunc of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.FieldByNameFunc(match)
+}
+
+func (t *rtype) In(i int) Type {
+ if t.Kind() != Func {
+ panic("reflect: In of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return toType(tt.in()[i])
+}
+
+func (t *rtype) Key() Type {
+ if t.Kind() != Map {
+ panic("reflect: Key of non-map type " + t.String())
+ }
+ tt := (*mapType)(unsafe.Pointer(t))
+ return toType(tt.key)
+}
+
+func (t *rtype) Len() int {
+ if t.Kind() != Array {
+ panic("reflect: Len of non-array type " + t.String())
+ }
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return int(tt.len)
+}
+
+func (t *rtype) NumField() int {
+ if t.Kind() != Struct {
+ panic("reflect: NumField of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return len(tt.fields)
+}
+
+func (t *rtype) NumIn() int {
+ if t.Kind() != Func {
+ panic("reflect: NumIn of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return int(tt.inCount)
+}
+
+func (t *rtype) NumOut() int {
+ if t.Kind() != Func {
+ panic("reflect: NumOut of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return len(tt.out())
+}
+
+func (t *rtype) Out(i int) Type {
+ if t.Kind() != Func {
+ panic("reflect: Out of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return toType(tt.out()[i])
+}
+
+func (t *funcType) in() []*rtype {
+ uadd := unsafe.Sizeof(*t)
+ if t.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommonType{})
+ }
+ if t.inCount == 0 {
+ return nil
+ }
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount:t.inCount]
+}
+
+func (t *funcType) out() []*rtype {
+ uadd := unsafe.Sizeof(*t)
+ if t.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommonType{})
+ }
+ outCount := t.outCount & (1<<15 - 1)
+ if outCount == 0 {
+ return nil
+ }
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount : t.inCount+outCount]
+}
+
+// add returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(p) + x)
+}
+
+func (d ChanDir) String() string {
+ switch d {
+ case SendDir:
+ return "chan<-"
+ case RecvDir:
+ return "<-chan"
+ case BothDir:
+ return "chan"
+ }
+ return "ChanDir" + strconv.Itoa(int(d))
+}
+
+// Method returns the i'th method in the type's method set.
+func (t *interfaceType) Method(i int) (m Method) {
+ if i < 0 || i >= len(t.methods) {
+ return
+ }
+ p := &t.methods[i]
+ pname := t.nameOff(p.name)
+ m.Name = pname.name()
+ if !pname.isExported() {
+ m.PkgPath = pname.pkgPath()
+ if m.PkgPath == "" {
+ m.PkgPath = t.pkgPath.name()
+ }
+ }
+ m.Type = toType(t.typeOff(p.typ))
+ m.Index = i
+ return
+}
+
+// NumMethod returns the number of interface methods in the type's method set.
+func (t *interfaceType) NumMethod() int { return len(t.methods) }
+
+// MethodByName returns the method with the given name in the type's method set.
+func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
+ if t == nil {
+ return
+ }
+ var p *imethod
+ for i := range t.methods {
+ p = &t.methods[i]
+ if t.nameOff(p.name).name() == name {
+ return t.Method(i), true
+ }
+ }
+ return
+}
+
+// A StructField describes a single field in a struct.
+type StructField struct {
+ // Name is the field name.
+ Name string
+ // PkgPath is the package path that qualifies a lower case (unexported)
+ // field name. It is empty for upper case (exported) field names.
+ // See https://golang.org/ref/spec#Uniqueness_of_identifiers
+ PkgPath string
+
+ Type Type // field type
+ Tag StructTag // field tag string
+ Offset uintptr // offset within struct, in bytes
+ Index []int // index sequence for Type.FieldByIndex
+ Anonymous bool // is an embedded field
+}
+
+// A StructTag is the tag string in a struct field.
+//
+// By convention, tag strings are a concatenation of
+// optionally space-separated key:"value" pairs.
+// Each key is a non-empty string consisting of non-control
+// characters other than space (U+0020 ' '), quote (U+0022 '"'),
+// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
+// characters and Go string literal syntax.
+type StructTag string
+
+// Get returns the value associated with key in the tag string.
+// If there is no such key in the tag, Get returns the empty string.
+// If the tag does not have the conventional format, the value
+// returned by Get is unspecified. To determine whether a tag is
+// explicitly set to the empty string, use Lookup.
+func (tag StructTag) Get(key string) string {
+ v, _ := tag.Lookup(key)
+ return v
+}
+
+// Lookup returns the value associated with key in the tag string.
+// If the key is present in the tag, the value (which may be empty)
+// is returned. Otherwise the returned value will be the empty string.
+// The ok return value reports whether the value was explicitly set in
+// the tag string. If the tag does not have the conventional format,
+// the value returned by Lookup is unspecified.
+func (tag StructTag) Lookup(key string) (value string, ok bool) {
+ // When modifying this code, also update the validateStructTag code
+ // in cmd/vet/structtag.go.
+
+ for tag != "" {
+ // Skip leading space.
+ i := 0
+ for i < len(tag) && tag[i] == ' ' {
+ i++
+ }
+ tag = tag[i:]
+ if tag == "" {
+ break
+ }
+
+ // Scan to colon. A space, a quote or a control character is a syntax error.
+ // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
+ // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
+ // as it is simpler to inspect the tag's bytes than the tag's runes.
+ i = 0
+ for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
+ i++
+ }
+ if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
+ break
+ }
+ name := string(tag[:i])
+ tag = tag[i+1:]
+
+ // Scan quoted string to find value.
+ i = 1
+ for i < len(tag) && tag[i] != '"' {
+ if tag[i] == '\\' {
+ i++
+ }
+ i++
+ }
+ if i >= len(tag) {
+ break
+ }
+ qvalue := string(tag[:i+1])
+ tag = tag[i+1:]
+
+ if key == name {
+ value, err := strconv.Unquote(qvalue)
+ if err != nil {
+ break
+ }
+ return value, true
+ }
+ }
+ return "", false
+}
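+
+// For example (an illustrative sketch, not part of the original source):
+//
+// tag := StructTag(`json:"name,omitempty" xml:"name"`)
+// v, ok := tag.Lookup("json") // v == "name,omitempty", ok == true
+// v = tag.Get("yaml") // v == "", because the key is absent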
+
+// Field returns the i'th struct field.
+func (t *structType) Field(i int) (f StructField) {
+ if i < 0 || i >= len(t.fields) {
+ panic("reflect: Field index out of bounds")
+ }
+ p := &t.fields[i]
+ f.Type = toType(p.typ)
+ f.Name = p.name.name()
+ f.Anonymous = p.embedded()
+ if !p.name.isExported() {
+ f.PkgPath = t.pkgPath.name()
+ }
+ if tag := p.name.tag(); tag != "" {
+ f.Tag = StructTag(tag)
+ }
+ f.Offset = p.offset()
+
+ // NOTE(rsc): This is the only allocation in the interface
+ // presented by a reflect.Type. It would be nice to avoid,
+ // at least in the common cases, but we need to make sure
+ // that misbehaving clients of reflect cannot affect other
+ // uses of reflect. One possibility is CL 5371098, but we
+ // postponed that ugliness until there is a demonstrated
+ // need for the performance. This is issue 2320.
+ f.Index = []int{i}
+ return
+}
+
+// TODO(gri): Should there be an error/bool indicator if the index
+// is wrong for FieldByIndex?
+
+// FieldByIndex returns the nested field corresponding to index.
+func (t *structType) FieldByIndex(index []int) (f StructField) {
+ f.Type = toType(&t.rtype)
+ for i, x := range index {
+ if i > 0 {
+ ft := f.Type
+ if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
+ ft = ft.Elem()
+ }
+ f.Type = ft
+ }
+ f = f.Type.Field(x)
+ }
+ return
+}
+
+// A fieldScan represents an item on the fieldByNameFunc scan work list.
+type fieldScan struct {
+ typ *structType
+ index []int
+}
+
+// FieldByNameFunc returns the struct field with a name that satisfies the
+// match function and a boolean to indicate if the field was found.
+func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
+ // This uses the same condition that the Go language does: there must be a unique instance
+ // of the match at a given depth level. If there are multiple instances of a match at the
+ // same depth, they annihilate each other and inhibit any possible match at a lower level.
+ // The algorithm is breadth first search, one depth level at a time.
+
+ // The current and next slices are work queues:
+ // current lists the fields to visit on this depth level,
+ // and next lists the fields on the next lower level.
+ current := []fieldScan{}
+ next := []fieldScan{{typ: t}}
+
+ // nextCount records the number of times an embedded type has been
+ // encountered and considered for queueing in the 'next' slice.
+ // We only queue the first one, but we increment the count on each.
+ // If a struct type T can be reached more than once at a given depth level,
+ // then it annihilates itself and need not be considered at all when we
+ // process that next depth level.
+ var nextCount map[*structType]int
+
+ // visited records the structs that have been considered already.
+ // Embedded pointer fields can create cycles in the graph of
+ // reachable embedded types; visited avoids following those cycles.
+ // It also avoids duplicated effort: if we didn't find the field in an
+ // embedded type T at level 2, we won't find it in one at level 4 either.
+ visited := map[*structType]bool{}
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count := nextCount
+ nextCount = nil
+
+ // Process all the fields at this depth, now listed in 'current'.
+ // The loop queues embedded fields found in 'next', for processing during the next
+ // iteration. The multiplicity of the 'current' field counts is recorded
+ // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
+ for _, scan := range current {
+ t := scan.typ
+ if visited[t] {
+ // We've looked through this type before, at a higher level.
+ // That higher level would shadow the lower level we're now at,
+ // so this one can't be useful to us. Ignore it.
+ continue
+ }
+ visited[t] = true
+ for i := range t.fields {
+ f := &t.fields[i]
+ // Find name and (for embedded field) type for field f.
+ fname := f.name.name()
+ var ntyp *rtype
+ if f.embedded() {
+ // Embedded field of type T or *T.
+ ntyp = f.typ
+ if ntyp.Kind() == Ptr {
+ ntyp = ntyp.Elem().common()
+ }
+ }
+
+ // Does it match?
+ if match(fname) {
+ // Potential match
+ if count[t] > 1 || ok {
+ // Name appeared multiple times at this level: annihilate.
+ return StructField{}, false
+ }
+ result = t.Field(i)
+ result.Index = nil
+ result.Index = append(result.Index, scan.index...)
+ result.Index = append(result.Index, i)
+ ok = true
+ continue
+ }
+
+ // Queue embedded struct fields for processing with next level,
+ // but only if we haven't seen a match yet at this level and only
+ // if the embedded types haven't already been queued.
+ if ok || ntyp == nil || ntyp.Kind() != Struct {
+ continue
+ }
+ styp := (*structType)(unsafe.Pointer(ntyp))
+ if nextCount[styp] > 0 {
+ nextCount[styp] = 2 // exact multiple doesn't matter
+ continue
+ }
+ if nextCount == nil {
+ nextCount = map[*structType]int{}
+ }
+ nextCount[styp] = 1
+ if count[t] > 1 {
+ nextCount[styp] = 2 // exact multiple doesn't matter
+ }
+ var index []int
+ index = append(index, scan.index...)
+ index = append(index, i)
+ next = append(next, fieldScan{styp, index})
+ }
+ }
+ if ok {
+ break
+ }
+ }
+ return
+}
+
+// FieldByName returns the struct field with the given name
+// and a boolean to indicate if the field was found.
+func (t *structType) FieldByName(name string) (f StructField, present bool) {
+ // Quick check for top-level name, or struct without embedded fields.
+ hasEmbeds := false
+ if name != "" {
+ for i := range t.fields {
+ tf := &t.fields[i]
+ if tf.name.name() == name {
+ return t.Field(i), true
+ }
+ if tf.embedded() {
+ hasEmbeds = true
+ }
+ }
+ }
+ if !hasEmbeds {
+ return
+ }
+ return t.FieldByNameFunc(func(s string) bool { return s == name })
+}
+
+// TypeOf returns the reflection Type that represents the dynamic type of i.
+// If i is a nil interface value, TypeOf returns nil.
+func TypeOf(i interface{}) Type {
+ eface := *(*emptyInterface)(unsafe.Pointer(&i))
+ return toType(eface.typ)
+}
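+
+// For example (illustrative):
+//
+// var r io.Reader
+// TypeOf(r) // nil, because r is a nil interface value
+// TypeOf(3) // the Type for int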
+
+// ptrMap is the cache for PtrTo.
+var ptrMap sync.Map // map[*rtype]*ptrType
+
+// PtrTo returns the pointer type with element t.
+// For example, if t represents type Foo, PtrTo(t) represents *Foo.
+func PtrTo(t Type) Type {
+ return t.(*rtype).ptrTo()
+}
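+
+// For example (illustrative): PtrTo(TypeOf(0)).String() returns "*int".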
+
+func (t *rtype) ptrTo() *rtype {
+ if t.ptrToThis != 0 {
+ return t.typeOff(t.ptrToThis)
+ }
+
+ // Check the cache.
+ if pi, ok := ptrMap.Load(t); ok {
+ return &pi.(*ptrType).rtype
+ }
+
+ // Look in known types.
+ s := "*" + t.String()
+ for _, tt := range typesByString(s) {
+ p := (*ptrType)(unsafe.Pointer(tt))
+ if p.elem != t {
+ continue
+ }
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
+ }
+
+ // Create a new ptrType starting with the description
+ // of an *unsafe.Pointer.
+ var iptr interface{} = (*unsafe.Pointer)(nil)
+ prototype := *(**ptrType)(unsafe.Pointer(&iptr))
+ pp := *prototype
+
+ pp.str = resolveReflectName(newName(s, "", false))
+ pp.ptrToThis = 0
+
+ // For the type structures linked into the binary, the
+ // compiler provides a good hash of the string.
+ // Create a good hash for the new string by using
+ // the FNV-1 hash's mixing function to combine the
+ // old hash and the new "*".
+ pp.hash = fnv1(t.hash, '*')
+
+ pp.elem = t
+
+ pi, _ := ptrMap.LoadOrStore(t, &pp)
+ return &pi.(*ptrType).rtype
+}
+
+// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
+func fnv1(x uint32, list ...byte) uint32 {
+ for _, b := range list {
+ x = x*16777619 ^ uint32(b)
+ }
+ return x
+}
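+
+// For example (illustrative): fnv1(0, 'a') returns 97: the running hash
+// is multiplied by the FNV prime 16777619 and then XORed with each byte
+// in turn (0*16777619 == 0, and 0^'a' == 97).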
+
+func (t *rtype) Implements(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.Implements")
+ }
+ if u.Kind() != Interface {
+ panic("reflect: non-interface type passed to Type.Implements")
+ }
+ return implements(u.(*rtype), t)
+}
+
+func (t *rtype) AssignableTo(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.AssignableTo")
+ }
+ uu := u.(*rtype)
+ return directlyAssignable(uu, t) || implements(uu, t)
+}
+
+func (t *rtype) ConvertibleTo(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.ConvertibleTo")
+ }
+ uu := u.(*rtype)
+ return convertOp(uu, t) != nil
+}
+
+func (t *rtype) Comparable() bool {
+ return t.equal != nil
+}
+
+// implements reports whether the type V implements the interface type T.
+func implements(T, V *rtype) bool {
+ if T.Kind() != Interface {
+ return false
+ }
+ t := (*interfaceType)(unsafe.Pointer(T))
+ if len(t.methods) == 0 {
+ return true
+ }
+
+ // The same algorithm applies in both cases, but the
+ // method tables for an interface type and a concrete type
+ // are different, so the code is duplicated.
+ // In both cases the algorithm is a linear scan over the two
+ // lists - T's methods and V's methods - simultaneously.
+ // Since method tables are stored in a unique sorted order
+ // (alphabetical, with no duplicate method names), the scan
+ // through V's methods must hit a match for each of T's
+ // methods along the way, or else V does not implement T.
+ // This lets us run the scan in overall linear time instead of
+ // the quadratic time a naive search would require.
+ // See also ../runtime/iface.go.
+ if V.Kind() == Interface {
+ v := (*interfaceType)(unsafe.Pointer(V))
+ i := 0
+ for j := 0; j < len(v.methods); j++ {
+ tm := &t.methods[i]
+ tmName := t.nameOff(tm.name)
+ vm := &v.methods[j]
+ vmName := V.nameOff(vm.name)
+ if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
+ if !tmName.isExported() {
+ tmPkgPath := tmName.pkgPath()
+ if tmPkgPath == "" {
+ tmPkgPath = t.pkgPath.name()
+ }
+ vmPkgPath := vmName.pkgPath()
+ if vmPkgPath == "" {
+ vmPkgPath = v.pkgPath.name()
+ }
+ if tmPkgPath != vmPkgPath {
+ continue
+ }
+ }
+ if i++; i >= len(t.methods) {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ v := V.uncommon()
+ if v == nil {
+ return false
+ }
+ i := 0
+ vmethods := v.methods()
+ for j := 0; j < int(v.mcount); j++ {
+ tm := &t.methods[i]
+ tmName := t.nameOff(tm.name)
+ vm := vmethods[j]
+ vmName := V.nameOff(vm.name)
+ if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
+ if !tmName.isExported() {
+ tmPkgPath := tmName.pkgPath()
+ if tmPkgPath == "" {
+ tmPkgPath = t.pkgPath.name()
+ }
+ vmPkgPath := vmName.pkgPath()
+ if vmPkgPath == "" {
+ vmPkgPath = V.nameOff(v.pkgPath).name()
+ }
+ if tmPkgPath != vmPkgPath {
+ continue
+ }
+ }
+ if i++; i >= len(t.methods) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// specialChannelAssignability reports whether a value x of channel type V
+// can be directly assigned (using memmove) to another channel type T.
+// https://golang.org/doc/go_spec.html#Assignability
+// T and V must be both of Chan kind.
+func specialChannelAssignability(T, V *rtype) bool {
+ // Special case:
+ // x is a bidirectional channel value, T is a channel type,
+ // x's type V and T have identical element types,
+ // and at least one of V or T is not a defined type.
+ return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
+}
+
+// directlyAssignable reports whether a value x of type V can be directly
+// assigned (using memmove) to a value of type T.
+// https://golang.org/doc/go_spec.html#Assignability
+// Ignoring the interface rules (implemented elsewhere)
+// and the ideal constant rules (no ideal constants at run time).
+func directlyAssignable(T, V *rtype) bool {
+ // x's type V is identical to T?
+ if T == V {
+ return true
+ }
+
+ // Otherwise at least one of T and V must not be defined
+ // and they must have the same kind.
+ if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
+ return false
+ }
+
+ if T.Kind() == Chan && specialChannelAssignability(T, V) {
+ return true
+ }
+
+ // x's type T and V must have identical underlying types.
+ return haveIdenticalUnderlyingType(T, V, true)
+}
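+
+// For example (illustrative): given type MyInts []int, a MyInts value is
+// directly assignable to []int, because only one side is a defined type
+// and the underlying types match; two distinct defined types with the
+// same underlying type, such as type P1 *int and type P2 *int, are not
+// assignable to each other.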
+
+func haveIdenticalType(T, V Type, cmpTags bool) bool {
+ if cmpTags {
+ return T == V
+ }
+
+ if T.Name() != V.Name() || T.Kind() != V.Kind() {
+ return false
+ }
+
+ return haveIdenticalUnderlyingType(T.common(), V.common(), false)
+}
+
+func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
+ if T == V {
+ return true
+ }
+
+ kind := T.Kind()
+ if kind != V.Kind() {
+ return false
+ }
+
+ // Non-composite types of equal kind have same underlying type
+ // (the predefined instance of the type).
+ if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
+ return true
+ }
+
+ // Composite types.
+ switch kind {
+ case Array:
+ return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Chan:
+ return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Func:
+ t := (*funcType)(unsafe.Pointer(T))
+ v := (*funcType)(unsafe.Pointer(V))
+ if t.outCount != v.outCount || t.inCount != v.inCount {
+ return false
+ }
+ for i := 0; i < t.NumIn(); i++ {
+ if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
+ return false
+ }
+ }
+ for i := 0; i < t.NumOut(); i++ {
+ if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
+ return false
+ }
+ }
+ return true
+
+ case Interface:
+ t := (*interfaceType)(unsafe.Pointer(T))
+ v := (*interfaceType)(unsafe.Pointer(V))
+ if len(t.methods) == 0 && len(v.methods) == 0 {
+ return true
+ }
+ // Might have the same methods but still
+ // need a run time conversion.
+ return false
+
+ case Map:
+ return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Ptr, Slice:
+ return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Struct:
+ t := (*structType)(unsafe.Pointer(T))
+ v := (*structType)(unsafe.Pointer(V))
+ if len(t.fields) != len(v.fields) {
+ return false
+ }
+ if t.pkgPath.name() != v.pkgPath.name() {
+ return false
+ }
+ for i := range t.fields {
+ tf := &t.fields[i]
+ vf := &v.fields[i]
+ if tf.name.name() != vf.name.name() {
+ return false
+ }
+ if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
+ return false
+ }
+ if cmpTags && tf.name.tag() != vf.name.tag() {
+ return false
+ }
+ if tf.offsetEmbed != vf.offsetEmbed {
+ return false
+ }
+ }
+ return true
+ }
+
+ return false
+}
+
+// typelinks is implemented in package runtime.
+// It returns a slice of the sections in each module,
+// and a slice of *rtype offsets in each module.
+//
+// The types in each module are sorted by string. That is, the first
+// two linked types of the first module are:
+//
+// d0 := sections[0]
+// t1 := (*rtype)(add(d0, offset[0][0]))
+// t2 := (*rtype)(add(d0, offset[0][1]))
+//
+// and
+//
+// t1.String() < t2.String()
+//
+// Note that strings are not unique identifiers for types:
+// there can be more than one with a given string.
+// Only types we might want to look up are included:
+// pointers, channels, maps, slices, and arrays.
+func typelinks() (sections []unsafe.Pointer, offset [][]int32)
+
+func rtypeOff(section unsafe.Pointer, off int32) *rtype {
+ return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
+}
+
+// typesByString returns the subslice of typelinks() whose elements have
+// the given string representation.
+// It may be empty (no known types with that string) or may have
+// multiple elements (multiple types with that string).
+func typesByString(s string) []*rtype {
+ sections, offset := typelinks()
+ var ret []*rtype
+
+ for offsI, offs := range offset {
+ section := sections[offsI]
+
+ // We are looking for the first index i where the string becomes >= s.
+ // This is a copy of sort.Search, with f(h) replaced by (typ[h].String() >= s).
+ i, j := 0, len(offs)
+ for i < j {
+ h := i + (j-i)/2 // avoid overflow when computing h
+ // i ≤ h < j
+ if !(rtypeOff(section, offs[h]).String() >= s) {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+
+ // Having found the first, linear scan forward to find the last.
+ // We could do a second binary search, but the caller is going
+ // to do a linear scan anyway.
+ for j := i; j < len(offs); j++ {
+ typ := rtypeOff(section, offs[j])
+ if typ.String() != s {
+ break
+ }
+ ret = append(ret, typ)
+ }
+ }
+ return ret
+}
+
+// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
+var lookupCache sync.Map // map[cacheKey]*rtype
+
+// A cacheKey is the key for use in the lookupCache.
+// Four values describe any of the types we are looking for:
+// type kind, one or two subtypes, and an extra integer.
+type cacheKey struct {
+ kind Kind
+ t1 *rtype
+ t2 *rtype
+ extra uintptr
+}
+
+// The funcLookupCache caches FuncOf lookups.
+// FuncOf does not share the common lookupCache since cacheKey is not
+// sufficient to represent functions unambiguously.
+var funcLookupCache struct {
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
+ // Elements of m are append-only and thus safe for concurrent reading.
+ m sync.Map
+}
+
+// ChanOf returns the channel type with the given direction and element type.
+// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
+//
+// The gc runtime imposes a limit of 64 kB on channel element types.
+// If t's size is equal to or exceeds this limit, ChanOf panics.
+func ChanOf(dir ChanDir, t Type) Type {
+ typ := t.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
+ if ch, ok := lookupCache.Load(ckey); ok {
+ return ch.(*rtype)
+ }
+
+ // This restriction is imposed by the gc compiler and the runtime.
+ if typ.size >= 1<<16 {
+ panic("reflect.ChanOf: element size too large")
+ }
+
+ // Look in known types.
+ var s string
+ switch dir {
+ default:
+ panic("reflect.ChanOf: invalid dir")
+ case SendDir:
+ s = "chan<- " + typ.String()
+ case RecvDir:
+ s = "<-chan " + typ.String()
+ case BothDir:
+ typeStr := typ.String()
+ if typeStr[0] == '<' {
+ // typ is recv chan, need parentheses as "<-" associates with leftmost
+ // chan possible, see:
+ // * https://golang.org/ref/spec#Channel_types
+ // * https://github.com/golang/go/issues/39897
+ s = "chan (" + typeStr + ")"
+ } else {
+ s = "chan " + typeStr
+ }
+ }
+ for _, tt := range typesByString(s) {
+ ch := (*chanType)(unsafe.Pointer(tt))
+ if ch.elem == typ && ch.dir == uintptr(dir) {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make a channel type.
+ var ichan interface{} = (chan unsafe.Pointer)(nil)
+ prototype := *(**chanType)(unsafe.Pointer(&ichan))
+ ch := *prototype
+ ch.tflag = tflagRegularMemory
+ ch.dir = uintptr(dir)
+ ch.str = resolveReflectName(newName(s, "", false))
+ ch.hash = fnv1(typ.hash, 'c', byte(dir))
+ ch.elem = typ
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
+ return ti.(Type)
+}
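+
+// For example (illustrative): ChanOf(RecvDir, TypeOf(0)).String() returns
+// "<-chan int".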
+
+// MapOf returns the map type with the given key and element types.
+// For example, if k represents int and e represents string,
+// MapOf(k, e) represents map[int]string.
+//
+// If the key type is not a valid map key type (that is, if it does
+// not implement Go's == operator), MapOf panics.
+func MapOf(key, elem Type) Type {
+ ktyp := key.(*rtype)
+ etyp := elem.(*rtype)
+
+ if ktyp.equal == nil {
+ panic("reflect.MapOf: invalid key type " + ktyp.String())
+ }
+
+ // Look in cache.
+ ckey := cacheKey{Map, ktyp, etyp, 0}
+ if mt, ok := lookupCache.Load(ckey); ok {
+ return mt.(Type)
+ }
+
+ // Look in known types.
+ s := "map[" + ktyp.String() + "]" + etyp.String()
+ for _, tt := range typesByString(s) {
+ mt := (*mapType)(unsafe.Pointer(tt))
+ if mt.key == ktyp && mt.elem == etyp {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make a map type.
+ // Note: flag values must match those used in the TMAP case
+ // in ../cmd/compile/internal/gc/reflect.go:dtypesym.
+ var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
+ mt := **(**mapType)(unsafe.Pointer(&imap))
+ mt.str = resolveReflectName(newName(s, "", false))
+ mt.tflag = 0
+ mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
+ mt.key = ktyp
+ mt.elem = etyp
+ mt.bucket = bucketOf(ktyp, etyp)
+ mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
+ return typehash(ktyp, p, seed)
+ }
+ mt.flags = 0
+ if ktyp.size > maxKeySize {
+ mt.keysize = uint8(ptrSize)
+ mt.flags |= 1 // indirect key
+ } else {
+ mt.keysize = uint8(ktyp.size)
+ }
+ if etyp.size > maxValSize {
+ mt.valuesize = uint8(ptrSize)
+ mt.flags |= 2 // indirect value
+ } else {
+ mt.valuesize = uint8(etyp.size)
+ }
+ mt.bucketsize = uint16(mt.bucket.size)
+ if isReflexive(ktyp) {
+ mt.flags |= 4
+ }
+ if needKeyUpdate(ktyp) {
+ mt.flags |= 8
+ }
+ if hashMightPanic(ktyp) {
+ mt.flags |= 16
+ }
+ mt.ptrToThis = 0
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+ return ti.(Type)
+}
+
+// TODO(crawshaw): as these funcTypeFixedN structs have no methods,
+// they could be defined at runtime using the StructOf function.
+type funcTypeFixed4 struct {
+ funcType
+ args [4]*rtype
+}
+type funcTypeFixed8 struct {
+ funcType
+ args [8]*rtype
+}
+type funcTypeFixed16 struct {
+ funcType
+ args [16]*rtype
+}
+type funcTypeFixed32 struct {
+ funcType
+ args [32]*rtype
+}
+type funcTypeFixed64 struct {
+ funcType
+ args [64]*rtype
+}
+type funcTypeFixed128 struct {
+ funcType
+ args [128]*rtype
+}
+
+// FuncOf returns the function type with the given argument and result types.
+// For example, if k represents int and e represents string,
+// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
+//
+// The variadic argument controls whether the function is variadic. FuncOf
+// panics if variadic is true and in[len(in)-1] does not represent a slice.
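+//
+// A sketch of typical use from client code:
+//
+//	in := []reflect.Type{reflect.TypeOf(0)}   // int
+//	out := []reflect.Type{reflect.TypeOf("")} // string
+//	ft := reflect.FuncOf(in, out, false)      // func(int) string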
+func FuncOf(in, out []Type, variadic bool) Type {
+ if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
+ panic("reflect.FuncOf: last arg of variadic func must be slice")
+ }
+
+ // Make a func type.
+ var ifunc interface{} = (func())(nil)
+ prototype := *(**funcType)(unsafe.Pointer(&ifunc))
+ n := len(in) + len(out)
+
+ var ft *funcType
+ var args []*rtype
+ switch {
+ case n <= 4:
+ fixed := new(funcTypeFixed4)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 8:
+ fixed := new(funcTypeFixed8)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 16:
+ fixed := new(funcTypeFixed16)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 32:
+ fixed := new(funcTypeFixed32)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 64:
+ fixed := new(funcTypeFixed64)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 128:
+ fixed := new(funcTypeFixed128)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ default:
+ panic("reflect.FuncOf: too many arguments")
+ }
+ *ft = *prototype
+
+ // Build a hash and minimally populate ft.
+ var hash uint32
+ for _, in := range in {
+ t := in.(*rtype)
+ args = append(args, t)
+ hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
+ }
+ if variadic {
+ hash = fnv1(hash, 'v')
+ }
+ hash = fnv1(hash, '.')
+ for _, out := range out {
+ t := out.(*rtype)
+ args = append(args, t)
+ hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
+ }
+ if len(args) > 50 {
+ panic("reflect.FuncOf does not support more than 50 arguments")
+ }
+ ft.tflag = 0
+ ft.hash = hash
+ ft.inCount = uint16(len(in))
+ ft.outCount = uint16(len(out))
+ if variadic {
+ ft.outCount |= 1 << 15
+ }
+
+ // Look in cache.
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ // Not in cache, lock and retry.
+ funcLookupCache.Lock()
+ defer funcLookupCache.Unlock()
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ addToCache := func(tt *rtype) Type {
+ var rts []*rtype
+ if rti, ok := funcLookupCache.m.Load(hash); ok {
+ rts = rti.([]*rtype)
+ }
+ funcLookupCache.m.Store(hash, append(rts, tt))
+ return tt
+ }
+
+ // Look in known types for the same string representation.
+ str := funcStr(ft)
+ for _, tt := range typesByString(str) {
+ if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
+ return addToCache(tt)
+ }
+ }
+
+ // Populate the remaining fields of ft and store in cache.
+ ft.str = resolveReflectName(newName(str, "", false))
+ ft.ptrToThis = 0
+ return addToCache(&ft.rtype)
+}
+
+// funcStr builds a string representation of a funcType.
+func funcStr(ft *funcType) string {
+ repr := make([]byte, 0, 64)
+ repr = append(repr, "func("...)
+ for i, t := range ft.in() {
+ if i > 0 {
+ repr = append(repr, ", "...)
+ }
+ if ft.IsVariadic() && i == int(ft.inCount)-1 {
+ repr = append(repr, "..."...)
+ repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...)
+ } else {
+ repr = append(repr, t.String()...)
+ }
+ }
+ repr = append(repr, ')')
+ out := ft.out()
+ if len(out) == 1 {
+ repr = append(repr, ' ')
+ } else if len(out) > 1 {
+ repr = append(repr, " ("...)
+ }
+ for i, t := range out {
+ if i > 0 {
+ repr = append(repr, ", "...)
+ }
+ repr = append(repr, t.String()...)
+ }
+ if len(out) > 1 {
+ repr = append(repr, ')')
+ }
+ return string(repr)
+}
+
+// isReflexive reports whether the == operation on the type is reflexive.
+// That is, x == x for all values x of type t.
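+//
+// Float, complex, and interface types are not reflexive because they can
+// hold NaNs; for example (assuming "math" is imported):
+//
+//	f := math.NaN()
+//	println(f == f) // false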
+func isReflexive(t *rtype) bool {
+ switch t.Kind() {
+ case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
+ return true
+ case Float32, Float64, Complex64, Complex128, Interface:
+ return false
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return isReflexive(tt.elem)
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if !isReflexive(f.typ) {
+ return false
+ }
+ }
+ return true
+ default:
+ // Func, Map, Slice, Invalid
+ panic("isReflexive called on non-key type " + t.String())
+ }
+}
+
+// needKeyUpdate reports whether map overwrites require the key to be copied.
+func needKeyUpdate(t *rtype) bool {
+ switch t.Kind() {
+ case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
+ return false
+ case Float32, Float64, Complex64, Complex128, Interface, String:
+ // Float keys can be updated from +0 to -0.
+ // String keys can be updated to use a smaller backing store.
+ // Interfaces might have floats or strings in them.
+ return true
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return needKeyUpdate(tt.elem)
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if needKeyUpdate(f.typ) {
+ return true
+ }
+ }
+ return false
+ default:
+ // Func, Map, Slice, Invalid
+ panic("needKeyUpdate called on non-key type " + t.String())
+ }
+}
+
+// hashMightPanic reports whether the hash of a map key of type t might panic.
+func hashMightPanic(t *rtype) bool {
+ switch t.Kind() {
+ case Interface:
+ return true
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return hashMightPanic(tt.elem)
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if hashMightPanic(f.typ) {
+ return true
+ }
+ }
+ return false
+ default:
+ return false
+ }
+}
+
+// Make sure these routines stay in sync with ../../runtime/map.go!
+// These types exist only for GC, so we only fill out GC relevant info.
+// Currently, that's just size and the GC program. We also fill in string
+// for possible debugging use.
+const (
+ bucketSize uintptr = 8
+ maxKeySize uintptr = 128
+ maxValSize uintptr = 128
+)
+
+func bucketOf(ktyp, etyp *rtype) *rtype {
+ if ktyp.size > maxKeySize {
+ ktyp = PtrTo(ktyp).(*rtype)
+ }
+ if etyp.size > maxValSize {
+ etyp = PtrTo(etyp).(*rtype)
+ }
+
+ // Prepare GC data if any.
+ // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
+ // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
+ // Note that since the key and value are known to be <= 128 bytes,
+ // they're guaranteed to have bitmaps instead of GC programs.
+ var gcdata *byte
+ var ptrdata uintptr
+ var overflowPad uintptr
+
+ size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
+ if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
+ panic("reflect: bad size computation in MapOf")
+ }
+
+ if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
+ nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
+ mask := make([]byte, (nptr+7)/8)
+ base := bucketSize / ptrSize
+
+ if ktyp.ptrdata != 0 {
+ emitGCMask(mask, base, ktyp, bucketSize)
+ }
+ base += bucketSize * ktyp.size / ptrSize
+
+ if etyp.ptrdata != 0 {
+ emitGCMask(mask, base, etyp, bucketSize)
+ }
+ base += bucketSize * etyp.size / ptrSize
+ base += overflowPad / ptrSize
+
+ word := base
+ mask[word/8] |= 1 << (word % 8)
+ gcdata = &mask[0]
+ ptrdata = (word + 1) * ptrSize
+
+ // overflow word must be last
+ if ptrdata != size {
+ panic("reflect: bad layout computation in MapOf")
+ }
+ }
+
+ b := &rtype{
+ align: ptrSize,
+ size: size,
+ kind: uint8(Struct),
+ ptrdata: ptrdata,
+ gcdata: gcdata,
+ }
+ if overflowPad > 0 {
+ b.align = 8
+ }
+ s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
+ b.str = resolveReflectName(newName(s, "", false))
+ return b
+}
+
+func (t *rtype) gcSlice(begin, end uintptr) []byte {
+ return (*[1 << 30]byte)(unsafe.Pointer(t.gcdata))[begin:end:end]
+}
+
+// emitGCMask writes the GC mask for [n]typ into out, starting at bit
+// offset base.
+func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) {
+ if typ.kind&kindGCProg != 0 {
+ panic("reflect: unexpected GC program")
+ }
+ ptrs := typ.ptrdata / ptrSize
+ words := typ.size / ptrSize
+ mask := typ.gcSlice(0, (ptrs+7)/8)
+ for j := uintptr(0); j < ptrs; j++ {
+ if (mask[j/8]>>(j%8))&1 != 0 {
+ for i := uintptr(0); i < n; i++ {
+ k := base + i*words + j
+ out[k/8] |= 1 << (k % 8)
+ }
+ }
+ }
+}
+
+// appendGCProg appends the GC program for the first ptrdata bytes of
+// typ to dst and returns the extended slice.
+func appendGCProg(dst []byte, typ *rtype) []byte {
+ if typ.kind&kindGCProg != 0 {
+ // Element has GC program; emit one element.
+ n := uintptr(*(*uint32)(unsafe.Pointer(typ.gcdata)))
+ prog := typ.gcSlice(4, 4+n-1)
+ return append(dst, prog...)
+ }
+
+ // Element is small with pointer mask; use as literal bits.
+ ptrs := typ.ptrdata / ptrSize
+ mask := typ.gcSlice(0, (ptrs+7)/8)
+
+ // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
+ for ; ptrs > 120; ptrs -= 120 {
+ dst = append(dst, 120)
+ dst = append(dst, mask[:15]...)
+ mask = mask[15:]
+ }
+
+ dst = append(dst, byte(ptrs))
+ dst = append(dst, mask...)
+ return dst
+}
+
+// SliceOf returns the slice type with element type t.
+// For example, if t represents int, SliceOf(t) represents []int.
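+//
+// A sketch of typical use from client code:
+//
+//	st := reflect.SliceOf(reflect.TypeOf(0)) // []int
+//	s := reflect.MakeSlice(st, 0, 10)        // make([]int, 0, 10)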
+func SliceOf(t Type) Type {
+ typ := t.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Slice, typ, nil, 0}
+ if slice, ok := lookupCache.Load(ckey); ok {
+ return slice.(Type)
+ }
+
+ // Look in known types.
+ s := "[]" + typ.String()
+ for _, tt := range typesByString(s) {
+ slice := (*sliceType)(unsafe.Pointer(tt))
+ if slice.elem == typ {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make a slice type.
+ var islice interface{} = ([]unsafe.Pointer)(nil)
+ prototype := *(**sliceType)(unsafe.Pointer(&islice))
+ slice := *prototype
+ slice.tflag = 0
+ slice.str = resolveReflectName(newName(s, "", false))
+ slice.hash = fnv1(typ.hash, '[')
+ slice.elem = typ
+ slice.ptrToThis = 0
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+ return ti.(Type)
+}
+
+// The structLookupCache caches StructOf lookups.
+// StructOf does not share the common lookupCache since we need to pin
+// the memory associated with *structTypeFixedN.
+var structLookupCache struct {
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
+ // Elements in m are append-only and thus safe for concurrent reading.
+ m sync.Map
+}
+
+type structTypeUncommon struct {
+ structType
+ u uncommonType
+}
+
+// isLetter reports whether a given rune is classified as a Letter.
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
+}
+
+// isValidFieldName reports whether a string is a valid (struct) field name.
+//
+// According to the language spec, a field name should be an identifier.
+//
+// identifier = letter { letter | unicode_digit } .
+// letter = unicode_letter | "_" .
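+//
+// For example, "F", "_x", and "日本" are valid field names, while "",
+// "1x", and "a-b" are not.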
+func isValidFieldName(fieldName string) bool {
+ for i, c := range fieldName {
+ if i == 0 && !isLetter(c) {
+ return false
+ }
+
+ if !(isLetter(c) || unicode.IsDigit(c)) {
+ return false
+ }
+ }
+
+ return len(fieldName) > 0
+}
+
+// StructOf returns the struct type containing fields.
+// The Offset and Index fields are ignored and computed as they would be
+// by the compiler.
+//
+// StructOf currently does not generate wrapper methods for embedded
+// fields and panics if passed unexported StructFields.
+// These limitations may be lifted in a future version.
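+//
+// A sketch of typical use from client code:
+//
+//	t := reflect.StructOf([]reflect.StructField{
+//		{Name: "Height", Type: reflect.TypeOf(float64(0)), Tag: `json:"height"`},
+//		{Name: "Age", Type: reflect.TypeOf(0), Tag: `json:"age"`},
+//	})
+//	v := reflect.New(t).Elem() // addressable zero value of the new type
+//	v.Field(0).SetFloat(0.4)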
+func StructOf(fields []StructField) Type {
+ var (
+ hash = fnv1(0, []byte("struct {")...)
+ size uintptr
+ typalign uint8
+ comparable = true
+ methods []method
+
+ fs = make([]structField, len(fields))
+ repr = make([]byte, 0, 64)
+ fset = map[string]struct{}{} // fields' names
+
+ hasGCProg = false // records whether a struct-field type has a GCProg
+ )
+
+ lastzero := uintptr(0)
+ repr = append(repr, "struct {"...)
+ pkgpath := ""
+ for i, field := range fields {
+ if field.Name == "" {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
+ }
+ if !isValidFieldName(field.Name) {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
+ }
+ if field.Type == nil {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
+ }
+ f, fpkgpath := runtimeStructField(field)
+ ft := f.typ
+ if ft.kind&kindGCProg != 0 {
+ hasGCProg = true
+ }
+ if fpkgpath != "" {
+ if pkgpath == "" {
+ pkgpath = fpkgpath
+ } else if pkgpath != fpkgpath {
+ panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
+ }
+ }
+
+ // Update string and hash
+ name := f.name.name()
+ hash = fnv1(hash, []byte(name)...)
+ repr = append(repr, (" " + name)...)
+ if f.embedded() {
+ // Embedded field
+ if f.typ.Kind() == Ptr {
+ // Embedded ** and *interface{} are illegal
+ elem := ft.Elem()
+ if k := elem.Kind(); k == Ptr || k == Interface {
+ panic("reflect.StructOf: illegal embedded field type " + ft.String())
+ }
+ }
+
+ switch f.typ.Kind() {
+ case Interface:
+ ift := (*interfaceType)(unsafe.Pointer(ft))
+ for im, m := range ift.methods {
+ if ift.nameOff(m.name).pkgPath() != "" {
+ // TODO(sbinet). Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+
+ var (
+ mtyp = ift.typeOff(m.typ)
+ ifield = i
+ imethod = im
+ ifn Value
+ tfn Value
+ )
+
+ if ft.kind&kindDirectIface != 0 {
+ tfn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = in[0]
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ ifn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = in[0]
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ } else {
+ tfn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = in[0]
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ ifn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = Indirect(in[0])
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ }
+
+ methods = append(methods, method{
+ name: resolveReflectName(ift.nameOff(m.name)),
+ mtyp: resolveReflectType(mtyp),
+ ifn: resolveReflectText(unsafe.Pointer(&ifn)),
+ tfn: resolveReflectText(unsafe.Pointer(&tfn)),
+ })
+ }
+ case Ptr:
+ ptr := (*ptrType)(unsafe.Pointer(ft))
+ if unt := ptr.uncommon(); unt != nil {
+ if i > 0 && unt.mcount > 0 {
+ // Issue 15924.
+ panic("reflect: embedded type with methods not implemented if type is not first field")
+ }
+ if len(fields) > 1 {
+ panic("reflect: embedded type with methods not implemented if there is more than one field")
+ }
+ for _, m := range unt.methods() {
+ mname := ptr.nameOff(m.name)
+ if mname.pkgPath() != "" {
+ // TODO(sbinet).
+ // Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+ methods = append(methods, method{
+ name: resolveReflectName(mname),
+ mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ptr.textOff(m.ifn)),
+ tfn: resolveReflectText(ptr.textOff(m.tfn)),
+ })
+ }
+ }
+ if unt := ptr.elem.uncommon(); unt != nil {
+ for _, m := range unt.methods() {
+ mname := ptr.nameOff(m.name)
+ if mname.pkgPath() != "" {
+ // TODO(sbinet)
+ // Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+ methods = append(methods, method{
+ name: resolveReflectName(mname),
+ mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ptr.elem.textOff(m.ifn)),
+ tfn: resolveReflectText(ptr.elem.textOff(m.tfn)),
+ })
+ }
+ }
+ default:
+ if unt := ft.uncommon(); unt != nil {
+ if i > 0 && unt.mcount > 0 {
+ // Issue 15924.
+ panic("reflect: embedded type with methods not implemented if type is not first field")
+ }
+ if len(fields) > 1 && ft.kind&kindDirectIface != 0 {
+ panic("reflect: embedded type with methods not implemented for non-pointer type")
+ }
+ for _, m := range unt.methods() {
+ mname := ft.nameOff(m.name)
+ if mname.pkgPath() != "" {
+ // TODO(sbinet)
+ // Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+ methods = append(methods, method{
+ name: resolveReflectName(mname),
+ mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ft.textOff(m.ifn)),
+ tfn: resolveReflectText(ft.textOff(m.tfn)),
+ })
+ }
+ }
+ }
+ }
+ if _, dup := fset[name]; dup {
+ panic("reflect.StructOf: duplicate field " + name)
+ }
+ fset[name] = struct{}{}
+
+ hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash))
+
+ repr = append(repr, (" " + ft.String())...)
+ if f.name.tagLen() > 0 {
+ hash = fnv1(hash, []byte(f.name.tag())...)
+ repr = append(repr, (" " + strconv.Quote(f.name.tag()))...)
+ }
+ if i < len(fields)-1 {
+ repr = append(repr, ';')
+ }
+
+ comparable = comparable && (ft.equal != nil)
+
+ offset := align(size, uintptr(ft.align))
+ if ft.align > typalign {
+ typalign = ft.align
+ }
+ size = offset + ft.size
+ f.offsetEmbed |= offset << 1
+
+ if ft.size == 0 {
+ lastzero = size
+ }
+
+ fs[i] = f
+ }
+
+ if size > 0 && lastzero == size {
+ // This is a non-zero sized struct that ends in a
+ // zero-sized field. We add an extra byte of padding,
+ // to ensure that taking the address of the final
+ // zero-sized field can't manufacture a pointer to the
+ // next object in the heap. See issue 9401.
+ size++
+ }
+
+ var typ *structType
+ var ut *uncommonType
+
+ if len(methods) == 0 {
+ t := new(structTypeUncommon)
+ typ = &t.structType
+ ut = &t.u
+ } else {
+ // A *rtype representing a struct is followed directly in memory by an
+ // array of method objects representing the methods attached to the
+ // struct. To get the same layout for a run time generated type, we
+ // need an array directly following the uncommonType memory.
+ // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
+ tt := New(StructOf([]StructField{
+ {Name: "S", Type: TypeOf(structType{})},
+ {Name: "U", Type: TypeOf(uncommonType{})},
+ {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))},
+ }))
+
+ typ = (*structType)(unsafe.Pointer(tt.Elem().Field(0).UnsafeAddr()))
+ ut = (*uncommonType)(unsafe.Pointer(tt.Elem().Field(1).UnsafeAddr()))
+
+ copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]method), methods)
+ }
+ // TODO(sbinet): Once we allow embedding multiple types,
+ // methods will need to be sorted like the compiler does.
+ // TODO(sbinet): Once we allow non-exported methods, we will
+ // need to compute xcount as the number of exported methods.
+ ut.mcount = uint16(len(methods))
+ ut.xcount = ut.mcount
+ ut.moff = uint32(unsafe.Sizeof(uncommonType{}))
+
+ if len(fs) > 0 {
+ repr = append(repr, ' ')
+ }
+ repr = append(repr, '}')
+ hash = fnv1(hash, '}')
+ str := string(repr)
+
+ // Round the size up to be a multiple of the alignment.
+ size = align(size, uintptr(typalign))
+
+ // Make the struct type.
+ var istruct interface{} = struct{}{}
+ prototype := *(**structType)(unsafe.Pointer(&istruct))
+ *typ = *prototype
+ typ.fields = fs
+ if pkgpath != "" {
+ typ.pkgPath = newName(pkgpath, "", false)
+ }
+
+ // Look in cache.
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ // Not in cache, lock and retry.
+ structLookupCache.Lock()
+ defer structLookupCache.Unlock()
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ addToCache := func(t Type) Type {
+ var ts []Type
+ if ti, ok := structLookupCache.m.Load(hash); ok {
+ ts = ti.([]Type)
+ }
+ structLookupCache.m.Store(hash, append(ts, t))
+ return t
+ }
+
+ // Look in known types.
+ for _, t := range typesByString(str) {
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ // even if 't' wasn't a structType with methods, we should be ok
+ // as the 'u uncommonType' field won't be accessed except when
+ // tflag&tflagUncommon is set.
+ return addToCache(t)
+ }
+ }
+
+ typ.str = resolveReflectName(newName(str, "", false))
+ typ.tflag = 0 // TODO: set tflagRegularMemory
+ typ.hash = hash
+ typ.size = size
+ typ.ptrdata = typeptrdata(typ.common())
+ typ.align = typalign
+ typ.fieldAlign = typalign
+ typ.ptrToThis = 0
+ if len(methods) > 0 {
+ typ.tflag |= tflagUncommon
+ }
+
+ if hasGCProg {
+ lastPtrField := 0
+ for i, ft := range fs {
+ if ft.typ.pointers() {
+ lastPtrField = i
+ }
+ }
+ prog := []byte{0, 0, 0, 0} // will be length of prog
+ var off uintptr
+ for i, ft := range fs {
+ if i > lastPtrField {
+ // gcprog should not include anything for any field after
+ // the last field that contains pointer data
+ break
+ }
+ if !ft.typ.pointers() {
+ // Ignore pointerless fields.
+ continue
+ }
+ // Pad to start of this field with zeros.
+ if ft.offset() > off {
+ n := (ft.offset() - off) / ptrSize
+ prog = append(prog, 0x01, 0x00) // emit a 0 bit
+ if n > 1 {
+ prog = append(prog, 0x81) // repeat previous bit
+ prog = appendVarint(prog, n-1) // n-1 times
+ }
+ off = ft.offset()
+ }
+
+ prog = appendGCProg(prog, ft.typ)
+ off += ft.typ.ptrdata
+ }
+ prog = append(prog, 0)
+ *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
+ typ.kind |= kindGCProg
+ typ.gcdata = &prog[0]
+ } else {
+ typ.kind &^= kindGCProg
+ bv := new(bitVector)
+ addTypeBits(bv, 0, typ.common())
+ if len(bv.data) > 0 {
+ typ.gcdata = &bv.data[0]
+ }
+ }
+ typ.equal = nil
+ if comparable {
+ typ.equal = func(p, q unsafe.Pointer) bool {
+ for _, ft := range typ.fields {
+ pi := add(p, ft.offset(), "&x.field safe")
+ qi := add(q, ft.offset(), "&x.field safe")
+ if !ft.typ.equal(pi, qi) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ switch {
+ case len(fs) == 1 && !ifaceIndir(fs[0].typ):
+ // structs of 1 direct iface type can be direct
+ typ.kind |= kindDirectIface
+ default:
+ typ.kind &^= kindDirectIface
+ }
+
+ return addToCache(&typ.rtype)
+}
+
+// runtimeStructField takes a StructField value passed to StructOf and
+// returns both the corresponding internal representation, of type
+// structField, and the pkgpath value to use for this field.
+func runtimeStructField(field StructField) (structField, string) {
+ if field.Anonymous && field.PkgPath != "" {
+ panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
+ }
+
+ exported := field.PkgPath == ""
+ if exported {
+ // Best-effort check for misuse.
+ // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through.
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
+ }
+ }
+
+ offsetEmbed := uintptr(0)
+ if field.Anonymous {
+ offsetEmbed |= 1
+ }
+
+ resolveReflectType(field.Type.common()) // install in runtime
+ f := structField{
+ name: newName(field.Name, string(field.Tag), exported),
+ typ: field.Type.common(),
+ offsetEmbed: offsetEmbed,
+ }
+ return f, field.PkgPath
+}
+
+// typeptrdata returns the length in bytes of the prefix of t
+// containing pointer data. Anything after this offset is scalar data.
+// keep in sync with ../cmd/compile/internal/gc/reflect.go
+func typeptrdata(t *rtype) uintptr {
+ switch t.Kind() {
+ case Struct:
+ st := (*structType)(unsafe.Pointer(t))
+ // find the last field that has pointers.
+ field := -1
+ for i := range st.fields {
+ ft := st.fields[i].typ
+ if ft.pointers() {
+ field = i
+ }
+ }
+ if field == -1 {
+ return 0
+ }
+ f := st.fields[field]
+ return f.offset() + f.typ.ptrdata
+
+ default:
+ panic("reflect.typeptrdata: unexpected type, " + t.String())
+ }
+}
+
+// See cmd/compile/internal/gc/reflect.go for derivation of constant.
+const maxPtrmaskBytes = 2048
+
+// ArrayOf returns the array type with the given count and element type.
+// For example, if t represents int, ArrayOf(5, t) represents [5]int.
+//
+// If the resulting type would be larger than the available address space,
+// ArrayOf panics.
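+//
+// A sketch of typical use from client code:
+//
+//	at := reflect.ArrayOf(5, reflect.TypeOf(0)) // [5]int
+//	a := reflect.New(at).Elem()                 // addressable zero [5]int
+//	a.Index(2).SetInt(7)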
+func ArrayOf(count int, elem Type) Type {
+ typ := elem.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Array, typ, nil, uintptr(count)}
+ if array, ok := lookupCache.Load(ckey); ok {
+ return array.(Type)
+ }
+
+ // Look in known types.
+ s := "[" + strconv.Itoa(count) + "]" + typ.String()
+ for _, tt := range typesByString(s) {
+ array := (*arrayType)(unsafe.Pointer(tt))
+ if array.elem == typ {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make an array type.
+ var iarray interface{} = [1]unsafe.Pointer{}
+ prototype := *(**arrayType)(unsafe.Pointer(&iarray))
+ array := *prototype
+ array.tflag = typ.tflag & tflagRegularMemory
+ array.str = resolveReflectName(newName(s, "", false))
+ array.hash = fnv1(typ.hash, '[')
+ for n := uint32(count); n > 0; n >>= 8 {
+ array.hash = fnv1(array.hash, byte(n))
+ }
+ array.hash = fnv1(array.hash, ']')
+ array.elem = typ
+ array.ptrToThis = 0
+ if typ.size > 0 {
+ max := ^uintptr(0) / typ.size
+ if uintptr(count) > max {
+ panic("reflect.ArrayOf: array size would exceed virtual address space")
+ }
+ }
+ array.size = typ.size * uintptr(count)
+ if count > 0 && typ.ptrdata != 0 {
+ array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
+ }
+ array.align = typ.align
+ array.fieldAlign = typ.fieldAlign
+ array.len = uintptr(count)
+ array.slice = SliceOf(elem).(*rtype)
+
+ switch {
+ case typ.ptrdata == 0 || array.size == 0:
+ // No pointers.
+ array.gcdata = nil
+ array.ptrdata = 0
+
+ case count == 1:
+ // In memory, 1-element array looks just like the element.
+ array.kind |= typ.kind & kindGCProg
+ array.gcdata = typ.gcdata
+ array.ptrdata = typ.ptrdata
+
+ case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
+ // Element is small with pointer mask; array is still small.
+ // Create direct pointer mask by turning each 1 bit in elem
+ // into count 1 bits in larger mask.
+ mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
+ emitGCMask(mask, 0, typ, array.len)
+ array.gcdata = &mask[0]
+
+ default:
+ // Create program that emits one element
+ // and then repeats to make the array.
+ prog := []byte{0, 0, 0, 0} // will be length of prog
+ prog = appendGCProg(prog, typ)
+ // Pad from ptrdata to size.
+ elemPtrs := typ.ptrdata / ptrSize
+ elemWords := typ.size / ptrSize
+ if elemPtrs < elemWords {
+ // Emit literal 0 bit, then repeat as needed.
+ prog = append(prog, 0x01, 0x00)
+ if elemPtrs+1 < elemWords {
+ prog = append(prog, 0x81)
+ prog = appendVarint(prog, elemWords-elemPtrs-1)
+ }
+ }
+ // Repeat count-1 times.
+ if elemWords < 0x80 {
+ prog = append(prog, byte(elemWords|0x80))
+ } else {
+ prog = append(prog, 0x80)
+ prog = appendVarint(prog, elemWords)
+ }
+ prog = appendVarint(prog, uintptr(count)-1)
+ prog = append(prog, 0)
+ *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
+ array.kind |= kindGCProg
+ array.gcdata = &prog[0]
+ array.ptrdata = array.size // overestimate but ok; must match program
+ }
+
+ etyp := typ.common()
+ esize := etyp.Size()
+
+ array.equal = nil
+ if eequal := etyp.equal; eequal != nil {
+ array.equal = func(p, q unsafe.Pointer) bool {
+ for i := 0; i < count; i++ {
+ pi := arrayAt(p, i, esize, "i < count")
+ qi := arrayAt(q, i, esize, "i < count")
+ if !eequal(pi, qi) {
+ return false
+ }
+
+ }
+ return true
+ }
+ }
+
+ switch {
+ case count == 1 && !ifaceIndir(typ):
+ // array of 1 direct iface type can be direct
+ array.kind |= kindDirectIface
+ default:
+ array.kind &^= kindDirectIface
+ }
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
+ return ti.(Type)
+}
+
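+// appendVarint appends v to x in the varint encoding used by GC programs:
+// seven bits per byte, least significant group first, with the 0x80
+// continuation bit set on every byte except the last.
+// For example, appendVarint(nil, 300) yields the bytes 0xac, 0x02.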
+func appendVarint(x []byte, v uintptr) []byte {
+ for ; v >= 0x80; v >>= 7 {
+ x = append(x, byte(v|0x80))
+ }
+ x = append(x, byte(v))
+ return x
+}
+
+// toType converts from a *rtype to a Type that can be returned
+// to the client of package reflect. In gc, the only concern is that
+// a nil *rtype must be replaced by a nil Type, but in gccgo this
+// function takes care of ensuring that multiple *rtype for the same
+// type are coalesced into a single Type.
+func toType(t *rtype) Type {
+ if t == nil {
+ return nil
+ }
+ return t
+}
+
+type layoutKey struct {
+ ftyp *funcType // function signature
+ rcvr *rtype // receiver type, or nil if none
+}
+
+type layoutType struct {
+ t *rtype
+ argSize uintptr // size of arguments
+ retOffset uintptr // offset of return values.
+ stack *bitVector
+ framePool *sync.Pool
+}
+
+var layoutCache sync.Map // map[layoutKey]layoutType
+
+// funcLayout computes a struct type representing the layout of the
+// function arguments and return values for the function type t.
+// If rcvr != nil, rcvr specifies the type of the receiver.
+// The returned type exists only for GC, so we only fill out GC relevant info.
+// Currently, that's just size and the GC program. We also fill in
+// the name for possible debugging use.
+func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) {
+ if t.Kind() != Func {
+ panic("reflect: funcLayout of non-func type " + t.String())
+ }
+ if rcvr != nil && rcvr.Kind() == Interface {
+ panic("reflect: funcLayout with interface receiver " + rcvr.String())
+ }
+ k := layoutKey{t, rcvr}
+ if lti, ok := layoutCache.Load(k); ok {
+ lt := lti.(layoutType)
+ return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
+ }
+
+ // compute gc program & stack bitmap for arguments
+ ptrmap := new(bitVector)
+ var offset uintptr
+ if rcvr != nil {
+ // Reflect uses the "interface" calling convention for
+ // methods, where receivers take one word of argument
+ // space no matter how big they actually are.
+ if ifaceIndir(rcvr) || rcvr.pointers() {
+ ptrmap.append(1)
+ } else {
+ ptrmap.append(0)
+ }
+ offset += ptrSize
+ }
+ for _, arg := range t.in() {
+ offset += -offset & uintptr(arg.align-1)
+ addTypeBits(ptrmap, offset, arg)
+ offset += arg.size
+ }
+ argSize = offset
+ offset += -offset & (ptrSize - 1)
+ retOffset = offset
+ for _, res := range t.out() {
+ offset += -offset & uintptr(res.align-1)
+ addTypeBits(ptrmap, offset, res)
+ offset += res.size
+ }
+ offset += -offset & (ptrSize - 1)
+
+ // build dummy rtype holding gc program
+ x := &rtype{
+ align: ptrSize,
+ size: offset,
+ ptrdata: uintptr(ptrmap.n) * ptrSize,
+ }
+ if ptrmap.n > 0 {
+ x.gcdata = &ptrmap.data[0]
+ }
+
+ var s string
+ if rcvr != nil {
+ s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
+ } else {
+ s = "funcargs(" + t.String() + ")"
+ }
+ x.str = resolveReflectName(newName(s, "", false))
+
+ // cache result for future callers
+ framePool = &sync.Pool{New: func() interface{} {
+ return unsafe_New(x)
+ }}
+ lti, _ := layoutCache.LoadOrStore(k, layoutType{
+ t: x,
+ argSize: argSize,
+ retOffset: retOffset,
+ stack: ptrmap,
+ framePool: framePool,
+ })
+ lt := lti.(layoutType)
+ return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
+}
+
+// ifaceIndir reports whether t is stored indirectly in an interface value.
+func ifaceIndir(t *rtype) bool {
+ return t.kind&kindDirectIface == 0
+}
+
+// Note: this type must agree with runtime.bitvector.
+type bitVector struct {
+ n uint32 // number of bits
+ data []byte
+}
+
+// append a bit to the bitmap.
+func (bv *bitVector) append(bit uint8) {
+ if bv.n%8 == 0 {
+ bv.data = append(bv.data, 0)
+ }
+ bv.data[bv.n/8] |= bit << (bv.n % 8)
+ bv.n++
+}
+
+func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
+ if t.ptrdata == 0 {
+ return
+ }
+
+ switch Kind(t.kind & kindMask) {
+ case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
+ // 1 pointer at start of representation
+ for bv.n < uint32(offset/uintptr(ptrSize)) {
+ bv.append(0)
+ }
+ bv.append(1)
+
+ case Interface:
+ // 2 pointers
+ for bv.n < uint32(offset/uintptr(ptrSize)) {
+ bv.append(0)
+ }
+ bv.append(1)
+ bv.append(1)
+
+ case Array:
+ // repeat inner type
+ tt := (*arrayType)(unsafe.Pointer(t))
+ for i := 0; i < int(tt.len); i++ {
+ addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
+ }
+
+ case Struct:
+ // apply fields
+ tt := (*structType)(unsafe.Pointer(t))
+ for i := range tt.fields {
+ f := &tt.fields[i]
+ addTypeBits(bv, offset+f.offset(), f.typ)
+ }
+ }
+}
diff --git a/src/reflect/value.go b/src/reflect/value.go
new file mode 100644
index 0000000..1f185b5
--- /dev/null
+++ b/src/reflect/value.go
@@ -0,0 +1,2858 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+import (
+ "internal/unsafeheader"
+ "math"
+ "runtime"
+ "unsafe"
+)
+
+const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
+
+// Value is the reflection interface to a Go value.
+//
+// Not all methods apply to all kinds of values. Restrictions,
+// if any, are noted in the documentation for each method.
+// Use the Kind method to find out the kind of value before
+// calling kind-specific methods. Calling a method
+// inappropriate to the kind of type causes a run time panic.
+//
+// The zero Value represents no value.
+// Its IsValid method returns false, its Kind method returns Invalid,
+// its String method returns "<invalid Value>", and all other methods panic.
+// Most functions and methods never return an invalid value.
+// If one does, its documentation states the conditions explicitly.
+//
+// A Value can be used concurrently by multiple goroutines provided that
+// the underlying Go value can be used concurrently for the equivalent
+// direct operations.
+//
+// To compare two Values, compare the results of the Interface method.
+// Using == on two Values does not compare the underlying values
+// they represent.
+type Value struct {
+ // typ holds the type of the value represented by a Value.
+ typ *rtype
+
+ // Pointer-valued data or, if flagIndir is set, pointer to data.
+ // Valid when either flagIndir is set or typ.pointers() is true.
+ ptr unsafe.Pointer
+
+ // flag holds metadata about the value.
+ // The lowest bits are flag bits:
+ // - flagStickyRO: obtained via an unexported, non-embedded field, so read-only
+ // - flagEmbedRO: obtained via an unexported embedded field, so read-only
+ // - flagIndir: ptr holds a pointer to the data
+ // - flagAddr: v.CanAddr is true (implies flagIndir)
+ // - flagMethod: v is a method value.
+ // The next five bits give the Kind of the value.
+ // This repeats typ.Kind() except for method values.
+ // The remaining 23+ bits give a method number for method values.
+ // If flag.kind() != Func, code can assume that flagMethod is unset.
+ // If ifaceIndir(typ), code can assume that flagIndir is set.
+ flag
+
+ // A method value represents a curried method invocation
+ // like r.Read for some receiver r. The typ+val+flag bits describe
+ // the receiver r, but the flag's Kind bits say Func (methods are
+ // functions), and the top bits of the flag give the method number
+ // in r's type's method table.
+}
+
+type flag uintptr
+
+const (
+ flagKindWidth = 5 // there are 27 kinds
+ flagKindMask flag = 1<<flagKindWidth - 1
+ flagStickyRO flag = 1 << 5
+ flagEmbedRO flag = 1 << 6
+ flagIndir flag = 1 << 7
+ flagAddr flag = 1 << 8
+ flagMethod flag = 1 << 9
+ flagMethodShift = 10
+ flagRO flag = flagStickyRO | flagEmbedRO
+)
+
+func (f flag) kind() Kind {
+ return Kind(f & flagKindMask)
+}
+
+func (f flag) ro() flag {
+ if f&flagRO != 0 {
+ return flagStickyRO
+ }
+ return 0
+}
+
+// pointer returns the underlying pointer represented by v.
+// v.Kind() must be Ptr, Map, Chan, Func, or UnsafePointer.
+// If v.Kind() == Ptr, the base type must not be go:notinheap.
+func (v Value) pointer() unsafe.Pointer {
+ if v.typ.size != ptrSize || !v.typ.pointers() {
+ panic("can't call pointer on a non-pointer Value")
+ }
+ if v.flag&flagIndir != 0 {
+ return *(*unsafe.Pointer)(v.ptr)
+ }
+ return v.ptr
+}
+
+// packEface converts v to the empty interface.
+func packEface(v Value) interface{} {
+ t := v.typ
+ var i interface{}
+ e := (*emptyInterface)(unsafe.Pointer(&i))
+ // First, fill in the data portion of the interface.
+ switch {
+ case ifaceIndir(t):
+ if v.flag&flagIndir == 0 {
+ panic("bad indir")
+ }
+ // Value is indirect, and so is the interface we're making.
+ ptr := v.ptr
+ if v.flag&flagAddr != 0 {
+ // TODO: pass safe boolean from valueInterface so
+ // we don't need to copy if safe==true?
+ c := unsafe_New(t)
+ typedmemmove(t, c, ptr)
+ ptr = c
+ }
+ e.word = ptr
+ case v.flag&flagIndir != 0:
+ // Value is indirect, but interface is direct. We need
+ // to load the data at v.ptr into the interface data word.
+ e.word = *(*unsafe.Pointer)(v.ptr)
+ default:
+ // Value is direct, and so is the interface.
+ e.word = v.ptr
+ }
+ // Now, fill in the type portion. We're very careful here not
+ // to have any operation between the e.word and e.typ assignments
+ // that would let the garbage collector observe the partially-built
+ // interface value.
+ e.typ = t
+ return i
+}
+
+// unpackEface converts the empty interface i to a Value.
+func unpackEface(i interface{}) Value {
+ e := (*emptyInterface)(unsafe.Pointer(&i))
+ // NOTE: don't read e.word until we know whether it is really a pointer or not.
+ t := e.typ
+ if t == nil {
+ return Value{}
+ }
+ f := flag(t.Kind())
+ if ifaceIndir(t) {
+ f |= flagIndir
+ }
+ return Value{t, e.word, f}
+}
+
+// A ValueError occurs when a Value method is invoked on
+// a Value that does not support it. Such cases are documented
+// in the description of each method.
+type ValueError struct {
+ Method string
+ Kind Kind
+}
+
+func (e *ValueError) Error() string {
+ if e.Kind == 0 {
+ return "reflect: call of " + e.Method + " on zero Value"
+ }
+ return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
+}
+
+// methodName returns the name of the calling method,
+// assumed to be two stack frames above.
+func methodName() string {
+ pc, _, _, _ := runtime.Caller(2)
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return "unknown method"
+ }
+ return f.Name()
+}
+
+// methodNameSkip is like methodName, but skips another stack frame.
+// This is a separate function so that reflect.flag.mustBe will be inlined.
+func methodNameSkip() string {
+ pc, _, _, _ := runtime.Caller(3)
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return "unknown method"
+ }
+ return f.Name()
+}
+
+// emptyInterface is the header for an interface{} value.
+type emptyInterface struct {
+ typ *rtype
+ word unsafe.Pointer
+}
+
+// nonEmptyInterface is the header for an interface value with methods.
+type nonEmptyInterface struct {
+ // see ../runtime/iface.go:/Itab
+ itab *struct {
+ ityp *rtype // static interface type
+ typ *rtype // dynamic concrete type
+ hash uint32 // copy of typ.hash
+ _ [4]byte
+ fun [100000]unsafe.Pointer // method table
+ }
+ word unsafe.Pointer
+}
+
+// mustBe panics if f's kind is not expected.
+// Making this a method on flag instead of on Value
+// (and embedding flag in Value) means that we can write
+// the very clear v.mustBe(Bool) and have it compile into
+// v.flag.mustBe(Bool), which will only bother to copy the
+// single important word for the receiver.
+func (f flag) mustBe(expected Kind) {
+ // TODO(mvdan): use f.kind() again once mid-stack inlining gets better
+ if Kind(f&flagKindMask) != expected {
+ panic(&ValueError{methodName(), f.kind()})
+ }
+}
+
+// mustBeExported panics if f records that the value was obtained using
+// an unexported field.
+func (f flag) mustBeExported() {
+ if f == 0 || f&flagRO != 0 {
+ f.mustBeExportedSlow()
+ }
+}
+
+func (f flag) mustBeExportedSlow() {
+ if f == 0 {
+ panic(&ValueError{methodNameSkip(), Invalid})
+ }
+ if f&flagRO != 0 {
+ panic("reflect: " + methodNameSkip() + " using value obtained using unexported field")
+ }
+}
+
+// mustBeAssignable panics if f records that the value is not assignable,
+// which is to say that either it was obtained using an unexported field
+// or it is not addressable.
+func (f flag) mustBeAssignable() {
+ if f&flagRO != 0 || f&flagAddr == 0 {
+ f.mustBeAssignableSlow()
+ }
+}
+
+func (f flag) mustBeAssignableSlow() {
+ if f == 0 {
+ panic(&ValueError{methodNameSkip(), Invalid})
+ }
+ // Assignable if addressable and not read-only.
+ if f&flagRO != 0 {
+ panic("reflect: " + methodNameSkip() + " using value obtained using unexported field")
+ }
+ if f&flagAddr == 0 {
+ panic("reflect: " + methodNameSkip() + " using unaddressable value")
+ }
+}
+
+// Addr returns a pointer value representing the address of v.
+// It panics if CanAddr() returns false.
+// Addr is typically used to obtain a pointer to a struct field
+// or slice element in order to call a method that requires a
+// pointer receiver.
+func (v Value) Addr() Value {
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.Addr of unaddressable value")
+ }
+ // Preserve flagRO instead of using v.flag.ro() so that
+ // v.Addr().Elem() is equivalent to v (#32772)
+ fl := v.flag & flagRO
+ return Value{v.typ.ptrTo(), v.ptr, fl | flag(Ptr)}
+}
+
+// Bool returns v's underlying value.
+// It panics if v's kind is not Bool.
+func (v Value) Bool() bool {
+ v.mustBe(Bool)
+ return *(*bool)(v.ptr)
+}
+
+// Bytes returns v's underlying value.
+// It panics if v's underlying value is not a slice of bytes.
+func (v Value) Bytes() []byte {
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Uint8 {
+ panic("reflect.Value.Bytes of non-byte slice")
+ }
+ // Slice is always bigger than a word; assume flagIndir.
+ return *(*[]byte)(v.ptr)
+}
+
+// runes returns v's underlying value.
+// It panics if v's underlying value is not a slice of runes (int32s).
+func (v Value) runes() []rune {
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Int32 {
+ panic("reflect.Value.runes of non-rune slice")
+ }
+ // Slice is always bigger than a word; assume flagIndir.
+ return *(*[]rune)(v.ptr)
+}
+
+// CanAddr reports whether the value's address can be obtained with Addr.
+// Such values are called addressable. A value is addressable if it is
+// an element of a slice, an element of an addressable array,
+// a field of an addressable struct, or the result of dereferencing a pointer.
+// If CanAddr returns false, calling Addr will panic.
+func (v Value) CanAddr() bool {
+ return v.flag&flagAddr != 0
+}
+
+// CanSet reports whether the value of v can be changed.
+// A Value can be changed only if it is addressable and was not
+// obtained by the use of unexported struct fields.
+// If CanSet returns false, calling Set or any type-specific
+// setter (e.g., SetBool, SetInt) will panic.
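+//
+// For example:
+//
+//	x := 2
+//	reflect.ValueOf(x).CanSet()         // false: the Value holds a copy of x
+//	reflect.ValueOf(&x).Elem().CanSet() // true: addressable and exported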
+func (v Value) CanSet() bool {
+ return v.flag&(flagAddr|flagRO) == flagAddr
+}
+
+// Call calls the function v with the input arguments in.
+// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]).
+// Call panics if v's Kind is not Func.
+// It returns the output results as Values.
+// As in Go, each input argument must be assignable to the
+// type of the function's corresponding input parameter.
+// If v is a variadic function, Call creates the variadic slice parameter
+// itself, copying in the corresponding values.
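+//
+// A sketch of typical use from client code:
+//
+//	f := func(a, b int) int { return a + b }
+//	v := reflect.ValueOf(f)
+//	out := v.Call([]reflect.Value{reflect.ValueOf(1), reflect.ValueOf(2)})
+//	sum := out[0].Int() // 3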
+func (v Value) Call(in []Value) []Value {
+ v.mustBe(Func)
+ v.mustBeExported()
+ return v.call("Call", in)
+}
+
+// CallSlice calls the variadic function v with the input arguments in,
+// assigning the slice in[len(in)-1] to v's final variadic argument.
+// For example, if len(in) == 3, v.CallSlice(in) represents the Go call v(in[0], in[1], in[2]...).
+// CallSlice panics if v's Kind is not Func or if v is not variadic.
+// It returns the output results as Values.
+// As in Go, each input argument must be assignable to the
+// type of the function's corresponding input parameter.
+func (v Value) CallSlice(in []Value) []Value {
+ v.mustBe(Func)
+ v.mustBeExported()
+ return v.call("CallSlice", in)
+}
+
+var callGC bool // for testing; see TestCallMethodJump
+
+func (v Value) call(op string, in []Value) []Value {
+ // Get function pointer, type.
+ t := (*funcType)(unsafe.Pointer(v.typ))
+ var (
+ fn unsafe.Pointer
+ rcvr Value
+ rcvrtype *rtype
+ )
+ if v.flag&flagMethod != 0 {
+ rcvr = v
+ rcvrtype, t, fn = methodReceiver(op, v, int(v.flag)>>flagMethodShift)
+ } else if v.flag&flagIndir != 0 {
+ fn = *(*unsafe.Pointer)(v.ptr)
+ } else {
+ fn = v.ptr
+ }
+
+ if fn == nil {
+ panic("reflect.Value.Call: call of nil function")
+ }
+
+ isSlice := op == "CallSlice"
+ n := t.NumIn()
+ if isSlice {
+ if !t.IsVariadic() {
+ panic("reflect: CallSlice of non-variadic function")
+ }
+ if len(in) < n {
+ panic("reflect: CallSlice with too few input arguments")
+ }
+ if len(in) > n {
+ panic("reflect: CallSlice with too many input arguments")
+ }
+ } else {
+ if t.IsVariadic() {
+ n--
+ }
+ if len(in) < n {
+ panic("reflect: Call with too few input arguments")
+ }
+ if !t.IsVariadic() && len(in) > n {
+ panic("reflect: Call with too many input arguments")
+ }
+ }
+ for _, x := range in {
+ if x.Kind() == Invalid {
+ panic("reflect: " + op + " using zero Value argument")
+ }
+ }
+ for i := 0; i < n; i++ {
+ if xt, targ := in[i].Type(), t.In(i); !xt.AssignableTo(targ) {
+ panic("reflect: " + op + " using " + xt.String() + " as type " + targ.String())
+ }
+ }
+ if !isSlice && t.IsVariadic() {
+ // prepare slice for remaining values
+ m := len(in) - n
+ slice := MakeSlice(t.In(n), m, m)
+ elem := t.In(n).Elem()
+ for i := 0; i < m; i++ {
+ x := in[n+i]
+ if xt := x.Type(); !xt.AssignableTo(elem) {
+ panic("reflect: cannot use " + xt.String() + " as type " + elem.String() + " in " + op)
+ }
+ slice.Index(i).Set(x)
+ }
+ origIn := in
+ in = make([]Value, n+1)
+ copy(in[:n], origIn)
+ in[n] = slice
+ }
+
+ nin := len(in)
+ if nin != t.NumIn() {
+ panic("reflect.Value.Call: wrong argument count")
+ }
+ nout := t.NumOut()
+
+ // Compute frame type.
+ frametype, _, retOffset, _, framePool := funcLayout(t, rcvrtype)
+
+ // Allocate a chunk of memory for frame.
+ var args unsafe.Pointer
+ if nout == 0 {
+ args = framePool.Get().(unsafe.Pointer)
+ } else {
+ // Can't use pool if the function has return values.
+ // We will leak pointer to args in ret, so its lifetime is not scoped.
+ args = unsafe_New(frametype)
+ }
+ off := uintptr(0)
+
+ // Copy inputs into args.
+ if rcvrtype != nil {
+ storeRcvr(rcvr, args)
+ off = ptrSize
+ }
+ for i, v := range in {
+ v.mustBeExported()
+ targ := t.In(i).(*rtype)
+ a := uintptr(targ.align)
+ off = (off + a - 1) &^ (a - 1)
+ n := targ.size
+ if n == 0 {
+ // Not safe to compute args+off pointing at 0 bytes,
+ // because that might point beyond the end of the frame,
+ // but we still need to call assignTo to check assignability.
+ v.assignTo("reflect.Value.Call", targ, nil)
+ continue
+ }
+ addr := add(args, off, "n > 0")
+ v = v.assignTo("reflect.Value.Call", targ, addr)
+ if v.flag&flagIndir != 0 {
+ typedmemmove(targ, addr, v.ptr)
+ } else {
+ *(*unsafe.Pointer)(addr) = v.ptr
+ }
+ off += n
+ }
+
+ // Call.
+ call(frametype, fn, args, uint32(frametype.size), uint32(retOffset))
+
+ // For testing; see TestCallMethodJump.
+ if callGC {
+ runtime.GC()
+ }
+
+ var ret []Value
+ if nout == 0 {
+ typedmemclr(frametype, args)
+ framePool.Put(args)
+ } else {
+ // Zero the now unused input area of args,
+ // because the Values returned by this function contain pointers to the args object,
+ // and will thus keep the args object alive indefinitely.
+ typedmemclrpartial(frametype, args, 0, retOffset)
+
+ // Wrap Values around return values in args.
+ ret = make([]Value, nout)
+ off = retOffset
+ for i := 0; i < nout; i++ {
+ tv := t.Out(i)
+ a := uintptr(tv.Align())
+ off = (off + a - 1) &^ (a - 1)
+ if tv.Size() != 0 {
+ fl := flagIndir | flag(tv.Kind())
+ ret[i] = Value{tv.common(), add(args, off, "tv.Size() != 0"), fl}
+ // Note: this does introduce false sharing between results -
+ // if any result is live, they are all live.
+ // (And the space for the args is live as well, but as we've
+ // cleared that space it isn't as big a deal.)
+ } else {
+ // For zero-sized return value, args+off may point to the next object.
+ // In this case, return the zero value instead.
+ ret[i] = Zero(tv)
+ }
+ off += tv.Size()
+ }
+ }
+
+ return ret
+}
+
+// callReflect is the call implementation used by a function
+// returned by MakeFunc. In many ways it is the opposite of the
+// method Value.call above. The method above converts a call using Values
+// into a call of a function with a concrete argument frame, while
+// callReflect converts a call of a function with a concrete argument
+// frame into a call using Values.
+// It is in this file so that it can be next to the call method above.
+// The remainder of the MakeFunc implementation is in makefunc.go.
+//
+// NOTE: This function must be marked as a "wrapper" in the generated code,
+// so that the linker can make it work correctly for panic and recover.
+// The gc compilers know to do that for the name "reflect.callReflect".
+//
+// ctxt is the "closure" generated by MakeFunc.
+// frame is a pointer to the arguments to that closure on the stack.
+// retValid points to a boolean which should be set when the results
+// section of frame is set.
+func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool) {
+ ftyp := ctxt.ftyp
+ f := ctxt.fn
+
+ // Copy argument frame into Values.
+ ptr := frame
+ off := uintptr(0)
+ in := make([]Value, 0, int(ftyp.inCount))
+ for _, typ := range ftyp.in() {
+ off += -off & uintptr(typ.align-1)
+ v := Value{typ, nil, flag(typ.Kind())}
+ if ifaceIndir(typ) {
+ // value cannot be inlined in interface data.
+ // Must make a copy, because f might keep a reference to it,
+ // and we cannot let f keep a reference to the stack frame
+ // after this function returns, not even a read-only reference.
+ v.ptr = unsafe_New(typ)
+ if typ.size > 0 {
+ typedmemmove(typ, v.ptr, add(ptr, off, "typ.size > 0"))
+ }
+ v.flag |= flagIndir
+ } else {
+ v.ptr = *(*unsafe.Pointer)(add(ptr, off, "1-ptr"))
+ }
+ in = append(in, v)
+ off += typ.size
+ }
+
+ // Call underlying function.
+ out := f(in)
+ numOut := ftyp.NumOut()
+ if len(out) != numOut {
+ panic("reflect: wrong return count from function created by MakeFunc")
+ }
+
+ // Copy results back into argument frame.
+ if numOut > 0 {
+ off += -off & (ptrSize - 1)
+ for i, typ := range ftyp.out() {
+ v := out[i]
+ if v.typ == nil {
+ panic("reflect: function created by MakeFunc using " + funcName(f) +
+ " returned zero Value")
+ }
+ if v.flag&flagRO != 0 {
+ panic("reflect: function created by MakeFunc using " + funcName(f) +
+ " returned value obtained from unexported field")
+ }
+ off += -off & uintptr(typ.align-1)
+ if typ.size == 0 {
+ continue
+ }
+ addr := add(ptr, off, "typ.size > 0")
+
+ // Convert v to type typ if v is assignable to a variable
+ // of type t in the language spec.
+ // See issue 28761.
+ if typ.Kind() == Interface {
+ // We must clear the destination before calling assignTo,
+ // in case assignTo writes (with memory barriers) to the
+ // target location used as scratch space. See issue 39541.
+ *(*uintptr)(addr) = 0
+ *(*uintptr)(add(addr, ptrSize, "typ.size == 2*ptrSize")) = 0
+ }
+ v = v.assignTo("reflect.MakeFunc", typ, addr)
+
+ // We are writing to stack. No write barrier.
+ if v.flag&flagIndir != 0 {
+ memmove(addr, v.ptr, typ.size)
+ } else {
+ *(*uintptr)(addr) = uintptr(v.ptr)
+ }
+ off += typ.size
+ }
+ }
+
+ // Announce that the return values are valid.
+ // After this point the runtime can depend on the return values being valid.
+ *retValid = true
+
+ // We have to make sure that the out slice lives at least until
+ // the runtime knows the return values are valid. Otherwise, the
+ // return values might not be scanned by anyone during a GC.
+ // (out would be dead, and the return slots not yet alive.)
+ runtime.KeepAlive(out)
+
+ // runtime.getArgInfo expects to be able to find ctxt on the
+ // stack when it finds our caller, makeFuncStub. Make sure it
+ // doesn't get garbage collected.
+ runtime.KeepAlive(ctxt)
+}
+
+// methodReceiver returns information about the receiver
+// described by v. The Value v may or may not have the
+// flagMethod bit set, so the kind cached in v.flag should
+// not be used.
+// The return value rcvrtype gives the method's actual receiver type.
+// The return value t gives the method type signature (without the receiver).
+// The return value fn is a pointer to the method code.
+func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *rtype, t *funcType, fn unsafe.Pointer) {
+ i := methodIndex
+ if v.typ.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(len(tt.methods)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := &tt.methods[i]
+ if !tt.nameOff(m.name).isExported() {
+ panic("reflect: " + op + " of unexported method")
+ }
+ iface := (*nonEmptyInterface)(v.ptr)
+ if iface.itab == nil {
+ panic("reflect: " + op + " of method on nil interface value")
+ }
+ rcvrtype = iface.itab.typ
+ fn = unsafe.Pointer(&iface.itab.fun[i])
+ t = (*funcType)(unsafe.Pointer(tt.typeOff(m.typ)))
+ } else {
+ rcvrtype = v.typ
+ ms := v.typ.exportedMethods()
+ if uint(i) >= uint(len(ms)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := ms[i]
+ if !v.typ.nameOff(m.name).isExported() {
+ panic("reflect: " + op + " of unexported method")
+ }
+ ifn := v.typ.textOff(m.ifn)
+ fn = unsafe.Pointer(&ifn)
+ t = (*funcType)(unsafe.Pointer(v.typ.typeOff(m.mtyp)))
+ }
+ return
+}
+
+// v is a method receiver. Store at p the word which is used to
+// encode that receiver at the start of the argument list.
+// Reflect uses the "interface" calling convention for
+// methods, which always uses one word to record the receiver.
+func storeRcvr(v Value, p unsafe.Pointer) {
+ t := v.typ
+ if t.Kind() == Interface {
+ // the interface data word becomes the receiver word
+ iface := (*nonEmptyInterface)(v.ptr)
+ *(*unsafe.Pointer)(p) = iface.word
+ } else if v.flag&flagIndir != 0 && !ifaceIndir(t) {
+ *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr)
+ } else {
+ *(*unsafe.Pointer)(p) = v.ptr
+ }
+}
+
+// align returns the result of rounding x up to a multiple of n.
+// n must be a power of two.
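+// For example, align(13, 8) == 16 and align(16, 8) == 16.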
+func align(x, n uintptr) uintptr {
+ return (x + n - 1) &^ (n - 1)
+}
+
+// callMethod is the call implementation used by a function returned
+// by makeMethodValue (used by v.Method(i).Interface()).
+// It is a streamlined version of the usual reflect call: the caller has
+// already laid out the argument frame for us, so we don't have
+// to deal with individual Values for each argument.
+// It is in this file so that it can be next to the two similar functions above.
+// The remainder of the makeMethodValue implementation is in makefunc.go.
+//
+// NOTE: This function must be marked as a "wrapper" in the generated code,
+// so that the linker can make it work correctly for panic and recover.
+// The gc compilers know to do that for the name "reflect.callMethod".
+//
+// ctxt is the "closure" generated by makeMethodValue.
+// frame is a pointer to the arguments to that closure on the stack.
+// retValid points to a boolean which should be set when the results
+// section of frame is set.
+func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) {
+ rcvr := ctxt.rcvr
+ rcvrtype, t, fn := methodReceiver("call", rcvr, ctxt.method)
+ frametype, argSize, retOffset, _, framePool := funcLayout(t, rcvrtype)
+
+ // Make a new frame that is one word bigger so we can store the receiver.
+ // This space is used for both arguments and return values.
+ scratch := framePool.Get().(unsafe.Pointer)
+
+ // Copy in receiver and rest of args.
+ storeRcvr(rcvr, scratch)
+ // Align the first arg. The alignment can't be larger than ptrSize.
+ argOffset := uintptr(ptrSize)
+ if len(t.in()) > 0 {
+ argOffset = align(argOffset, uintptr(t.in()[0].align))
+ }
+ // Avoid constructing out-of-bounds pointers if there are no args.
+ if argSize-argOffset > 0 {
+ typedmemmovepartial(frametype, add(scratch, argOffset, "argSize > argOffset"), frame, argOffset, argSize-argOffset)
+ }
+
+ // Call.
+ // Call copies the arguments from scratch to the stack, calls fn,
+ // and then copies the results back into scratch.
+ call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset))
+
+ // Copy return values.
+ // Ignore any changes to args and just copy return values.
+ // Avoid constructing out-of-bounds pointers if there are no return values.
+ if frametype.size-retOffset > 0 {
+ callerRetOffset := retOffset - argOffset
+ // This copies to the stack. Write barriers are not needed.
+ memmove(add(frame, callerRetOffset, "frametype.size > retOffset"),
+ add(scratch, retOffset, "frametype.size > retOffset"),
+ frametype.size-retOffset)
+ }
+
+ // Tell the runtime it can now depend on the return values
+ // being properly initialized.
+ *retValid = true
+
+ // Clear the scratch space and put it back in the pool.
+ // This must happen after the statement above, so that the return
+ // values will always be scanned by someone.
+ typedmemclr(frametype, scratch)
+ framePool.Put(scratch)
+
+ // See the comment in callReflect.
+ runtime.KeepAlive(ctxt)
+}
+
+// funcName returns the name of f, for use in error messages.
+func funcName(f func([]Value) []Value) string {
+ pc := *(*uintptr)(unsafe.Pointer(&f))
+ rf := runtime.FuncForPC(pc)
+ if rf != nil {
+ return rf.Name()
+ }
+ return "closure"
+}
+
+// Cap returns v's capacity.
+// It panics if v's Kind is not Array, Chan, or Slice.
+func (v Value) Cap() int {
+ k := v.kind()
+ switch k {
+ case Array:
+ return v.typ.Len()
+ case Chan:
+ return chancap(v.pointer())
+ case Slice:
+ // Slice is always bigger than a word; assume flagIndir.
+ return (*unsafeheader.Slice)(v.ptr).Cap
+ }
+ panic(&ValueError{"reflect.Value.Cap", v.kind()})
+}
+
+// Close closes the channel v.
+// It panics if v's Kind is not Chan.
+func (v Value) Close() {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ chanclose(v.pointer())
+}
+
+// Complex returns v's underlying value, as a complex128.
+// It panics if v's Kind is not Complex64 or Complex128.
+func (v Value) Complex() complex128 {
+ k := v.kind()
+ switch k {
+ case Complex64:
+ return complex128(*(*complex64)(v.ptr))
+ case Complex128:
+ return *(*complex128)(v.ptr)
+ }
+ panic(&ValueError{"reflect.Value.Complex", v.kind()})
+}
+
+// Elem returns the value that the interface v contains
+// or that the pointer v points to.
+// It panics if v's Kind is not Interface or Ptr.
+// It returns the zero Value if v is nil.
+func (v Value) Elem() Value {
+ k := v.kind()
+ switch k {
+ case Interface:
+ var eface interface{}
+ if v.typ.NumMethod() == 0 {
+ eface = *(*interface{})(v.ptr)
+ } else {
+ eface = (interface{})(*(*interface {
+ M()
+ })(v.ptr))
+ }
+ x := unpackEface(eface)
+ if x.flag != 0 {
+ x.flag |= v.flag.ro()
+ }
+ return x
+ case Ptr:
+ ptr := v.ptr
+ if v.flag&flagIndir != 0 {
+ ptr = *(*unsafe.Pointer)(ptr)
+ }
+ // The returned value's address is v's value.
+ if ptr == nil {
+ return Value{}
+ }
+ tt := (*ptrType)(unsafe.Pointer(v.typ))
+ typ := tt.elem
+ fl := v.flag&flagRO | flagIndir | flagAddr
+ fl |= flag(typ.Kind())
+ return Value{typ, ptr, fl}
+ }
+ panic(&ValueError{"reflect.Value.Elem", v.kind()})
+}
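+
+// For example, Elem is how a pointer's target is reached and modified
+// through reflection (a minimal usage sketch):
+//
+// x := 3
+// v := reflect.ValueOf(&x).Elem() // addressable Value representing x
+// v.SetInt(7)                     // x is now 7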
+
+// Field returns the i'th field of the struct v.
+// It panics if v's Kind is not Struct or i is out of range.
+func (v Value) Field(i int) Value {
+ if v.kind() != Struct {
+ panic(&ValueError{"reflect.Value.Field", v.kind()})
+ }
+ tt := (*structType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(len(tt.fields)) {
+ panic("reflect: Field index out of range")
+ }
+ field := &tt.fields[i]
+ typ := field.typ
+
+ // Inherit permission bits from v, but clear flagEmbedRO.
+ fl := v.flag&(flagStickyRO|flagIndir|flagAddr) | flag(typ.Kind())
+ // Using an unexported field forces flagRO.
+ if !field.name.isExported() {
+ if field.embedded() {
+ fl |= flagEmbedRO
+ } else {
+ fl |= flagStickyRO
+ }
+ }
+ // Either flagIndir is set and v.ptr points at struct,
+ // or flagIndir is not set and v.ptr is the actual struct data.
+ // In the former case, we want v.ptr + offset.
+ // In the latter case, we must have field.offset = 0,
+ // so v.ptr + field.offset is still the correct address.
+ ptr := add(v.ptr, field.offset(), "same as non-reflect &v.field")
+ return Value{typ, ptr, fl}
+}
+
+// FieldByIndex returns the nested field corresponding to index.
+// It panics if v's Kind is not struct.
+func (v Value) FieldByIndex(index []int) Value {
+ if len(index) == 1 {
+ return v.Field(index[0])
+ }
+ v.mustBe(Struct)
+ for i, x := range index {
+ if i > 0 {
+ if v.Kind() == Ptr && v.typ.Elem().Kind() == Struct {
+ if v.IsNil() {
+ panic("reflect: indirection through nil pointer to embedded struct")
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v
+}
+
+// FieldByName returns the struct field with the given name.
+// It returns the zero Value if no field was found.
+// It panics if v's Kind is not struct.
+func (v Value) FieldByName(name string) Value {
+ v.mustBe(Struct)
+ if f, ok := v.typ.FieldByName(name); ok {
+ return v.FieldByIndex(f.Index)
+ }
+ return Value{}
+}
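+
+// For example (a minimal sketch; T is a hypothetical struct type):
+//
+// type T struct{ A int }
+// v := reflect.ValueOf(T{A: 7})
+// a := v.FieldByName("A") // a.Int() == 7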
+
+// FieldByNameFunc returns the struct field with a name
+// that satisfies the match function.
+// It panics if v's Kind is not struct.
+// It returns the zero Value if no field was found.
+func (v Value) FieldByNameFunc(match func(string) bool) Value {
+ if f, ok := v.typ.FieldByNameFunc(match); ok {
+ return v.FieldByIndex(f.Index)
+ }
+ return Value{}
+}
+
+// Float returns v's underlying value, as a float64.
+// It panics if v's Kind is not Float32 or Float64.
+func (v Value) Float() float64 {
+ k := v.kind()
+ switch k {
+ case Float32:
+ return float64(*(*float32)(v.ptr))
+ case Float64:
+ return *(*float64)(v.ptr)
+ }
+ panic(&ValueError{"reflect.Value.Float", v.kind()})
+}
+
+var uint8Type = TypeOf(uint8(0)).(*rtype)
+
+// Index returns v's i'th element.
+// It panics if v's Kind is not Array, Slice, or String or i is out of range.
+func (v Value) Index(i int) Value {
+ switch v.kind() {
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(tt.len) {
+ panic("reflect: array index out of range")
+ }
+ typ := tt.elem
+ offset := uintptr(i) * typ.size
+
+ // Either flagIndir is set and v.ptr points at array,
+ // or flagIndir is not set and v.ptr is the actual array data.
+ // In the former case, we want v.ptr + offset.
+ // In the latter case, we must be doing Index(0), so offset = 0,
+ // so v.ptr + offset is still the correct address.
+ val := add(v.ptr, offset, "same as &v[i], i < tt.len")
+ fl := v.flag&(flagIndir|flagAddr) | v.flag.ro() | flag(typ.Kind()) // bits same as overall array
+ return Value{typ, val, fl}
+
+ case Slice:
+ // Element flag same as Elem of Ptr.
+ // Addressable, indirect, possibly read-only.
+ s := (*unsafeheader.Slice)(v.ptr)
+ if uint(i) >= uint(s.Len) {
+ panic("reflect: slice index out of range")
+ }
+ tt := (*sliceType)(unsafe.Pointer(v.typ))
+ typ := tt.elem
+ val := arrayAt(s.Data, i, typ.size, "i < s.Len")
+ fl := flagAddr | flagIndir | v.flag.ro() | flag(typ.Kind())
+ return Value{typ, val, fl}
+
+ case String:
+ s := (*unsafeheader.String)(v.ptr)
+ if uint(i) >= uint(s.Len) {
+ panic("reflect: string index out of range")
+ }
+ p := arrayAt(s.Data, i, 1, "i < s.Len")
+ fl := v.flag.ro() | flag(Uint8) | flagIndir
+ return Value{uint8Type, p, fl}
+ }
+ panic(&ValueError{"reflect.Value.Index", v.kind()})
+}
+
+// Int returns v's underlying value, as an int64.
+// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
+func (v Value) Int() int64 {
+ k := v.kind()
+ p := v.ptr
+ switch k {
+ case Int:
+ return int64(*(*int)(p))
+ case Int8:
+ return int64(*(*int8)(p))
+ case Int16:
+ return int64(*(*int16)(p))
+ case Int32:
+ return int64(*(*int32)(p))
+ case Int64:
+ return *(*int64)(p)
+ }
+ panic(&ValueError{"reflect.Value.Int", v.kind()})
+}
+
+// CanInterface reports whether Interface can be used without panicking.
+func (v Value) CanInterface() bool {
+ if v.flag == 0 {
+ panic(&ValueError{"reflect.Value.CanInterface", Invalid})
+ }
+ return v.flag&flagRO == 0
+}
+
+// Interface returns v's current value as an interface{}.
+// It is equivalent to:
+// var i interface{} = (v's underlying value)
+// It panics if the Value was obtained by accessing
+// unexported struct fields.
+func (v Value) Interface() (i interface{}) {
+ return valueInterface(v, true)
+}
+
+func valueInterface(v Value, safe bool) interface{} {
+ if v.flag == 0 {
+ panic(&ValueError{"reflect.Value.Interface", Invalid})
+ }
+ if safe && v.flag&flagRO != 0 {
+ // Do not allow access to unexported values via Interface,
+ // because they might be pointers that should not be
+ // writable or methods or functions that should not be callable.
+ panic("reflect.Value.Interface: cannot return value obtained from unexported field or method")
+ }
+ if v.flag&flagMethod != 0 {
+ v = makeMethodValue("Interface", v)
+ }
+
+ if v.kind() == Interface {
+ // Special case: return the element inside the interface.
+ // Empty interface has one layout, all interfaces with
+ // methods have a second layout.
+ if v.NumMethod() == 0 {
+ return *(*interface{})(v.ptr)
+ }
+ return *(*interface {
+ M()
+ })(v.ptr)
+ }
+
+ // TODO: pass safe to packEface so we don't need to copy if safe==true?
+ return packEface(v)
+}
+
+// InterfaceData returns the interface v's value as a uintptr pair.
+// It panics if v's Kind is not Interface.
+func (v Value) InterfaceData() [2]uintptr {
+ // TODO: deprecate this
+ v.mustBe(Interface)
+ // We treat this as a read operation, so we allow
+ // it even for unexported data, because the caller
+ // has to import "unsafe" to turn it into something
+ // that can be abused.
+ // Interface value is always bigger than a word; assume flagIndir.
+ return *(*[2]uintptr)(v.ptr)
+}
+
+// IsNil reports whether its argument v is nil. The argument must be
+// a chan, func, interface, map, pointer, or slice value; if it is
+// not, IsNil panics. Note that IsNil is not always equivalent to a
+// regular comparison with nil in Go. For example, if v was created
+// by calling ValueOf with an uninitialized interface variable i,
+// i==nil will be true but v.IsNil will panic as v will be the zero
+// Value.
+func (v Value) IsNil() bool {
+ k := v.kind()
+ switch k {
+ case Chan, Func, Map, Ptr, UnsafePointer:
+ if v.flag&flagMethod != 0 {
+ return false
+ }
+ ptr := v.ptr
+ if v.flag&flagIndir != 0 {
+ ptr = *(*unsafe.Pointer)(ptr)
+ }
+ return ptr == nil
+ case Interface, Slice:
+ // Both interface and slice are nil if first word is 0.
+ // Both are always bigger than a word; assume flagIndir.
+ return *(*unsafe.Pointer)(v.ptr) == nil
+ }
+ panic(&ValueError{"reflect.Value.IsNil", v.kind()})
+}
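+
+// For example (a sketch of the caveat above):
+//
+// var i interface{}          // uninitialized interface variable
+// reflect.ValueOf(i).IsNil() // panics: the Value is the zero Value
+//
+// var p *int
+// reflect.ValueOf(p).IsNil() // true: a nil *int stored in an interface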
+
+// IsValid reports whether v represents a value.
+// It returns false if v is the zero Value.
+// If IsValid returns false, all other methods except String panic.
+// Most functions and methods never return an invalid Value.
+// If one does, its documentation states the conditions explicitly.
+func (v Value) IsValid() bool {
+ return v.flag != 0
+}
+
+// IsZero reports whether v is the zero value for its type.
+// It panics if the argument is invalid.
+func (v Value) IsZero() bool {
+ switch v.kind() {
+ case Bool:
+ return !v.Bool()
+ case Int, Int8, Int16, Int32, Int64:
+ return v.Int() == 0
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return v.Uint() == 0
+ case Float32, Float64:
+ return math.Float64bits(v.Float()) == 0
+ case Complex64, Complex128:
+ c := v.Complex()
+ return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
+ case Array:
+ for i := 0; i < v.Len(); i++ {
+ if !v.Index(i).IsZero() {
+ return false
+ }
+ }
+ return true
+ case Chan, Func, Interface, Map, Ptr, Slice, UnsafePointer:
+ return v.IsNil()
+ case String:
+ return v.Len() == 0
+ case Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if !v.Field(i).IsZero() {
+ return false
+ }
+ }
+ return true
+ default:
+ // This should never happen, but will act as a safeguard for
+ // later, as a default value doesn't make sense here.
+ panic(&ValueError{"reflect.Value.IsZero", v.Kind()})
+ }
+}
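+
+// For example:
+//
+// reflect.ValueOf(0).IsZero()            // true
+// reflect.ValueOf("").IsZero()           // true
+// reflect.ValueOf([2]int{0, 1}).IsZero() // false: an element is non-zero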
+
+// Kind returns v's Kind.
+// If v is the zero Value (IsValid returns false), Kind returns Invalid.
+func (v Value) Kind() Kind {
+ return v.kind()
+}
+
+// Len returns v's length.
+// It panics if v's Kind is not Array, Chan, Map, Slice, or String.
+func (v Value) Len() int {
+ k := v.kind()
+ switch k {
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ return int(tt.len)
+ case Chan:
+ return chanlen(v.pointer())
+ case Map:
+ return maplen(v.pointer())
+ case Slice:
+ // Slice is bigger than a word; assume flagIndir.
+ return (*unsafeheader.Slice)(v.ptr).Len
+ case String:
+ // String is bigger than a word; assume flagIndir.
+ return (*unsafeheader.String)(v.ptr).Len
+ }
+ panic(&ValueError{"reflect.Value.Len", v.kind()})
+}
+
+// MapIndex returns the value associated with key in the map v.
+// It panics if v's Kind is not Map.
+// It returns the zero Value if key is not found in the map or if v represents a nil map.
+// As in Go, the key's value must be assignable to the map's key type.
+func (v Value) MapIndex(key Value) Value {
+ v.mustBe(Map)
+ tt := (*mapType)(unsafe.Pointer(v.typ))
+
+ // Do not require key to be exported, so that DeepEqual
+ // and other programs can use all the keys returned by
+ // MapKeys as arguments to MapIndex. If either the map
+ // or the key is unexported, though, the result will be
+ // considered unexported. This is consistent with the
+ // behavior for structs, which allow read but not write
+ // of unexported fields.
+ key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
+
+ var k unsafe.Pointer
+ if key.flag&flagIndir != 0 {
+ k = key.ptr
+ } else {
+ k = unsafe.Pointer(&key.ptr)
+ }
+ e := mapaccess(v.typ, v.pointer(), k)
+ if e == nil {
+ return Value{}
+ }
+ typ := tt.elem
+ fl := (v.flag | key.flag).ro()
+ fl |= flag(typ.Kind())
+ return copyVal(typ, fl, e)
+}
+
+// MapKeys returns a slice containing all the keys present in the map,
+// in unspecified order.
+// It panics if v's Kind is not Map.
+// It returns an empty slice if v represents a nil map.
+func (v Value) MapKeys() []Value {
+ v.mustBe(Map)
+ tt := (*mapType)(unsafe.Pointer(v.typ))
+ keyType := tt.key
+
+ fl := v.flag.ro() | flag(keyType.Kind())
+
+ m := v.pointer()
+ mlen := int(0)
+ if m != nil {
+ mlen = maplen(m)
+ }
+ it := mapiterinit(v.typ, m)
+ a := make([]Value, mlen)
+ var i int
+ for i = 0; i < len(a); i++ {
+ key := mapiterkey(it)
+ if key == nil {
+ // Someone deleted an entry from the map since we
+ // called maplen above. It's a data race, but nothing
+ // we can do about it.
+ break
+ }
+ a[i] = copyVal(keyType, fl, key)
+ mapiternext(it)
+ }
+ return a[:i]
+}
+
+// A MapIter is an iterator for ranging over a map.
+// See Value.MapRange.
+type MapIter struct {
+ m Value
+ it unsafe.Pointer
+}
+
+// Key returns the key of the iterator's current map entry.
+func (it *MapIter) Key() Value {
+ if it.it == nil {
+ panic("MapIter.Key called before Next")
+ }
+ if mapiterkey(it.it) == nil {
+ panic("MapIter.Key called on exhausted iterator")
+ }
+
+ t := (*mapType)(unsafe.Pointer(it.m.typ))
+ ktype := t.key
+ return copyVal(ktype, it.m.flag.ro()|flag(ktype.Kind()), mapiterkey(it.it))
+}
+
+// Value returns the value of the iterator's current map entry.
+func (it *MapIter) Value() Value {
+ if it.it == nil {
+ panic("MapIter.Value called before Next")
+ }
+ if mapiterkey(it.it) == nil {
+ panic("MapIter.Value called on exhausted iterator")
+ }
+
+ t := (*mapType)(unsafe.Pointer(it.m.typ))
+ vtype := t.elem
+ return copyVal(vtype, it.m.flag.ro()|flag(vtype.Kind()), mapiterelem(it.it))
+}
+
+// Next advances the map iterator and reports whether there is another
+// entry. It returns false when the iterator is exhausted; subsequent
+// calls to Key, Value, or Next will panic.
+func (it *MapIter) Next() bool {
+ if it.it == nil {
+ it.it = mapiterinit(it.m.typ, it.m.pointer())
+ } else {
+ if mapiterkey(it.it) == nil {
+ panic("MapIter.Next called on exhausted iterator")
+ }
+ mapiternext(it.it)
+ }
+ return mapiterkey(it.it) != nil
+}
+
+// MapRange returns a range iterator for a map.
+// It panics if v's Kind is not Map.
+//
+// Call Next to advance the iterator, and Key/Value to access each entry.
+// Next returns false when the iterator is exhausted.
+// MapRange follows the same iteration semantics as a range statement.
+//
+// Example:
+//
+// iter := reflect.ValueOf(m).MapRange()
+// for iter.Next() {
+// k := iter.Key()
+// v := iter.Value()
+// ...
+// }
+//
+func (v Value) MapRange() *MapIter {
+ v.mustBe(Map)
+ return &MapIter{m: v}
+}
+
+// copyVal returns a Value containing the map key or value at ptr,
+// allocating a new variable as needed.
+func copyVal(typ *rtype, fl flag, ptr unsafe.Pointer) Value {
+ if ifaceIndir(typ) {
+ // Copy result so future changes to the map
+ // won't change the underlying value.
+ c := unsafe_New(typ)
+ typedmemmove(typ, c, ptr)
+ return Value{typ, c, fl | flagIndir}
+ }
+ return Value{typ, *(*unsafe.Pointer)(ptr), fl}
+}
+
+// Method returns a function value corresponding to v's i'th method.
+// The arguments to a Call on the returned function should not include
+// a receiver; the returned function will always use v as the receiver.
+// Method panics if i is out of range or if v is a nil interface value.
+func (v Value) Method(i int) Value {
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.Method", Invalid})
+ }
+ if v.flag&flagMethod != 0 || uint(i) >= uint(v.typ.NumMethod()) {
+ panic("reflect: Method index out of range")
+ }
+ if v.typ.Kind() == Interface && v.IsNil() {
+ panic("reflect: Method on nil interface value")
+ }
+ fl := v.flag.ro() | (v.flag & flagIndir)
+ fl |= flag(Func)
+ fl |= flag(i)<<flagMethodShift | flagMethod
+ return Value{v.typ, v.ptr, fl}
+}
+
+// NumMethod returns the number of exported methods in the value's method set.
+func (v Value) NumMethod() int {
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.NumMethod", Invalid})
+ }
+ if v.flag&flagMethod != 0 {
+ return 0
+ }
+ return v.typ.NumMethod()
+}
+
+// MethodByName returns a function value corresponding to the method
+// of v with the given name.
+// The arguments to a Call on the returned function should not include
+// a receiver; the returned function will always use v as the receiver.
+// It returns the zero Value if no method was found.
+func (v Value) MethodByName(name string) Value {
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.MethodByName", Invalid})
+ }
+ if v.flag&flagMethod != 0 {
+ return Value{}
+ }
+ m, ok := v.typ.MethodByName(name)
+ if !ok {
+ return Value{}
+ }
+ return v.Method(m.Index)
+}
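+
+// For example, calling a method dynamically by name
+// (a minimal sketch using bytes.Buffer):
+//
+// var b bytes.Buffer
+// m := reflect.ValueOf(&b).MethodByName("WriteString")
+// m.Call([]reflect.Value{reflect.ValueOf("hi")}) // b now contains "hi"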
+
+// NumField returns the number of fields in the struct v.
+// It panics if v's Kind is not Struct.
+func (v Value) NumField() int {
+ v.mustBe(Struct)
+ tt := (*structType)(unsafe.Pointer(v.typ))
+ return len(tt.fields)
+}
+
+// OverflowComplex reports whether the complex128 x cannot be represented by v's type.
+// It panics if v's Kind is not Complex64 or Complex128.
+func (v Value) OverflowComplex(x complex128) bool {
+ k := v.kind()
+ switch k {
+ case Complex64:
+ return overflowFloat32(real(x)) || overflowFloat32(imag(x))
+ case Complex128:
+ return false
+ }
+ panic(&ValueError{"reflect.Value.OverflowComplex", v.kind()})
+}
+
+// OverflowFloat reports whether the float64 x cannot be represented by v's type.
+// It panics if v's Kind is not Float32 or Float64.
+func (v Value) OverflowFloat(x float64) bool {
+ k := v.kind()
+ switch k {
+ case Float32:
+ return overflowFloat32(x)
+ case Float64:
+ return false
+ }
+ panic(&ValueError{"reflect.Value.OverflowFloat", v.kind()})
+}
+
+func overflowFloat32(x float64) bool {
+ if x < 0 {
+ x = -x
+ }
+ return math.MaxFloat32 < x && x <= math.MaxFloat64
+}
+
+// OverflowInt reports whether the int64 x cannot be represented by v's type.
+// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
+func (v Value) OverflowInt(x int64) bool {
+ k := v.kind()
+ switch k {
+ case Int, Int8, Int16, Int32, Int64:
+ bitSize := v.typ.size * 8
+ trunc := (x << (64 - bitSize)) >> (64 - bitSize)
+ return x != trunc
+ }
+ panic(&ValueError{"reflect.Value.OverflowInt", v.kind()})
+}
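+
+// For example:
+//
+// v := reflect.ValueOf(int8(0))
+// v.OverflowInt(127) // false: 127 fits in an int8
+// v.OverflowInt(128) // true: 128 does not fit in an int8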
+
+// OverflowUint reports whether the uint64 x cannot be represented by v's type.
+// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
+func (v Value) OverflowUint(x uint64) bool {
+ k := v.kind()
+ switch k {
+ case Uint, Uintptr, Uint8, Uint16, Uint32, Uint64:
+ bitSize := v.typ.size * 8
+ trunc := (x << (64 - bitSize)) >> (64 - bitSize)
+ return x != trunc
+ }
+ panic(&ValueError{"reflect.Value.OverflowUint", v.kind()})
+}
+
+//go:nocheckptr
+// This prevents inlining Value.Pointer when -d=checkptr is enabled,
+// which ensures cmd/compile can recognize unsafe.Pointer(v.Pointer())
+// and make an exception.
+
+// Pointer returns v's value as a uintptr.
+// It returns uintptr instead of unsafe.Pointer so that
+// code using reflect cannot obtain unsafe.Pointers
+// without importing the unsafe package explicitly.
+// It panics if v's Kind is not Chan, Func, Map, Ptr, Slice, or UnsafePointer.
+//
+// If v's Kind is Func, the returned pointer is an underlying
+// code pointer, but not necessarily enough to identify a
+// single function uniquely. The only guarantee is that the
+// result is zero if and only if v is a nil func Value.
+//
+// If v's Kind is Slice, the returned pointer is to the first
+// element of the slice. If the slice is nil the returned value
+// is 0. If the slice is empty but non-nil the return value is non-zero.
+func (v Value) Pointer() uintptr {
+ // TODO: deprecate
+ k := v.kind()
+ switch k {
+ case Ptr:
+ if v.typ.ptrdata == 0 {
+ // Handle pointers to go:notinheap types directly,
+ // so we never materialize such pointers as an
+ // unsafe.Pointer. (Such pointers are always indirect.)
+ // See issue 42076.
+ return *(*uintptr)(v.ptr)
+ }
+ fallthrough
+ case Chan, Map, UnsafePointer:
+ return uintptr(v.pointer())
+ case Func:
+ if v.flag&flagMethod != 0 {
+ // As the doc comment says, the returned pointer is an
+ // underlying code pointer but not necessarily enough to
+ // identify a single function uniquely. All method expressions
+ // created via reflect have the same underlying code pointer,
+ // so their Pointers are equal. The function used here must
+ // match the one used in makeMethodValue.
+ f := methodValueCall
+ return **(**uintptr)(unsafe.Pointer(&f))
+ }
+ p := v.pointer()
+ // Non-nil func value points at data block.
+ // First word of data block is actual code.
+ if p != nil {
+ p = *(*unsafe.Pointer)(p)
+ }
+ return uintptr(p)
+
+ case Slice:
+ return (*SliceHeader)(v.ptr).Data
+ }
+ panic(&ValueError{"reflect.Value.Pointer", v.kind()})
+}
+
+// Recv receives and returns a value from the channel v.
+// It panics if v's Kind is not Chan.
+// The receive blocks until a value is ready.
+// The boolean value ok is true if the value x corresponds to a send
+// on the channel, false if it is a zero value received because the channel is closed.
+func (v Value) Recv() (x Value, ok bool) {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ return v.recv(false)
+}
+
+// internal recv, possibly non-blocking (nb).
+// v is known to be a channel.
+func (v Value) recv(nb bool) (val Value, ok bool) {
+ tt := (*chanType)(unsafe.Pointer(v.typ))
+ if ChanDir(tt.dir)&RecvDir == 0 {
+ panic("reflect: recv on send-only channel")
+ }
+ t := tt.elem
+ val = Value{t, nil, flag(t.Kind())}
+ var p unsafe.Pointer
+ if ifaceIndir(t) {
+ p = unsafe_New(t)
+ val.ptr = p
+ val.flag |= flagIndir
+ } else {
+ p = unsafe.Pointer(&val.ptr)
+ }
+ selected, ok := chanrecv(v.pointer(), nb, p)
+ if !selected {
+ val = Value{}
+ }
+ return
+}
+
+// Send sends x on the channel v.
+// It panics if v's Kind is not Chan or if x's type is not the same type as v's element type.
+// As in Go, x's value must be assignable to the channel's element type.
+func (v Value) Send(x Value) {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ v.send(x, false)
+}
+
+// internal send, possibly non-blocking.
+// v is known to be a channel.
+func (v Value) send(x Value, nb bool) (selected bool) {
+ tt := (*chanType)(unsafe.Pointer(v.typ))
+ if ChanDir(tt.dir)&SendDir == 0 {
+ panic("reflect: send on recv-only channel")
+ }
+ x.mustBeExported()
+ x = x.assignTo("reflect.Value.Send", tt.elem, nil)
+ var p unsafe.Pointer
+ if x.flag&flagIndir != 0 {
+ p = x.ptr
+ } else {
+ p = unsafe.Pointer(&x.ptr)
+ }
+ return chansend(v.pointer(), p, nb)
+}
+
+// Set assigns x to the value v.
+// It panics if CanSet returns false.
+// As in Go, x's value must be assignable to v's type.
+func (v Value) Set(x Value) {
+ v.mustBeAssignable()
+ x.mustBeExported() // do not let unexported x leak
+ var target unsafe.Pointer
+ if v.kind() == Interface {
+ target = v.ptr
+ }
+ x = x.assignTo("reflect.Set", v.typ, target)
+ if x.flag&flagIndir != 0 {
+ if x.ptr == unsafe.Pointer(&zeroVal[0]) {
+ typedmemclr(v.typ, v.ptr)
+ } else {
+ typedmemmove(v.typ, v.ptr, x.ptr)
+ }
+ } else {
+ *(*unsafe.Pointer)(v.ptr) = x.ptr
+ }
+}
+
+// SetBool sets v's underlying value.
+// It panics if v's Kind is not Bool or if CanSet() is false.
+func (v Value) SetBool(x bool) {
+ v.mustBeAssignable()
+ v.mustBe(Bool)
+ *(*bool)(v.ptr) = x
+}
+
+// SetBytes sets v's underlying value.
+// It panics if v's underlying value is not a slice of bytes.
+func (v Value) SetBytes(x []byte) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Uint8 {
+ panic("reflect.Value.SetBytes of non-byte slice")
+ }
+ *(*[]byte)(v.ptr) = x
+}
+
+// setRunes sets v's underlying value.
+// It panics if v's underlying value is not a slice of runes (int32s).
+func (v Value) setRunes(x []rune) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Int32 {
+ panic("reflect.Value.setRunes of non-rune slice")
+ }
+ *(*[]rune)(v.ptr) = x
+}
+
+// SetComplex sets v's underlying value to x.
+// It panics if v's Kind is not Complex64 or Complex128, or if CanSet() is false.
+func (v Value) SetComplex(x complex128) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetComplex", v.kind()})
+ case Complex64:
+ *(*complex64)(v.ptr) = complex64(x)
+ case Complex128:
+ *(*complex128)(v.ptr) = x
+ }
+}
+
+// SetFloat sets v's underlying value to x.
+// It panics if v's Kind is not Float32 or Float64, or if CanSet() is false.
+func (v Value) SetFloat(x float64) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetFloat", v.kind()})
+ case Float32:
+ *(*float32)(v.ptr) = float32(x)
+ case Float64:
+ *(*float64)(v.ptr) = x
+ }
+}
+
+// SetInt sets v's underlying value to x.
+// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64, or if CanSet() is false.
+func (v Value) SetInt(x int64) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetInt", v.kind()})
+ case Int:
+ *(*int)(v.ptr) = int(x)
+ case Int8:
+ *(*int8)(v.ptr) = int8(x)
+ case Int16:
+ *(*int16)(v.ptr) = int16(x)
+ case Int32:
+ *(*int32)(v.ptr) = int32(x)
+ case Int64:
+ *(*int64)(v.ptr) = x
+ }
+}
+
+// SetLen sets v's length to n.
+// It panics if v's Kind is not Slice or if n is negative or
+// greater than the capacity of the slice.
+func (v Value) SetLen(n int) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ s := (*unsafeheader.Slice)(v.ptr)
+ if uint(n) > uint(s.Cap) {
+ panic("reflect: slice length out of range in SetLen")
+ }
+ s.Len = n
+}
+
+// SetCap sets v's capacity to n.
+// It panics if v's Kind is not Slice or if n is smaller than the length or
+// greater than the capacity of the slice.
+func (v Value) SetCap(n int) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ s := (*unsafeheader.Slice)(v.ptr)
+ if n < s.Len || n > s.Cap {
+ panic("reflect: slice capacity out of range in SetCap")
+ }
+ s.Cap = n
+}
+
+// SetMapIndex sets the element associated with key in the map v to elem.
+// It panics if v's Kind is not Map.
+// If elem is the zero Value, SetMapIndex deletes the key from the map.
+// Otherwise if v holds a nil map, SetMapIndex will panic.
+// As in Go, key's value must be assignable to the map's key type,
+// and elem's value must be assignable to the map's elem type.
+func (v Value) SetMapIndex(key, elem Value) {
+ v.mustBe(Map)
+ v.mustBeExported()
+ key.mustBeExported()
+ tt := (*mapType)(unsafe.Pointer(v.typ))
+ key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
+ var k unsafe.Pointer
+ if key.flag&flagIndir != 0 {
+ k = key.ptr
+ } else {
+ k = unsafe.Pointer(&key.ptr)
+ }
+ if elem.typ == nil {
+ mapdelete(v.typ, v.pointer(), k)
+ return
+ }
+ elem.mustBeExported()
+ elem = elem.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
+ var e unsafe.Pointer
+ if elem.flag&flagIndir != 0 {
+ e = elem.ptr
+ } else {
+ e = unsafe.Pointer(&elem.ptr)
+ }
+ mapassign(v.typ, v.pointer(), k, e)
+}
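+
+// For example (a minimal sketch):
+//
+// m := map[string]int{"a": 1}
+// v := reflect.ValueOf(m)
+// v.SetMapIndex(reflect.ValueOf("b"), reflect.ValueOf(2)) // m["b"] = 2
+// v.SetMapIndex(reflect.ValueOf("a"), reflect.Value{})    // delete(m, "a")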
+
+// SetUint sets v's underlying value to x.
+// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64, or if CanSet() is false.
+func (v Value) SetUint(x uint64) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetUint", v.kind()})
+ case Uint:
+ *(*uint)(v.ptr) = uint(x)
+ case Uint8:
+ *(*uint8)(v.ptr) = uint8(x)
+ case Uint16:
+ *(*uint16)(v.ptr) = uint16(x)
+ case Uint32:
+ *(*uint32)(v.ptr) = uint32(x)
+ case Uint64:
+ *(*uint64)(v.ptr) = x
+ case Uintptr:
+ *(*uintptr)(v.ptr) = uintptr(x)
+ }
+}
+
+// SetPointer sets the unsafe.Pointer value v to x.
+// It panics if v's Kind is not UnsafePointer.
+func (v Value) SetPointer(x unsafe.Pointer) {
+ v.mustBeAssignable()
+ v.mustBe(UnsafePointer)
+ *(*unsafe.Pointer)(v.ptr) = x
+}
+
+// SetString sets v's underlying value to x.
+// It panics if v's Kind is not String or if CanSet() is false.
+func (v Value) SetString(x string) {
+ v.mustBeAssignable()
+ v.mustBe(String)
+ *(*string)(v.ptr) = x
+}
+
+// Slice returns v[i:j].
+// It panics if v's Kind is not Array, Slice or String, or if v is an unaddressable array,
+// or if the indexes are out of bounds.
+func (v Value) Slice(i, j int) Value {
+ var (
+ cap int
+ typ *sliceType
+ base unsafe.Pointer
+ )
+ switch kind := v.kind(); kind {
+ default:
+ panic(&ValueError{"reflect.Value.Slice", v.kind()})
+
+ case Array:
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.Slice: slice of unaddressable array")
+ }
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ cap = int(tt.len)
+ typ = (*sliceType)(unsafe.Pointer(tt.slice))
+ base = v.ptr
+
+ case Slice:
+ typ = (*sliceType)(unsafe.Pointer(v.typ))
+ s := (*unsafeheader.Slice)(v.ptr)
+ base = s.Data
+ cap = s.Cap
+
+ case String:
+ s := (*unsafeheader.String)(v.ptr)
+ if i < 0 || j < i || j > s.Len {
+ panic("reflect.Value.Slice: string slice index out of bounds")
+ }
+ var t unsafeheader.String
+ if i < s.Len {
+ t = unsafeheader.String{Data: arrayAt(s.Data, i, 1, "i < s.Len"), Len: j - i}
+ }
+ return Value{v.typ, unsafe.Pointer(&t), v.flag}
+ }
+
+ if i < 0 || j < i || j > cap {
+ panic("reflect.Value.Slice: slice index out of bounds")
+ }
+
+ // Declare slice so that gc can see the base pointer in it.
+ var x []unsafe.Pointer
+
+ // Reinterpret as *unsafeheader.Slice to edit.
+ s := (*unsafeheader.Slice)(unsafe.Pointer(&x))
+ s.Len = j - i
+ s.Cap = cap - i
+ if cap-i > 0 {
+ s.Data = arrayAt(base, i, typ.elem.Size(), "i < cap")
+ } else {
+ // do not advance pointer, to avoid pointing beyond end of slice
+ s.Data = base
+ }
+
+ fl := v.flag.ro() | flagIndir | flag(Slice)
+ return Value{typ.common(), unsafe.Pointer(&x), fl}
+}
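+
+// For example:
+//
+// s := []int{1, 2, 3, 4}
+// v := reflect.ValueOf(s).Slice(1, 3) // v holds []int{2, 3}, sharing s's backing array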
+
+// Slice3 is the 3-index form of the slice operation: it returns v[i:j:k].
+// It panics if v's Kind is not Array or Slice, or if v is an unaddressable array,
+// or if the indexes are out of bounds.
+func (v Value) Slice3(i, j, k int) Value {
+ var (
+ cap int
+ typ *sliceType
+ base unsafe.Pointer
+ )
+ switch kind := v.kind(); kind {
+ default:
+ panic(&ValueError{"reflect.Value.Slice3", v.kind()})
+
+ case Array:
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.Slice3: slice of unaddressable array")
+ }
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ cap = int(tt.len)
+ typ = (*sliceType)(unsafe.Pointer(tt.slice))
+ base = v.ptr
+
+ case Slice:
+ typ = (*sliceType)(unsafe.Pointer(v.typ))
+ s := (*unsafeheader.Slice)(v.ptr)
+ base = s.Data
+ cap = s.Cap
+ }
+
+ if i < 0 || j < i || k < j || k > cap {
+ panic("reflect.Value.Slice3: slice index out of bounds")
+ }
+
+ // Declare slice so that the garbage collector
+ // can see the base pointer in it.
+ var x []unsafe.Pointer
+
+ // Reinterpret as *unsafeheader.Slice to edit.
+ s := (*unsafeheader.Slice)(unsafe.Pointer(&x))
+ s.Len = j - i
+ s.Cap = k - i
+ if k-i > 0 {
+ s.Data = arrayAt(base, i, typ.elem.Size(), "i < k <= cap")
+ } else {
+ // do not advance pointer, to avoid pointing beyond end of slice
+ s.Data = base
+ }
+
+ fl := v.flag.ro() | flagIndir | flag(Slice)
+ return Value{typ.common(), unsafe.Pointer(&x), fl}
+}
+
+// String returns the string v's underlying value, as a string.
+// String is a special case because of Go's String method convention.
+// Unlike the other getters, it does not panic if v's Kind is not String.
+// Instead, it returns a string of the form "<T value>" where T is v's type.
+// The fmt package treats Values specially. It does not call their String
+// method implicitly but instead prints the concrete values they hold.
+func (v Value) String() string {
+ switch k := v.kind(); k {
+ case Invalid:
+ return "<invalid Value>"
+ case String:
+ return *(*string)(v.ptr)
+ }
+ // If you call String on a reflect.Value of another type, it's better to
+ // print something than to panic. Useful in debugging.
+ return "<" + v.Type().String() + " Value>"
+}
+
+// TryRecv attempts to receive a value from the channel v but will not block.
+// It panics if v's Kind is not Chan.
+// If the receive delivers a value, x is the transferred value and ok is true.
+// If the receive cannot finish without blocking, x is the zero Value and ok is false.
+// If the channel is closed, x is the zero value for the channel's element type and ok is false.
+func (v Value) TryRecv() (x Value, ok bool) {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ return v.recv(true)
+}
+
+// TrySend attempts to send x on the channel v but will not block.
+// It panics if v's Kind is not Chan.
+// It reports whether the value was sent.
+// As in Go, x's value must be assignable to the channel's element type.
+func (v Value) TrySend(x Value) bool {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ return v.send(x, true)
+}
+
+// Type returns v's type.
+func (v Value) Type() Type {
+ f := v.flag
+ if f == 0 {
+ panic(&ValueError{"reflect.Value.Type", Invalid})
+ }
+ if f&flagMethod == 0 {
+ // Easy case
+ return v.typ
+ }
+
+ // Method value.
+ // v.typ describes the receiver, not the method type.
+ i := int(v.flag) >> flagMethodShift
+ if v.typ.Kind() == Interface {
+ // Method on interface.
+ tt := (*interfaceType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(len(tt.methods)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := &tt.methods[i]
+ return v.typ.typeOff(m.typ)
+ }
+ // Method on concrete type.
+ ms := v.typ.exportedMethods()
+ if uint(i) >= uint(len(ms)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := ms[i]
+ return v.typ.typeOff(m.mtyp)
+}
+
+// Uint returns v's underlying value, as a uint64.
+// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
+func (v Value) Uint() uint64 {
+ k := v.kind()
+ p := v.ptr
+ switch k {
+ case Uint:
+ return uint64(*(*uint)(p))
+ case Uint8:
+ return uint64(*(*uint8)(p))
+ case Uint16:
+ return uint64(*(*uint16)(p))
+ case Uint32:
+ return uint64(*(*uint32)(p))
+ case Uint64:
+ return *(*uint64)(p)
+ case Uintptr:
+ return uint64(*(*uintptr)(p))
+ }
+ panic(&ValueError{"reflect.Value.Uint", v.kind()})
+}
+
+//go:nocheckptr
+// This prevents inlining Value.UnsafeAddr when -d=checkptr is enabled,
+// which ensures cmd/compile can recognize unsafe.Pointer(v.UnsafeAddr())
+// and make an exception.
+
+// UnsafeAddr returns a pointer to v's data.
+// It is for advanced clients that also import the "unsafe" package.
+// It panics if v is not addressable.
+func (v Value) UnsafeAddr() uintptr {
+ // TODO: deprecate
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.UnsafeAddr", Invalid})
+ }
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.UnsafeAddr of unaddressable value")
+ }
+ return uintptr(v.ptr)
+}
+
+// StringHeader is the runtime representation of a string.
+// It cannot be used safely or portably and its representation may
+// change in a later release.
+// Moreover, the Data field is not sufficient to guarantee the data
+// it references will not be garbage collected, so programs must keep
+// a separate, correctly typed pointer to the underlying data.
+type StringHeader struct {
+ Data uintptr
+ Len int
+}
+
+// SliceHeader is the runtime representation of a slice.
+// It cannot be used safely or portably and its representation may
+// change in a later release.
+// Moreover, the Data field is not sufficient to guarantee the data
+// it references will not be garbage collected, so programs must keep
+// a separate, correctly typed pointer to the underlying data.
+type SliceHeader struct {
+ Data uintptr
+ Len int
+ Cap int
+}
+
+func typesMustMatch(what string, t1, t2 Type) {
+ if t1 != t2 {
+ panic(what + ": " + t1.String() + " != " + t2.String())
+ }
+}
+
+// arrayAt returns the i-th element of p,
+// an array whose elements are eltSize bytes wide.
+// The array pointed at by p must have at least i+1 elements:
+// it is invalid (but impossible to check here) to pass i >= len,
+// because then the result will point outside the array.
+// whySafe must explain why i < len. (Passing "i < len" is fine;
+// the benefit is to surface this assumption at the call site.)
+func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
+ return add(p, uintptr(i)*eltSize, "i < len")
+}
+
+// grow grows the slice s so that it can hold extra additional values, allocating
+// more capacity if needed. It also returns the old and new slice lengths.
+func grow(s Value, extra int) (Value, int, int) {
+ i0 := s.Len()
+ i1 := i0 + extra
+ if i1 < i0 {
+ panic("reflect.Append: slice overflow")
+ }
+ m := s.Cap()
+ if i1 <= m {
+ return s.Slice(0, i1), i0, i1
+ }
+ if m == 0 {
+ m = extra
+ } else {
+ for m < i1 {
+ if i0 < 1024 {
+ m += m
+ } else {
+ m += m / 4
+ }
+ }
+ }
+ t := MakeSlice(s.Type(), i1, m)
+ Copy(t, s)
+ return t, i0, i1
+}
+
+// Append appends the values x to a slice s and returns the resulting slice.
+// As in Go, each x's value must be assignable to the slice's element type.
+func Append(s Value, x ...Value) Value {
+ s.mustBe(Slice)
+ s, i0, i1 := grow(s, len(x))
+ for i, j := i0, 0; i < i1; i, j = i+1, j+1 {
+ s.Index(i).Set(x[j])
+ }
+ return s
+}
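+
+// For example:
+//
+// s := reflect.ValueOf([]int{1, 2})
+// s = reflect.Append(s, reflect.ValueOf(3)) // s now holds []int{1, 2, 3}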
+
+// AppendSlice appends a slice t to a slice s and returns the resulting slice.
+// The slices s and t must have the same element type.
+func AppendSlice(s, t Value) Value {
+ s.mustBe(Slice)
+ t.mustBe(Slice)
+ typesMustMatch("reflect.AppendSlice", s.Type().Elem(), t.Type().Elem())
+ s, i0, i1 := grow(s, t.Len())
+ Copy(s.Slice(i0, i1), t)
+ return s
+}
+
+// Copy copies the contents of src into dst until either
+// dst has been filled or src has been exhausted.
+// It returns the number of elements copied.
+// Dst and src each must have kind Slice or Array, and
+// dst and src must have the same element type.
+//
+// As a special case, src can have kind String if the element type of dst is kind Uint8.
+func Copy(dst, src Value) int {
+ dk := dst.kind()
+ if dk != Array && dk != Slice {
+ panic(&ValueError{"reflect.Copy", dk})
+ }
+ if dk == Array {
+ dst.mustBeAssignable()
+ }
+ dst.mustBeExported()
+
+ sk := src.kind()
+ var stringCopy bool
+ if sk != Array && sk != Slice {
+ stringCopy = sk == String && dst.typ.Elem().Kind() == Uint8
+ if !stringCopy {
+ panic(&ValueError{"reflect.Copy", sk})
+ }
+ }
+ src.mustBeExported()
+
+ de := dst.typ.Elem()
+ if !stringCopy {
+ se := src.typ.Elem()
+ typesMustMatch("reflect.Copy", de, se)
+ }
+
+ var ds, ss unsafeheader.Slice
+ if dk == Array {
+ ds.Data = dst.ptr
+ ds.Len = dst.Len()
+ ds.Cap = ds.Len
+ } else {
+ ds = *(*unsafeheader.Slice)(dst.ptr)
+ }
+ if sk == Array {
+ ss.Data = src.ptr
+ ss.Len = src.Len()
+ ss.Cap = ss.Len
+ } else if sk == Slice {
+ ss = *(*unsafeheader.Slice)(src.ptr)
+ } else {
+ sh := *(*unsafeheader.String)(src.ptr)
+ ss.Data = sh.Data
+ ss.Len = sh.Len
+ ss.Cap = sh.Len
+ }
+
+ return typedslicecopy(de.common(), ds, ss)
+}
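+
+// For example, including the string special case:
+//
+// dst := reflect.ValueOf(make([]byte, 2))
+// n := reflect.Copy(dst, reflect.ValueOf("hello")) // n == 2; dst holds []byte("he")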
+
+// A runtimeSelect is a single case passed to rselect.
+// This must match ../runtime/select.go:/runtimeSelect
+type runtimeSelect struct {
+ dir SelectDir // SelectSend, SelectRecv or SelectDefault
+ typ *rtype // channel type
+ ch unsafe.Pointer // channel
+ val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
+}
+
+// rselect runs a select. It returns the index of the chosen case.
+// If the case was a receive, val is filled in with the received value.
+// The conventional OK bool indicates whether the receive corresponds
+// to a sent value.
+//go:noescape
+func rselect([]runtimeSelect) (chosen int, recvOK bool)
+
+// A SelectDir describes the communication direction of a select case.
+type SelectDir int
+
+// NOTE: These values must match ../runtime/select.go:/selectDir.
+
+const (
+ _ SelectDir = iota
+ SelectSend // case Chan <- Send
+ SelectRecv // case <-Chan:
+ SelectDefault // default
+)
+
+// A SelectCase describes a single case in a select operation.
+// The kind of case depends on Dir, the communication direction.
+//
+// If Dir is SelectDefault, the case represents a default case.
+// Chan and Send must be zero Values.
+//
+// If Dir is SelectSend, the case represents a send operation.
+// Normally Chan's underlying value must be a channel, and Send's underlying value must be
+// assignable to the channel's element type. As a special case, if Chan is a zero Value,
+// then the case is ignored, and the field Send will also be ignored and may be either zero
+// or non-zero.
+//
+// If Dir is SelectRecv, the case represents a receive operation.
+// Normally Chan's underlying value must be a channel and Send must be a zero Value.
+// If Chan is a zero Value, then the case is ignored, but Send must still be a zero Value.
+// When a receive operation is selected, the received Value is returned by Select.
+//
+type SelectCase struct {
+ Dir SelectDir // direction of case
+ Chan Value // channel to use (for send or receive)
+ Send Value // value to send (for send)
+}
+
+// Select executes a select operation described by the list of cases.
+// Like the Go select statement, it blocks until at least one of the cases
+// can proceed, makes a uniform pseudo-random choice,
+// and then executes that case. It returns the index of the chosen case
+// and, if that case was a receive operation, the value received and a
+// boolean indicating whether the value corresponds to a send on the channel
+// (as opposed to a zero value received because the channel is closed).
+// Select supports a maximum of 65536 cases.
+func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
+ if len(cases) > 65536 {
+ panic("reflect.Select: too many cases (max 65536)")
+ }
+ // NOTE: Do not trust that caller is not modifying cases data underfoot.
+ // The range is safe because the caller cannot modify our copy of the len
+ // and each iteration makes its own copy of the value c.
+ var runcases []runtimeSelect
+ if len(cases) > 4 {
+ // Slice is heap allocated due to its runtime-dependent capacity.
+ runcases = make([]runtimeSelect, len(cases))
+ } else {
+ // Slice can be stack allocated due to constant capacity.
+ runcases = make([]runtimeSelect, len(cases), 4)
+ }
+
+ haveDefault := false
+ for i, c := range cases {
+ rc := &runcases[i]
+ rc.dir = c.Dir
+ switch c.Dir {
+ default:
+ panic("reflect.Select: invalid Dir")
+
+ case SelectDefault: // default
+ if haveDefault {
+ panic("reflect.Select: multiple default cases")
+ }
+ haveDefault = true
+ if c.Chan.IsValid() {
+ panic("reflect.Select: default case has Chan value")
+ }
+ if c.Send.IsValid() {
+ panic("reflect.Select: default case has Send value")
+ }
+
+ case SelectSend:
+ ch := c.Chan
+ if !ch.IsValid() {
+ break
+ }
+ ch.mustBe(Chan)
+ ch.mustBeExported()
+ tt := (*chanType)(unsafe.Pointer(ch.typ))
+ if ChanDir(tt.dir)&SendDir == 0 {
+ panic("reflect.Select: SendDir case using recv-only channel")
+ }
+ rc.ch = ch.pointer()
+ rc.typ = &tt.rtype
+ v := c.Send
+ if !v.IsValid() {
+ panic("reflect.Select: SendDir case missing Send value")
+ }
+ v.mustBeExported()
+ v = v.assignTo("reflect.Select", tt.elem, nil)
+ if v.flag&flagIndir != 0 {
+ rc.val = v.ptr
+ } else {
+ rc.val = unsafe.Pointer(&v.ptr)
+ }
+
+ case SelectRecv:
+ if c.Send.IsValid() {
+ panic("reflect.Select: RecvDir case has Send value")
+ }
+ ch := c.Chan
+ if !ch.IsValid() {
+ break
+ }
+ ch.mustBe(Chan)
+ ch.mustBeExported()
+ tt := (*chanType)(unsafe.Pointer(ch.typ))
+ if ChanDir(tt.dir)&RecvDir == 0 {
+ panic("reflect.Select: RecvDir case using send-only channel")
+ }
+ rc.ch = ch.pointer()
+ rc.typ = &tt.rtype
+ rc.val = unsafe_New(tt.elem)
+ }
+ }
+
+ chosen, recvOK = rselect(runcases)
+ if runcases[chosen].dir == SelectRecv {
+ tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ))
+ t := tt.elem
+ p := runcases[chosen].val
+ fl := flag(t.Kind())
+ if ifaceIndir(t) {
+ recv = Value{t, p, fl | flagIndir}
+ } else {
+ recv = Value{t, *(*unsafe.Pointer)(p), fl}
+ }
+ }
+ return chosen, recv, recvOK
+}
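+
+// For example, a dynamic receive over an arbitrary set of channels
+// (a minimal sketch; chans is assumed to be a []chan int):
+//
+// cases := make([]reflect.SelectCase, len(chans))
+// for i, ch := range chans {
+//     cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)}
+// }
+// chosen, recv, ok := reflect.Select(cases)
+// // chosen indexes chans; recv holds the received value when ok is true.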
+
+/*
+ * constructors
+ */
+
+// implemented in package runtime
+func unsafe_New(*rtype) unsafe.Pointer
+func unsafe_NewArray(*rtype, int) unsafe.Pointer
+
+// MakeSlice creates a new zero-initialized slice value
+// for the specified slice type, length, and capacity.
+func MakeSlice(typ Type, len, cap int) Value {
+ if typ.Kind() != Slice {
+ panic("reflect.MakeSlice of non-slice type")
+ }
+ if len < 0 {
+ panic("reflect.MakeSlice: negative len")
+ }
+ if cap < 0 {
+ panic("reflect.MakeSlice: negative cap")
+ }
+ if len > cap {
+ panic("reflect.MakeSlice: len > cap")
+ }
+
+ s := unsafeheader.Slice{Data: unsafe_NewArray(typ.Elem().(*rtype), cap), Len: len, Cap: cap}
+ return Value{typ.(*rtype), unsafe.Pointer(&s), flagIndir | flag(Slice)}
+}
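+
+// For example:
+//
+// t := reflect.TypeOf([]int(nil))
+// v := reflect.MakeSlice(t, 3, 10) // v.Len() == 3, v.Cap() == 10, elements zeroed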
+
+// MakeChan creates a new channel with the specified type and buffer size.
+func MakeChan(typ Type, buffer int) Value {
+ if typ.Kind() != Chan {
+ panic("reflect.MakeChan of non-chan type")
+ }
+ if buffer < 0 {
+ panic("reflect.MakeChan: negative buffer size")
+ }
+ if typ.ChanDir() != BothDir {
+ panic("reflect.MakeChan: unidirectional channel type")
+ }
+ t := typ.(*rtype)
+ ch := makechan(t, buffer)
+ return Value{t, ch, flag(Chan)}
+}
+
+// MakeMap creates a new map with the specified type.
+func MakeMap(typ Type) Value {
+ return MakeMapWithSize(typ, 0)
+}
+
+// MakeMapWithSize creates a new map with the specified type
+// and initial space for approximately n elements.
+func MakeMapWithSize(typ Type, n int) Value {
+ if typ.Kind() != Map {
+ panic("reflect.MakeMapWithSize of non-map type")
+ }
+ t := typ.(*rtype)
+ m := makemap(t, n)
+ return Value{t, m, flag(Map)}
+}
+
+// Indirect returns the value that v points to.
+// If v is a nil pointer, Indirect returns a zero Value.
+// If v is not a pointer, Indirect returns v.
+func Indirect(v Value) Value {
+ if v.Kind() != Ptr {
+ return v
+ }
+ return v.Elem()
+}
+
+// ValueOf returns a new Value initialized to the concrete value
+// stored in the interface i. ValueOf(nil) returns the zero Value.
+func ValueOf(i interface{}) Value {
+ if i == nil {
+ return Value{}
+ }
+
+ // TODO: Maybe allow contents of a Value to live on the stack.
+ // For now we make the contents always escape to the heap. It
+ // makes life easier in a few places (see chanrecv/mapassign
+ // comment below).
+ escapes(i)
+
+ return unpackEface(i)
+}
+
+// Zero returns a Value representing the zero value for the specified type.
+// The result is different from the zero value of the Value struct,
+// which represents no value at all.
+// For example, Zero(TypeOf(42)) returns a Value with Kind Int and value 0.
+// The returned value is neither addressable nor settable.
+func Zero(typ Type) Value {
+ if typ == nil {
+ panic("reflect: Zero(nil)")
+ }
+ t := typ.(*rtype)
+ fl := flag(t.Kind())
+ if ifaceIndir(t) {
+ var p unsafe.Pointer
+ if t.size <= maxZero {
+ p = unsafe.Pointer(&zeroVal[0])
+ } else {
+ p = unsafe_New(t)
+ }
+ return Value{t, p, fl | flagIndir}
+ }
+ return Value{t, nil, fl}
+}
+
+// must match declarations in runtime/map.go.
+const maxZero = 1024
+
+//go:linkname zeroVal runtime.zeroVal
+var zeroVal [maxZero]byte
+
+// New returns a Value representing a pointer to a new zero value
+// for the specified type. That is, the returned Value's Type is PtrTo(typ).
+func New(typ Type) Value {
+ if typ == nil {
+ panic("reflect: New(nil)")
+ }
+ t := typ.(*rtype)
+ ptr := unsafe_New(t)
+ fl := flag(Ptr)
+ return Value{t.ptrTo(), ptr, fl}
+}
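+
+// For example:
+//
+// p := reflect.New(reflect.TypeOf(0)) // p's Type is *int
+// p.Elem().SetInt(42)
+// n := p.Interface().(*int) // *n == 42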
+
+// NewAt returns a Value representing a pointer to a value of the
+// specified type, using p as that pointer.
+func NewAt(typ Type, p unsafe.Pointer) Value {
+ fl := flag(Ptr)
+ t := typ.(*rtype)
+ return Value{t.ptrTo(), p, fl}
+}
+
+// assignTo returns a value v that can be assigned directly to typ.
+// It panics if v is not assignable to typ.
+// For a conversion to an interface type, target is a suggested scratch space to use.
+// target must be initialized memory (or nil).
+func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value {
+ if v.flag&flagMethod != 0 {
+ v = makeMethodValue(context, v)
+ }
+
+ switch {
+ case directlyAssignable(dst, v.typ):
+ // Overwrite type so that they match.
+ // Same memory layout, so no harm done.
+ fl := v.flag&(flagAddr|flagIndir) | v.flag.ro()
+ fl |= flag(dst.Kind())
+ return Value{dst, v.ptr, fl}
+
+ case implements(dst, v.typ):
+ if target == nil {
+ target = unsafe_New(dst)
+ }
+ if v.Kind() == Interface && v.IsNil() {
+ // A nil ReadWriter passed to nil Reader is OK,
+ // but using ifaceE2I below will panic.
+ // Avoid the panic by returning a nil dst (e.g., Reader) explicitly.
+ return Value{dst, nil, flag(Interface)}
+ }
+ x := valueInterface(v, false)
+ if dst.NumMethod() == 0 {
+ *(*interface{})(target) = x
+ } else {
+ ifaceE2I(dst, x, target)
+ }
+ return Value{dst, target, flagIndir | flag(Interface)}
+ }
+
+ // Failed.
+ panic(context + ": value of type " + v.typ.String() + " is not assignable to type " + dst.String())
+}
+
+// Convert returns the value v converted to type t.
+// If the usual Go conversion rules do not allow conversion
+// of the value v to type t, Convert panics.
+func (v Value) Convert(t Type) Value {
+ if v.flag&flagMethod != 0 {
+ v = makeMethodValue("Convert", v)
+ }
+ op := convertOp(t.common(), v.typ)
+ if op == nil {
+ panic("reflect.Value.Convert: value of type " + v.typ.String() + " cannot be converted to type " + t.String())
+ }
+ return op(v, t)
+}
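+
+// For example:
+//
+// v := reflect.ValueOf(65)
+// s := v.Convert(reflect.TypeOf("")) // s holds "A": the int-to-string conversion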
+
+// convertOp returns the function to convert a value of type src
+// to a value of type dst. If the conversion is illegal, convertOp returns nil.
+func convertOp(dst, src *rtype) func(Value, Type) Value {
+ switch src.Kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtInt
+ case Float32, Float64:
+ return cvtIntFloat
+ case String:
+ return cvtIntString
+ }
+
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtUint
+ case Float32, Float64:
+ return cvtUintFloat
+ case String:
+ return cvtUintString
+ }
+
+ case Float32, Float64:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ return cvtFloatInt
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtFloatUint
+ case Float32, Float64:
+ return cvtFloat
+ }
+
+ case Complex64, Complex128:
+ switch dst.Kind() {
+ case Complex64, Complex128:
+ return cvtComplex
+ }
+
+ case String:
+ if dst.Kind() == Slice && dst.Elem().PkgPath() == "" {
+ switch dst.Elem().Kind() {
+ case Uint8:
+ return cvtStringBytes
+ case Int32:
+ return cvtStringRunes
+ }
+ }
+
+ case Slice:
+ if dst.Kind() == String && src.Elem().PkgPath() == "" {
+ switch src.Elem().Kind() {
+ case Uint8:
+ return cvtBytesString
+ case Int32:
+ return cvtRunesString
+ }
+ }
+
+ case Chan:
+ if dst.Kind() == Chan && specialChannelAssignability(dst, src) {
+ return cvtDirect
+ }
+ }
+
+ // dst and src have same underlying type.
+ if haveIdenticalUnderlyingType(dst, src, false) {
+ return cvtDirect
+ }
+
+ // dst and src are non-defined pointer types with same underlying base type.
+ if dst.Kind() == Ptr && dst.Name() == "" &&
+ src.Kind() == Ptr && src.Name() == "" &&
+ haveIdenticalUnderlyingType(dst.Elem().common(), src.Elem().common(), false) {
+ return cvtDirect
+ }
+
+ if implements(dst, src) {
+ if src.Kind() == Interface {
+ return cvtI2I
+ }
+ return cvtT2I
+ }
+
+ return nil
+}
+
+// makeInt returns a Value of type t equal to bits (possibly truncated),
+// where t is a signed or unsigned int type.
+func makeInt(f flag, bits uint64, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ switch typ.size {
+ case 1:
+ *(*uint8)(ptr) = uint8(bits)
+ case 2:
+ *(*uint16)(ptr) = uint16(bits)
+ case 4:
+ *(*uint32)(ptr) = uint32(bits)
+ case 8:
+ *(*uint64)(ptr) = bits
+ }
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+// makeFloat returns a Value of type t equal to v (possibly truncated to float32),
+// where t is a float32 or float64 type.
+func makeFloat(f flag, v float64, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ switch typ.size {
+ case 4:
+ *(*float32)(ptr) = float32(v)
+ case 8:
+ *(*float64)(ptr) = v
+ }
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+// makeFloat32 returns a Value of type t equal to v, where t is a float32 type.
+func makeFloat32(f flag, v float32, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ *(*float32)(ptr) = v
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+// makeComplex returns a Value of type t equal to v (possibly truncated to complex64),
+// where t is a complex64 or complex128 type.
+func makeComplex(f flag, v complex128, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ switch typ.size {
+ case 8:
+ *(*complex64)(ptr) = complex64(v)
+ case 16:
+ *(*complex128)(ptr) = v
+ }
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+func makeString(f flag, v string, t Type) Value {
+ ret := New(t).Elem()
+ ret.SetString(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+func makeBytes(f flag, v []byte, t Type) Value {
+ ret := New(t).Elem()
+ ret.SetBytes(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+func makeRunes(f flag, v []rune, t Type) Value {
+ ret := New(t).Elem()
+ ret.setRunes(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+// These conversion functions are returned by convertOp
+// for classes of conversions. For example, the first function, cvtInt,
+// takes any value v of signed int type and returns the value converted
+// to type t, where t is any signed or unsigned int type.
+
+// convertOp: intXX -> [u]intXX
+func cvtInt(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), uint64(v.Int()), t)
+}
+
+// convertOp: uintXX -> [u]intXX
+func cvtUint(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), v.Uint(), t)
+}
+
+// convertOp: floatXX -> intXX
+func cvtFloatInt(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), uint64(int64(v.Float())), t)
+}
+
+// convertOp: floatXX -> uintXX
+func cvtFloatUint(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), uint64(v.Float()), t)
+}
+
+// convertOp: intXX -> floatXX
+func cvtIntFloat(v Value, t Type) Value {
+ return makeFloat(v.flag.ro(), float64(v.Int()), t)
+}
+
+// convertOp: uintXX -> floatXX
+func cvtUintFloat(v Value, t Type) Value {
+ return makeFloat(v.flag.ro(), float64(v.Uint()), t)
+}
+
+// convertOp: floatXX -> floatXX
+func cvtFloat(v Value, t Type) Value {
+ if v.Type().Kind() == Float32 && t.Kind() == Float32 {
+ // Don't do any conversion if both types have underlying type float32.
+ // This avoids converting to float64 and back, which will
+ // convert a signaling NaN to a quiet NaN. See issue 36400.
+ return makeFloat32(v.flag.ro(), *(*float32)(v.ptr), t)
+ }
+ return makeFloat(v.flag.ro(), v.Float(), t)
+}
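+
+// For illustration (a sketch): the float32 fast path above means a signaling
+// NaN's bit pattern survives a float32-to-float32 conversion:
+//
+//	snan := math.Float32frombits(0x7f800001)
+//	v := reflect.ValueOf(snan).Convert(reflect.TypeOf(float32(0)))
+//	// math.Float32bits(v.Interface().(float32)) == 0x7f800001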
+
+// convertOp: complexXX -> complexXX
+func cvtComplex(v Value, t Type) Value {
+ return makeComplex(v.flag.ro(), v.Complex(), t)
+}
+
+// convertOp: intXX -> string
+func cvtIntString(v Value, t Type) Value {
+ s := "\uFFFD"
+ if x := v.Int(); int64(rune(x)) == x {
+ s = string(rune(x))
+ }
+ return makeString(v.flag.ro(), s, t)
+}
+
+// convertOp: uintXX -> string
+func cvtUintString(v Value, t Type) Value {
+ s := "\uFFFD"
+ if x := v.Uint(); uint64(rune(x)) == x {
+ s = string(rune(x))
+ }
+ return makeString(v.flag.ro(), s, t)
+}
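+
+// For illustration (a sketch): integer-to-string conversion produces the
+// UTF-8 encoding of the code point, or the replacement character when the
+// value does not round-trip through rune:
+//
+//	reflect.ValueOf(int64(65)).Convert(reflect.TypeOf(""))      // "A"
+//	reflect.ValueOf(int64(1 << 40)).Convert(reflect.TypeOf("")) // "\uFFFD"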
+
+// convertOp: []byte -> string
+func cvtBytesString(v Value, t Type) Value {
+ return makeString(v.flag.ro(), string(v.Bytes()), t)
+}
+
+// convertOp: string -> []byte
+func cvtStringBytes(v Value, t Type) Value {
+ return makeBytes(v.flag.ro(), []byte(v.String()), t)
+}
+
+// convertOp: []rune -> string
+func cvtRunesString(v Value, t Type) Value {
+ return makeString(v.flag.ro(), string(v.runes()), t)
+}
+
+// convertOp: string -> []rune
+func cvtStringRunes(v Value, t Type) Value {
+ return makeRunes(v.flag.ro(), []rune(v.String()), t)
+}
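+
+// For illustration (a sketch) of the four string conversions above:
+//
+//	s := reflect.ValueOf("héllo")
+//	s.Convert(reflect.TypeOf([]byte(nil))).Len() // 6: é is two UTF-8 bytes
+//	s.Convert(reflect.TypeOf([]rune(nil))).Len() // 5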
+
+// convertOp: direct copy
+func cvtDirect(v Value, typ Type) Value {
+ f := v.flag
+ t := typ.common()
+ ptr := v.ptr
+ if f&flagAddr != 0 {
+ // indirect, mutable word - make a copy
+ c := unsafe_New(t)
+ typedmemmove(t, c, ptr)
+ ptr = c
+ f &^= flagAddr
+ }
+ return Value{t, ptr, v.flag.ro() | f} // Note: v.flag.ro()|f need not equal f; ro() can promote flagEmbedRO to flagStickyRO.
+}
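+
+// For illustration (a sketch): cvtDirect handles conversions that change only
+// the type, not the representation, such as between a defined slice type and
+// its underlying type:
+//
+//	type MyBytes []byte
+//	v := reflect.ValueOf([]byte("hi")).Convert(reflect.TypeOf(MyBytes(nil)))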
+
+// convertOp: concrete -> interface
+func cvtT2I(v Value, typ Type) Value {
+ target := unsafe_New(typ.common())
+ x := valueInterface(v, false)
+ if typ.NumMethod() == 0 {
+ *(*interface{})(target) = x
+ } else {
+ ifaceE2I(typ.(*rtype), x, target)
+ }
+ return Value{typ.common(), target, v.flag.ro() | flagIndir | flag(Interface)}
+}
+
+// convertOp: interface -> interface
+func cvtI2I(v Value, typ Type) Value {
+ if v.IsNil() {
+ ret := Zero(typ)
+ ret.flag |= v.flag.ro()
+ return ret
+ }
+ return cvtT2I(v.Elem(), typ)
+}
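+
+// For illustration (a sketch): both interface conversions are reached through
+// Value.Convert when the destination is an interface type the source
+// implements:
+//
+//	emptyIface := reflect.TypeOf((*interface{})(nil)).Elem()
+//	reflect.ValueOf(1).Convert(emptyIface) // concrete source: cvtT2I
+//	var r io.Reader = strings.NewReader("x")
+//	reflect.ValueOf(&r).Elem().Convert(emptyIface) // interface source: cvtI2I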
+
+// implemented in ../runtime
+func chancap(ch unsafe.Pointer) int
+func chanclose(ch unsafe.Pointer)
+func chanlen(ch unsafe.Pointer) int
+
+// Note: some of the noescape annotations below are technically a lie,
+// but safe in the context of this package. Functions like chansend
+// and mapassign don't escape the referent, but may escape anything
+// the referent points to (they do shallow copies of the referent).
+// It is safe in this package because the referent may only point
+// to something a Value may point to, and that is always in the heap
+// (due to the escapes() call in ValueOf).
+
+//go:noescape
+func chanrecv(ch unsafe.Pointer, nb bool, val unsafe.Pointer) (selected, received bool)
+
+//go:noescape
+func chansend(ch unsafe.Pointer, val unsafe.Pointer, nb bool) bool
+
+func makechan(typ *rtype, size int) (ch unsafe.Pointer)
+func makemap(t *rtype, cap int) (m unsafe.Pointer)
+
+//go:noescape
+func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
+
+//go:noescape
+func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer)
+
+//go:noescape
+func mapdelete(t *rtype, m unsafe.Pointer, key unsafe.Pointer)
+
+// m escapes into the return value, but the caller of mapiterinit
+// doesn't let the return value escape.
+//go:noescape
+func mapiterinit(t *rtype, m unsafe.Pointer) unsafe.Pointer
+
+//go:noescape
+func mapiterkey(it unsafe.Pointer) (key unsafe.Pointer)
+
+//go:noescape
+func mapiterelem(it unsafe.Pointer) (elem unsafe.Pointer)
+
+//go:noescape
+func mapiternext(it unsafe.Pointer)
+
+//go:noescape
+func maplen(m unsafe.Pointer) int
+
+// call calls fn with a copy of the n argument bytes pointed at by arg.
+// After fn returns, call copies n-retoffset result bytes back into
+// arg+retoffset before returning. If copying result bytes back, the
+// caller must pass the argument frame type as argtype, so that call
+// can execute appropriate write barriers during the copy.
+//
+//go:linkname call runtime.reflectcall
+func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32)
+
+func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer)
+
+// memmove copies size bytes to dst from src. No write barriers are used.
+//go:noescape
+func memmove(dst, src unsafe.Pointer, size uintptr)
+
+// typedmemmove copies a value of type t to dst from src.
+//go:noescape
+func typedmemmove(t *rtype, dst, src unsafe.Pointer)
+
+// typedmemmovepartial is like typedmemmove but assumes that
+// dst and src point off bytes into the value and copies only size bytes.
+//go:noescape
+func typedmemmovepartial(t *rtype, dst, src unsafe.Pointer, off, size uintptr)
+
+// typedmemclr zeros the value at ptr of type t.
+//go:noescape
+func typedmemclr(t *rtype, ptr unsafe.Pointer)
+
+// typedmemclrpartial is like typedmemclr but assumes that
+// dst points off bytes into the value and clears only size bytes.
+//go:noescape
+func typedmemclrpartial(t *rtype, ptr unsafe.Pointer, off, size uintptr)
+
+// typedslicecopy copies a slice of elemType values from src to dst,
+// returning the number of elements copied.
+//go:noescape
+func typedslicecopy(elemType *rtype, dst, src unsafeheader.Slice) int
+
+//go:noescape
+func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr
+
+// Dummy annotation marking that the value x escapes,
+// for use in cases where the reflect code is so clever that
+// the compiler cannot follow.
+func escapes(x interface{}) {
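+ // dummy.b is never set, so the assignment below is dead at run time,
+ // but escape analysis cannot prove that and must treat x as escaping.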
+ if dummy.b {
+ dummy.x = x
+ }
+}
+
+var dummy struct {
+ b bool
+ x interface{}
+}