author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:19:13 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:19:13 +0000
commit ccd992355df7192993c666236047820244914598 (patch)
tree   f00fea65147227b7743083c6148396f74cd66935 /src/runtime/type.go
parent Initial commit. (diff)
Adding upstream version 1.21.8. (upstream/1.21.8)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/runtime/type.go')
-rw-r--r--    src/runtime/type.go    469
1 file changed, 469 insertions, 0 deletions
diff --git a/src/runtime/type.go b/src/runtime/type.go
new file mode 100644
index 0000000..1150a53
--- /dev/null
+++ b/src/runtime/type.go
@@ -0,0 +1,469 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Runtime type representation.
+
+package runtime
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+type nameOff = abi.NameOff
+type typeOff = abi.TypeOff
+type textOff = abi.TextOff
+
+type _type = abi.Type
+
+// rtype is a wrapper that allows us to define additional methods.
+type rtype struct {
+ *abi.Type // embedding is okay here (unlike reflect) because none of this is public
+}
+
+func (t rtype) string() string {
+ s := t.nameOff(t.Str).Name()
+ if t.TFlag&abi.TFlagExtraStar != 0 {
+ return s[1:]
+ }
+ return s
+}
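+
+// For illustration: for most types T the binary also contains *T, so the
+// compiler stores the string "*T" once, sets TFlagExtraStar on T, and
+// string() recovers T's own name by dropping the leading '*'. A hypothetical
+// sketch (someType is not a real variable here):
+//
+//	t := toRType(someType) // Str data is "*[]int", TFlagExtraStar is set
+//	println(t.string())    // prints "[]int"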
+
+func (t rtype) uncommon() *uncommontype {
+ return t.Uncommon()
+}
+
+func (t rtype) name() string {
+ if t.TFlag&abi.TFlagNamed == 0 {
+ return ""
+ }
+ s := t.string()
+ i := len(s) - 1
+ sqBrackets := 0
+ for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
+ switch s[i] {
+ case ']':
+ sqBrackets++
+ case '[':
+ sqBrackets--
+ }
+ i--
+ }
+ return s[i+1:]
+}
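+
+// For example (hypothetical identifiers): for an instantiated generic type
+// whose string is "example.com/m.Pair[example.com/m.K]", the bracket counter
+// above skips the '.' inside "[...]", so name() returns
+// "Pair[example.com/m.K]" rather than splitting at a dot that belongs to the
+// type argument.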
+
+// pkgpath returns the path of the package where t was defined, if
+// available. This is not the same as the reflect package's PkgPath
+// method, in that it returns the package path for struct and interface
+// types, not just named types.
+func (t rtype) pkgpath() string {
+ if u := t.uncommon(); u != nil {
+ return t.nameOff(u.PkgPath).Name()
+ }
+ switch t.Kind_ & kindMask {
+ case kindStruct:
+ st := (*structtype)(unsafe.Pointer(t.Type))
+ return st.PkgPath.Name()
+ case kindInterface:
+ it := (*interfacetype)(unsafe.Pointer(t.Type))
+ return it.PkgPath.Name()
+ }
+ return ""
+}
+
+// reflectOffs holds type offsets defined at run time by the reflect package.
+//
+// When a type is defined at run time, its *rtype data lives on the heap.
+// The heap may use a wide range of addresses, many of which
+// may not be representable as a 32-bit offset. Moreover the GC may
+// one day start moving heap memory, in which case there is no stable
+// offset that can be defined.
+//
+// To provide stable offsets, we pin *rtype objects in a global map
+// and treat the offset as an identifier. We use negative offsets that
+// do not overlap with any compile-time module offsets.
+//
+// Entries are created by reflect.addReflectOff.
+var reflectOffs struct {
+ lock mutex
+ next int32
+ m map[int32]unsafe.Pointer
+ minv map[unsafe.Pointer]int32
+}
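+
+// A minimal sketch of how an entry is registered (the real helper is
+// reflect.addReflectOff, implemented elsewhere in the runtime; this is for
+// illustration only and omits the lazy map initialization):
+//
+//	reflectOffsLock()
+//	id, found := reflectOffs.minv[ptr]
+//	if !found {
+//		id = reflectOffs.next
+//		reflectOffs.next-- // negative IDs cannot collide with module offsets
+//		reflectOffs.m[id] = ptr
+//		reflectOffs.minv[ptr] = id
+//	}
+//	reflectOffsUnlock()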
+
+func reflectOffsLock() {
+ lock(&reflectOffs.lock)
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&reflectOffs.lock))
+ }
+}
+
+func reflectOffsUnlock() {
+ if raceenabled {
+ racerelease(unsafe.Pointer(&reflectOffs.lock))
+ }
+ unlock(&reflectOffs.lock)
+}
+
+func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
+ if off == 0 {
+ return name{}
+ }
+ base := uintptr(ptrInModule)
+ for md := &firstmoduledata; md != nil; md = md.next {
+ if base >= md.types && base < md.etypes {
+ res := md.types + uintptr(off)
+ if res > md.etypes {
+ println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
+ throw("runtime: name offset out of range")
+ }
+ return name{Bytes: (*byte)(unsafe.Pointer(res))}
+ }
+ }
+
+ // No module found. See if it is a run-time name.
+ reflectOffsLock()
+ res, found := reflectOffs.m[int32(off)]
+ reflectOffsUnlock()
+ if !found {
+ println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
+ for next := &firstmoduledata; next != nil; next = next.next {
+ println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+ }
+ throw("runtime: name offset base pointer out of range")
+ }
+ return name{Bytes: (*byte)(res)}
+}
+
+func (t rtype) nameOff(off nameOff) name {
+ return resolveNameOff(unsafe.Pointer(t.Type), off)
+}
+
+func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
+ if off == 0 || off == -1 {
+ // -1 is the sentinel value for unreachable code.
+ // See cmd/link/internal/ld/data.go:relocsym.
+ return nil
+ }
+ base := uintptr(ptrInModule)
+ var md *moduledata
+ for next := &firstmoduledata; next != nil; next = next.next {
+ if base >= next.types && base < next.etypes {
+ md = next
+ break
+ }
+ }
+ if md == nil {
+ reflectOffsLock()
+ res := reflectOffs.m[int32(off)]
+ reflectOffsUnlock()
+ if res == nil {
+ println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
+ for next := &firstmoduledata; next != nil; next = next.next {
+ println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+ }
+ throw("runtime: type offset base pointer out of range")
+ }
+ return (*_type)(res)
+ }
+ if t := md.typemap[off]; t != nil {
+ return t
+ }
+ res := md.types + uintptr(off)
+ if res > md.etypes {
+ println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
+ throw("runtime: type offset out of range")
+ }
+ return (*_type)(unsafe.Pointer(res))
+}
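+
+// For illustration: a compile-time typeOff is a byte offset into the owning
+// module's types section, so the common case above resolves to
+// md.types + off. The md.typemap lookup is only populated when more than one
+// module is present (buildmode=shared or plugins); it redirects an offset in
+// a later module to the canonical *_type chosen by typelinksinit.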
+
+func (t rtype) typeOff(off typeOff) *_type {
+ return resolveTypeOff(unsafe.Pointer(t.Type), off)
+}
+
+func (t rtype) textOff(off textOff) unsafe.Pointer {
+ if off == -1 {
+ // -1 is the sentinel value for unreachable code.
+ // See cmd/link/internal/ld/data.go:relocsym.
+ return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
+ }
+ base := uintptr(unsafe.Pointer(t.Type))
+ var md *moduledata
+ for next := &firstmoduledata; next != nil; next = next.next {
+ if base >= next.types && base < next.etypes {
+ md = next
+ break
+ }
+ }
+ if md == nil {
+ reflectOffsLock()
+ res := reflectOffs.m[int32(off)]
+ reflectOffsUnlock()
+ if res == nil {
+ println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
+ for next := &firstmoduledata; next != nil; next = next.next {
+ println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+ }
+ throw("runtime: text offset base pointer out of range")
+ }
+ return res
+ }
+ res := md.textAddr(uint32(off))
+ return unsafe.Pointer(res)
+}
+
+type uncommontype = abi.UncommonType
+
+type interfacetype = abi.InterfaceType
+
+type maptype = abi.MapType
+
+type arraytype = abi.ArrayType
+
+type chantype = abi.ChanType
+
+type slicetype = abi.SliceType
+
+type functype = abi.FuncType
+
+type ptrtype = abi.PtrType
+
+type name = abi.Name
+
+type structtype = abi.StructType
+
+func pkgPath(n name) string {
+ if n.Bytes == nil || *n.Data(0)&(1<<2) == 0 {
+ return ""
+ }
+ i, l := n.ReadVarint(1)
+ off := 1 + i + l
+ if *n.Data(0)&(1<<1) != 0 {
+ i2, l2 := n.ReadVarint(off)
+ off += i2 + l2
+ }
+ var nameOff nameOff
+ copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.Data(off)))[:])
+ pkgPathName := resolveNameOff(unsafe.Pointer(n.Bytes), nameOff)
+ return pkgPathName.Name()
+}
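+
+// For reference, the abi.Name encoding walked above is (as documented in
+// internal/abi):
+//
+//	byte 0         flag bits: 1<<0 exported, 1<<1 tag data follows,
+//	               1<<2 pkgPath nameOff follows, 1<<3 embedded field
+//	varint, bytes  length of the name, then the name itself
+//	varint, bytes  length of the tag, then the tag (only if the tag bit is set)
+//	4 bytes        a nameOff for the package path (only if the pkgPath bit is set)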
+
+// typelinksinit scans the types from extra modules and builds the
+// moduledata typemap used to de-duplicate type pointers.
+func typelinksinit() {
+ if firstmoduledata.next == nil {
+ return
+ }
+ typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))
+
+ modules := activeModules()
+ prev := modules[0]
+ for _, md := range modules[1:] {
+ // Collect types from the previous module into typehash.
+ collect:
+ for _, tl := range prev.typelinks {
+ var t *_type
+ if prev.typemap == nil {
+ t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
+ } else {
+ t = prev.typemap[typeOff(tl)]
+ }
+ // Add to typehash if not seen before.
+ tlist := typehash[t.Hash]
+ for _, tcur := range tlist {
+ if tcur == t {
+ continue collect
+ }
+ }
+ typehash[t.Hash] = append(tlist, t)
+ }
+
+ if md.typemap == nil {
+ // If any of this module's typelinks match a type from a
+ // prior module, prefer that prior type by adding the offset
+ // to this module's typemap.
+ tm := make(map[typeOff]*_type, len(md.typelinks))
+ pinnedTypemaps = append(pinnedTypemaps, tm)
+ md.typemap = tm
+ for _, tl := range md.typelinks {
+ t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
+ for _, candidate := range typehash[t.Hash] {
+ seen := map[_typePair]struct{}{}
+ if typesEqual(t, candidate, seen) {
+ t = candidate
+ break
+ }
+ }
+ md.typemap[typeOff(tl)] = t
+ }
+ }
+
+ prev = md
+ }
+}
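+
+// For illustration: typehash above is keyed by the type's Hash field, which
+// can collide, so a candidate from an earlier module is only adopted after
+// typesEqual confirms structural equality; otherwise the later module keeps
+// its own *_type.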
+
+type _typePair struct {
+ t1 *_type
+ t2 *_type
+}
+
+func toRType(t *abi.Type) rtype {
+ return rtype{t}
+}
+
+// typesEqual reports whether two types are equal.
+//
+// Everywhere in the runtime and reflect packages, it is assumed that
+// there is exactly one *_type per Go type, so that pointer equality
+// can be used to test if types are equal. There is one place that
+// breaks this assumption: buildmode=shared. In this case a type can
+// appear as two different pieces of memory. This is hidden from the
+// runtime and reflect package by the per-module typemap built in
+// typelinksinit. It uses typesEqual to map types from later modules
+// back into earlier ones.
+//
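+// For example (hypothetical type, for illustration only), a recursive type
+// such as
+//
+//	type List struct{ Next *List }
+//
+// compiled into two modules yields two *_type values whose field types point
+// back at their own definitions. typesEqual records each (t, v) pair in seen
+// before recursing, so the second visit of the same pair returns true and the
+// comparison terminates instead of recursing forever.
+//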
+// Only typelinksinit needs this function.
+func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
+ tp := _typePair{t, v}
+ if _, ok := seen[tp]; ok {
+ return true
+ }
+
+ // Mark these types as seen, and thus equivalent, which prevents an
+ // infinite loop if the two types are identical but recursively defined
+ // and loaded from different modules.
+ seen[tp] = struct{}{}
+
+ if t == v {
+ return true
+ }
+ kind := t.Kind_ & kindMask
+ if kind != v.Kind_&kindMask {
+ return false
+ }
+ rt, rv := toRType(t), toRType(v)
+ if rt.string() != rv.string() {
+ return false
+ }
+ ut := t.Uncommon()
+ uv := v.Uncommon()
+ if ut != nil || uv != nil {
+ if ut == nil || uv == nil {
+ return false
+ }
+ pkgpatht := rt.nameOff(ut.PkgPath).Name()
+ pkgpathv := rv.nameOff(uv.PkgPath).Name()
+ if pkgpatht != pkgpathv {
+ return false
+ }
+ }
+ if kindBool <= kind && kind <= kindComplex128 {
+ return true
+ }
+ switch kind {
+ case kindString, kindUnsafePointer:
+ return true
+ case kindArray:
+ at := (*arraytype)(unsafe.Pointer(t))
+ av := (*arraytype)(unsafe.Pointer(v))
+ return typesEqual(at.Elem, av.Elem, seen) && at.Len == av.Len
+ case kindChan:
+ ct := (*chantype)(unsafe.Pointer(t))
+ cv := (*chantype)(unsafe.Pointer(v))
+ return ct.Dir == cv.Dir && typesEqual(ct.Elem, cv.Elem, seen)
+ case kindFunc:
+ ft := (*functype)(unsafe.Pointer(t))
+ fv := (*functype)(unsafe.Pointer(v))
+ if ft.OutCount != fv.OutCount || ft.InCount != fv.InCount {
+ return false
+ }
+ tin, vin := ft.InSlice(), fv.InSlice()
+ for i := 0; i < len(tin); i++ {
+ if !typesEqual(tin[i], vin[i], seen) {
+ return false
+ }
+ }
+ tout, vout := ft.OutSlice(), fv.OutSlice()
+ for i := 0; i < len(tout); i++ {
+ if !typesEqual(tout[i], vout[i], seen) {
+ return false
+ }
+ }
+ return true
+ case kindInterface:
+ it := (*interfacetype)(unsafe.Pointer(t))
+ iv := (*interfacetype)(unsafe.Pointer(v))
+ if it.PkgPath.Name() != iv.PkgPath.Name() {
+ return false
+ }
+ if len(it.Methods) != len(iv.Methods) {
+ return false
+ }
+ for i := range it.Methods {
+ tm := &it.Methods[i]
+ vm := &iv.Methods[i]
+ // Note the Methods slice can be relocated from
+ // another module. See #17724.
+ tname := resolveNameOff(unsafe.Pointer(tm), tm.Name)
+ vname := resolveNameOff(unsafe.Pointer(vm), vm.Name)
+ if tname.Name() != vname.Name() {
+ return false
+ }
+ if pkgPath(tname) != pkgPath(vname) {
+ return false
+ }
+ tityp := resolveTypeOff(unsafe.Pointer(tm), tm.Typ)
+ vityp := resolveTypeOff(unsafe.Pointer(vm), vm.Typ)
+ if !typesEqual(tityp, vityp, seen) {
+ return false
+ }
+ }
+ return true
+ case kindMap:
+ mt := (*maptype)(unsafe.Pointer(t))
+ mv := (*maptype)(unsafe.Pointer(v))
+ return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
+ case kindPtr:
+ pt := (*ptrtype)(unsafe.Pointer(t))
+ pv := (*ptrtype)(unsafe.Pointer(v))
+ return typesEqual(pt.Elem, pv.Elem, seen)
+ case kindSlice:
+ st := (*slicetype)(unsafe.Pointer(t))
+ sv := (*slicetype)(unsafe.Pointer(v))
+ return typesEqual(st.Elem, sv.Elem, seen)
+ case kindStruct:
+ st := (*structtype)(unsafe.Pointer(t))
+ sv := (*structtype)(unsafe.Pointer(v))
+ if len(st.Fields) != len(sv.Fields) {
+ return false
+ }
+ if st.PkgPath.Name() != sv.PkgPath.Name() {
+ return false
+ }
+ for i := range st.Fields {
+ tf := &st.Fields[i]
+ vf := &sv.Fields[i]
+ if tf.Name.Name() != vf.Name.Name() {
+ return false
+ }
+ if !typesEqual(tf.Typ, vf.Typ, seen) {
+ return false
+ }
+ if tf.Name.Tag() != vf.Name.Tag() {
+ return false
+ }
+ if tf.Offset != vf.Offset {
+ return false
+ }
+ if tf.Name.IsEmbedded() != vf.Name.IsEmbedded() {
+ return false
+ }
+ }
+ return true
+ default:
+ println("runtime: impossible type kind", kind)
+ throw("runtime: impossible type kind")
+ return false
+ }
+}