author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:19:13 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:19:13 +0000
commit     ccd992355df7192993c666236047820244914598 (patch)
tree       f00fea65147227b7743083c6148396f74cd66935 /src/internal
parent     Initial commit. (diff)
download   golang-1.21-ccd992355df7192993c666236047820244914598.tar.xz
           golang-1.21-ccd992355df7192993c666236047820244914598.zip
Adding upstream version 1.21.8. (upstream/1.21.8)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/internal')
-rw-r--r--src/internal/abi/abi.go102
-rw-r--r--src/internal/abi/abi_amd64.go18
-rw-r--r--src/internal/abi/abi_arm64.go17
-rw-r--r--src/internal/abi/abi_generic.go38
-rw-r--r--src/internal/abi/abi_ppc64x.go19
-rw-r--r--src/internal/abi/abi_riscv64.go17
-rw-r--r--src/internal/abi/abi_test.go79
-rw-r--r--src/internal/abi/abi_test.s27
-rw-r--r--src/internal/abi/compiletype.go167
-rw-r--r--src/internal/abi/export_test.go14
-rw-r--r--src/internal/abi/funcpc.go31
-rw-r--r--src/internal/abi/funcpc_gccgo.go21
-rw-r--r--src/internal/abi/map.go14
-rw-r--r--src/internal/abi/stack.go33
-rw-r--r--src/internal/abi/stub.s7
-rw-r--r--src/internal/abi/symtab.go106
-rw-r--r--src/internal/abi/testdata/x.go22
-rw-r--r--src/internal/abi/testdata/x.s6
-rw-r--r--src/internal/abi/type.go712
-rw-r--r--src/internal/abi/unsafestring_go119.go32
-rw-r--r--src/internal/abi/unsafestring_go120.go18
-rw-r--r--src/internal/bisect/bisect.go795
-rw-r--r--src/internal/buildcfg/cfg.go235
-rw-r--r--src/internal/buildcfg/cfg_test.go26
-rw-r--r--src/internal/buildcfg/exp.go190
-rw-r--r--src/internal/bytealg/bytealg.go155
-rw-r--r--src/internal/bytealg/compare_386.s144
-rw-r--r--src/internal/bytealg/compare_amd64.s237
-rw-r--r--src/internal/bytealg/compare_arm.s86
-rw-r--r--src/internal/bytealg/compare_arm64.s125
-rw-r--r--src/internal/bytealg/compare_generic.go60
-rw-r--r--src/internal/bytealg/compare_loong64.s87
-rw-r--r--src/internal/bytealg/compare_mips64x.s88
-rw-r--r--src/internal/bytealg/compare_mipsx.s72
-rw-r--r--src/internal/bytealg/compare_native.go19
-rw-r--r--src/internal/bytealg/compare_ppc64x.s332
-rw-r--r--src/internal/bytealg/compare_riscv64.s222
-rw-r--r--src/internal/bytealg/compare_s390x.s69
-rw-r--r--src/internal/bytealg/compare_wasm.s115
-rw-r--r--src/internal/bytealg/count_amd64.s208
-rw-r--r--src/internal/bytealg/count_arm.s43
-rw-r--r--src/internal/bytealg/count_arm64.s90
-rw-r--r--src/internal/bytealg/count_generic.go27
-rw-r--r--src/internal/bytealg/count_native.go33
-rw-r--r--src/internal/bytealg/count_ppc64x.s96
-rw-r--r--src/internal/bytealg/count_riscv64.s47
-rw-r--r--src/internal/bytealg/count_s390x.s169
-rw-r--r--src/internal/bytealg/equal_386.s130
-rw-r--r--src/internal/bytealg/equal_amd64.s162
-rw-r--r--src/internal/bytealg/equal_arm.s91
-rw-r--r--src/internal/bytealg/equal_arm64.s121
-rw-r--r--src/internal/bytealg/equal_generic.go18
-rw-r--r--src/internal/bytealg/equal_loong64.s53
-rw-r--r--src/internal/bytealg/equal_mips64x.s118
-rw-r--r--src/internal/bytealg/equal_mipsx.s62
-rw-r--r--src/internal/bytealg/equal_native.go21
-rw-r--r--src/internal/bytealg/equal_ppc64x.s207
-rw-r--r--src/internal/bytealg/equal_riscv64.s126
-rw-r--r--src/internal/bytealg/equal_s390x.s92
-rw-r--r--src/internal/bytealg/equal_wasm.s77
-rw-r--r--src/internal/bytealg/index_amd64.go26
-rw-r--r--src/internal/bytealg/index_amd64.s276
-rw-r--r--src/internal/bytealg/index_arm64.go23
-rw-r--r--src/internal/bytealg/index_arm64.s206
-rw-r--r--src/internal/bytealg/index_generic.go29
-rw-r--r--src/internal/bytealg/index_native.go19
-rw-r--r--src/internal/bytealg/index_ppc64x.go26
-rw-r--r--src/internal/bytealg/index_ppc64x.s841
-rw-r--r--src/internal/bytealg/index_s390x.go31
-rw-r--r--src/internal/bytealg/index_s390x.s216
-rw-r--r--src/internal/bytealg/indexbyte_386.s34
-rw-r--r--src/internal/bytealg/indexbyte_amd64.s149
-rw-r--r--src/internal/bytealg/indexbyte_arm.s46
-rw-r--r--src/internal/bytealg/indexbyte_arm64.s126
-rw-r--r--src/internal/bytealg/indexbyte_generic.go25
-rw-r--r--src/internal/bytealg/indexbyte_loong64.s54
-rw-r--r--src/internal/bytealg/indexbyte_mips64x.s54
-rw-r--r--src/internal/bytealg/indexbyte_mipsx.s52
-rw-r--r--src/internal/bytealg/indexbyte_native.go13
-rw-r--r--src/internal/bytealg/indexbyte_ppc64x.s314
-rw-r--r--src/internal/bytealg/indexbyte_riscv64.s51
-rw-r--r--src/internal/bytealg/indexbyte_s390x.s108
-rw-r--r--src/internal/bytealg/indexbyte_wasm.s195
-rw-r--r--src/internal/cfg/cfg.go70
-rw-r--r--src/internal/coverage/calloc/batchcounteralloc.go29
-rw-r--r--src/internal/coverage/cformat/fmt_test.go155
-rw-r--r--src/internal/coverage/cformat/format.go352
-rw-r--r--src/internal/coverage/cmddefs.go87
-rw-r--r--src/internal/coverage/cmerge/merge.go127
-rw-r--r--src/internal/coverage/cmerge/merge_test.go118
-rw-r--r--src/internal/coverage/decodecounter/decodecounterfile.go373
-rw-r--r--src/internal/coverage/decodemeta/decode.go136
-rw-r--r--src/internal/coverage/decodemeta/decodefile.go223
-rw-r--r--src/internal/coverage/defs.go374
-rw-r--r--src/internal/coverage/encodecounter/encode.go297
-rw-r--r--src/internal/coverage/encodemeta/encode.go215
-rw-r--r--src/internal/coverage/encodemeta/encodefile.go132
-rw-r--r--src/internal/coverage/pkid.go80
-rw-r--r--src/internal/coverage/pods/pods.go197
-rw-r--r--src/internal/coverage/pods/pods_test.go142
-rw-r--r--src/internal/coverage/rtcov/rtcov.go34
-rw-r--r--src/internal/coverage/slicereader/slicereader.go123
-rw-r--r--src/internal/coverage/slicereader/slr_test.go95
-rw-r--r--src/internal/coverage/slicewriter/slicewriter.go80
-rw-r--r--src/internal/coverage/slicewriter/slw_test.go134
-rw-r--r--src/internal/coverage/stringtab/stringtab.go139
-rw-r--r--src/internal/coverage/test/counter_test.go237
-rw-r--r--src/internal/coverage/test/roundtrip_test.go331
-rw-r--r--src/internal/coverage/uleb128/uleb128.go20
-rw-r--r--src/internal/cpu/cpu.go222
-rw-r--r--src/internal/cpu/cpu.s6
-rw-r--r--src/internal/cpu/cpu_arm.go34
-rw-r--r--src/internal/cpu/cpu_arm64.go69
-rw-r--r--src/internal/cpu/cpu_arm64.s18
-rw-r--r--src/internal/cpu/cpu_arm64_android.go11
-rw-r--r--src/internal/cpu/cpu_arm64_darwin.go33
-rw-r--r--src/internal/cpu/cpu_arm64_freebsd.go14
-rw-r--r--src/internal/cpu/cpu_arm64_hwcap.go66
-rw-r--r--src/internal/cpu/cpu_arm64_linux.go11
-rw-r--r--src/internal/cpu/cpu_arm64_openbsd.go28
-rw-r--r--src/internal/cpu/cpu_arm64_other.go13
-rw-r--r--src/internal/cpu/cpu_loong64.go13
-rw-r--r--src/internal/cpu/cpu_mips.go10
-rw-r--r--src/internal/cpu/cpu_mips64x.go32
-rw-r--r--src/internal/cpu/cpu_mipsle.go10
-rw-r--r--src/internal/cpu/cpu_no_name.go18
-rw-r--r--src/internal/cpu/cpu_ppc64x.go35
-rw-r--r--src/internal/cpu/cpu_ppc64x_aix.go25
-rw-r--r--src/internal/cpu/cpu_ppc64x_linux.go33
-rw-r--r--src/internal/cpu/cpu_ppc64x_other.go13
-rw-r--r--src/internal/cpu/cpu_riscv64.go10
-rw-r--r--src/internal/cpu/cpu_s390x.go205
-rw-r--r--src/internal/cpu/cpu_s390x.s63
-rw-r--r--src/internal/cpu/cpu_s390x_test.go63
-rw-r--r--src/internal/cpu/cpu_test.go61
-rw-r--r--src/internal/cpu/cpu_wasm.go10
-rw-r--r--src/internal/cpu/cpu_x86.go190
-rw-r--r--src/internal/cpu/cpu_x86.s43
-rw-r--r--src/internal/cpu/cpu_x86_test.go39
-rw-r--r--src/internal/cpu/export_test.go9
-rw-r--r--src/internal/cpu/export_x86_test.go11
-rw-r--r--src/internal/dag/alg.go63
-rw-r--r--src/internal/dag/alg_test.go46
-rw-r--r--src/internal/dag/parse.go314
-rw-r--r--src/internal/dag/parse_test.go61
-rw-r--r--src/internal/diff/diff.go261
-rw-r--r--src/internal/diff/diff_test.go43
-rw-r--r--src/internal/diff/testdata/allnew.txt13
-rw-r--r--src/internal/diff/testdata/allold.txt13
-rw-r--r--src/internal/diff/testdata/basic.txt35
-rw-r--r--src/internal/diff/testdata/dups.txt40
-rw-r--r--src/internal/diff/testdata/end.txt38
-rw-r--r--src/internal/diff/testdata/eof.txt9
-rw-r--r--src/internal/diff/testdata/eof1.txt18
-rw-r--r--src/internal/diff/testdata/eof2.txt18
-rw-r--r--src/internal/diff/testdata/long.txt62
-rw-r--r--src/internal/diff/testdata/same.txt5
-rw-r--r--src/internal/diff/testdata/start.txt34
-rw-r--r--src/internal/diff/testdata/triv.txt40
-rw-r--r--src/internal/fmtsort/export_test.go11
-rw-r--r--src/internal/fmtsort/sort.go219
-rw-r--r--src/internal/fmtsort/sort_test.go279
-rw-r--r--src/internal/fuzz/counters_supported.go21
-rw-r--r--src/internal/fuzz/counters_unsupported.go24
-rw-r--r--src/internal/fuzz/coverage.go107
-rw-r--r--src/internal/fuzz/encoding.go361
-rw-r--r--src/internal/fuzz/encoding_test.go409
-rw-r--r--src/internal/fuzz/fuzz.go1102
-rw-r--r--src/internal/fuzz/mem.go138
-rw-r--r--src/internal/fuzz/minimize.go95
-rw-r--r--src/internal/fuzz/minimize_test.go182
-rw-r--r--src/internal/fuzz/mutator.go300
-rw-r--r--src/internal/fuzz/mutator_test.go117
-rw-r--r--src/internal/fuzz/mutators_byteslice.go313
-rw-r--r--src/internal/fuzz/mutators_byteslice_test.go186
-rw-r--r--src/internal/fuzz/pcg.go145
-rw-r--r--src/internal/fuzz/queue.go71
-rw-r--r--src/internal/fuzz/queue_test.go58
-rw-r--r--src/internal/fuzz/sys_posix.go130
-rw-r--r--src/internal/fuzz/sys_unimplemented.go44
-rw-r--r--src/internal/fuzz/sys_windows.go147
-rw-r--r--src/internal/fuzz/trace.go35
-rw-r--r--src/internal/fuzz/worker.go1195
-rw-r--r--src/internal/fuzz/worker_test.go206
-rw-r--r--src/internal/goarch/gengoarch.go60
-rw-r--r--src/internal/goarch/goarch.go60
-rw-r--r--src/internal/goarch/goarch_386.go13
-rw-r--r--src/internal/goarch/goarch_amd64.go13
-rw-r--r--src/internal/goarch/goarch_arm.go13
-rw-r--r--src/internal/goarch/goarch_arm64.go13
-rw-r--r--src/internal/goarch/goarch_loong64.go15
-rw-r--r--src/internal/goarch/goarch_mips.go13
-rw-r--r--src/internal/goarch/goarch_mips64.go13
-rw-r--r--src/internal/goarch/goarch_mips64le.go13
-rw-r--r--src/internal/goarch/goarch_mipsle.go13
-rw-r--r--src/internal/goarch/goarch_ppc64.go13
-rw-r--r--src/internal/goarch/goarch_ppc64le.go13
-rw-r--r--src/internal/goarch/goarch_riscv64.go13
-rw-r--r--src/internal/goarch/goarch_s390x.go13
-rw-r--r--src/internal/goarch/goarch_wasm.go13
-rw-r--r--src/internal/goarch/zgoarch_386.go32
-rw-r--r--src/internal/goarch/zgoarch_amd64.go32
-rw-r--r--src/internal/goarch/zgoarch_arm.go32
-rw-r--r--src/internal/goarch/zgoarch_arm64.go32
-rw-r--r--src/internal/goarch/zgoarch_arm64be.go32
-rw-r--r--src/internal/goarch/zgoarch_armbe.go32
-rw-r--r--src/internal/goarch/zgoarch_loong64.go32
-rw-r--r--src/internal/goarch/zgoarch_mips.go32
-rw-r--r--src/internal/goarch/zgoarch_mips64.go32
-rw-r--r--src/internal/goarch/zgoarch_mips64le.go32
-rw-r--r--src/internal/goarch/zgoarch_mips64p32.go32
-rw-r--r--src/internal/goarch/zgoarch_mips64p32le.go32
-rw-r--r--src/internal/goarch/zgoarch_mipsle.go32
-rw-r--r--src/internal/goarch/zgoarch_ppc.go32
-rw-r--r--src/internal/goarch/zgoarch_ppc64.go32
-rw-r--r--src/internal/goarch/zgoarch_ppc64le.go32
-rw-r--r--src/internal/goarch/zgoarch_riscv.go32
-rw-r--r--src/internal/goarch/zgoarch_riscv64.go32
-rw-r--r--src/internal/goarch/zgoarch_s390.go32
-rw-r--r--src/internal/goarch/zgoarch_s390x.go32
-rw-r--r--src/internal/goarch/zgoarch_sparc.go32
-rw-r--r--src/internal/goarch/zgoarch_sparc64.go32
-rw-r--r--src/internal/goarch/zgoarch_wasm.go32
-rw-r--r--src/internal/godebug/godebug.go290
-rw-r--r--src/internal/godebug/godebug_test.go162
-rw-r--r--src/internal/godebugs/godebugs_test.go46
-rw-r--r--src/internal/godebugs/table.go69
-rw-r--r--src/internal/goexperiment/exp_arenas_off.go9
-rw-r--r--src/internal/goexperiment/exp_arenas_on.go9
-rw-r--r--src/internal/goexperiment/exp_boringcrypto_off.go9
-rw-r--r--src/internal/goexperiment/exp_boringcrypto_on.go9
-rw-r--r--src/internal/goexperiment/exp_cacheprog_off.go9
-rw-r--r--src/internal/goexperiment/exp_cacheprog_on.go9
-rw-r--r--src/internal/goexperiment/exp_cgocheck2_off.go9
-rw-r--r--src/internal/goexperiment/exp_cgocheck2_on.go9
-rw-r--r--src/internal/goexperiment/exp_coverageredesign_off.go9
-rw-r--r--src/internal/goexperiment/exp_coverageredesign_on.go9
-rw-r--r--src/internal/goexperiment/exp_fieldtrack_off.go9
-rw-r--r--src/internal/goexperiment/exp_fieldtrack_on.go9
-rw-r--r--src/internal/goexperiment/exp_heapminimum512kib_off.go9
-rw-r--r--src/internal/goexperiment/exp_heapminimum512kib_on.go9
-rw-r--r--src/internal/goexperiment/exp_loopvar_off.go9
-rw-r--r--src/internal/goexperiment/exp_loopvar_on.go9
-rw-r--r--src/internal/goexperiment/exp_pagetrace_off.go9
-rw-r--r--src/internal/goexperiment/exp_pagetrace_on.go9
-rw-r--r--src/internal/goexperiment/exp_preemptibleloops_off.go9
-rw-r--r--src/internal/goexperiment/exp_preemptibleloops_on.go9
-rw-r--r--src/internal/goexperiment/exp_regabiargs_off.go9
-rw-r--r--src/internal/goexperiment/exp_regabiargs_on.go9
-rw-r--r--src/internal/goexperiment/exp_regabiwrappers_off.go9
-rw-r--r--src/internal/goexperiment/exp_regabiwrappers_on.go9
-rw-r--r--src/internal/goexperiment/exp_staticlockranking_off.go9
-rw-r--r--src/internal/goexperiment/exp_staticlockranking_on.go9
-rw-r--r--src/internal/goexperiment/flags.go112
-rw-r--r--src/internal/goexperiment/mkconsts.go74
-rw-r--r--src/internal/goos/gengoos.go71
-rw-r--r--src/internal/goos/goos.go13
-rw-r--r--src/internal/goos/nonunix.go9
-rw-r--r--src/internal/goos/unix.go9
-rw-r--r--src/internal/goos/zgoos_aix.go26
-rw-r--r--src/internal/goos/zgoos_android.go26
-rw-r--r--src/internal/goos/zgoos_darwin.go26
-rw-r--r--src/internal/goos/zgoos_dragonfly.go26
-rw-r--r--src/internal/goos/zgoos_freebsd.go26
-rw-r--r--src/internal/goos/zgoos_hurd.go26
-rw-r--r--src/internal/goos/zgoos_illumos.go26
-rw-r--r--src/internal/goos/zgoos_ios.go26
-rw-r--r--src/internal/goos/zgoos_js.go26
-rw-r--r--src/internal/goos/zgoos_linux.go26
-rw-r--r--src/internal/goos/zgoos_netbsd.go26
-rw-r--r--src/internal/goos/zgoos_openbsd.go26
-rw-r--r--src/internal/goos/zgoos_plan9.go26
-rw-r--r--src/internal/goos/zgoos_solaris.go26
-rw-r--r--src/internal/goos/zgoos_wasip1.go26
-rw-r--r--src/internal/goos/zgoos_windows.go26
-rw-r--r--src/internal/goos/zgoos_zos.go26
-rw-r--r--src/internal/goroot/gc.go131
-rw-r--r--src/internal/goroot/gccgo.go27
-rw-r--r--src/internal/goversion/goversion.go12
-rw-r--r--src/internal/intern/intern.go181
-rw-r--r--src/internal/intern/intern_test.go199
-rw-r--r--src/internal/itoa/itoa.go33
-rw-r--r--src/internal/itoa/itoa_test.go40
-rw-r--r--src/internal/lazyregexp/lazyre.go78
-rw-r--r--src/internal/lazytemplate/lazytemplate.go52
-rw-r--r--src/internal/nettrace/nettrace.go46
-rw-r--r--src/internal/obscuretestdata/obscuretestdata.go65
-rw-r--r--src/internal/oserror/errors.go18
-rw-r--r--src/internal/pkgbits/codes.go77
-rw-r--r--src/internal/pkgbits/decoder.go515
-rw-r--r--src/internal/pkgbits/doc.go30
-rw-r--r--src/internal/pkgbits/encoder.go394
-rw-r--r--src/internal/pkgbits/flags.go9
-rw-r--r--src/internal/pkgbits/reloc.go42
-rw-r--r--src/internal/pkgbits/support.go17
-rw-r--r--src/internal/pkgbits/sync.go136
-rw-r--r--src/internal/pkgbits/syncmarker_string.go92
-rw-r--r--src/internal/platform/supported.go286
-rw-r--r--src/internal/platform/zosarch.go114
-rw-r--r--src/internal/platform/zosarch_test.go109
-rw-r--r--src/internal/poll/copy_file_range_linux.go128
-rw-r--r--src/internal/poll/errno_unix.go33
-rw-r--r--src/internal/poll/errno_windows.go31
-rw-r--r--src/internal/poll/error_linux_test.go31
-rw-r--r--src/internal/poll/error_stub_test.go21
-rw-r--r--src/internal/poll/error_test.go51
-rw-r--r--src/internal/poll/export_linux_test.go22
-rw-r--r--src/internal/poll/export_posix_test.go15
-rw-r--r--src/internal/poll/export_test.go35
-rw-r--r--src/internal/poll/export_windows_test.go17
-rw-r--r--src/internal/poll/fd.go83
-rw-r--r--src/internal/poll/fd_fsync_darwin.go24
-rw-r--r--src/internal/poll/fd_fsync_posix.go20
-rw-r--r--src/internal/poll/fd_fsync_windows.go16
-rw-r--r--src/internal/poll/fd_io_plan9.go92
-rw-r--r--src/internal/poll/fd_mutex.go252
-rw-r--r--src/internal/poll/fd_mutex_test.go222
-rw-r--r--src/internal/poll/fd_opendir_darwin.go39
-rw-r--r--src/internal/poll/fd_plan9.go232
-rw-r--r--src/internal/poll/fd_poll_js.go99
-rw-r--r--src/internal/poll/fd_poll_runtime.go169
-rw-r--r--src/internal/poll/fd_posix.go79
-rw-r--r--src/internal/poll/fd_posix_test.go43
-rw-r--r--src/internal/poll/fd_unix.go741
-rw-r--r--src/internal/poll/fd_unixjs.go79
-rw-r--r--src/internal/poll/fd_wasip1.go239
-rw-r--r--src/internal/poll/fd_windows.go1331
-rw-r--r--src/internal/poll/fd_windows_test.go198
-rw-r--r--src/internal/poll/fd_writev_libc.go15
-rw-r--r--src/internal/poll/fd_writev_unix.go29
-rw-r--r--src/internal/poll/file_plan9.go42
-rw-r--r--src/internal/poll/hook_cloexec.go12
-rw-r--r--src/internal/poll/hook_unix.go15
-rw-r--r--src/internal/poll/hook_windows.go16
-rw-r--r--src/internal/poll/iovec_solaris.go14
-rw-r--r--src/internal/poll/iovec_unix.go13
-rw-r--r--src/internal/poll/read_test.go61
-rw-r--r--src/internal/poll/sendfile_bsd.go59
-rw-r--r--src/internal/poll/sendfile_linux.go59
-rw-r--r--src/internal/poll/sendfile_solaris.go66
-rw-r--r--src/internal/poll/sendfile_windows.go84
-rw-r--r--src/internal/poll/sock_cloexec.go49
-rw-r--r--src/internal/poll/sock_cloexec_accept.go51
-rw-r--r--src/internal/poll/sockopt.go45
-rw-r--r--src/internal/poll/sockopt_linux.go16
-rw-r--r--src/internal/poll/sockopt_unix.go18
-rw-r--r--src/internal/poll/sockopt_windows.go25
-rw-r--r--src/internal/poll/sockoptip.go27
-rw-r--r--src/internal/poll/splice_linux.go250
-rw-r--r--src/internal/poll/splice_linux_test.go136
-rw-r--r--src/internal/poll/strconv.go13
-rw-r--r--src/internal/poll/sys_cloexec.go36
-rw-r--r--src/internal/poll/writev.go92
-rw-r--r--src/internal/poll/writev_test.go62
-rw-r--r--src/internal/profile/encode.go482
-rw-r--r--src/internal/profile/filter.go158
-rw-r--r--src/internal/profile/legacy_profile.go1268
-rw-r--r--src/internal/profile/merge.go461
-rw-r--r--src/internal/profile/profile.go613
-rw-r--r--src/internal/profile/profile_test.go79
-rw-r--r--src/internal/profile/proto.go356
-rw-r--r--src/internal/profile/proto_test.go71
-rw-r--r--src/internal/profile/prune.go97
-rw-r--r--src/internal/race/doc.go11
-rw-r--r--src/internal/race/norace.go43
-rw-r--r--src/internal/race/race.go55
-rw-r--r--src/internal/reflectlite/all_test.go1039
-rw-r--r--src/internal/reflectlite/asm.s5
-rw-r--r--src/internal/reflectlite/export_test.go117
-rw-r--r--src/internal/reflectlite/reflect_mirror_test.go133
-rw-r--r--src/internal/reflectlite/set_test.go101
-rw-r--r--src/internal/reflectlite/swapper.go78
-rw-r--r--src/internal/reflectlite/tostring_test.go98
-rw-r--r--src/internal/reflectlite/type.go659
-rw-r--r--src/internal/reflectlite/value.go478
-rw-r--r--src/internal/safefilepath/path.go21
-rw-r--r--src/internal/safefilepath/path_other.go23
-rw-r--r--src/internal/safefilepath/path_test.go88
-rw-r--r--src/internal/safefilepath/path_windows.go141
-rw-r--r--src/internal/saferio/io.go135
-rw-r--r--src/internal/saferio/io_test.go136
-rw-r--r--src/internal/singleflight/singleflight.go123
-rw-r--r--src/internal/singleflight/singleflight_test.go186
-rw-r--r--src/internal/syscall/execenv/execenv_default.go19
-rw-r--r--src/internal/syscall/execenv/execenv_windows.go47
-rw-r--r--src/internal/syscall/unix/asm_aix_ppc64.s12
-rw-r--r--src/internal/syscall/unix/asm_darwin.s24
-rw-r--r--src/internal/syscall/unix/asm_solaris.s10
-rw-r--r--src/internal/syscall/unix/at.go40
-rw-r--r--src/internal/syscall/unix/at_aix.go15
-rw-r--r--src/internal/syscall/unix/at_fstatat.go28
-rw-r--r--src/internal/syscall/unix/at_fstatat2.go13
-rw-r--r--src/internal/syscall/unix/at_js.go13
-rw-r--r--src/internal/syscall/unix/at_libc.go64
-rw-r--r--src/internal/syscall/unix/at_libc2.go33
-rw-r--r--src/internal/syscall/unix/at_solaris.go21
-rw-r--r--src/internal/syscall/unix/at_sysnum_darwin.go10
-rw-r--r--src/internal/syscall/unix/at_sysnum_dragonfly.go16
-rw-r--r--src/internal/syscall/unix/at_sysnum_freebsd.go18
-rw-r--r--src/internal/syscall/unix/at_sysnum_fstatat64_linux.go11
-rw-r--r--src/internal/syscall/unix/at_sysnum_fstatat_linux.go11
-rw-r--r--src/internal/syscall/unix/at_sysnum_linux.go19
-rw-r--r--src/internal/syscall/unix/at_sysnum_netbsd.go16
-rw-r--r--src/internal/syscall/unix/at_sysnum_newfstatat_linux.go11
-rw-r--r--src/internal/syscall/unix/at_sysnum_openbsd.go16
-rw-r--r--src/internal/syscall/unix/at_wasip1.go13
-rw-r--r--src/internal/syscall/unix/constants.go13
-rw-r--r--src/internal/syscall/unix/copy_file_range_linux.go26
-rw-r--r--src/internal/syscall/unix/eaccess_linux.go11
-rw-r--r--src/internal/syscall/unix/eaccess_other.go13
-rw-r--r--src/internal/syscall/unix/fallocate_freebsd_386.go17
-rw-r--r--src/internal/syscall/unix/fallocate_freebsd_64bit.go19
-rw-r--r--src/internal/syscall/unix/fallocate_freebsd_arm.go22
-rw-r--r--src/internal/syscall/unix/fcntl_js.go13
-rw-r--r--src/internal/syscall/unix/fcntl_unix.go25
-rw-r--r--src/internal/syscall/unix/fcntl_wasip1.go17
-rw-r--r--src/internal/syscall/unix/getentropy_darwin.go28
-rw-r--r--src/internal/syscall/unix/getentropy_openbsd.go17
-rw-r--r--src/internal/syscall/unix/getentropy_openbsd_mips64.go25
-rw-r--r--src/internal/syscall/unix/getrandom.go39
-rw-r--r--src/internal/syscall/unix/getrandom_dragonfly.go16
-rw-r--r--src/internal/syscall/unix/getrandom_freebsd.go16
-rw-r--r--src/internal/syscall/unix/getrandom_linux.go13
-rw-r--r--src/internal/syscall/unix/getrandom_netbsd.go56
-rw-r--r--src/internal/syscall/unix/getrandom_solaris.go53
-rw-r--r--src/internal/syscall/unix/ioctl_aix.go25
-rw-r--r--src/internal/syscall/unix/kernel_version_linux.go42
-rw-r--r--src/internal/syscall/unix/kernel_version_other.go11
-rw-r--r--src/internal/syscall/unix/net.go44
-rw-r--r--src/internal/syscall/unix/net_darwin.go162
-rw-r--r--src/internal/syscall/unix/net_js.go44
-rw-r--r--src/internal/syscall/unix/net_wasip1.go44
-rw-r--r--src/internal/syscall/unix/nonblocking_js.go15
-rw-r--r--src/internal/syscall/unix/nonblocking_unix.go21
-rw-r--r--src/internal/syscall/unix/nonblocking_wasip1.go31
-rw-r--r--src/internal/syscall/unix/pty_darwin.go65
-rw-r--r--src/internal/syscall/unix/sysnum_linux_386.go10
-rw-r--r--src/internal/syscall/unix/sysnum_linux_amd64.go10
-rw-r--r--src/internal/syscall/unix/sysnum_linux_arm.go10
-rw-r--r--src/internal/syscall/unix/sysnum_linux_generic.go16
-rw-r--r--src/internal/syscall/unix/sysnum_linux_mips64x.go12
-rw-r--r--src/internal/syscall/unix/sysnum_linux_mipsx.go12
-rw-r--r--src/internal/syscall/unix/sysnum_linux_ppc64x.go12
-rw-r--r--src/internal/syscall/unix/sysnum_linux_s390x.go10
-rw-r--r--src/internal/syscall/unix/user_darwin.go121
-rw-r--r--src/internal/syscall/windows/exec_windows_test.go139
-rw-r--r--src/internal/syscall/windows/memory_windows.go24
-rw-r--r--src/internal/syscall/windows/mksyscall.go9
-rw-r--r--src/internal/syscall/windows/net_windows.go40
-rw-r--r--src/internal/syscall/windows/psapi_windows.go20
-rw-r--r--src/internal/syscall/windows/registry/export_test.go11
-rw-r--r--src/internal/syscall/windows/registry/key.go168
-rw-r--r--src/internal/syscall/windows/registry/mksyscall.go9
-rw-r--r--src/internal/syscall/windows/registry/registry_test.go672
-rw-r--r--src/internal/syscall/windows/registry/syscall.go31
-rw-r--r--src/internal/syscall/windows/registry/value.go372
-rw-r--r--src/internal/syscall/windows/registry/zsyscall_windows.go107
-rw-r--r--src/internal/syscall/windows/reparse_windows.go91
-rw-r--r--src/internal/syscall/windows/security_windows.go128
-rw-r--r--src/internal/syscall/windows/symlink_windows.go39
-rw-r--r--src/internal/syscall/windows/syscall_windows.go390
-rw-r--r--src/internal/syscall/windows/sysdll/sysdll.go30
-rw-r--r--src/internal/syscall/windows/zsyscall_windows.go389
-rw-r--r--src/internal/sysinfo/sysinfo.go31
-rw-r--r--src/internal/testenv/exec.go219
-rw-r--r--src/internal/testenv/noopt.go12
-rw-r--r--src/internal/testenv/opt.go12
-rw-r--r--src/internal/testenv/testenv.go506
-rw-r--r--src/internal/testenv/testenv_notunix.go21
-rw-r--r--src/internal/testenv/testenv_notwin.go46
-rw-r--r--src/internal/testenv/testenv_test.go185
-rw-r--r--src/internal/testenv/testenv_unix.go43
-rw-r--r--src/internal/testenv/testenv_windows.go47
-rw-r--r--src/internal/testlog/exit.go33
-rw-r--r--src/internal/testlog/log.go69
-rw-r--r--src/internal/testpty/pty.go38
-rw-r--r--src/internal/testpty/pty_cgo.go34
-rw-r--r--src/internal/testpty/pty_darwin.go32
-rw-r--r--src/internal/testpty/pty_none.go13
-rw-r--r--src/internal/trace/gc.go826
-rw-r--r--src/internal/trace/gc_test.go202
-rw-r--r--src/internal/trace/goroutines.go358
-rwxr-xr-xsrc/internal/trace/mkcanned.bash20
-rw-r--r--src/internal/trace/mud.go223
-rw-r--r--src/internal/trace/mud_test.go87
-rw-r--r--src/internal/trace/order.go285
-rw-r--r--src/internal/trace/parser.go1174
-rw-r--r--src/internal/trace/parser_test.go123
-rw-r--r--src/internal/trace/testdata/http_1_10_good  bin 0 -> 2201 bytes
-rw-r--r--src/internal/trace/testdata/http_1_11_good  bin 0 -> 2779 bytes
-rw-r--r--src/internal/trace/testdata/http_1_19_good  bin 0 -> 28172 bytes
-rw-r--r--src/internal/trace/testdata/http_1_21_good  bin 0 -> 6744 bytes
-rw-r--r--src/internal/trace/testdata/http_1_5_good  bin 0 -> 42218 bytes
-rw-r--r--src/internal/trace/testdata/http_1_7_good  bin 0 -> 1971 bytes
-rw-r--r--src/internal/trace/testdata/http_1_9_good  bin 0 -> 2187 bytes
-rw-r--r--src/internal/trace/testdata/stress_1_10_good  bin 0 -> 370999 bytes
-rw-r--r--src/internal/trace/testdata/stress_1_11_good  bin 0 -> 370129 bytes
-rw-r--r--src/internal/trace/testdata/stress_1_19_good  bin 0 -> 322338 bytes
-rw-r--r--src/internal/trace/testdata/stress_1_21_good  bin 0 -> 353725 bytes
-rw-r--r--src/internal/trace/testdata/stress_1_5_good  bin 0 -> 7446 bytes
-rw-r--r--src/internal/trace/testdata/stress_1_5_unordered  bin 0 -> 8194 bytes
-rw-r--r--src/internal/trace/testdata/stress_1_7_good  bin 0 -> 396526 bytes
-rw-r--r--src/internal/trace/testdata/stress_1_9_good  bin 0 -> 365129 bytes
-rw-r--r--src/internal/trace/testdata/stress_start_stop_1_10_good  bin 0 -> 6338 bytes
-rw-r--r--src/internal/trace/testdata/stress_start_stop_1_11_good  bin 0 -> 4882 bytes
-rw-r--r--src/internal/trace/testdata/stress_start_stop_1_19_good  bin 0 -> 7448 bytes
-rw-r--r--src/internal/trace/testdata/stress_start_stop_1_21_good  bin 0 -> 5002 bytes
-rw-r--r--src/internal/trace/testdata/stress_start_stop_1_5_good  bin 0 -> 6997 bytes
-rw-r--r--src/internal/trace/testdata/stress_start_stop_1_7_good  bin 0 -> 2055 bytes
-rw-r--r--src/internal/trace/testdata/stress_start_stop_1_9_good  bin 0 -> 6271 bytes
-rw-r--r--src/internal/trace/testdata/user_task_region_1_11_good  bin 0 -> 2000 bytes
-rw-r--r--src/internal/trace/testdata/user_task_region_1_19_good  bin 0 -> 1922 bytes
-rw-r--r--src/internal/trace/testdata/user_task_region_1_21_good  bin 0 -> 2404 bytes
-rw-r--r--src/internal/trace/writer.go49
-rw-r--r--src/internal/txtar/archive.go140
-rw-r--r--src/internal/types/errors/code_string.go199
-rw-r--r--src/internal/types/errors/codes.go1477
-rw-r--r--src/internal/types/errors/codes_test.go197
-rw-r--r--src/internal/types/errors/generrordocs.go117
-rw-r--r--src/internal/types/testdata/check/blank.go5
-rw-r--r--src/internal/types/testdata/check/builtins0.go1075
-rw-r--r--src/internal/types/testdata/check/builtins1.go330
-rw-r--r--src/internal/types/testdata/check/chans.go62
-rw-r--r--src/internal/types/testdata/check/compliterals.go22
-rw-r--r--src/internal/types/testdata/check/const0.go382
-rw-r--r--src/internal/types/testdata/check/const1.go334
-rw-r--r--src/internal/types/testdata/check/constdecl.go160
-rw-r--r--src/internal/types/testdata/check/conversions0.go93
-rw-r--r--src/internal/types/testdata/check/conversions1.go313
-rw-r--r--src/internal/types/testdata/check/cycles0.go175
-rw-r--r--src/internal/types/testdata/check/cycles1.go77
-rw-r--r--src/internal/types/testdata/check/cycles2.go98
-rw-r--r--src/internal/types/testdata/check/cycles3.go60
-rw-r--r--src/internal/types/testdata/check/cycles4.go121
-rw-r--r--src/internal/types/testdata/check/cycles5.go200
-rw-r--r--src/internal/types/testdata/check/decls0.go210
-rw-r--r--src/internal/types/testdata/check/decls1.go146
-rw-r--r--src/internal/types/testdata/check/decls2/decls2a.go111
-rw-r--r--src/internal/types/testdata/check/decls2/decls2b.go75
-rw-r--r--src/internal/types/testdata/check/decls3.go309
-rw-r--r--src/internal/types/testdata/check/decls4.go199
-rw-r--r--src/internal/types/testdata/check/decls5.go10
-rw-r--r--src/internal/types/testdata/check/errors.go66
-rw-r--r--src/internal/types/testdata/check/expr0.go196
-rw-r--r--src/internal/types/testdata/check/expr1.go127
-rw-r--r--src/internal/types/testdata/check/expr2.go260
-rw-r--r--src/internal/types/testdata/check/expr3.go564
-rw-r--r--src/internal/types/testdata/check/funcinference.go112
-rw-r--r--src/internal/types/testdata/check/go1_12.go36
-rw-r--r--src/internal/types/testdata/check/go1_13.go23
-rw-r--r--src/internal/types/testdata/check/go1_16.go15
-rw-r--r--src/internal/types/testdata/check/go1_19.go15
-rw-r--r--src/internal/types/testdata/check/go1_19_20.go17
-rw-r--r--src/internal/types/testdata/check/go1_20_19.go17
-rw-r--r--src/internal/types/testdata/check/go1_21_19.go17
-rw-r--r--src/internal/types/testdata/check/go1_8.go12
-rw-r--r--src/internal/types/testdata/check/go1_xx_19.go15
-rw-r--r--src/internal/types/testdata/check/gotos.go560
-rw-r--r--src/internal/types/testdata/check/importC.go56
-rw-r--r--src/internal/types/testdata/check/importdecl0/importdecl0a.go53
-rw-r--r--src/internal/types/testdata/check/importdecl0/importdecl0b.go30
-rw-r--r--src/internal/types/testdata/check/importdecl1/importdecl1a.go22
-rw-r--r--src/internal/types/testdata/check/importdecl1/importdecl1b.go11
-rw-r--r--src/internal/types/testdata/check/init0.go106
-rw-r--r--src/internal/types/testdata/check/init1.go97
-rw-r--r--src/internal/types/testdata/check/init2.go139
-rw-r--r--src/internal/types/testdata/check/issue25008/issue25008a.go15
-rw-r--r--src/internal/types/testdata/check/issue25008/issue25008b.go9
-rw-r--r--src/internal/types/testdata/check/issues0.go373
-rw-r--r--src/internal/types/testdata/check/issues1.go250
-rw-r--r--src/internal/types/testdata/check/labels.go207
-rw-r--r--src/internal/types/testdata/check/linalg.go82
-rw-r--r--src/internal/types/testdata/check/literals.go111
-rw-r--r--src/internal/types/testdata/check/main0.go9
-rw-r--r--src/internal/types/testdata/check/main1.go7
-rw-r--r--src/internal/types/testdata/check/map0.go113
-rw-r--r--src/internal/types/testdata/check/map1.go146
-rw-r--r--src/internal/types/testdata/check/methodsets.go214
-rw-r--r--src/internal/types/testdata/check/shifts.go399
-rw-r--r--src/internal/types/testdata/check/slices.go68
-rw-r--r--src/internal/types/testdata/check/stmt0.go994
-rw-r--r--src/internal/types/testdata/check/stmt1.go259
-rw-r--r--src/internal/types/testdata/check/typeinference.go49
-rw-r--r--src/internal/types/testdata/check/typeinst0.go62
-rw-r--r--src/internal/types/testdata/check/typeinst1.go282
-rw-r--r--src/internal/types/testdata/check/typeinstcycles.go11
-rw-r--r--src/internal/types/testdata/check/typeparams.go508
-rw-r--r--src/internal/types/testdata/check/unions.go66
-rw-r--r--src/internal/types/testdata/check/vardecl.go215
-rw-r--r--src/internal/types/testdata/examples/constraints.go80
-rw-r--r--src/internal/types/testdata/examples/functions.go219
-rw-r--r--src/internal/types/testdata/examples/inference.go163
-rw-r--r--src/internal/types/testdata/examples/inference2.go100
-rw-r--r--src/internal/types/testdata/examples/methods.go112
-rw-r--r--src/internal/types/testdata/examples/operations.go29
-rw-r--r--src/internal/types/testdata/examples/types.go315
-rw-r--r--src/internal/types/testdata/examples/typesets.go59
-rw-r--r--src/internal/types/testdata/fixedbugs/issue20583.go14
-rw-r--r--src/internal/types/testdata/fixedbugs/issue23203a.go14
-rw-r--r--src/internal/types/testdata/fixedbugs/issue23203b.go14
-rw-r--r--src/internal/types/testdata/fixedbugs/issue25838.go26
-rw-r--r--src/internal/types/testdata/fixedbugs/issue26390.go13
-rw-r--r--src/internal/types/testdata/fixedbugs/issue28251.go65
-rw-r--r--src/internal/types/testdata/fixedbugs/issue3117.go13
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39634.go90
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39664.go15
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39680.go31
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39693.go23
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39699.go29
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39711.go13
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39723.go9
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39725.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39754.go21
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39755.go23
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39768.go21
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39938.go54
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39948.go9
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39976.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue39982.go36
-rw-r--r--src/internal/types/testdata/fixedbugs/issue40038.go15
-rw-r--r--src/internal/types/testdata/fixedbugs/issue40056.go15
-rw-r--r--src/internal/types/testdata/fixedbugs/issue40057.go17
-rw-r--r--src/internal/types/testdata/fixedbugs/issue40301.go12
-rw-r--r--src/internal/types/testdata/fixedbugs/issue40350.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue40684.go15
-rw-r--r--src/internal/types/testdata/fixedbugs/issue40789.go37
-rw-r--r--src/internal/types/testdata/fixedbugs/issue41124.go91
-rw-r--r--src/internal/types/testdata/fixedbugs/issue41176.go21
-rw-r--r--src/internal/types/testdata/fixedbugs/issue42695.go17
-rw-r--r--src/internal/types/testdata/fixedbugs/issue42758.go33
-rw-r--r--src/internal/types/testdata/fixedbugs/issue42881.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue42987.go8
-rw-r--r--src/internal/types/testdata/fixedbugs/issue43056.go31
-rw-r--r--src/internal/types/testdata/fixedbugs/issue43087.go43
-rw-r--r--src/internal/types/testdata/fixedbugs/issue43109.go10
-rw-r--r--src/internal/types/testdata/fixedbugs/issue43110.go43
-rw-r--r--src/internal/types/testdata/fixedbugs/issue43124.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue43125.go8
-rw-r--r--src/internal/types/testdata/fixedbugs/issue43190.go31
-rw-r--r--src/internal/types/testdata/fixedbugs/issue43527.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue43671.go58
-rw-r--r--src/internal/types/testdata/fixedbugs/issue44688.go83
-rw-r--r--src/internal/types/testdata/fixedbugs/issue44799.go19
-rw-r--r--src/internal/types/testdata/fixedbugs/issue45114.go8
-rw-r--r--src/internal/types/testdata/fixedbugs/issue45548.go13
-rw-r--r--src/internal/types/testdata/fixedbugs/issue45550.go10
-rw-r--r--src/internal/types/testdata/fixedbugs/issue45635.go31
-rw-r--r--src/internal/types/testdata/fixedbugs/issue45639.go13
-rw-r--r--src/internal/types/testdata/fixedbugs/issue45920.go17
-rw-r--r--src/internal/types/testdata/fixedbugs/issue45985.go13
-rw-r--r--src/internal/types/testdata/fixedbugs/issue46090.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue46275.go26
-rw-r--r--src/internal/types/testdata/fixedbugs/issue46403.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue46404.go10
-rw-r--r--src/internal/types/testdata/fixedbugs/issue46461.go20
-rw-r--r--src/internal/types/testdata/fixedbugs/issue46583.go28
-rw-r--r--src/internal/types/testdata/fixedbugs/issue47031.go20
-rw-r--r--src/internal/types/testdata/fixedbugs/issue47115.go40
-rw-r--r--src/internal/types/testdata/fixedbugs/issue47127.go37
-rw-r--r--src/internal/types/testdata/fixedbugs/issue47411.go26
-rw-r--r--src/internal/types/testdata/fixedbugs/issue47747.go71
-rw-r--r--src/internal/types/testdata/fixedbugs/issue47796.go33
-rw-r--r--src/internal/types/testdata/fixedbugs/issue47818.go61
-rw-r--r--src/internal/types/testdata/fixedbugs/issue47887.go28
-rw-r--r--src/internal/types/testdata/fixedbugs/issue47968.go21
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48008.go60
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48018.go20
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48048.go15
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48082.go7
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48083.go9
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48136.go36
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48234.go10
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48312.go20
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48472.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48529.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48582.go29
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48619.go22
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48656.go13
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48695.go14
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48703.go27
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48712.go41
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48819.go15
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48827.go19
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48951.go21
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48962.go13
-rw-r--r--src/internal/types/testdata/fixedbugs/issue48974.go22
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49003.go10
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49005.go31
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49043.go24
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49112.go15
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49179.go37
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49242.go27
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49247.go20
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49276.go46
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49296.go20
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49439.go26
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49482.go26
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49541.go45
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49579.go17
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49592.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49602.go19
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49705.go14
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49735.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49739.go23
-rw-r--r--src/internal/types/testdata/fixedbugs/issue49864.go9
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50259.go18
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50276.go39
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50281.go26
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50321.go8
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50372.go27
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50417.go68
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50426.go44
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50427.go23
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50450.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50516.go13
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50646.go26
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50729.go19
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50755.go47
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50779.go23
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50782.go47
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50816.go23
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50833.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50912.go19
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50918.go21
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50929.go68
-rw-r--r--src/internal/types/testdata/fixedbugs/issue50965.go17
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51025.go38
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51048.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51139.go26
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51145.go18
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51158.go18
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51229.go164
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51232.go30
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51233.go27
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51257.go46
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51335.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51339.go18
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51360.go13
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51376.go24
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51386.go17
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51437.go17
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51472.go54
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51509.go7
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51525.go20
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51533.go20
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51578.go17
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51593.go13
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51607.go65
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51610.go9
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51616.go19
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51658.go43
-rw-r--r--src/internal/types/testdata/fixedbugs/issue51877.go18
-rw-r--r--src/internal/types/testdata/fixedbugs/issue52031.go33
-rw-r--r--src/internal/types/testdata/fixedbugs/issue52401.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue52529.go15
-rw-r--r--src/internal/types/testdata/fixedbugs/issue52698.go62
-rw-r--r--src/internal/types/testdata/fixedbugs/issue52915.go23
-rw-r--r--src/internal/types/testdata/fixedbugs/issue53358.go19
-rw-r--r--src/internal/types/testdata/fixedbugs/issue53650.go59
-rw-r--r--src/internal/types/testdata/fixedbugs/issue53692.go15
-rw-r--r--src/internal/types/testdata/fixedbugs/issue54280.go7
-rw-r--r--src/internal/types/testdata/fixedbugs/issue54405.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue54424.go12
-rw-r--r--src/internal/types/testdata/fixedbugs/issue54942.go38
-rw-r--r--src/internal/types/testdata/fixedbugs/issue56351.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue56425.go8
-rw-r--r--src/internal/types/testdata/fixedbugs/issue56665.go30
-rw-r--r--src/internal/types/testdata/fixedbugs/issue57155.go14
-rw-r--r--src/internal/types/testdata/fixedbugs/issue57160.go10
-rw-r--r--src/internal/types/testdata/fixedbugs/issue57192.go22
-rw-r--r--src/internal/types/testdata/fixedbugs/issue57352.go21
-rw-r--r--src/internal/types/testdata/fixedbugs/issue57486.go29
-rw-r--r--src/internal/types/testdata/fixedbugs/issue57500.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue57522.go24
-rw-r--r--src/internal/types/testdata/fixedbugs/issue58611.go27
-rw-r--r--src/internal/types/testdata/fixedbugs/issue58612.go14
-rw-r--r--src/internal/types/testdata/fixedbugs/issue58671.go20
-rw-r--r--src/internal/types/testdata/fixedbugs/issue58742.go18
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59190.go36
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59207.go12
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59209.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59338a.go21
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59338b.go19
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59371.go17
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59639.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59740.go25
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59848.go10
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59890.go17
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59953.go9
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59956.go47
-rw-r--r--src/internal/types/testdata/fixedbugs/issue59958.go22
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60346.go17
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60377.go88
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60460.go88
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60500.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60542.go12
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60556.go19
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60562.go61
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60688.go16
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60906.go11
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60933.go67
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60946.go38
-rw-r--r--src/internal/types/testdata/fixedbugs/issue61486.go9
-rw-r--r--src/internal/types/testdata/fixedbugs/issue61879.go57
-rw-r--r--src/internal/types/testdata/fixedbugs/issue61903.go20
-rw-r--r--src/internal/types/testdata/fixedbugs/issue62157.go128
-rw-r--r--src/internal/types/testdata/fixedbugs/issue6977.go82
-rw-r--r--src/internal/types/testdata/spec/assignability.go264
-rw-r--r--src/internal/types/testdata/spec/comparable.go26
-rw-r--r--src/internal/types/testdata/spec/comparable1.19.go28
-rw-r--r--src/internal/types/testdata/spec/comparisons.go120
-rw-r--r--src/internal/types/testdata/spec/conversions.go208
-rw-r--r--src/internal/unsafeheader/unsafeheader.go37
-rw-r--r--src/internal/unsafeheader/unsafeheader_test.go100
-rw-r--r--src/internal/xcoff/ar.go226
-rw-r--r--src/internal/xcoff/ar_test.go79
-rw-r--r--src/internal/xcoff/file.go697
-rw-r--r--src/internal/xcoff/file_test.go102
-rw-r--r--src/internal/xcoff/testdata/bigar-empty2
-rw-r--r--src/internal/xcoff/testdata/bigar-ppc64  bin 0 -> 2468 bytes
-rw-r--r--src/internal/xcoff/testdata/gcc-ppc32-aix-dwarf2-exec  bin 0 -> 54694 bytes
-rw-r--r--src/internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec  bin 0 -> 57152 bytes
-rw-r--r--src/internal/xcoff/testdata/hello.c7
-rw-r--r--src/internal/xcoff/testdata/printbye.c5
-rw-r--r--src/internal/xcoff/testdata/printhello.c5
-rw-r--r--src/internal/xcoff/xcoff.go367
-rw-r--r--src/internal/zstd/bits.go130
-rw-r--r--src/internal/zstd/block.go436
-rw-r--r--src/internal/zstd/fse.go437
-rw-r--r--src/internal/zstd/fse_test.go89
-rw-r--r--src/internal/zstd/fuzz_test.go140
-rw-r--r--src/internal/zstd/huff.go204
-rw-r--r--src/internal/zstd/literals.go330
-rw-r--r--src/internal/zstd/xxhash.go148
-rw-r--r--src/internal/zstd/xxhash_test.go105
-rw-r--r--src/internal/zstd/zstd.go508
-rw-r--r--src/internal/zstd/zstd_test.go249
837 files changed, 74776 insertions, 0 deletions
diff --git a/src/internal/abi/abi.go b/src/internal/abi/abi.go
new file mode 100644
index 0000000..e1c8adc
--- /dev/null
+++ b/src/internal/abi/abi.go
@@ -0,0 +1,102 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+import (
+ "internal/goarch"
+ "unsafe"
+)
+
+// RegArgs is a struct that has space for each argument
+// and return value register on the current architecture.
+//
+// Assembly code knows the layout of the first two fields
+// of RegArgs.
+//
+// RegArgs also contains additional space to hold pointers
+// when it may not be safe to keep them only in the integer
+// register space otherwise.
+type RegArgs struct {
+ // Values in these slots should be precisely the bit-by-bit
+ // representation of how they would appear in a register.
+ //
+ // This means that on big endian arches, integer values should
+ // be in the top bits of the slot. Floats are usually just
+ // directly represented, but some architectures treat narrow
+ // width floating point values specially (e.g. they're promoted
+ // first, or they need to be NaN-boxed).
+ Ints [IntArgRegs]uintptr // untyped integer registers
+ Floats [FloatArgRegs]uint64 // untyped float registers
+
+ // Fields above this point are known to assembly.
+
+ // Ptrs is a space that duplicates Ints but with pointer type,
+ // used to make pointers passed or returned in registers
+ // visible to the GC by making the type unsafe.Pointer.
+ Ptrs [IntArgRegs]unsafe.Pointer
+
+ // ReturnIsPtr is a bitmap that indicates which registers
+ // contain or will contain pointers on the return path from
+ // a reflectcall. The i'th bit indicates whether the i'th
+ // register contains or will contain a valid Go pointer.
+ ReturnIsPtr IntArgRegBitmap
+}
+
+func (r *RegArgs) Dump() {
+ print("Ints:")
+ for _, x := range r.Ints {
+ print(" ", x)
+ }
+ println()
+ print("Floats:")
+ for _, x := range r.Floats {
+ print(" ", x)
+ }
+ println()
+ print("Ptrs:")
+ for _, x := range r.Ptrs {
+ print(" ", x)
+ }
+ println()
+}
+
+// IntRegArgAddr returns a pointer inside of r.Ints[reg] that is appropriately
+// offset for an argument of size argSize.
+//
+// argSize must be non-zero, fit in a register, and a power-of-two.
+//
+// This method is a helper for dealing with the endianness of different CPU
+// architectures, since sub-word-sized arguments in big endian architectures
+// need to be "aligned" to the upper edge of the register to be interpreted
+// by the CPU correctly.
+func (r *RegArgs) IntRegArgAddr(reg int, argSize uintptr) unsafe.Pointer {
+ if argSize > goarch.PtrSize || argSize == 0 || argSize&(argSize-1) != 0 {
+ panic("invalid argSize")
+ }
+ offset := uintptr(0)
+ if goarch.BigEndian {
+ offset = goarch.PtrSize - argSize
+ }
+ return unsafe.Pointer(uintptr(unsafe.Pointer(&r.Ints[reg])) + offset)
+}
+
+// IntArgRegBitmap is a bitmap large enough to hold one bit per
+// integer argument/return register.
+type IntArgRegBitmap [(IntArgRegs + 7) / 8]uint8
+
+// Set sets the i'th bit of the bitmap to 1.
+func (b *IntArgRegBitmap) Set(i int) {
+ b[i/8] |= uint8(1) << (i % 8)
+}
+
+// Get returns whether the i'th bit of the bitmap is set.
+//
+// nosplit because it's called in extremely sensitive contexts, like
+// on the reflectcall return path.
+//
+//go:nosplit
+func (b *IntArgRegBitmap) Get(i int) bool {
+ return b[i/8]&(uint8(1)<<(i%8)) != 0
+}
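A side note on the two helpers above, since their behaviour is easy to mirror: the sketch below restates the Set/Get bit arithmetic and the big-endian slot offset from IntRegArgAddr as ordinary user code. The names here (intArgRegBitmap, slotOffset) are local stand-ins for this illustration only; internal/abi itself cannot be imported outside the standard library.

package main

import "fmt"

// intArgRegs stands in for the per-architecture IntArgRegs constant
// (9 on amd64, per abi_amd64.go below); the name is local to this sketch.
const intArgRegs = 9

// intArgRegBitmap mirrors IntArgRegBitmap: one bit per integer register.
type intArgRegBitmap [(intArgRegs + 7) / 8]uint8

// set and get repeat the Set/Get bit arithmetic from the hunk above.
func (b *intArgRegBitmap) set(i int)      { b[i/8] |= uint8(1) << (i % 8) }
func (b *intArgRegBitmap) get(i int) bool { return b[i/8]&(uint8(1)<<(i%8)) != 0 }

// slotOffset mirrors IntRegArgAddr's offset computation: on a big-endian
// target a sub-word argument is "aligned" to the top of its pointer-sized
// register slot, so its address is offset by ptrSize-argSize.
func slotOffset(ptrSize, argSize uintptr, bigEndian bool) uintptr {
	if argSize == 0 || argSize > ptrSize || argSize&(argSize-1) != 0 {
		panic("invalid argSize")
	}
	if bigEndian {
		return ptrSize - argSize
	}
	return 0
}

func main() {
	var b intArgRegBitmap
	b.set(3)
	fmt.Println(b.get(3), b.get(4))      // true false
	fmt.Println(slotOffset(8, 1, true))  // 7: the byte sits at the high end of the slot
	fmt.Println(slotOffset(8, 1, false)) // 0: little endian keeps it at the low end
}

On a big-endian target with 8-byte pointers, a 1-byte argument is addressed at offset 7, which is exactly the "aligned to the upper edge of the register" case the doc comment describes.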
diff --git a/src/internal/abi/abi_amd64.go b/src/internal/abi/abi_amd64.go
new file mode 100644
index 0000000..d3c5678
--- /dev/null
+++ b/src/internal/abi/abi_amd64.go
@@ -0,0 +1,18 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+const (
+ // See abi_generic.go.
+
+ // RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11.
+ IntArgRegs = 9
+
+ // X0 -> X14.
+ FloatArgRegs = 15
+
+ // We use SSE2 registers which support 64-bit float operations.
+ EffectiveFloatRegSize = 8
+)
diff --git a/src/internal/abi/abi_arm64.go b/src/internal/abi/abi_arm64.go
new file mode 100644
index 0000000..4dc5143
--- /dev/null
+++ b/src/internal/abi/abi_arm64.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+const (
+ // See abi_generic.go.
+
+ // R0 - R15.
+ IntArgRegs = 16
+
+ // F0 - F15.
+ FloatArgRegs = 16
+
+ EffectiveFloatRegSize = 8
+)
diff --git a/src/internal/abi/abi_generic.go b/src/internal/abi/abi_generic.go
new file mode 100644
index 0000000..76ef2e2
--- /dev/null
+++ b/src/internal/abi/abi_generic.go
@@ -0,0 +1,38 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.regabiargs && !amd64 && !arm64 && !ppc64 && !ppc64le && !riscv64
+
+package abi
+
+const (
+ // ABI-related constants.
+ //
+ // In the generic case, these are all zero
+ // which lets them gracefully degrade to ABI0.
+
+ // IntArgRegs is the number of registers dedicated
+ // to passing integer argument values. Result registers are identical
+ // to argument registers, so this number is used for those too.
+ IntArgRegs = 0
+
+ // FloatArgRegs is the number of registers dedicated
+ // to passing floating-point argument values. Result registers are
+ // identical to argument registers, so this number is used for
+ // those too.
+ FloatArgRegs = 0
+
+ // EffectiveFloatRegSize describes the width of floating point
+ // registers on the current platform from the ABI's perspective.
+ //
+ // Since Go only supports 32-bit and 64-bit floating point primitives,
+ // this number should be either 0, 4, or 8. 0 indicates no floating
+ // point registers for the ABI or that floating point values will be
+ // passed via the softfloat ABI.
+ //
+ // For platforms that support larger floating point register widths,
+ // such as x87's 80-bit "registers" (not that we support x87 currently),
+ // use 8.
+ EffectiveFloatRegSize = 0
+)
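To make the "gracefully degrade to ABI0" comment above concrete: with IntArgRegs = 0, no argument can be register-assigned, so everything is passed on the stack, as under ABI0. The toy helper below only illustrates what the constant controls; assign is a made-up name, not a runtime function.

package main

import "fmt"

// assign reports how many of n integer arguments fit in registers and how
// many spill to the stack, given a platform's IntArgRegs value.
func assign(n, intArgRegs int) (inRegs, onStack int) {
	if n <= intArgRegs {
		return n, 0
	}
	return intArgRegs, n - intArgRegs
}

func main() {
	fmt.Println(assign(5, 0)) // 0 5: the generic build degrades to an all-stack (ABI0-like) layout
	fmt.Println(assign(5, 9)) // 5 0: with amd64's nine integer argument registers, all five fit
}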
diff --git a/src/internal/abi/abi_ppc64x.go b/src/internal/abi/abi_ppc64x.go
new file mode 100644
index 0000000..73416d7
--- /dev/null
+++ b/src/internal/abi/abi_ppc64x.go
@@ -0,0 +1,19 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+package abi
+
+const (
+ // See abi_generic.go.
+
+ // R3 - R10, R14 - R17.
+ IntArgRegs = 12
+
+ // F1 - F12.
+ FloatArgRegs = 12
+
+ EffectiveFloatRegSize = 8
+)
diff --git a/src/internal/abi/abi_riscv64.go b/src/internal/abi/abi_riscv64.go
new file mode 100644
index 0000000..2bcd9d6
--- /dev/null
+++ b/src/internal/abi/abi_riscv64.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+const (
+ // See abi_generic.go.
+
+ // X8 - X23
+ IntArgRegs = 16
+
+ // F8 - F23.
+ FloatArgRegs = 16
+
+ EffectiveFloatRegSize = 8
+)
diff --git a/src/internal/abi/abi_test.go b/src/internal/abi/abi_test.go
new file mode 100644
index 0000000..44b9e78
--- /dev/null
+++ b/src/internal/abi/abi_test.go
@@ -0,0 +1,79 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi_test
+
+import (
+ "internal/abi"
+ "internal/testenv"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+func TestFuncPC(t *testing.T) {
+ // Test that FuncPC* can get correct function PC.
+ pcFromAsm := abi.FuncPCTestFnAddr
+
+ // Test FuncPC for locally defined function
+ pcFromGo := abi.FuncPCTest()
+ if pcFromGo != pcFromAsm {
+ t.Errorf("FuncPC returns wrong PC, want %x, got %x", pcFromAsm, pcFromGo)
+ }
+
+ // Test FuncPC for imported function
+ pcFromGo = abi.FuncPCABI0(abi.FuncPCTestFn)
+ if pcFromGo != pcFromAsm {
+ t.Errorf("FuncPC returns wrong PC, want %x, got %x", pcFromAsm, pcFromGo)
+ }
+}
+
+func TestFuncPCCompileError(t *testing.T) {
+ // Test that FuncPC* on a function of a mismatched ABI is rejected.
+ testenv.MustHaveGoBuild(t)
+
+ // We want to test internal package, which we cannot normally import.
+ // Run the assembler and compiler manually.
+ tmpdir := t.TempDir()
+ asmSrc := filepath.Join("testdata", "x.s")
+ goSrc := filepath.Join("testdata", "x.go")
+ symabi := filepath.Join(tmpdir, "symabi")
+ obj := filepath.Join(tmpdir, "x.o")
+
+ // Write an importcfg file for the dependencies of the package.
+ importcfgfile := filepath.Join(tmpdir, "hello.importcfg")
+ testenv.WriteImportcfg(t, importcfgfile, nil, "internal/abi")
+
+ // parse assembly code for symabi.
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "asm", "-gensymabis", "-o", symabi, asmSrc)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("go tool asm -gensymabis failed: %v\n%s", err, out)
+ }
+
+ // compile go code.
+ cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-importcfg="+importcfgfile, "-p=p", "-symabis", symabi, "-o", obj, goSrc)
+ out, err = cmd.CombinedOutput()
+ if err == nil {
+ t.Fatalf("go tool compile did not fail")
+ }
+
+ // Expect errors in line 17, 18, 20, no errors on other lines.
+ want := []string{"x.go:17", "x.go:18", "x.go:20"}
+ got := strings.Split(string(out), "\n")
+ if got[len(got)-1] == "" {
+ got = got[:len(got)-1] // remove last empty line
+ }
+ for i, s := range got {
+ if !strings.Contains(s, want[i]) {
+ t.Errorf("did not error on line %s", want[i])
+ }
+ }
+ if len(got) != len(want) {
+ t.Errorf("unexpected number of errors, want %d, got %d", len(want), len(got))
+ }
+ if t.Failed() {
+ t.Logf("output:\n%s", string(out))
+ }
+}
diff --git a/src/internal/abi/abi_test.s b/src/internal/abi/abi_test.s
new file mode 100644
index 0000000..93ace3e
--- /dev/null
+++ b/src/internal/abi/abi_test.s
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+#ifdef GOARCH_386
+#define PTRSIZE 4
+#endif
+#ifdef GOARCH_arm
+#define PTRSIZE 4
+#endif
+#ifdef GOARCH_mips
+#define PTRSIZE 4
+#endif
+#ifdef GOARCH_mipsle
+#define PTRSIZE 4
+#endif
+#ifndef PTRSIZE
+#define PTRSIZE 8
+#endif
+
+TEXT internal∕abi·FuncPCTestFn(SB),NOSPLIT,$0-0
+ RET
+
+GLOBL internal∕abi·FuncPCTestFnAddr(SB), NOPTR, $PTRSIZE
+DATA internal∕abi·FuncPCTestFnAddr(SB)/PTRSIZE, $internal∕abi·FuncPCTestFn(SB)
diff --git a/src/internal/abi/compiletype.go b/src/internal/abi/compiletype.go
new file mode 100644
index 0000000..d92adde
--- /dev/null
+++ b/src/internal/abi/compiletype.go
@@ -0,0 +1,167 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+// These functions are the build-time version of the Go type data structures.
+
+// Their results must be kept in sync with the type definitions they describe.
+// Because the host and target type sizes can differ, the compiler and
+// linker cannot use the host information that they might get from
+// unsafe.Sizeof or Alignof, nor from the runtime, reflect, or reflectlite packages.
+
+// CommonSize returns sizeof(Type) for a compilation target with a given ptrSize
+func CommonSize(ptrSize int) int { return 4*ptrSize + 8 + 8 }
+
+// StructFieldSize returns sizeof(StructField) for a compilation target with a given ptrSize
+func StructFieldSize(ptrSize int) int { return 3 * ptrSize }
+
+// UncommonSize returns sizeof(UncommonType). This currently does not depend on ptrSize.
+// This exported function is in an internal package, so it may change to depend on ptrSize in the future.
+func UncommonSize() uint64 { return 4 + 2 + 2 + 4 + 4 }
+
+// IMethodSize returns sizeof(IMethod) for a compilation target with a given ptrSize
+func IMethodSize(ptrSize int) int { return 4 + 4 }
+
+// KindOff returns the offset of Type.Kind_ for a compilation target with a given ptrSize
+func KindOff(ptrSize int) int { return 2*ptrSize + 7 }
+
+// SizeOff returns the offset of Type.Size_ for a compilation target with a given ptrSize
+func SizeOff(ptrSize int) int { return 0 }
+
+// PtrBytesOff returns the offset of Type.PtrBytes for a compilation target with a given ptrSize
+func PtrBytesOff(ptrSize int) int { return ptrSize }
+
+// TFlagOff returns the offset of Type.TFlag for a compilation target with a given ptrSize
+func TFlagOff(ptrSize int) int { return 2*ptrSize + 4 }
+
+// Offset is for computing offsets of type data structures at compile/link time;
+// the target platform may not be the host platform. Its state includes the
+// current offset, necessary alignment for the sequence of types, and the size
+// of pointers and alignment of slices, interfaces, and strings (this is for tearing-
+// resistant access to these types, if/when that is supported).
+type Offset struct {
+ off uint64 // the current offset
+ align uint8 // the required alignment of the container
+ ptrSize uint8 // the size of a pointer in bytes
+ sliceAlign uint8 // the alignment of slices (and interfaces and strings)
+}
+
+// NewOffset returns a new Offset with offset 0 and alignment 1.
+func NewOffset(ptrSize uint8, twoWordAlignSlices bool) Offset {
+ if twoWordAlignSlices {
+ return Offset{off: 0, align: 1, ptrSize: ptrSize, sliceAlign: 2 * ptrSize}
+ }
+ return Offset{off: 0, align: 1, ptrSize: ptrSize, sliceAlign: ptrSize}
+}
+
+func assertIsAPowerOfTwo(x uint8) {
+ if x == 0 {
+ panic("Zero is not a power of two")
+ }
+ if x&-x == x {
+ return
+ }
+ panic("Not a power of two")
+}
+
+// InitializedOffset returns a new Offset with specified offset, alignment, pointer size, and slice alignment.
+func InitializedOffset(off int, align uint8, ptrSize uint8, twoWordAlignSlices bool) Offset {
+ assertIsAPowerOfTwo(align)
+ o0 := NewOffset(ptrSize, twoWordAlignSlices)
+ o0.off = uint64(off)
+ o0.align = align
+ return o0
+}
+
+func (o Offset) align_(a uint8) Offset {
+ o.off = (o.off + uint64(a) - 1) & ^(uint64(a) - 1)
+ if o.align < a {
+ o.align = a
+ }
+ return o
+}
+
+// Align returns the offset obtained by aligning offset to a multiple of a.
+// a must be a power of two.
+func (o Offset) Align(a uint8) Offset {
+ assertIsAPowerOfTwo(a)
+ return o.align_(a)
+}
+
+// plus returns the offset obtained by appending a power-of-2-sized-and-aligned object to o.
+func (o Offset) plus(x uint64) Offset {
+ o = o.align_(uint8(x))
+ o.off += x
+ return o
+}
+
+// D8 returns the offset obtained by appending an 8-bit field to o.
+func (o Offset) D8() Offset {
+ return o.plus(1)
+}
+
+// D16 returns the offset obtained by appending a 16-bit field to o.
+func (o Offset) D16() Offset {
+ return o.plus(2)
+}
+
+// D32 returns the offset obtained by appending a 32-bit field to o.
+func (o Offset) D32() Offset {
+ return o.plus(4)
+}
+
+// D64 returns the offset obtained by appending a 64-bit field to o.
+func (o Offset) D64() Offset {
+ return o.plus(8)
+}
+
+// P returns the offset obtained by appending a pointer field to o.
+func (o Offset) P() Offset {
+ if o.ptrSize == 0 {
+ panic("This offset has no defined pointer size")
+ }
+ return o.plus(uint64(o.ptrSize))
+}
+
+// Slice returns the offset obtained by appending a slice field to o.
+func (o Offset) Slice() Offset {
+ o = o.align_(o.sliceAlign)
+ o.off += 3 * uint64(o.ptrSize)
+ // There's been discussion of whether slices should be 2-word aligned to allow
+ // use of aligned 2-word load/store to prevent tearing; this is future-proofing.
+ // In general, for purposes of struct layout (and very likely default C layout
+ // compatibility) the "size" of a Go type is rounded up to its alignment.
+ return o.Align(o.sliceAlign)
+}
+
+// String returns the offset obtained by appending a string field to o.
+func (o Offset) String() Offset {
+ o = o.align_(o.sliceAlign)
+ o.off += 2 * uint64(o.ptrSize)
+ return o // We "know" it needs no further alignment
+}
+
+// Interface returns the offset obtained by appending an interface field to o.
+func (o Offset) Interface() Offset {
+ o = o.align_(o.sliceAlign)
+ o.off += 2 * uint64(o.ptrSize)
+ return o // We "know" it needs no further alignment
+}
+
+// Offset returns the struct-aligned offset (size) of o.
+// This is at least as large as the current internal offset; it may be larger.
+func (o Offset) Offset() uint64 {
+ return o.Align(o.align).off
+}
+
+func (o Offset) PlusUncommon() Offset {
+ o.off += UncommonSize()
+ return o
+}
+
+// CommonOffset returns the Offset to the data after the common portion of type data structures.
+func CommonOffset(ptrSize int, twoWordAlignSlices bool) Offset {
+ return InitializedOffset(CommonSize(ptrSize), uint8(ptrSize), uint8(ptrSize), twoWordAlignSlices)
+}
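+
+// As a rough sketch (illustrative only, not a sequence the compiler is
+// required to use), walking the 64-bit layout of Type from type.go with
+// Offset lands on the same value that CommonSize(8) reports:
+//
+//	o := NewOffset(8, false)  // ptrSize = 8, no two-word slice alignment
+//	o = o.P().P()             // Size_, PtrBytes
+//	o = o.D32()               // Hash
+//	o = o.D8().D8().D8().D8() // TFlag, Align_, FieldAlign_, Kind_
+//	o = o.P().P()             // Equal, GCData
+//	o = o.D32().D32()         // Str, PtrToThis
+//	// o.Offset() == uint64(CommonSize(8)) == 48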
diff --git a/src/internal/abi/export_test.go b/src/internal/abi/export_test.go
new file mode 100644
index 0000000..2a87e9d
--- /dev/null
+++ b/src/internal/abi/export_test.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+func FuncPCTestFn()
+
+var FuncPCTestFnAddr uintptr // address of FuncPCTestFn, directly retrieved from assembly
+
+//go:noinline
+func FuncPCTest() uintptr {
+ return FuncPCABI0(FuncPCTestFn)
+}
diff --git a/src/internal/abi/funcpc.go b/src/internal/abi/funcpc.go
new file mode 100644
index 0000000..e038d36
--- /dev/null
+++ b/src/internal/abi/funcpc.go
@@ -0,0 +1,31 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !gccgo
+
+package abi
+
+// FuncPC* intrinsics.
+//
+// CAREFUL: In programs with plugins, FuncPC* can return different values
+// for the same function (because there are actually multiple copies of
+// the same function in the address space). To be safe, don't use the
+// results of this function in any == expression. It is only safe to
+// use the result as an address at which to start executing code.
+
+// FuncPCABI0 returns the entry PC of the function f, which must be a
+// direct reference of a function defined as ABI0. Otherwise it is a
+// compile-time error.
+//
+// Implemented as a compile intrinsic.
+func FuncPCABI0(f interface{}) uintptr
+
+// FuncPCABIInternal returns the entry PC of the function f. If f is a
+// direct reference of a function, it must be defined as ABIInternal.
+// Otherwise it is a compile-time error. If f is not a direct reference
+// of a defined function, it assumes that f is a func value. Otherwise
+// the behavior is undefined.
+//
+// Implemented as a compile intrinsic.
+func FuncPCABIInternal(f interface{}) uintptr
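+
+// A minimal usage sketch (asmStub and someGoFn are hypothetical: asmStub is
+// a function defined as ABI0 in assembly, someGoFn an ordinary Go function):
+//
+//	func asmStub() // implemented in assembly, ABI0
+//
+//	entry := FuncPCABI0(asmStub)    // entry PC; only use it to start execution,
+//	                                // never in an == comparison (see above)
+//	_ = FuncPCABIInternal(someGoFn) // ABIInternal definitions, or any func value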
diff --git a/src/internal/abi/funcpc_gccgo.go b/src/internal/abi/funcpc_gccgo.go
new file mode 100644
index 0000000..ad5fa52
--- /dev/null
+++ b/src/internal/abi/funcpc_gccgo.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// For bootstrapping with gccgo.
+
+//go:build gccgo
+
+package abi
+
+import "unsafe"
+
+func FuncPCABI0(f interface{}) uintptr {
+ words := (*[2]unsafe.Pointer)(unsafe.Pointer(&f))
+ return *(*uintptr)(unsafe.Pointer(words[1]))
+}
+
+func FuncPCABIInternal(f interface{}) uintptr {
+ words := (*[2]unsafe.Pointer)(unsafe.Pointer(&f))
+ return *(*uintptr)(unsafe.Pointer(words[1]))
+}
diff --git a/src/internal/abi/map.go b/src/internal/abi/map.go
new file mode 100644
index 0000000..e5b0a0b
--- /dev/null
+++ b/src/internal/abi/map.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+// Map constants common to several packages
+// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
+const (
+ MapBucketCountBits = 3 // log2 of number of elements in a bucket.
+ MapBucketCount = 1 << MapBucketCountBits
+ MapMaxKeyBytes = 128 // Must fit in a uint8.
+ MapMaxElemBytes = 128 // Must fit in a uint8.
+)
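+
+// A hedged sketch of how these limits are typically consulted (the real
+// decision is made during map layout in the compiler and may differ in
+// detail; keyType and elemType are hypothetical): a key or element type
+// larger than the limit is stored indirectly, as a pointer in the bucket,
+// which is what MapType.IndirectKey and MapType.IndirectElem report.
+//
+//	keyIndirect := keyType.Size_ > MapMaxKeyBytes    // bucket holds *key
+//	elemIndirect := elemType.Size_ > MapMaxElemBytes // bucket holds *elem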
diff --git a/src/internal/abi/stack.go b/src/internal/abi/stack.go
new file mode 100644
index 0000000..8e3327e
--- /dev/null
+++ b/src/internal/abi/stack.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+const (
+ // StackNosplitBase is the base maximum number of bytes that a chain of
+ // NOSPLIT functions can use.
+ //
+ // This value must be multiplied by the stack guard multiplier, so do not
+ // use it directly. See runtime/stack.go:stackNosplit and
+ // cmd/internal/objabi/stack.go:StackNosplit.
+ StackNosplitBase = 800
+
+ // We have three different sequences for stack bounds checks, depending on
+ // whether the stack frame of a function is small, big, or huge.
+
+ // After a stack split check the SP is allowed to be StackSmall bytes below
+ // the stack guard.
+ //
+ // Functions that need frames <= StackSmall can perform the stack check
+ // using a single comparison directly between the stack guard and the SP
+ // because we ensure that StackSmall bytes of stack space are available
+ // beyond the stack guard.
+ StackSmall = 128
+
+ // Functions that need frames <= StackBig can assume that neither
+ // SP-framesize nor stackGuard-StackSmall will underflow, and thus use a
+ // more efficient check. In order to ensure this, StackBig must be <= the
+ // size of the unmapped space at zero.
+ StackBig = 4096
+)
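+
+// A hedged sketch of the small and big check shapes described above
+// (pseudocode; the real sequences are emitted per architecture by
+// cmd/internal/obj, and huge frames add explicit wraparound checks):
+//
+//	// framesize <= StackSmall: the guard leaves StackSmall bytes of headroom
+//	needMorestack := sp <= stackGuard
+//
+//	// StackSmall < framesize <= StackBig: neither subtraction can underflow
+//	needMorestack = sp-framesize <= stackGuard-StackSmall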
diff --git a/src/internal/abi/stub.s b/src/internal/abi/stub.s
new file mode 100644
index 0000000..5bad98d
--- /dev/null
+++ b/src/internal/abi/stub.s
@@ -0,0 +1,7 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file silences errors about body-less functions
+// that are provided by intrinsics in the latest version of the compiler,
+// but may not be known to the bootstrap compiler.
diff --git a/src/internal/abi/symtab.go b/src/internal/abi/symtab.go
new file mode 100644
index 0000000..bf6ea82
--- /dev/null
+++ b/src/internal/abi/symtab.go
@@ -0,0 +1,106 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+// A FuncFlag records bits about a function, passed to the runtime.
+type FuncFlag uint8
+
+const (
+ // FuncFlagTopFrame indicates a function that appears at the top of its stack.
+ // The traceback routines stop at such a function and consider that a
+ // successful, complete traversal of the stack.
+ // Examples of TopFrame functions include goexit, which appears
+ // at the top of a user goroutine stack, and mstart, which appears
+ // at the top of a system goroutine stack.
+ FuncFlagTopFrame FuncFlag = 1 << iota
+
+ // FuncFlagSPWrite indicates a function that writes an arbitrary value to SP
+ // (any write other than adding or subtracting a constant amount).
+ // The traceback routines cannot encode such changes into the
+ // pcsp tables, so the function traceback cannot safely unwind past
+ // SPWrite functions. Stopping at an SPWrite function is considered
+ // to be an incomplete unwinding of the stack. In certain contexts
+ // (in particular garbage collector stack scans) that is a fatal error.
+ FuncFlagSPWrite
+
+ // FuncFlagAsm indicates that a function was implemented in assembly.
+ FuncFlagAsm
+)
+
+// A FuncID identifies particular functions that need to be treated
+// specially by the runtime.
+// Note that in some situations involving plugins, there may be multiple
+// copies of a particular special runtime function.
+type FuncID uint8
+
+const (
+ // If you add a FuncID, you probably also want to add an entry to the map in
+ // ../../cmd/internal/objabi/funcid.go
+
+ FuncIDNormal FuncID = iota // not a special function
+ FuncID_abort
+ FuncID_asmcgocall
+ FuncID_asyncPreempt
+ FuncID_cgocallback
+ FuncID_debugCallV2
+ FuncID_gcBgMarkWorker
+ FuncID_goexit
+ FuncID_gogo
+ FuncID_gopanic
+ FuncID_handleAsyncEvent
+ FuncID_mcall
+ FuncID_morestack
+ FuncID_mstart
+ FuncID_panicwrap
+ FuncID_rt0_go
+ FuncID_runfinq
+ FuncID_runtime_main
+ FuncID_sigpanic
+ FuncID_systemstack
+ FuncID_systemstack_switch
+ FuncIDWrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.)
+)
+
+// ArgsSizeUnknown is set in Func.argsize to mark all functions
+// whose argument size is unknown (C vararg functions, and
+// assembly code without an explicit specification).
+// This value is generated by the compiler, assembler, or linker.
+const ArgsSizeUnknown = -0x80000000
+
+// IDs for PCDATA and FUNCDATA tables in Go binaries.
+//
+// These must agree with ../../../runtime/funcdata.h.
+const (
+ PCDATA_UnsafePoint = 0
+ PCDATA_StackMapIndex = 1
+ PCDATA_InlTreeIndex = 2
+ PCDATA_ArgLiveIndex = 3
+
+ FUNCDATA_ArgsPointerMaps = 0
+ FUNCDATA_LocalsPointerMaps = 1
+ FUNCDATA_StackObjects = 2
+ FUNCDATA_InlTree = 3
+ FUNCDATA_OpenCodedDeferInfo = 4
+ FUNCDATA_ArgInfo = 5
+ FUNCDATA_ArgLiveInfo = 6
+ FUNCDATA_WrapInfo = 7
+)
+
+// Special values for the PCDATA_UnsafePoint table.
+const (
+ UnsafePointSafe = -1 // Safe for async preemption
+ UnsafePointUnsafe = -2 // Unsafe for async preemption
+
+ // UnsafePointRestart1(2) apply on a sequence of instructions, within
+ // which if an async preemption happens, we should back off the PC
+ // to the start of the sequence when resuming.
+ // We need two so we can distinguish the start/end of the sequence
+ // in case that two sequences are next to each other.
+ UnsafePointRestart1 = -3
+ UnsafePointRestart2 = -4
+
+ // Like UnsafePointRestart1, but back to function entry if async preempted.
+ UnsafePointRestartAtEntry = -5
+)
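+
+// A small sketch of how a consumer such as a traceback or analysis tool
+// might test these values (flag and id are hypothetical variables holding a
+// function's FuncFlag and FuncID):
+//
+//	if flag&FuncFlagTopFrame != 0 {
+//		// reaching this frame counts as a complete traversal
+//	}
+//	if flag&FuncFlagSPWrite != 0 {
+//		// unwinding past this frame is not safe
+//	}
+//	special := id != FuncIDNormal // e.g. FuncID_gopanic, FuncID_morestack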
diff --git a/src/internal/abi/testdata/x.go b/src/internal/abi/testdata/x.go
new file mode 100644
index 0000000..cae103d
--- /dev/null
+++ b/src/internal/abi/testdata/x.go
@@ -0,0 +1,22 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x
+
+import "internal/abi"
+
+func Fn0() // defined in assembly
+
+func Fn1() {}
+
+var FnExpr func()
+
+func test() {
+ _ = abi.FuncPCABI0(Fn0) // line 16, no error
+ _ = abi.FuncPCABIInternal(Fn0) // line 17, error
+ _ = abi.FuncPCABI0(Fn1) // line 18, error
+ _ = abi.FuncPCABIInternal(Fn1) // line 19, no error
+ _ = abi.FuncPCABI0(FnExpr) // line 20, error
+ _ = abi.FuncPCABIInternal(FnExpr) // line 21, no error
+}
diff --git a/src/internal/abi/testdata/x.s b/src/internal/abi/testdata/x.s
new file mode 100644
index 0000000..63c1385
--- /dev/null
+++ b/src/internal/abi/testdata/x.s
@@ -0,0 +1,6 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+TEXT ·Fn0(SB), 0, $0-0
+ RET
diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go
new file mode 100644
index 0000000..4794f5a
--- /dev/null
+++ b/src/internal/abi/type.go
@@ -0,0 +1,712 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+import (
+ "unsafe"
+)
+
+// Type is the runtime representation of a Go type.
+//
+// Type is also referenced implicitly
+// (in the form of expressions involving constants and arch.PtrSize)
+// in cmd/compile/internal/reflectdata/reflect.go
+// and cmd/link/internal/ld/decodesym.go
+// (e.g. data[2*arch.PtrSize+4] references the TFlag field)
+// unsafe.Offsetof(Type{}.TFlag) cannot be used directly in those
+// places because it varies with cross compilation and experiments.
+type Type struct {
+ Size_ uintptr
+ PtrBytes uintptr // number of (prefix) bytes in the type that can contain pointers
+ Hash uint32 // hash of type; avoids computation in hash tables
+ TFlag TFlag // extra type information flags
+ Align_ uint8 // alignment of variable with this type
+ FieldAlign_ uint8 // alignment of struct field with this type
+ Kind_ uint8 // enumeration for C
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ Equal func(unsafe.Pointer, unsafe.Pointer) bool
+ // GCData stores the GC type data for the garbage collector.
+ // If the KindGCProg bit is set in kind, GCData is a GC program.
+ // Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
+ GCData *byte
+ Str NameOff // string form
+ PtrToThis TypeOff // type for pointer to this type, may be zero
+}
+
+// A Kind represents the specific kind of type that a Type represents.
+// The zero Kind is not a valid kind.
+type Kind uint
+
+const (
+ Invalid Kind = iota
+ Bool
+ Int
+ Int8
+ Int16
+ Int32
+ Int64
+ Uint
+ Uint8
+ Uint16
+ Uint32
+ Uint64
+ Uintptr
+ Float32
+ Float64
+ Complex64
+ Complex128
+ Array
+ Chan
+ Func
+ Interface
+ Map
+ Pointer
+ Slice
+ String
+ Struct
+ UnsafePointer
+)
+
+const (
+ // TODO (khr, drchase) why aren't these in TFlag? Investigate, fix if possible.
+ KindDirectIface = 1 << 5
+ KindGCProg = 1 << 6 // Type.gc points to GC program
+ KindMask = (1 << 5) - 1
+)
+
+// TFlag is used by a Type to signal what extra type information is
+// available in the memory directly following the Type value.
+type TFlag uint8
+
+const (
+ // TFlagUncommon means that there is extra data of type UncommonType
+ // just beyond the shared-per-type common data. That is, the data
+ // for struct types will store their UncommonType at one offset, the
+ // data for interface types will store their UncommonType at a different
+ // offset. UncommonType is always accessed via a pointer that is computed
+ // using trust-us-we-are-the-implementors pointer arithmetic.
+ //
+ // For example, if t.Kind() == Struct and t.tflag&TFlagUncommon != 0,
+ // then t has UncommonType data and it can be accessed as:
+ //
+ // type structTypeUncommon struct {
+ // structType
+ // u UncommonType
+ // }
+ // u := &(*structTypeUncommon)(unsafe.Pointer(t)).u
+ TFlagUncommon TFlag = 1 << 0
+
+ // TFlagExtraStar means the name in the str field has an
+ // extraneous '*' prefix. This is because for most types T in
+ // a program, the type *T also exists and reusing the str data
+ // saves binary size.
+ TFlagExtraStar TFlag = 1 << 1
+
+ // TFlagNamed means the type has a name.
+ TFlagNamed TFlag = 1 << 2
+
+ // TFlagRegularMemory means that equal and hash functions can treat
+ // this type as a single region of t.size bytes.
+ TFlagRegularMemory TFlag = 1 << 3
+)
+
+// NameOff is the offset to a name from moduledata.types. See resolveNameOff in runtime.
+type NameOff int32
+
+// TypeOff is the offset to a type from moduledata.types. See resolveTypeOff in runtime.
+type TypeOff int32
+
+// TextOff is an offset from the top of a text section. See (rtype).textOff in runtime.
+type TextOff int32
+
+// String returns the name of k.
+func (k Kind) String() string {
+ if int(k) < len(kindNames) {
+ return kindNames[k]
+ }
+ return kindNames[0]
+}
+
+var kindNames = []string{
+ Invalid: "invalid",
+ Bool: "bool",
+ Int: "int",
+ Int8: "int8",
+ Int16: "int16",
+ Int32: "int32",
+ Int64: "int64",
+ Uint: "uint",
+ Uint8: "uint8",
+ Uint16: "uint16",
+ Uint32: "uint32",
+ Uint64: "uint64",
+ Uintptr: "uintptr",
+ Float32: "float32",
+ Float64: "float64",
+ Complex64: "complex64",
+ Complex128: "complex128",
+ Array: "array",
+ Chan: "chan",
+ Func: "func",
+ Interface: "interface",
+ Map: "map",
+ Pointer: "ptr",
+ Slice: "slice",
+ String: "string",
+ Struct: "struct",
+ UnsafePointer: "unsafe.Pointer",
+}
+
+func (t *Type) Kind() Kind { return Kind(t.Kind_ & KindMask) }
+
+func (t *Type) HasName() bool {
+ return t.TFlag&TFlagNamed != 0
+}
+
+func (t *Type) Pointers() bool { return t.PtrBytes != 0 }
+
+// IfaceIndir reports whether t is stored indirectly in an interface value.
+func (t *Type) IfaceIndir() bool {
+ return t.Kind_&KindDirectIface == 0
+}
+
+// IsDirectIface reports whether t is stored directly in an interface value.
+func (t *Type) IsDirectIface() bool {
+ return t.Kind_&KindDirectIface != 0
+}
+
+func (t *Type) GcSlice(begin, end uintptr) []byte {
+ return unsafeSliceFor(t.GCData, int(end))[begin:]
+}
+
+// Method on non-interface type
+type Method struct {
+ Name NameOff // name of method
+ Mtyp TypeOff // method type (without receiver)
+ Ifn TextOff // fn used in interface call (one-word receiver)
+ Tfn TextOff // fn used for normal method call
+}
+
+// UncommonType is present only for defined types or types with methods
+// (if T is a defined type, the uncommonTypes for T and *T have methods).
+// Using a pointer to this struct reduces the overall size required
+// to describe a non-defined type with no methods.
+type UncommonType struct {
+ PkgPath NameOff // import path; empty for built-in types like int, string
+ Mcount uint16 // number of methods
+ Xcount uint16 // number of exported methods
+ Moff uint32 // offset from this uncommontype to [mcount]Method
+ _ uint32 // unused
+}
+
+func (t *UncommonType) Methods() []Method {
+ if t.Mcount == 0 {
+ return nil
+ }
+ return (*[1 << 16]Method)(addChecked(unsafe.Pointer(t), uintptr(t.Moff), "t.mcount > 0"))[:t.Mcount:t.Mcount]
+}
+
+func (t *UncommonType) ExportedMethods() []Method {
+ if t.Xcount == 0 {
+ return nil
+ }
+ return (*[1 << 16]Method)(addChecked(unsafe.Pointer(t), uintptr(t.Moff), "t.xcount > 0"))[:t.Xcount:t.Xcount]
+}
+
+// addChecked returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func addChecked(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(p) + x)
+}
+
+// Imethod represents a method on an interface type
+type Imethod struct {
+ Name NameOff // name of method
+ Typ TypeOff // .(*FuncType) underneath
+}
+
+// ArrayType represents a fixed array type.
+type ArrayType struct {
+ Type
+ Elem *Type // array element type
+ Slice *Type // slice type
+ Len uintptr
+}
+
+// Len returns the length of t if t is an array type, otherwise 0
+func (t *Type) Len() int {
+ if t.Kind() == Array {
+ return int((*ArrayType)(unsafe.Pointer(t)).Len)
+ }
+ return 0
+}
+
+func (t *Type) Common() *Type {
+ return t
+}
+
+type ChanDir int
+
+const (
+ RecvDir ChanDir = 1 << iota // <-chan
+ SendDir // chan<-
+ BothDir = RecvDir | SendDir // chan
+ InvalidDir ChanDir = 0
+)
+
+// ChanType represents a channel type
+type ChanType struct {
+ Type
+ Elem *Type
+ Dir ChanDir
+}
+
+type structTypeUncommon struct {
+ StructType
+ u UncommonType
+}
+
+// ChanDir returns the direction of t if t is a channel type, otherwise InvalidDir (0).
+func (t *Type) ChanDir() ChanDir {
+ if t.Kind() == Chan {
+ ch := (*ChanType)(unsafe.Pointer(t))
+ return ch.Dir
+ }
+ return InvalidDir
+}
+
+// Uncommon returns a pointer to T's "uncommon" data if there is any, otherwise nil
+func (t *Type) Uncommon() *UncommonType {
+ if t.TFlag&TFlagUncommon == 0 {
+ return nil
+ }
+ switch t.Kind() {
+ case Struct:
+ return &(*structTypeUncommon)(unsafe.Pointer(t)).u
+ case Pointer:
+ type u struct {
+ PtrType
+ u UncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Func:
+ type u struct {
+ FuncType
+ u UncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Slice:
+ type u struct {
+ SliceType
+ u UncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Array:
+ type u struct {
+ ArrayType
+ u UncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Chan:
+ type u struct {
+ ChanType
+ u UncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Map:
+ type u struct {
+ MapType
+ u UncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Interface:
+ type u struct {
+ InterfaceType
+ u UncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ default:
+ type u struct {
+ Type
+ u UncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ }
+}
+
+// Elem returns the element type for t if t is an array, channel, map, pointer, or slice, otherwise nil.
+func (t *Type) Elem() *Type {
+ switch t.Kind() {
+ case Array:
+ tt := (*ArrayType)(unsafe.Pointer(t))
+ return tt.Elem
+ case Chan:
+ tt := (*ChanType)(unsafe.Pointer(t))
+ return tt.Elem
+ case Map:
+ tt := (*MapType)(unsafe.Pointer(t))
+ return tt.Elem
+ case Pointer:
+ tt := (*PtrType)(unsafe.Pointer(t))
+ return tt.Elem
+ case Slice:
+ tt := (*SliceType)(unsafe.Pointer(t))
+ return tt.Elem
+ }
+ return nil
+}
+
+// StructType returns t cast to a *StructType, or nil if its tag does not match.
+func (t *Type) StructType() *StructType {
+ if t.Kind() != Struct {
+ return nil
+ }
+ return (*StructType)(unsafe.Pointer(t))
+}
+
+// MapType returns t cast to a *MapType, or nil if its tag does not match.
+func (t *Type) MapType() *MapType {
+ if t.Kind() != Map {
+ return nil
+ }
+ return (*MapType)(unsafe.Pointer(t))
+}
+
+// ArrayType returns t cast to a *ArrayType, or nil if its tag does not match.
+func (t *Type) ArrayType() *ArrayType {
+ if t.Kind() != Array {
+ return nil
+ }
+ return (*ArrayType)(unsafe.Pointer(t))
+}
+
+// FuncType returns t cast to a *FuncType, or nil if its tag does not match.
+func (t *Type) FuncType() *FuncType {
+ if t.Kind() != Func {
+ return nil
+ }
+ return (*FuncType)(unsafe.Pointer(t))
+}
+
+// InterfaceType returns t cast to a *InterfaceType, or nil if its tag does not match.
+func (t *Type) InterfaceType() *InterfaceType {
+ if t.Kind() != Interface {
+ return nil
+ }
+ return (*InterfaceType)(unsafe.Pointer(t))
+}
+
+// Size returns the size of data with type t.
+func (t *Type) Size() uintptr { return t.Size_ }
+
+// Align returns the alignment of data with type t.
+func (t *Type) Align() int { return int(t.Align_) }
+
+func (t *Type) FieldAlign() int { return int(t.FieldAlign_) }
+
+type InterfaceType struct {
+ Type
+ PkgPath Name // import path
+ Methods []Imethod // sorted by hash
+}
+
+func (t *Type) ExportedMethods() []Method {
+ ut := t.Uncommon()
+ if ut == nil {
+ return nil
+ }
+ return ut.ExportedMethods()
+}
+
+func (t *Type) NumMethod() int {
+ if t.Kind() == Interface {
+ tt := (*InterfaceType)(unsafe.Pointer(t))
+ return tt.NumMethod()
+ }
+ return len(t.ExportedMethods())
+}
+
+// NumMethod returns the number of interface methods in the type's method set.
+func (t *InterfaceType) NumMethod() int { return len(t.Methods) }
+
+type MapType struct {
+ Type
+ Key *Type
+ Elem *Type
+ Bucket *Type // internal type representing a hash bucket
+ // function for hashing keys (ptr to key, seed) -> hash
+ Hasher func(unsafe.Pointer, uintptr) uintptr
+ KeySize uint8 // size of key slot
+ ValueSize uint8 // size of elem slot
+ BucketSize uint16 // size of bucket
+ Flags uint32
+}
+
+// Note: flag values must match those used in the TMAP case
+// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
+func (mt *MapType) IndirectKey() bool { // store ptr to key instead of key itself
+ return mt.Flags&1 != 0
+}
+func (mt *MapType) IndirectElem() bool { // store ptr to elem instead of elem itself
+ return mt.Flags&2 != 0
+}
+func (mt *MapType) ReflexiveKey() bool { // true if k==k for all keys
+ return mt.Flags&4 != 0
+}
+func (mt *MapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
+ return mt.Flags&8 != 0
+}
+func (mt *MapType) HashMightPanic() bool { // true if hash function might panic
+ return mt.Flags&16 != 0
+}
+
+func (t *Type) Key() *Type {
+ if t.Kind() == Map {
+ return (*MapType)(unsafe.Pointer(t)).Key
+ }
+ return nil
+}
+
+type SliceType struct {
+ Type
+ Elem *Type // slice element type
+}
+
+// FuncType represents a function type.
+//
+// A *Type for each in and out parameter is stored in an array that
+// directly follows the FuncType (and possibly its UncommonType). So
+// a function type with one method, one input, and one output is:
+//
+// struct {
+// FuncType
+// UncommonType
+// [2]*Type // [0] is in, [1] is out
+// }
+type FuncType struct {
+ Type
+ InCount uint16
+ OutCount uint16 // top bit is set if last input parameter is ...
+}
+
+func (t *FuncType) In(i int) *Type {
+ return t.InSlice()[i]
+}
+
+func (t *FuncType) NumIn() int {
+ return int(t.InCount)
+}
+
+func (t *FuncType) NumOut() int {
+ return int(t.OutCount & (1<<15 - 1))
+}
+
+func (t *FuncType) Out(i int) *Type {
+ return (t.OutSlice()[i])
+}
+
+func (t *FuncType) InSlice() []*Type {
+ uadd := unsafe.Sizeof(*t)
+ if t.TFlag&TFlagUncommon != 0 {
+ uadd += unsafe.Sizeof(UncommonType{})
+ }
+ if t.InCount == 0 {
+ return nil
+ }
+ return (*[1 << 16]*Type)(addChecked(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.InCount:t.InCount]
+}
+func (t *FuncType) OutSlice() []*Type {
+ outCount := uint16(t.NumOut())
+ if outCount == 0 {
+ return nil
+ }
+ uadd := unsafe.Sizeof(*t)
+ if t.TFlag&TFlagUncommon != 0 {
+ uadd += unsafe.Sizeof(UncommonType{})
+ }
+ return (*[1 << 17]*Type)(addChecked(unsafe.Pointer(t), uadd, "outCount > 0"))[t.InCount : t.InCount+outCount : t.InCount+outCount]
+}
+
+func (t *FuncType) IsVariadic() bool {
+ return t.OutCount&(1<<15) != 0
+}
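+
+// As a worked illustration of the encoding above: for a signature like
+// func(a int, b ...string) (int, error), the recorded counts are
+//
+//	InCount  = 2
+//	OutCount = 2 | 1<<15 // top bit marks the ... parameter
+//
+// so NumIn() == 2, NumOut() == 2, and IsVariadic() reports true.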
+
+type PtrType struct {
+ Type
+ Elem *Type // pointer element (pointed at) type
+}
+
+type StructField struct {
+ Name Name // name is always non-empty
+ Typ *Type // type of field
+ Offset uintptr // byte offset of field
+}
+
+func (f *StructField) Embedded() bool {
+ return f.Name.IsEmbedded()
+}
+
+type StructType struct {
+ Type
+ PkgPath Name
+ Fields []StructField
+}
+
+// Name is an encoded type Name with optional extra data.
+//
+// The first byte is a bit field containing:
+//
+// 1<<0 the name is exported
+// 1<<1 tag data follows the name
+// 1<<2 pkgPath nameOff follows the name and tag
+// 1<<3 the name is of an embedded (a.k.a. anonymous) field
+//
+// Following that, there is a varint-encoded length of the name,
+// followed by the name itself.
+//
+// If tag data is present, it also has a varint-encoded length
+// followed by the tag itself.
+//
+// If the import path follows, then 4 bytes at the end of
+// the data form a nameOff. The import path is only set for concrete
+// methods that are defined in a different package than their type.
+//
+// If a name starts with "*", then the exported bit represents
+// whether the pointed to type is exported.
+//
+// Note: this encoding must match here and in:
+// cmd/compile/internal/reflectdata/reflect.go
+// cmd/link/internal/ld/decodesym.go
+
+type Name struct {
+ Bytes *byte
+}
+
+// DataChecked does pointer arithmetic on n's Bytes, and that arithmetic is asserted to
+// be safe for the reason in whySafe (which can appear in a backtrace, etc.)
+func (n Name) DataChecked(off int, whySafe string) *byte {
+ return (*byte)(addChecked(unsafe.Pointer(n.Bytes), uintptr(off), whySafe))
+}
+
+// Data does pointer arithmetic on n's Bytes, and that arithmetic is asserted to
+// be safe because the runtime made the call (other packages use DataChecked)
+func (n Name) Data(off int) *byte {
+ return (*byte)(addChecked(unsafe.Pointer(n.Bytes), uintptr(off), "the runtime doesn't need to give you a reason"))
+}
+
+// IsExported reports whether n is exported.
+func (n Name) IsExported() bool {
+ return (*n.Bytes)&(1<<0) != 0
+}
+
+// HasTag returns true iff there is tag data following this name
+func (n Name) HasTag() bool {
+ return (*n.Bytes)&(1<<1) != 0
+}
+
+// IsEmbedded returns true iff n is embedded (an anonymous field).
+func (n Name) IsEmbedded() bool {
+ return (*n.Bytes)&(1<<3) != 0
+}
+
+// ReadVarint parses a varint as encoded by encoding/binary.
+// It returns the number of encoded bytes and the encoded value.
+func (n Name) ReadVarint(off int) (int, int) {
+ v := 0
+ for i := 0; ; i++ {
+ x := *n.DataChecked(off+i, "read varint")
+ v += int(x&0x7f) << (7 * i)
+ if x&0x80 == 0 {
+ return i + 1, v
+ }
+ }
+}
+
+// IsBlank indicates whether n is "_".
+func (n Name) IsBlank() bool {
+ if n.Bytes == nil {
+ return false
+ }
+ _, l := n.ReadVarint(1)
+ return l == 1 && *n.Data(2) == '_'
+}
+
+// writeVarint writes n to buf in varint form. Returns the
+// number of bytes written. n must be nonnegative.
+// Writes at most 10 bytes.
+func writeVarint(buf []byte, n int) int {
+ for i := 0; ; i++ {
+ b := byte(n & 0x7f)
+ n >>= 7
+ if n == 0 {
+ buf[i] = b
+ return i + 1
+ }
+ buf[i] = b | 0x80
+ }
+}
+
+// Name returns the name string for n, or empty if there is none.
+func (n Name) Name() string {
+ if n.Bytes == nil {
+ return ""
+ }
+ i, l := n.ReadVarint(1)
+ return unsafeStringFor(n.DataChecked(1+i, "non-empty string"), l)
+}
+
+// Tag returns the tag string for n, or empty if there is none.
+func (n Name) Tag() string {
+ if !n.HasTag() {
+ return ""
+ }
+ i, l := n.ReadVarint(1)
+ i2, l2 := n.ReadVarint(1 + i + l)
+ return unsafeStringFor(n.DataChecked(1+i+l+i2, "non-empty string"), l2)
+}
+
+func NewName(n, tag string, exported, embedded bool) Name {
+ if len(n) >= 1<<29 {
+ panic("abi.NewName: name too long: " + n[:1024] + "...")
+ }
+ if len(tag) >= 1<<29 {
+ panic("abi.NewName: tag too long: " + tag[:1024] + "...")
+ }
+ var nameLen [10]byte
+ var tagLen [10]byte
+ nameLenLen := writeVarint(nameLen[:], len(n))
+ tagLenLen := writeVarint(tagLen[:], len(tag))
+
+ var bits byte
+ l := 1 + nameLenLen + len(n)
+ if exported {
+ bits |= 1 << 0
+ }
+ if len(tag) > 0 {
+ l += tagLenLen + len(tag)
+ bits |= 1 << 1
+ }
+ if embedded {
+ bits |= 1 << 3
+ }
+
+ b := make([]byte, l)
+ b[0] = bits
+ copy(b[1:], nameLen[:nameLenLen])
+ copy(b[1+nameLenLen:], n)
+ if len(tag) > 0 {
+ tb := b[1+nameLenLen+len(n):]
+ copy(tb, tagLen[:tagLenLen])
+ copy(tb[tagLenLen:], tag)
+ }
+
+ return Name{Bytes: &b[0]}
+}
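+
+// A worked example of the layout NewName produces (the byte values follow
+// directly from the code above):
+//
+//	n := NewName("Foo", `json:"foo"`, true, false)
+//	// Bytes: 0x03                                  exported | has-tag bits
+//	//        0x03 'F' 'o' 'o'                      varint name length, name
+//	//        0x0a 'j' 's' 'o' 'n' ':' '"' 'f' 'o' 'o' '"'
+//	//                                              varint tag length, tag
+//	// n.Name() == "Foo", n.Tag() == `json:"foo"`, n.IsExported() == true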
diff --git a/src/internal/abi/unsafestring_go119.go b/src/internal/abi/unsafestring_go119.go
new file mode 100644
index 0000000..a710384
--- /dev/null
+++ b/src/internal/abi/unsafestring_go119.go
@@ -0,0 +1,32 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.20
+// +build !go1.20
+
+package abi
+
+import "unsafe"
+
+type (
+ stringHeader struct {
+ Data *byte
+ Len int
+ }
+ sliceHeader struct {
+ Data *byte
+ Len int
+ Cap int
+ }
+)
+
+func unsafeStringFor(b *byte, l int) string {
+ h := stringHeader{Data: b, Len: l}
+ return *(*string)(unsafe.Pointer(&h))
+}
+
+func unsafeSliceFor(b *byte, l int) []byte {
+ h := sliceHeader{Data: b, Len: l, Cap: l}
+ return *(*[]byte)(unsafe.Pointer(&h))
+}
diff --git a/src/internal/abi/unsafestring_go120.go b/src/internal/abi/unsafestring_go120.go
new file mode 100644
index 0000000..93ff8ea
--- /dev/null
+++ b/src/internal/abi/unsafestring_go120.go
@@ -0,0 +1,18 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+// +build go1.20
+
+package abi
+
+import "unsafe"
+
+func unsafeStringFor(b *byte, l int) string {
+ return unsafe.String(b, l)
+}
+
+func unsafeSliceFor(b *byte, l int) []byte {
+ return unsafe.Slice(b, l)
+}
diff --git a/src/internal/bisect/bisect.go b/src/internal/bisect/bisect.go
new file mode 100644
index 0000000..48c796e
--- /dev/null
+++ b/src/internal/bisect/bisect.go
@@ -0,0 +1,795 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bisect can be used by compilers and other programs
+// to serve as a target for the bisect debugging tool.
+// See [golang.org/x/tools/cmd/bisect] for details about using the tool.
+//
+// To be a bisect target, allowing bisect to help determine which of a set of independent
+// changes provokes a failure, a program needs to:
+//
+// 1. Define a way to accept a change pattern on its command line or in its environment.
+// The most common mechanism is a command-line flag.
+// The pattern can be passed to [New] to create a [Matcher], the compiled form of a pattern.
+//
+// 2. Assign each change a unique ID. One possibility is to use a sequence number,
+// but the most common mechanism is to hash some kind of identifying information
+// like the file and line number where the change might be applied.
+// [Hash] hashes its arguments to compute an ID.
+//
+// 3. Enable each change that the pattern says should be enabled.
+// The [Matcher.ShouldEnable] method answers this question for a given change ID.
+//
+// 4. Print a report identifying each change that the pattern says should be printed.
+// The [Matcher.ShouldPrint] method answers this question for a given change ID.
+// The report consists of one or more lines on standard error or standard output
+// that contain a “match marker”. [Marker] returns the match marker for a given ID.
+// When bisect reports a change as causing the failure, it identifies the change
+// by printing the report lines with the match marker removed.
+//
+// # Example Usage
+//
+// A program starts by defining how it receives the pattern. In this example, we will assume a flag.
+// The next step is to compile the pattern:
+//
+// m, err := bisect.New(patternFlag)
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// Then, each time a potential change is considered, the program computes
+// a change ID by hashing identifying information (source file and line, in this case)
+// and then calls m.ShouldPrint and m.ShouldEnable to decide whether to
+// print and enable the change, respectively. The two can return different values
+// depending on whether bisect is trying to find a minimal set of changes to
+// disable or to enable to provoke the failure.
+//
+// It is usually helpful to write a helper function that accepts the identifying information
+// and then takes care of hashing, printing, and reporting whether the identified change
+// should be enabled. For example, a helper for changes identified by a file and line number
+// would be:
+//
+// func ShouldEnable(file string, line int) bool {
+// h := bisect.Hash(file, line)
+// if m.ShouldPrint(h) {
+// fmt.Fprintf(os.Stderr, "%v %s:%d\n", bisect.Marker(h), file, line)
+// }
+// return m.ShouldEnable(h)
+// }
+//
+// Finally, note that New returns a nil Matcher when there is no pattern,
+// meaning that the target is not running under bisect at all,
+// so all changes should be enabled and none should be printed.
+// In that common case, the computation of the hash can be avoided entirely
+// by checking for m == nil first:
+//
+// func ShouldEnable(file string, line int) bool {
+// if m == nil {
+// return true
+// }
+// h := bisect.Hash(file, line)
+// if m.ShouldPrint(h) {
+// fmt.Fprintf(os.Stderr, "%v %s:%d\n", bisect.Marker(h), file, line)
+// }
+// return m.ShouldEnable(h)
+// }
+//
+// When the identifying information is expensive to format, this code can call
+// [Matcher.MarkerOnly] to find out whether short report lines containing only the
+// marker are permitted for a given run. (Bisect permits such lines when it is
+// still exploring the space of possible changes and will not be showing the
+// output to the user.) If so, the client can choose to print only the marker:
+//
+// func ShouldEnable(file string, line int) bool {
+// if m == nil {
+// return true
+// }
+// h := bisect.Hash(file, line)
+// if m.ShouldPrint(h) {
+// if m.MarkerOnly() {
+// bisect.PrintMarker(os.Stderr)
+// } else {
+// fmt.Fprintf(os.Stderr, "%v %s:%d\n", bisect.Marker(h), file, line)
+// }
+// }
+// return m.ShouldEnable(h)
+// }
+//
+// This specific helper – deciding whether to enable a change identified by
+// file and line number and printing about the change when necessary – is
+// provided by the [Matcher.FileLine] method.
+//
+// Another common usage is deciding whether to make a change in a function
+// based on the caller's stack, to identify the specific calling contexts that the
+// change breaks. The [Matcher.Stack] method takes care of obtaining the stack,
+// printing it when necessary, and reporting whether to enable the change
+// based on that stack.
+//
+// # Pattern Syntax
+//
+// Patterns are generated by the bisect tool and interpreted by [New].
+// Users should not have to understand the patterns except when
+// debugging a target's bisect support or debugging the bisect tool itself.
+//
+// The pattern syntax selecting a change is a sequence of bit strings
+// separated by + and - operators. Each bit string denotes the set of
+// changes with IDs ending in those bits, + is set addition, - is set subtraction,
+// and the expression is evaluated in the usual left-to-right order.
+// The special binary number “y” denotes the set of all changes,
+// standing in for the empty bit string.
+// In the expression, all the + operators must appear before all the - operators.
+// A leading + adds to an empty set. A leading - subtracts from the set of all
+// possible suffixes.
+//
+// For example:
+//
+// - “01+10” and “+01+10” both denote the set of changes
+// with IDs ending with the bits 01 or 10.
+//
+// - “01+10-1001” denotes the set of changes with IDs
+// ending with the bits 01 or 10, but excluding those ending in 1001.
+//
+// - “-01-1000” and “y-01-1000” both denote the set of all changes
+// with IDs ending in neither 01 nor 1000.
+//
+// - “0+1-01+001” is not a valid pattern, because all the + operators do not
+// appear before all the - operators.
+//
+// In the syntaxes described so far, the pattern specifies the changes to
+// enable and report. If a pattern is prefixed by a “!”, the meaning
+// changes: the pattern specifies the changes to DISABLE and report. This
+// mode of operation is needed when a program passes with all changes
+// enabled but fails with no changes enabled. In this case, bisect
+// searches for minimal sets of changes to disable.
+// Put another way, the leading “!” inverts the result from [Matcher.ShouldEnable]
+// but does not invert the result from [Matcher.ShouldPrint].
+//
+// As a convenience for manual debugging, “n” is an alias for “!y”,
+// meaning to disable and report all changes.
+//
+// Finally, a leading “v” in the pattern indicates that the reports will be shown
+// to the user of bisect to describe the changes involved in a failure.
+// At the API level, the leading “v” causes [Matcher.Visible] to return true.
+// See the next section for details.
+//
+// # Match Reports
+//
+// The target program must enable only those changes matched
+// by the pattern, and it must print a match report for each such change.
+// A match report consists of one or more lines of text that will be
+// printed by the bisect tool to describe a change implicated in causing
+// a failure. Each line in the report for a given change must contain a
+// match marker with that change ID, as returned by [Marker].
+// The markers are elided when displaying the lines to the user.
+//
+// A match marker has the form “[bisect-match 0x1234]” where
+// 0x1234 is the change ID in hexadecimal.
+// An alternate form is “[bisect-match 010101]”, giving the change ID in binary.
+//
+// When [Matcher.Visible] returns false, the match reports are only
+// being processed by bisect to learn the set of enabled changes,
+// not shown to the user, meaning that each report can be a match
+// marker on a line by itself, eliding the usual textual description.
+// When the textual description is expensive to compute,
+// checking [Matcher.Visible] can help avoid that expense
+// in most runs.
+package bisect
+
+import (
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "unsafe"
+)
+
+// New creates and returns a new Matcher implementing the given pattern.
+// The pattern syntax is defined in the package doc comment.
+//
+// In addition to the pattern syntax, New("") returns nil, nil.
+// The nil *Matcher is valid for use: it returns true from ShouldEnable
+// and false from ShouldPrint for all changes. Callers can avoid calling
+// [Hash], [Matcher.ShouldEnable], and [Matcher.ShouldPrint] entirely
+// when they recognize the nil Matcher.
+func New(pattern string) (*Matcher, error) {
+ if pattern == "" {
+ return nil, nil
+ }
+
+ m := new(Matcher)
+
+ p := pattern
+ // Special case for leading 'q' so that 'qn' quietly disables, e.g. fmahash=qn to disable fma
+ // Any instance of 'v' disables 'q'.
+ if len(p) > 0 && p[0] == 'q' {
+ m.quiet = true
+ p = p[1:]
+ if p == "" {
+ return nil, &parseError{"invalid pattern syntax: " + pattern}
+ }
+ }
+ // Allow multiple v, so that “bisect cmd vPATTERN” can force verbose all the time.
+ for len(p) > 0 && p[0] == 'v' {
+ m.verbose = true
+ m.quiet = false
+ p = p[1:]
+ if p == "" {
+ return nil, &parseError{"invalid pattern syntax: " + pattern}
+ }
+ }
+
+ // Allow multiple !, each negating the last, so that “bisect cmd !PATTERN” works
+ // even when bisect chooses to add its own !.
+ m.enable = true
+ for len(p) > 0 && p[0] == '!' {
+ m.enable = !m.enable
+ p = p[1:]
+ if p == "" {
+ return nil, &parseError{"invalid pattern syntax: " + pattern}
+ }
+ }
+
+ if p == "n" {
+ // n is an alias for !y.
+ m.enable = !m.enable
+ p = "y"
+ }
+
+ // Parse actual pattern syntax.
+ result := true
+ bits := uint64(0)
+ start := 0
+ wid := 1 // 1-bit (binary); sometimes 4-bit (hex)
+ for i := 0; i <= len(p); i++ {
+ // Imagine a trailing - at the end of the pattern to flush final suffix
+ c := byte('-')
+ if i < len(p) {
+ c = p[i]
+ }
+ if i == start && wid == 1 && c == 'x' { // leading x for hex
+ start = i + 1
+ wid = 4
+ continue
+ }
+ switch c {
+ default:
+ return nil, &parseError{"invalid pattern syntax: " + pattern}
+ case '2', '3', '4', '5', '6', '7', '8', '9':
+ if wid != 4 {
+ return nil, &parseError{"invalid pattern syntax: " + pattern}
+ }
+ fallthrough
+ case '0', '1':
+ bits <<= wid
+ bits |= uint64(c - '0')
+ case 'a', 'b', 'c', 'd', 'e', 'f', 'A', 'B', 'C', 'D', 'E', 'F':
+ if wid != 4 {
+ return nil, &parseError{"invalid pattern syntax: " + pattern}
+ }
+ bits <<= 4
+ bits |= uint64(c&^0x20 - 'A' + 10)
+ case 'y':
+ if i+1 < len(p) && (p[i+1] == '0' || p[i+1] == '1') {
+ return nil, &parseError{"invalid pattern syntax: " + pattern}
+ }
+ bits = 0
+ case '+', '-':
+ if c == '+' && result == false {
+ // Have already seen a -. Should be - from here on.
+ return nil, &parseError{"invalid pattern syntax (+ after -): " + pattern}
+ }
+ if i > 0 {
+ n := (i - start) * wid
+ if n > 64 {
+ return nil, &parseError{"pattern bits too long: " + pattern}
+ }
+ if n <= 0 {
+ return nil, &parseError{"invalid pattern syntax: " + pattern}
+ }
+ if p[start] == 'y' {
+ n = 0
+ }
+ mask := uint64(1)<<n - 1
+ m.list = append(m.list, cond{mask, bits, result})
+ } else if c == '-' {
+ // leading - subtracts from complete set
+ m.list = append(m.list, cond{0, 0, true})
+ }
+ bits = 0
+ result = c == '+'
+ start = i + 1
+ wid = 1
+ }
+ }
+ return m, nil
+}
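+
+// For example (a sketch of the compiled form, derived from the parsing above
+// and not a stable representation): New("01+10-1001") produces conditions
+// equivalent to
+//
+//	cond{mask: 0b11, bits: 0b01, result: true}
+//	cond{mask: 0b11, bits: 0b10, result: true}
+//	cond{mask: 0b1111, bits: 0b1001, result: false}
+//
+// and matchResult consults them last to first, so an ID ending in 1001 is
+// excluded even though it also ends in 01.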
+
+// A Matcher is the parsed, compiled form of a PATTERN string.
+// The nil *Matcher is valid: it has all changes enabled but none reported.
+type Matcher struct {
+ verbose bool // annotate reporting with human-helpful information
+ quiet bool // disables all reporting. reset if verbose is true. use case is -d=fmahash=qn
+ enable bool // when true, list is for “enable and report” (when false, “disable and report”)
+ list []cond // conditions; later ones win over earlier ones
+ dedup atomicPointerDedup
+}
+
+// atomicPointerDedup is an atomic.Pointer[dedup],
+// but we are avoiding using Go 1.19's atomic.Pointer
+// until the bootstrap toolchain can be relied upon to have it.
+type atomicPointerDedup struct {
+ p unsafe.Pointer
+}
+
+func (p *atomicPointerDedup) Load() *dedup {
+ return (*dedup)(atomic.LoadPointer(&p.p))
+}
+
+func (p *atomicPointerDedup) CompareAndSwap(old, new *dedup) bool {
+ return atomic.CompareAndSwapPointer(&p.p, unsafe.Pointer(old), unsafe.Pointer(new))
+}
+
+// A cond is a single condition in the matcher.
+// Given an input id, if id&mask == bits, return the result.
+type cond struct {
+ mask uint64
+ bits uint64
+ result bool
+}
+
+// MarkerOnly reports whether it is okay to print only the marker for
+// a given change, omitting the identifying information.
+// MarkerOnly returns true when bisect is using the printed reports
+// only for an intermediate search step, not for showing to users.
+func (m *Matcher) MarkerOnly() bool {
+ return !m.verbose
+}
+
+// ShouldEnable reports whether the change with the given id should be enabled.
+func (m *Matcher) ShouldEnable(id uint64) bool {
+ if m == nil {
+ return true
+ }
+ return m.matchResult(id) == m.enable
+}
+
+// ShouldPrint reports whether to print identifying information about the change with the given id.
+func (m *Matcher) ShouldPrint(id uint64) bool {
+ if m == nil || m.quiet {
+ return false
+ }
+ return m.matchResult(id)
+}
+
+// matchResult returns the result from the latest condition that matches id;
+// later conditions win over earlier ones.
+func (m *Matcher) matchResult(id uint64) bool {
+ for i := len(m.list) - 1; i >= 0; i-- {
+ c := &m.list[i]
+ if id&c.mask == c.bits {
+ return c.result
+ }
+ }
+ return false
+}
+
+// FileLine reports whether the change identified by file and line should be enabled.
+// If the change should be printed, FileLine prints a one-line report to w.
+func (m *Matcher) FileLine(w Writer, file string, line int) bool {
+ if m == nil {
+ return true
+ }
+ return m.fileLine(w, file, line)
+}
+
+// fileLine does the real work for FileLine.
+// This lets FileLine's body handle m == nil and potentially be inlined.
+func (m *Matcher) fileLine(w Writer, file string, line int) bool {
+ h := Hash(file, line)
+ if m.ShouldPrint(h) {
+ if m.MarkerOnly() {
+ PrintMarker(w, h)
+ } else {
+ printFileLine(w, h, file, line)
+ }
+ }
+ return m.ShouldEnable(h)
+}
+
+// printFileLine prints a non-marker-only report for file:line to w.
+func printFileLine(w Writer, h uint64, file string, line int) error {
+ const markerLen = 40 // overestimate
+ b := make([]byte, 0, markerLen+len(file)+24)
+ b = AppendMarker(b, h)
+ b = appendFileLine(b, file, line)
+ b = append(b, '\n')
+ _, err := w.Write(b)
+ return err
+}
+
+// appendFileLine appends file:line to dst, returning the extended slice.
+func appendFileLine(dst []byte, file string, line int) []byte {
+ dst = append(dst, file...)
+ dst = append(dst, ':')
+ u := uint(line)
+ if line < 0 {
+ dst = append(dst, '-')
+ u = -u
+ }
+ var buf [24]byte
+ i := len(buf)
+ for i == len(buf) || u > 0 {
+ i--
+ buf[i] = '0' + byte(u%10)
+ u /= 10
+ }
+ dst = append(dst, buf[i:]...)
+ return dst
+}
+
+// Stack assigns the current call stack a change ID.
+// If the stack should be printed, Stack prints it.
+// Then Stack reports whether a change at the current call stack should be enabled.
+func (m *Matcher) Stack(w Writer) bool {
+ if m == nil {
+ return true
+ }
+ return m.stack(w)
+}
+
+// stack does the real work for Stack.
+// This lets stack's body handle m == nil and potentially be inlined.
+func (m *Matcher) stack(w Writer) bool {
+ const maxStack = 16
+ var stk [maxStack]uintptr
+ n := runtime.Callers(2, stk[:])
+ // caller #2 is not for printing; need it to normalize PCs if ASLR.
+ if n <= 1 {
+ return false
+ }
+
+ base := stk[0]
+ // normalize PCs
+ for i := range stk[:n] {
+ stk[i] -= base
+ }
+
+ h := Hash(stk[:n])
+ if m.ShouldPrint(h) {
+ var d *dedup
+ for {
+ d = m.dedup.Load()
+ if d != nil {
+ break
+ }
+ d = new(dedup)
+ if m.dedup.CompareAndSwap(nil, d) {
+ break
+ }
+ }
+
+ if m.MarkerOnly() {
+ if !d.seenLossy(h) {
+ PrintMarker(w, h)
+ }
+ } else {
+ if !d.seen(h) {
+ // Restore PCs in stack for printing
+ for i := range stk[:n] {
+ stk[i] += base
+ }
+ printStack(w, h, stk[1:n])
+ }
+ }
+ }
+ return m.ShouldEnable(h)
+}
+
+// Writer is the same interface as io.Writer.
+// It is duplicated here to avoid importing io.
+type Writer interface {
+ Write([]byte) (int, error)
+}
+
+// PrintMarker prints to w a one-line report containing only the marker for h.
+// It is appropriate to use when [Matcher.ShouldPrint] and [Matcher.MarkerOnly] both return true.
+func PrintMarker(w Writer, h uint64) error {
+ var buf [50]byte
+ b := AppendMarker(buf[:], h)
+ b = append(b, '\n')
+ _, err := w.Write(b)
+ return err
+}
+
+// printStack prints to w a multi-line report containing a formatting of the call stack stk,
+// with each line preceded by the marker for h.
+func printStack(w Writer, h uint64, stk []uintptr) error {
+ buf := make([]byte, 0, 2048)
+
+ var prefixBuf [100]byte
+ prefix := AppendMarker(prefixBuf[:0], h)
+
+ frames := runtime.CallersFrames(stk)
+ for {
+ f, more := frames.Next()
+ buf = append(buf, prefix...)
+ buf = append(buf, f.Func.Name()...)
+ buf = append(buf, "()\n"...)
+ buf = append(buf, prefix...)
+ buf = append(buf, '\t')
+ buf = appendFileLine(buf, f.File, f.Line)
+ buf = append(buf, '\n')
+ if !more {
+ break
+ }
+ }
+ buf = append(buf, prefix...)
+ buf = append(buf, '\n')
+ _, err := w.Write(buf)
+ return err
+}
+
+// Marker returns the match marker text to use on any line reporting details
+// about a match of the given ID.
+// It always returns the hexadecimal format.
+func Marker(id uint64) string {
+ return string(AppendMarker(nil, id))
+}
+
+// AppendMarker is like [Marker] but appends the marker to dst.
+func AppendMarker(dst []byte, id uint64) []byte {
+ const prefix = "[bisect-match 0x"
+ var buf [len(prefix) + 16 + 1]byte
+ copy(buf[:], prefix)
+ for i := 0; i < 16; i++ {
+ buf[len(prefix)+i] = "0123456789abcdef"[id>>60]
+ id <<= 4
+ }
+ buf[len(prefix)+16] = ']'
+ return append(dst, buf[:]...)
+}
+
+// CutMarker finds the first match marker in line and removes it,
+// returning the shortened line (with the marker removed),
+// the ID from the match marker,
+// and whether a marker was found at all.
+// If there is no marker, CutMarker returns line, 0, false.
+func CutMarker(line string) (short string, id uint64, ok bool) {
+ // Find first instance of prefix.
+ prefix := "[bisect-match "
+ i := 0
+ for ; ; i++ {
+ if i >= len(line)-len(prefix) {
+ return line, 0, false
+ }
+ if line[i] == '[' && line[i:i+len(prefix)] == prefix {
+ break
+ }
+ }
+
+ // Scan to ].
+ j := i + len(prefix)
+ for j < len(line) && line[j] != ']' {
+ j++
+ }
+ if j >= len(line) {
+ return line, 0, false
+ }
+
+ // Parse id.
+ idstr := line[i+len(prefix) : j]
+ if len(idstr) >= 3 && idstr[:2] == "0x" {
+ // parse hex
+ if len(idstr) > 2+16 { // max 0x + 16 digits
+ return line, 0, false
+ }
+ for i := 2; i < len(idstr); i++ {
+ id <<= 4
+ switch c := idstr[i]; {
+ case '0' <= c && c <= '9':
+ id |= uint64(c - '0')
+ case 'a' <= c && c <= 'f':
+ id |= uint64(c - 'a' + 10)
+ case 'A' <= c && c <= 'F':
+ id |= uint64(c - 'A' + 10)
+ }
+ }
+ } else {
+ if idstr == "" || len(idstr) > 64 { // min 1 digit, max 64 digits
+ return line, 0, false
+ }
+ // parse binary
+ for i := 0; i < len(idstr); i++ {
+ id <<= 1
+ switch c := idstr[i]; c {
+ default:
+ return line, 0, false
+ case '0', '1':
+ id |= uint64(c - '0')
+ }
+ }
+ }
+
+ // Construct shortened line.
+ // Remove at most one space from around the marker,
+ // so that "foo [marker] bar" shortens to "foo bar".
+ j++ // skip ]
+ if i > 0 && line[i-1] == ' ' {
+ i--
+ } else if j < len(line) && line[j] == ' ' {
+ j++
+ }
+ short = line[:i] + line[j:]
+ return short, id, true
+}
+
+// Hash computes a hash of the data arguments,
+// each of which must be of type string, byte, int, uint, int32, uint32, int64, uint64, uintptr, or a slice of one of those types.
+func Hash(data ...any) uint64 {
+ h := offset64
+ for _, v := range data {
+ switch v := v.(type) {
+ default:
+ // Note: Not printing the type, because reflect.ValueOf(v)
+ // would make the interfaces prepared by the caller escape
+ // and therefore allocate. This way, Hash(file, line) runs
+ // without any allocation. It should be clear from the
+ // source code calling Hash what the bad argument was.
+ panic("bisect.Hash: unexpected argument type")
+ case string:
+ h = fnvString(h, v)
+ case byte:
+ h = fnv(h, v)
+ case int:
+ h = fnvUint64(h, uint64(v))
+ case uint:
+ h = fnvUint64(h, uint64(v))
+ case int32:
+ h = fnvUint32(h, uint32(v))
+ case uint32:
+ h = fnvUint32(h, v)
+ case int64:
+ h = fnvUint64(h, uint64(v))
+ case uint64:
+ h = fnvUint64(h, v)
+ case uintptr:
+ h = fnvUint64(h, uint64(v))
+ case []string:
+ for _, x := range v {
+ h = fnvString(h, x)
+ }
+ case []byte:
+ for _, x := range v {
+ h = fnv(h, x)
+ }
+ case []int:
+ for _, x := range v {
+ h = fnvUint64(h, uint64(x))
+ }
+ case []uint:
+ for _, x := range v {
+ h = fnvUint64(h, uint64(x))
+ }
+ case []int32:
+ for _, x := range v {
+ h = fnvUint32(h, uint32(x))
+ }
+ case []uint32:
+ for _, x := range v {
+ h = fnvUint32(h, x)
+ }
+ case []int64:
+ for _, x := range v {
+ h = fnvUint64(h, uint64(x))
+ }
+ case []uint64:
+ for _, x := range v {
+ h = fnvUint64(h, x)
+ }
+ case []uintptr:
+ for _, x := range v {
+ h = fnvUint64(h, uint64(x))
+ }
+ }
+ }
+ return h
+}
+
+// Trivial error implementation, here to avoid importing errors.
+
+// parseError is a trivial error implementation,
+// defined here to avoid importing errors.
+type parseError struct{ text string }
+
+func (e *parseError) Error() string { return e.text }
+
+// FNV-1a implementation. See Go's hash/fnv/fnv.go.
+// Copied here for simplicity (can handle integers more directly)
+// and to avoid importing hash/fnv.
+
+const (
+ offset64 uint64 = 14695981039346656037
+ prime64 uint64 = 1099511628211
+)
+
+func fnv(h uint64, x byte) uint64 {
+ h ^= uint64(x)
+ h *= prime64
+ return h
+}
+
+func fnvString(h uint64, x string) uint64 {
+ for i := 0; i < len(x); i++ {
+ h ^= uint64(x[i])
+ h *= prime64
+ }
+ return h
+}
+
+func fnvUint64(h uint64, x uint64) uint64 {
+ for i := 0; i < 8; i++ {
+ h ^= uint64(x & 0xFF)
+ x >>= 8
+ h *= prime64
+ }
+ return h
+}
+
+func fnvUint32(h uint64, x uint32) uint64 {
+ for i := 0; i < 4; i++ {
+ h ^= uint64(x & 0xFF)
+ x >>= 8
+ h *= prime64
+ }
+ return h
+}
+
+// A dedup is a deduplicator for call stacks, so that we only print
+// a report for new call stacks, not for call stacks we've already
+// reported.
+//
+// It has two modes: an approximate but lock-free mode that
+// may still emit some duplicates, and a precise mode that uses
+// a lock and never emits duplicates.
+type dedup struct {
+ // 128-entry 4-way, lossy cache for seenLossy
+ recent [128][4]uint64
+
+ // complete history for seen
+ mu sync.Mutex
+ m map[uint64]bool
+}
+
+// seen records that h has now been seen and reports whether it was seen before.
+// When seen returns false, the caller is expected to print a report for h.
+func (d *dedup) seen(h uint64) bool {
+ d.mu.Lock()
+ if d.m == nil {
+ d.m = make(map[uint64]bool)
+ }
+ seen := d.m[h]
+ d.m[h] = true
+ d.mu.Unlock()
+ return seen
+}
+
+// seenLossy is a variant of seen that avoids a lock by using a cache of recently seen hashes.
+// Each cache entry is N-way set-associative: h can appear in any of the slots.
+// If h does not appear in any of them, then it is inserted into a random slot,
+// overwriting whatever was there before.
+func (d *dedup) seenLossy(h uint64) bool {
+ cache := &d.recent[uint(h)%uint(len(d.recent))]
+ for i := 0; i < len(cache); i++ {
+ if atomic.LoadUint64(&cache[i]) == h {
+ return true
+ }
+ }
+
+ // Compute index in set to evict as hash of current set.
+ ch := offset64
+ for _, x := range cache {
+ ch = fnvUint64(ch, x)
+ }
+ atomic.StoreUint64(&cache[uint(ch)%uint(len(cache))], h)
+ return false
+}
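Illustrative note (not part of the patch): the marker helpers added above round-trip. Marker/AppendMarker render a Hash value as "[bisect-match 0x...]" and CutMarker recovers the ID from a log line. A minimal sketch, assuming it is compiled inside the Go tree, since internal/bisect is not importable elsewhere:

    package main

    import (
        "fmt"
        "internal/bisect" // importable only from within the Go source tree
    )

    func main() {
        // Hash a (file, line) pair the way callers of the bisect package do.
        h := bisect.Hash("example.go", 42)

        // Marker renders the hash as "[bisect-match 0x<16 hex digits>]".
        line := "enabling change at example.go:42 " + bisect.Marker(h)

        // CutMarker strips the marker back out of the line and recovers the ID.
        short, id, ok := bisect.CutMarker(line)
        fmt.Println(short, id == h, ok) // "enabling change at example.go:42" true true
    }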
diff --git a/src/internal/buildcfg/cfg.go b/src/internal/buildcfg/cfg.go
new file mode 100644
index 0000000..b97b9c1
--- /dev/null
+++ b/src/internal/buildcfg/cfg.go
@@ -0,0 +1,235 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package buildcfg provides access to the build configuration
+// described by the current environment. It is for use by build tools
+// such as cmd/go or cmd/compile and for setting up go/build's Default context.
+//
+// Note that it does NOT provide access to the build configuration used to
+// build the currently-running binary. For that, use runtime.GOOS etc
+// as well as internal/goexperiment.
+package buildcfg
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+var (
+ GOROOT = runtime.GOROOT() // cached for efficiency
+ GOARCH = envOr("GOARCH", defaultGOARCH)
+ GOOS = envOr("GOOS", defaultGOOS)
+ GO386 = envOr("GO386", defaultGO386)
+ GOAMD64 = goamd64()
+ GOARM = goarm()
+ GOMIPS = gomips()
+ GOMIPS64 = gomips64()
+ GOPPC64 = goppc64()
+ GOWASM = gowasm()
+ ToolTags = toolTags()
+ GO_LDSO = defaultGO_LDSO
+ Version = version
+)
+
+// Error is one of the errors found (if any) in the build configuration.
+var Error error
+
+// Check exits the program with a fatal error if Error is non-nil.
+func Check() {
+ if Error != nil {
+ fmt.Fprintf(os.Stderr, "%s: %v\n", filepath.Base(os.Args[0]), Error)
+ os.Exit(2)
+ }
+}
+
+func envOr(key, value string) string {
+ if x := os.Getenv(key); x != "" {
+ return x
+ }
+ return value
+}
+
+func goamd64() int {
+ switch v := envOr("GOAMD64", defaultGOAMD64); v {
+ case "v1":
+ return 1
+ case "v2":
+ return 2
+ case "v3":
+ return 3
+ case "v4":
+ return 4
+ }
+ Error = fmt.Errorf("invalid GOAMD64: must be v1, v2, v3, v4")
+ return int(defaultGOAMD64[len("v")] - '0')
+}
+
+func goarm() int {
+ def := defaultGOARM
+ if GOOS == "android" && GOARCH == "arm" {
+ // Android arm devices always support GOARM=7.
+ def = "7"
+ }
+ switch v := envOr("GOARM", def); v {
+ case "5":
+ return 5
+ case "6":
+ return 6
+ case "7":
+ return 7
+ }
+ Error = fmt.Errorf("invalid GOARM: must be 5, 6, 7")
+ return int(def[0] - '0')
+}
+
+func gomips() string {
+ switch v := envOr("GOMIPS", defaultGOMIPS); v {
+ case "hardfloat", "softfloat":
+ return v
+ }
+ Error = fmt.Errorf("invalid GOMIPS: must be hardfloat, softfloat")
+ return defaultGOMIPS
+}
+
+func gomips64() string {
+ switch v := envOr("GOMIPS64", defaultGOMIPS64); v {
+ case "hardfloat", "softfloat":
+ return v
+ }
+ Error = fmt.Errorf("invalid GOMIPS64: must be hardfloat, softfloat")
+ return defaultGOMIPS64
+}
+
+func goppc64() int {
+ switch v := envOr("GOPPC64", defaultGOPPC64); v {
+ case "power8":
+ return 8
+ case "power9":
+ return 9
+ case "power10":
+ return 10
+ }
+ Error = fmt.Errorf("invalid GOPPC64: must be power8, power9, power10")
+ return int(defaultGOPPC64[len("power")] - '0')
+}
+
+type gowasmFeatures struct {
+ SatConv bool
+ SignExt bool
+}
+
+func (f gowasmFeatures) String() string {
+ var flags []string
+ if f.SatConv {
+ flags = append(flags, "satconv")
+ }
+ if f.SignExt {
+ flags = append(flags, "signext")
+ }
+ return strings.Join(flags, ",")
+}
+
+func gowasm() (f gowasmFeatures) {
+ for _, opt := range strings.Split(envOr("GOWASM", ""), ",") {
+ switch opt {
+ case "satconv":
+ f.SatConv = true
+ case "signext":
+ f.SignExt = true
+ case "":
+ // ignore
+ default:
+ Error = fmt.Errorf("invalid GOWASM: no such feature %q", opt)
+ }
+ }
+ return
+}
+
+func Getgoextlinkenabled() string {
+ return envOr("GO_EXTLINK_ENABLED", defaultGO_EXTLINK_ENABLED)
+}
+
+func toolTags() []string {
+ tags := experimentTags()
+ tags = append(tags, gogoarchTags()...)
+ return tags
+}
+
+func experimentTags() []string {
+ var list []string
+ // For each experiment that has been enabled in the toolchain, define a
+ // build tag with the same name but prefixed by "goexperiment." which can be
+ // used for compiling alternative files for the experiment. This allows
+ // changes for the experiment, like extra struct fields in the runtime,
+ // without affecting the base non-experiment code at all.
+ for _, exp := range Experiment.Enabled() {
+ list = append(list, "goexperiment."+exp)
+ }
+ return list
+}
+
+// GOGOARCH returns the name and value of the GO$GOARCH setting.
+// For example, if GOARCH is "amd64" it might return "GOAMD64", "v2".
+func GOGOARCH() (name, value string) {
+ switch GOARCH {
+ case "386":
+ return "GO386", GO386
+ case "amd64":
+ return "GOAMD64", fmt.Sprintf("v%d", GOAMD64)
+ case "arm":
+ return "GOARM", strconv.Itoa(GOARM)
+ case "mips", "mipsle":
+ return "GOMIPS", GOMIPS
+ case "mips64", "mips64le":
+ return "GOMIPS64", GOMIPS64
+ case "ppc64", "ppc64le":
+ return "GOPPC64", fmt.Sprintf("power%d", GOPPC64)
+ case "wasm":
+ return "GOWASM", GOWASM.String()
+ }
+ return "", ""
+}
+
+func gogoarchTags() []string {
+ switch GOARCH {
+ case "386":
+ return []string{GOARCH + "." + GO386}
+ case "amd64":
+ var list []string
+ for i := 1; i <= GOAMD64; i++ {
+ list = append(list, fmt.Sprintf("%s.v%d", GOARCH, i))
+ }
+ return list
+ case "arm":
+ var list []string
+ for i := 5; i <= GOARM; i++ {
+ list = append(list, fmt.Sprintf("%s.%d", GOARCH, i))
+ }
+ return list
+ case "mips", "mipsle":
+ return []string{GOARCH + "." + GOMIPS}
+ case "mips64", "mips64le":
+ return []string{GOARCH + "." + GOMIPS64}
+ case "ppc64", "ppc64le":
+ var list []string
+ for i := 8; i <= GOPPC64; i++ {
+ list = append(list, fmt.Sprintf("%s.power%d", GOARCH, i))
+ }
+ return list
+ case "wasm":
+ var list []string
+ if GOWASM.SatConv {
+ list = append(list, GOARCH+".satconv")
+ }
+ if GOWASM.SignExt {
+ list = append(list, GOARCH+".signext")
+ }
+ return list
+ }
+ return nil
+}
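Illustrative note (not part of the patch): the sub-architecture settings above surface to build tools through GOGOARCH, which reports the GO$GOARCH name/value pair for the target, and ToolTags, which carries the goexperiment.* tags plus the cumulative sub-architecture tags (for example GOAMD64=v3 yields amd64.v1, amd64.v2 and amd64.v3). A sketch, again assuming it runs inside the Go tree because internal/buildcfg is not importable elsewhere:

    package main

    import (
        "fmt"
        "internal/buildcfg" // importable only from within the Go source tree
    )

    func main() {
        // Exit with a fatal error if the environment held an invalid
        // setting such as GOAMD64=v9.
        buildcfg.Check()

        // For GOARCH=amd64 and GOAMD64=v3 this prints "GOAMD64 v3".
        name, value := buildcfg.GOGOARCH()
        fmt.Println(name, value)

        // Experiment tags plus cumulative sub-architecture tags,
        // e.g. [... amd64.v1 amd64.v2 amd64.v3] for GOAMD64=v3.
        fmt.Println(buildcfg.ToolTags)
    }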
diff --git a/src/internal/buildcfg/cfg_test.go b/src/internal/buildcfg/cfg_test.go
new file mode 100644
index 0000000..0123593
--- /dev/null
+++ b/src/internal/buildcfg/cfg_test.go
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildcfg
+
+import (
+ "os"
+ "testing"
+)
+
+func TestConfigFlags(t *testing.T) {
+ os.Setenv("GOAMD64", "v1")
+ if goamd64() != 1 {
+ t.Errorf("Wrong parsing of GOAMD64=v1")
+ }
+ os.Setenv("GOAMD64", "v4")
+ if goamd64() != 4 {
+ t.Errorf("Wrong parsing of GOAMD64=v4")
+ }
+ Error = nil
+ os.Setenv("GOAMD64", "1")
+ if goamd64(); Error == nil {
+ t.Errorf("Wrong parsing of GOAMD64=1")
+ }
+}
diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go
new file mode 100644
index 0000000..513070c
--- /dev/null
+++ b/src/internal/buildcfg/exp.go
@@ -0,0 +1,190 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildcfg
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "internal/goexperiment"
+)
+
+// ExperimentFlags represents a set of GOEXPERIMENT flags relative to a baseline
+// (platform-default) experiment configuration.
+type ExperimentFlags struct {
+ goexperiment.Flags
+ baseline goexperiment.Flags
+}
+
+// Experiment contains the toolchain experiments enabled for the
+// current build.
+//
+// (This is not necessarily the set of experiments the compiler itself
+// was built with.)
+//
+// experimentBaseline specifies the experiment flags that are enabled by
+// default in the current toolchain. This is, in effect, the "control"
+// configuration and any variation from this is an experiment.
+var Experiment ExperimentFlags = func() ExperimentFlags {
+ flags, err := ParseGOEXPERIMENT(GOOS, GOARCH, envOr("GOEXPERIMENT", defaultGOEXPERIMENT))
+ if err != nil {
+ Error = err
+ return ExperimentFlags{}
+ }
+ return *flags
+}()
+
+// DefaultGOEXPERIMENT is the embedded default GOEXPERIMENT string.
+// It is not guaranteed to be canonical.
+const DefaultGOEXPERIMENT = defaultGOEXPERIMENT
+
+// FramePointerEnabled enables the use of platform conventions for
+// saving frame pointers.
+//
+// This used to be an experiment, but now it's always enabled on
+// platforms that support it.
+//
+// Note: must agree with runtime.framepointer_enabled.
+var FramePointerEnabled = GOARCH == "amd64" || GOARCH == "arm64"
+
+// ParseGOEXPERIMENT parses a (GOOS, GOARCH, GOEXPERIMENT)
+// configuration tuple and returns the enabled and baseline experiment
+// flag sets.
+//
+// TODO(mdempsky): Move to internal/goexperiment.
+func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) {
+ // regabiSupported is set to true on platforms where register ABI is
+ // supported and enabled by default.
+ // regabiAlwaysOn is set to true on platforms where register ABI is
+ // always on.
+ var regabiSupported, regabiAlwaysOn bool
+ switch goarch {
+ case "amd64", "arm64", "ppc64le", "ppc64", "riscv64":
+ regabiAlwaysOn = true
+ regabiSupported = true
+ }
+
+ baseline := goexperiment.Flags{
+ RegabiWrappers: regabiSupported,
+ RegabiArgs: regabiSupported,
+ CoverageRedesign: true,
+ }
+
+ // Start with the statically enabled set of experiments.
+ flags := &ExperimentFlags{
+ Flags: baseline,
+ baseline: baseline,
+ }
+
+ // Pick up any changes to the baseline configuration from the
+ // GOEXPERIMENT environment. This can be set at make.bash time
+ // and overridden at build time.
+ if goexp != "" {
+ // Create a map of known experiment names.
+ names := make(map[string]func(bool))
+ rv := reflect.ValueOf(&flags.Flags).Elem()
+ rt := rv.Type()
+ for i := 0; i < rt.NumField(); i++ {
+ field := rv.Field(i)
+ names[strings.ToLower(rt.Field(i).Name)] = field.SetBool
+ }
+
+ // "regabi" is an alias for all working regabi
+ // subexperiments, and not an experiment itself. Doing
+		// this as an alias makes both "regabi" and "noregabi"
+ // do the right thing.
+ names["regabi"] = func(v bool) {
+ flags.RegabiWrappers = v
+ flags.RegabiArgs = v
+ }
+
+ // Parse names.
+ for _, f := range strings.Split(goexp, ",") {
+ if f == "" {
+ continue
+ }
+ if f == "none" {
+ // GOEXPERIMENT=none disables all experiment flags.
+ // This is used by cmd/dist, which doesn't know how
+ // to build with any experiment flags.
+ flags.Flags = goexperiment.Flags{}
+ continue
+ }
+ val := true
+ if strings.HasPrefix(f, "no") {
+ f, val = f[2:], false
+ }
+ set, ok := names[f]
+ if !ok {
+ return nil, fmt.Errorf("unknown GOEXPERIMENT %s", f)
+ }
+ set(val)
+ }
+ }
+
+ if regabiAlwaysOn {
+ flags.RegabiWrappers = true
+ flags.RegabiArgs = true
+ }
+ // regabi is only supported on amd64, arm64, riscv64, ppc64 and ppc64le.
+ if !regabiSupported {
+ flags.RegabiWrappers = false
+ flags.RegabiArgs = false
+ }
+ // Check regabi dependencies.
+ if flags.RegabiArgs && !flags.RegabiWrappers {
+ return nil, fmt.Errorf("GOEXPERIMENT regabiargs requires regabiwrappers")
+ }
+ return flags, nil
+}
+
+// String returns the canonical GOEXPERIMENT string to enable this experiment
+// configuration. (Experiments in the same state as in the baseline are elided.)
+func (exp *ExperimentFlags) String() string {
+ return strings.Join(expList(&exp.Flags, &exp.baseline, false), ",")
+}
+
+// expList returns the list of lower-cased experiment names for
+// experiments that differ from base. base may be nil to indicate no
+// experiments. If all is true, then include all experiment flags,
+// regardless of base.
+func expList(exp, base *goexperiment.Flags, all bool) []string {
+ var list []string
+ rv := reflect.ValueOf(exp).Elem()
+ var rBase reflect.Value
+ if base != nil {
+ rBase = reflect.ValueOf(base).Elem()
+ }
+ rt := rv.Type()
+ for i := 0; i < rt.NumField(); i++ {
+ name := strings.ToLower(rt.Field(i).Name)
+ val := rv.Field(i).Bool()
+ baseVal := false
+ if base != nil {
+ baseVal = rBase.Field(i).Bool()
+ }
+ if all || val != baseVal {
+ if val {
+ list = append(list, name)
+ } else {
+ list = append(list, "no"+name)
+ }
+ }
+ }
+ return list
+}
+
+// Enabled returns a list of enabled experiments, as
+// lower-cased experiment names.
+func (exp *ExperimentFlags) Enabled() []string {
+ return expList(&exp.Flags, nil, false)
+}
+
+// All returns a list of all experiment settings.
+// Disabled experiments appear in the list prefixed by "no".
+func (exp *ExperimentFlags) All() []string {
+ return expList(&exp.Flags, nil, true)
+}
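Illustrative note (not part of the patch): ParseGOEXPERIMENT can be exercised directly. GOEXPERIMENT=none clears every flag, after which the architecture constraints are reapplied (amd64 forces the regabi sub-experiments back on because regabiAlwaysOn is set for it), and the canonical string reports only the differences from the platform baseline. A sketch, assuming it is built inside the Go tree:

    package main

    import (
        "fmt"
        "internal/buildcfg" // importable only from within the Go source tree
    )

    func main() {
        // "none" clears every experiment flag; on amd64 the regabi
        // sub-experiments are then forced back on by regabiAlwaysOn.
        flags, err := buildcfg.ParseGOEXPERIMENT("linux", "amd64", "none")
        if err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println("canonical:", flags.String())  // experiments that differ from the baseline
        fmt.Println("enabled:  ", flags.Enabled()) // enabled experiments only
        fmt.Println("all:      ", flags.All())     // every flag, disabled ones prefixed with "no"
    }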
diff --git a/src/internal/bytealg/bytealg.go b/src/internal/bytealg/bytealg.go
new file mode 100644
index 0000000..28f2742
--- /dev/null
+++ b/src/internal/bytealg/bytealg.go
@@ -0,0 +1,155 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytealg
+
+import (
+ "internal/cpu"
+ "unsafe"
+)
+
+// Offsets into internal/cpu records for use in assembly.
+const (
+ offsetX86HasSSE42 = unsafe.Offsetof(cpu.X86.HasSSE42)
+ offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
+ offsetX86HasPOPCNT = unsafe.Offsetof(cpu.X86.HasPOPCNT)
+
+ offsetS390xHasVX = unsafe.Offsetof(cpu.S390X.HasVX)
+
+ offsetPPC64HasPOWER9 = unsafe.Offsetof(cpu.PPC64.IsPOWER9)
+)
+
+// MaxLen is the maximum length of the string to be searched for (argument b) in Index.
+// If MaxLen is not 0, make sure MaxLen >= 4.
+var MaxLen int
+
+// FIXME: the logic of HashStrBytes, HashStrRevBytes, IndexRabinKarpBytes and HashStr, HashStrRev,
+// IndexRabinKarp are exactly the same, except that the types are different. Can we eliminate
+// three of them without causing allocation?
+
+// PrimeRK is the prime base used in the Rabin-Karp algorithm.
+const PrimeRK = 16777619
+
+// HashStrBytes returns the hash and the appropriate multiplicative
+// factor for use in the Rabin-Karp algorithm.
+func HashStrBytes(sep []byte) (uint32, uint32) {
+ hash := uint32(0)
+ for i := 0; i < len(sep); i++ {
+ hash = hash*PrimeRK + uint32(sep[i])
+ }
+ var pow, sq uint32 = 1, PrimeRK
+ for i := len(sep); i > 0; i >>= 1 {
+ if i&1 != 0 {
+ pow *= sq
+ }
+ sq *= sq
+ }
+ return hash, pow
+}
+
+// HashStr returns the hash and the appropriate multiplicative
+// factor for use in the Rabin-Karp algorithm.
+func HashStr(sep string) (uint32, uint32) {
+ hash := uint32(0)
+ for i := 0; i < len(sep); i++ {
+ hash = hash*PrimeRK + uint32(sep[i])
+ }
+ var pow, sq uint32 = 1, PrimeRK
+ for i := len(sep); i > 0; i >>= 1 {
+ if i&1 != 0 {
+ pow *= sq
+ }
+ sq *= sq
+ }
+ return hash, pow
+}
+
+// HashStrRevBytes returns the hash of the reverse of sep and the
+// appropriate multiplicative factor for use in the Rabin-Karp algorithm.
+func HashStrRevBytes(sep []byte) (uint32, uint32) {
+ hash := uint32(0)
+ for i := len(sep) - 1; i >= 0; i-- {
+ hash = hash*PrimeRK + uint32(sep[i])
+ }
+ var pow, sq uint32 = 1, PrimeRK
+ for i := len(sep); i > 0; i >>= 1 {
+ if i&1 != 0 {
+ pow *= sq
+ }
+ sq *= sq
+ }
+ return hash, pow
+}
+
+// HashStrRev returns the hash of the reverse of sep and the
+// appropriate multiplicative factor for use in the Rabin-Karp algorithm.
+func HashStrRev(sep string) (uint32, uint32) {
+ hash := uint32(0)
+ for i := len(sep) - 1; i >= 0; i-- {
+ hash = hash*PrimeRK + uint32(sep[i])
+ }
+ var pow, sq uint32 = 1, PrimeRK
+ for i := len(sep); i > 0; i >>= 1 {
+ if i&1 != 0 {
+ pow *= sq
+ }
+ sq *= sq
+ }
+ return hash, pow
+}
+
+// IndexRabinKarpBytes uses the Rabin-Karp search algorithm to return the index of the
+// first occurrence of sep in s, or -1 if not present.
+func IndexRabinKarpBytes(s, sep []byte) int {
+ // Rabin-Karp search
+ hashsep, pow := HashStrBytes(sep)
+ n := len(sep)
+ var h uint32
+ for i := 0; i < n; i++ {
+ h = h*PrimeRK + uint32(s[i])
+ }
+ if h == hashsep && Equal(s[:n], sep) {
+ return 0
+ }
+ for i := n; i < len(s); {
+ h *= PrimeRK
+ h += uint32(s[i])
+ h -= pow * uint32(s[i-n])
+ i++
+ if h == hashsep && Equal(s[i-n:i], sep) {
+ return i - n
+ }
+ }
+ return -1
+}
+
+// IndexRabinKarp uses the Rabin-Karp search algorithm to return the index of the
+// first occurrence of substr in s, or -1 if not present.
+func IndexRabinKarp(s, substr string) int {
+ // Rabin-Karp search
+ hashss, pow := HashStr(substr)
+ n := len(substr)
+ var h uint32
+ for i := 0; i < n; i++ {
+ h = h*PrimeRK + uint32(s[i])
+ }
+ if h == hashss && s[:n] == substr {
+ return 0
+ }
+ for i := n; i < len(s); {
+ h *= PrimeRK
+ h += uint32(s[i])
+ h -= pow * uint32(s[i-n])
+ i++
+ if h == hashss && s[i-n:i] == substr {
+ return i - n
+ }
+ }
+ return -1
+}
+
+// MakeNoZero makes a slice of length and capacity n without zeroing the bytes.
+// It is the caller's responsibility to ensure uninitialized bytes
+// do not leak to the end user.
+func MakeNoZero(n int) []byte
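Illustrative note (not part of the patch): the Rabin-Karp routines above roll the window hash with h = h*PrimeRK + s[i] - pow*s[i-n], where pow = PrimeRK**n. A standalone sketch of the same search, reimplemented here because internal/bytealg cannot be imported outside the tree:

    package main

    import "fmt"

    const primeRK = 16777619 // matches bytealg.PrimeRK

    // indexRabinKarp mirrors bytealg.IndexRabinKarp: hash the pattern, then
    // slide a rolling hash of the same width across s and confirm candidate
    // matches with a direct string comparison.
    func indexRabinKarp(s, substr string) int {
        n := len(substr)
        if n > len(s) {
            return -1 // the bytealg version leaves this guarantee to its callers
        }
        // Hash of substr and the multiplier pow = primeRK**n (by squaring).
        var hash, pow, sq uint32 = 0, 1, primeRK
        for i := 0; i < n; i++ {
            hash = hash*primeRK + uint32(substr[i])
        }
        for i := n; i > 0; i >>= 1 {
            if i&1 != 0 {
                pow *= sq
            }
            sq *= sq
        }
        // Hash of the first window of s.
        var h uint32
        for i := 0; i < n; i++ {
            h = h*primeRK + uint32(s[i])
        }
        if h == hash && s[:n] == substr {
            return 0
        }
        for i := n; i < len(s); i++ {
            // Slide the window: bring in s[i], drop s[i-n].
            h = h*primeRK + uint32(s[i]) - pow*uint32(s[i-n])
            if h == hash && s[i+1-n:i+1] == substr {
                return i + 1 - n
            }
        }
        return -1
    }

    func main() {
        fmt.Println(indexRabinKarp("hello, world", "world")) // 7
        fmt.Println(indexRabinKarp("hello, world", "orld!")) // -1
    }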
diff --git a/src/internal/bytealg/compare_386.s b/src/internal/bytealg/compare_386.s
new file mode 100644
index 0000000..27b660c
--- /dev/null
+++ b/src/internal/bytealg/compare_386.s
@@ -0,0 +1,144 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Compare(SB),NOSPLIT,$0-28
+ MOVL a_base+0(FP), SI
+ MOVL a_len+4(FP), BX
+ MOVL b_base+12(FP), DI
+ MOVL b_len+16(FP), DX
+ LEAL ret+24(FP), AX
+ JMP cmpbody<>(SB)
+
+TEXT runtime·cmpstring(SB),NOSPLIT,$0-20
+ MOVL a_base+0(FP), SI
+ MOVL a_len+4(FP), BX
+ MOVL b_base+8(FP), DI
+ MOVL b_len+12(FP), DX
+ LEAL ret+16(FP), AX
+ JMP cmpbody<>(SB)
+
+// input:
+// SI = a
+// DI = b
+// BX = alen
+// DX = blen
+// AX = address of return word (set to 1/0/-1)
+TEXT cmpbody<>(SB),NOSPLIT,$0-0
+ MOVL DX, BP
+ SUBL BX, DX // DX = blen-alen
+ JLE 2(PC)
+ MOVL BX, BP // BP = min(alen, blen)
+ CMPL SI, DI
+ JEQ allsame
+ CMPL BP, $4
+ JB small
+#ifdef GO386_softfloat
+ JMP mediumloop
+#endif
+largeloop:
+ CMPL BP, $16
+ JB mediumloop
+ MOVOU (SI), X0
+ MOVOU (DI), X1
+ PCMPEQB X0, X1
+ PMOVMSKB X1, BX
+ XORL $0xffff, BX // convert EQ to NE
+ JNE diff16 // branch if at least one byte is not equal
+ ADDL $16, SI
+ ADDL $16, DI
+ SUBL $16, BP
+ JMP largeloop
+
+diff16:
+ BSFL BX, BX // index of first byte that differs
+ XORL DX, DX
+ MOVB (SI)(BX*1), CX
+ CMPB CX, (DI)(BX*1)
+ SETHI DX
+ LEAL -1(DX*2), DX // convert 1/0 to +1/-1
+ MOVL DX, (AX)
+ RET
+
+mediumloop:
+ CMPL BP, $4
+ JBE _0through4
+ MOVL (SI), BX
+ MOVL (DI), CX
+ CMPL BX, CX
+ JNE diff4
+ ADDL $4, SI
+ ADDL $4, DI
+ SUBL $4, BP
+ JMP mediumloop
+
+_0through4:
+ MOVL -4(SI)(BP*1), BX
+ MOVL -4(DI)(BP*1), CX
+ CMPL BX, CX
+ JEQ allsame
+
+diff4:
+ BSWAPL BX // reverse order of bytes
+ BSWAPL CX
+ XORL BX, CX // find bit differences
+ BSRL CX, CX // index of highest bit difference
+ SHRL CX, BX // move a's bit to bottom
+ ANDL $1, BX // mask bit
+ LEAL -1(BX*2), BX // 1/0 => +1/-1
+ MOVL BX, (AX)
+ RET
+
+ // 0-3 bytes in common
+small:
+ LEAL (BP*8), CX
+ NEGL CX
+ JEQ allsame
+
+ // load si
+ CMPB SI, $0xfc
+ JA si_high
+ MOVL (SI), SI
+ JMP si_finish
+si_high:
+ MOVL -4(SI)(BP*1), SI
+ SHRL CX, SI
+si_finish:
+ SHLL CX, SI
+
+ // same for di
+ CMPB DI, $0xfc
+ JA di_high
+ MOVL (DI), DI
+ JMP di_finish
+di_high:
+ MOVL -4(DI)(BP*1), DI
+ SHRL CX, DI
+di_finish:
+ SHLL CX, DI
+
+ BSWAPL SI // reverse order of bytes
+ BSWAPL DI
+ XORL SI, DI // find bit differences
+ JEQ allsame
+ BSRL DI, CX // index of highest bit difference
+ SHRL CX, SI // move a's bit to bottom
+ ANDL $1, SI // mask bit
+ LEAL -1(SI*2), BX // 1/0 => +1/-1
+ MOVL BX, (AX)
+ RET
+
+ // all the bytes in common are the same, so we just need
+ // to compare the lengths.
+allsame:
+ XORL BX, BX
+ XORL CX, CX
+ TESTL DX, DX
+ SETLT BX // 1 if alen > blen
+ SETEQ CX // 1 if alen == blen
+ LEAL -1(CX)(BX*2), BX // 1,0,-1 result
+ MOVL BX, (AX)
+ RET
diff --git a/src/internal/bytealg/compare_amd64.s b/src/internal/bytealg/compare_amd64.s
new file mode 100644
index 0000000..fdd015f
--- /dev/null
+++ b/src/internal/bytealg/compare_amd64.s
@@ -0,0 +1,237 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "asm_amd64.h"
+#include "textflag.h"
+
+TEXT ·Compare<ABIInternal>(SB),NOSPLIT,$0-56
+ // AX = a_base (want in SI)
+ // BX = a_len (want in BX)
+ // CX = a_cap (unused)
+ // DI = b_base (want in DI)
+ // SI = b_len (want in DX)
+ // R8 = b_cap (unused)
+ MOVQ SI, DX
+ MOVQ AX, SI
+ JMP cmpbody<>(SB)
+
+TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT,$0-40
+ // AX = a_base (want in SI)
+ // BX = a_len (want in BX)
+ // CX = b_base (want in DI)
+ // DI = b_len (want in DX)
+ MOVQ AX, SI
+ MOVQ DI, DX
+ MOVQ CX, DI
+ JMP cmpbody<>(SB)
+
+// input:
+// SI = a
+// DI = b
+// BX = alen
+// DX = blen
+// output:
+// AX = output (-1/0/1)
+TEXT cmpbody<>(SB),NOSPLIT,$0-0
+ CMPQ SI, DI
+ JEQ allsame
+ CMPQ BX, DX
+ MOVQ DX, R8
+ CMOVQLT BX, R8 // R8 = min(alen, blen) = # of bytes to compare
+ CMPQ R8, $8
+ JB small
+
+ CMPQ R8, $63
+ JBE loop
+#ifndef hasAVX2
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
+ JEQ big_loop_avx2
+ JMP big_loop
+#else
+ JMP big_loop_avx2
+#endif
+loop:
+ CMPQ R8, $16
+ JBE _0through16
+ MOVOU (SI), X0
+ MOVOU (DI), X1
+ PCMPEQB X0, X1
+ PMOVMSKB X1, AX
+ XORQ $0xffff, AX // convert EQ to NE
+ JNE diff16 // branch if at least one byte is not equal
+ ADDQ $16, SI
+ ADDQ $16, DI
+ SUBQ $16, R8
+ JMP loop
+
+diff64:
+ ADDQ $48, SI
+ ADDQ $48, DI
+ JMP diff16
+diff48:
+ ADDQ $32, SI
+ ADDQ $32, DI
+ JMP diff16
+diff32:
+ ADDQ $16, SI
+ ADDQ $16, DI
+ // AX = bit mask of differences
+diff16:
+ BSFQ AX, BX // index of first byte that differs
+ XORQ AX, AX
+ MOVB (SI)(BX*1), CX
+ CMPB CX, (DI)(BX*1)
+ SETHI AX
+ LEAQ -1(AX*2), AX // convert 1/0 to +1/-1
+ RET
+
+ // 0 through 16 bytes left, alen>=8, blen>=8
+_0through16:
+ CMPQ R8, $8
+ JBE _0through8
+ MOVQ (SI), AX
+ MOVQ (DI), CX
+ CMPQ AX, CX
+ JNE diff8
+_0through8:
+ MOVQ -8(SI)(R8*1), AX
+ MOVQ -8(DI)(R8*1), CX
+ CMPQ AX, CX
+ JEQ allsame
+
+ // AX and CX contain parts of a and b that differ.
+diff8:
+ BSWAPQ AX // reverse order of bytes
+ BSWAPQ CX
+ XORQ AX, CX
+ BSRQ CX, CX // index of highest bit difference
+ SHRQ CX, AX // move a's bit to bottom
+ ANDQ $1, AX // mask bit
+ LEAQ -1(AX*2), AX // 1/0 => +1/-1
+ RET
+
+ // 0-7 bytes in common
+small:
+ LEAQ (R8*8), CX // bytes left -> bits left
+	NEGQ	CX		// - bits left (== 64 - bits left mod 64)
+ JEQ allsame
+
+	// load bytes of a into high bytes of SI
+ CMPB SI, $0xf8
+ JA si_high
+ MOVQ (SI), SI
+ JMP si_finish
+si_high:
+ MOVQ -8(SI)(R8*1), SI
+ SHRQ CX, SI
+si_finish:
+ SHLQ CX, SI
+
+	// load bytes of b into high bytes of DI
+ CMPB DI, $0xf8
+ JA di_high
+ MOVQ (DI), DI
+ JMP di_finish
+di_high:
+ MOVQ -8(DI)(R8*1), DI
+ SHRQ CX, DI
+di_finish:
+ SHLQ CX, DI
+
+ BSWAPQ SI // reverse order of bytes
+ BSWAPQ DI
+ XORQ SI, DI // find bit differences
+ JEQ allsame
+ BSRQ DI, CX // index of highest bit difference
+ SHRQ CX, SI // move a's bit to bottom
+ ANDQ $1, SI // mask bit
+ LEAQ -1(SI*2), AX // 1/0 => +1/-1
+ RET
+
+allsame:
+ XORQ AX, AX
+ XORQ CX, CX
+ CMPQ BX, DX
+ SETGT AX // 1 if alen > blen
+ SETEQ CX // 1 if alen == blen
+ LEAQ -1(CX)(AX*2), AX // 1,0,-1 result
+ RET
+
+ // this works for >= 64 bytes of data.
+#ifndef hasAVX2
+big_loop:
+ MOVOU (SI), X0
+ MOVOU (DI), X1
+ PCMPEQB X0, X1
+ PMOVMSKB X1, AX
+ XORQ $0xffff, AX
+ JNE diff16
+
+ MOVOU 16(SI), X0
+ MOVOU 16(DI), X1
+ PCMPEQB X0, X1
+ PMOVMSKB X1, AX
+ XORQ $0xffff, AX
+ JNE diff32
+
+ MOVOU 32(SI), X0
+ MOVOU 32(DI), X1
+ PCMPEQB X0, X1
+ PMOVMSKB X1, AX
+ XORQ $0xffff, AX
+ JNE diff48
+
+ MOVOU 48(SI), X0
+ MOVOU 48(DI), X1
+ PCMPEQB X0, X1
+ PMOVMSKB X1, AX
+ XORQ $0xffff, AX
+ JNE diff64
+
+ ADDQ $64, SI
+ ADDQ $64, DI
+ SUBQ $64, R8
+ CMPQ R8, $64
+ JBE loop
+ JMP big_loop
+#endif
+
+	// Compare 64 bytes per loop iteration.
+ // Loop is unrolled and uses AVX2.
+big_loop_avx2:
+ VMOVDQU (SI), Y2
+ VMOVDQU (DI), Y3
+ VMOVDQU 32(SI), Y4
+ VMOVDQU 32(DI), Y5
+ VPCMPEQB Y2, Y3, Y0
+ VPMOVMSKB Y0, AX
+ XORL $0xffffffff, AX
+ JNE diff32_avx2
+ VPCMPEQB Y4, Y5, Y6
+ VPMOVMSKB Y6, AX
+ XORL $0xffffffff, AX
+ JNE diff64_avx2
+
+ ADDQ $64, SI
+ ADDQ $64, DI
+ SUBQ $64, R8
+ CMPQ R8, $64
+ JB big_loop_avx2_exit
+ JMP big_loop_avx2
+
+ // Avoid AVX->SSE transition penalty and search first 32 bytes of 64 byte chunk.
+diff32_avx2:
+ VZEROUPPER
+ JMP diff16
+
+ // Same as diff32_avx2, but for last 32 bytes.
+diff64_avx2:
+ VZEROUPPER
+ JMP diff48
+
+ // For <64 bytes remainder jump to normal loop.
+big_loop_avx2_exit:
+ VZEROUPPER
+ JMP loop
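Illustrative note (not part of the patch): the allsame block above resolves the result without branches. SETGT/SETEQ produce one-bit flags for alen > blen and alen == blen, and LEAQ -1(CX)(AX*2), AX folds them into 2*gt + eq - 1, which is +1, 0 or -1. A small Go check of that identity:

    package main

    import "fmt"

    // cmpLen mirrors the arithmetic in the amd64 allsame block:
    // 2*gt + eq - 1 is +1 when alen > blen, 0 when equal, -1 otherwise.
    func cmpLen(alen, blen int) int {
        gt, eq := 0, 0
        if alen > blen {
            gt = 1
        }
        if alen == blen {
            eq = 1
        }
        return 2*gt + eq - 1
    }

    func main() {
        fmt.Println(cmpLen(3, 2), cmpLen(2, 2), cmpLen(1, 2)) // 1 0 -1
    }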
diff --git a/src/internal/bytealg/compare_arm.s b/src/internal/bytealg/compare_arm.s
new file mode 100644
index 0000000..80d01a2
--- /dev/null
+++ b/src/internal/bytealg/compare_arm.s
@@ -0,0 +1,86 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Compare(SB),NOSPLIT|NOFRAME,$0-28
+ MOVW a_base+0(FP), R2
+ MOVW a_len+4(FP), R0
+ MOVW b_base+12(FP), R3
+ MOVW b_len+16(FP), R1
+ ADD $28, R13, R7
+ B cmpbody<>(SB)
+
+TEXT runtime·cmpstring(SB),NOSPLIT|NOFRAME,$0-20
+ MOVW a_base+0(FP), R2
+ MOVW a_len+4(FP), R0
+ MOVW b_base+8(FP), R3
+ MOVW b_len+12(FP), R1
+ ADD $20, R13, R7
+ B cmpbody<>(SB)
+
+// On entry:
+// R0 is the length of a
+// R1 is the length of b
+// R2 points to the start of a
+// R3 points to the start of b
+// R7 points to return value (-1/0/1 will be written here)
+//
+// On exit:
+// R4, R5, R6 and R8 are clobbered
+TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0-0
+ CMP R2, R3
+ BEQ samebytes
+ CMP R0, R1
+ MOVW R0, R6
+ MOVW.LT R1, R6 // R6 is min(R0, R1)
+
+ CMP $0, R6
+ BEQ samebytes
+ CMP $4, R6
+ ADD R2, R6 // R2 is current byte in a, R6 is the end of the range to compare
+ BLT byte_loop // length < 4
+ AND $3, R2, R8
+ CMP $0, R8
+ BNE byte_loop // unaligned a, use byte-wise compare (TODO: try to align a)
+aligned_a:
+ AND $3, R3, R8
+ CMP $0, R8
+ BNE byte_loop // unaligned b, use byte-wise compare
+ AND $0xfffffffc, R6, R8
+ // length >= 4
+chunk4_loop:
+ MOVW.P 4(R2), R4
+ MOVW.P 4(R3), R5
+ CMP R4, R5
+ BNE cmp
+ CMP R2, R8
+ BNE chunk4_loop
+ CMP R2, R6
+ BEQ samebytes // all compared bytes were the same; compare lengths
+byte_loop:
+ MOVBU.P 1(R2), R4
+ MOVBU.P 1(R3), R5
+ CMP R4, R5
+ BNE ret
+ CMP R2, R6
+ BNE byte_loop
+samebytes:
+ CMP R0, R1
+ MOVW.LT $1, R0
+ MOVW.GT $-1, R0
+ MOVW.EQ $0, R0
+ MOVW R0, (R7)
+ RET
+ret:
+ // bytes differed
+ MOVW.LT $1, R0
+ MOVW.GT $-1, R0
+ MOVW R0, (R7)
+ RET
+cmp:
+ SUB $4, R2, R2
+ SUB $4, R3, R3
+ B byte_loop
diff --git a/src/internal/bytealg/compare_arm64.s b/src/internal/bytealg/compare_arm64.s
new file mode 100644
index 0000000..cc02c46
--- /dev/null
+++ b/src/internal/bytealg/compare_arm64.s
@@ -0,0 +1,125 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Compare<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-56
+ // R0 = a_base (want in R0)
+ // R1 = a_len (want in R1)
+ // R2 = a_cap (unused)
+ // R3 = b_base (want in R2)
+ // R4 = b_len (want in R3)
+ // R5 = b_cap (unused)
+ MOVD R3, R2
+ MOVD R4, R3
+ B cmpbody<>(SB)
+
+TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
+ // R0 = a_base
+ // R1 = a_len
+ // R2 = b_base
+ // R3 = b_len
+ B cmpbody<>(SB)
+
+// On entry:
+// R0 points to the start of a
+// R1 is the length of a
+// R2 points to the start of b
+// R3 is the length of b
+//
+// On exit:
+// R0 is the result
+// R4, R5, R6, R8, R9 and R10 are clobbered
+TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0-0
+ CMP R0, R2
+ BEQ samebytes // same starting pointers; compare lengths
+ CMP R1, R3
+ CSEL LT, R3, R1, R6 // R6 is min(R1, R3)
+
+ CBZ R6, samebytes
+ BIC $0xf, R6, R10
+ CBZ R10, small // length < 16
+ ADD R0, R10 // end of chunk16
+ // length >= 16
+chunk16_loop:
+ LDP.P 16(R0), (R4, R8)
+ LDP.P 16(R2), (R5, R9)
+ CMP R4, R5
+ BNE cmp
+ CMP R8, R9
+ BNE cmpnext
+ CMP R10, R0
+ BNE chunk16_loop
+ AND $0xf, R6, R6
+ CBZ R6, samebytes
+ SUBS $8, R6
+ BLT tail
+ // the length of tail > 8 bytes
+ MOVD.P 8(R0), R4
+ MOVD.P 8(R2), R5
+ CMP R4, R5
+ BNE cmp
+ SUB $8, R6
+ // compare last 8 bytes
+tail:
+ MOVD (R0)(R6), R4
+ MOVD (R2)(R6), R5
+ CMP R4, R5
+ BEQ samebytes
+cmp:
+ REV R4, R4
+ REV R5, R5
+ CMP R4, R5
+ret:
+ MOVD $1, R0
+ CNEG HI, R0, R0
+ RET
+small:
+ TBZ $3, R6, lt_8
+ MOVD (R0), R4
+ MOVD (R2), R5
+ CMP R4, R5
+ BNE cmp
+ SUBS $8, R6
+ BEQ samebytes
+ ADD $8, R0
+ ADD $8, R2
+ SUB $8, R6
+ B tail
+lt_8:
+ TBZ $2, R6, lt_4
+ MOVWU (R0), R4
+ MOVWU (R2), R5
+ CMPW R4, R5
+ BNE cmp
+ SUBS $4, R6
+ BEQ samebytes
+ ADD $4, R0
+ ADD $4, R2
+lt_4:
+ TBZ $1, R6, lt_2
+ MOVHU (R0), R4
+ MOVHU (R2), R5
+ CMPW R4, R5
+ BNE cmp
+ ADD $2, R0
+ ADD $2, R2
+lt_2:
+ TBZ $0, R6, samebytes
+one:
+ MOVBU (R0), R4
+ MOVBU (R2), R5
+ CMPW R4, R5
+ BNE ret
+samebytes:
+ CMP R3, R1
+ CSET NE, R0
+ CNEG LO, R0, R0
+ RET
+cmpnext:
+ REV R8, R4
+ REV R9, R5
+ CMP R4, R5
+ B ret
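Illustrative note (not part of the patch): the REV before the final CMP above is the same trick the amd64 body uses with BSWAPQ. Byte-reversing a little-endian 8-byte load gives the big-endian value, so an ordinary unsigned compare then orders the chunks byte by byte from the front. A Go sketch of the idea:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // cmp8 compares two 8-byte chunks the way the assembly does: interpret
    // them as big-endian integers (equivalent to load plus byte reverse on a
    // little-endian machine) and compare as unsigned numbers, which matches
    // lexicographic byte order.
    func cmp8(a, b []byte) int {
        x := binary.BigEndian.Uint64(a)
        y := binary.BigEndian.Uint64(b)
        switch {
        case x < y:
            return -1
        case x > y:
            return 1
        }
        return 0
    }

    func main() {
        fmt.Println(cmp8([]byte("abcdefgh"), []byte("abcdefgi"))) // -1, last byte decides
        fmt.Println(cmp8([]byte("abcdefgh"), []byte("abcdefgh"))) // 0
    }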
diff --git a/src/internal/bytealg/compare_generic.go b/src/internal/bytealg/compare_generic.go
new file mode 100644
index 0000000..b04e275
--- /dev/null
+++ b/src/internal/bytealg/compare_generic.go
@@ -0,0 +1,60 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !386 && !amd64 && !s390x && !arm && !arm64 && !loong64 && !ppc64 && !ppc64le && !mips && !mipsle && !wasm && !mips64 && !mips64le && !riscv64
+
+package bytealg
+
+import _ "unsafe" // for go:linkname
+
+func Compare(a, b []byte) int {
+ l := len(a)
+ if len(b) < l {
+ l = len(b)
+ }
+ if l == 0 || &a[0] == &b[0] {
+ goto samebytes
+ }
+ for i := 0; i < l; i++ {
+ c1, c2 := a[i], b[i]
+ if c1 < c2 {
+ return -1
+ }
+ if c1 > c2 {
+ return +1
+ }
+ }
+samebytes:
+ if len(a) < len(b) {
+ return -1
+ }
+ if len(a) > len(b) {
+ return +1
+ }
+ return 0
+}
+
+//go:linkname runtime_cmpstring runtime.cmpstring
+func runtime_cmpstring(a, b string) int {
+ l := len(a)
+ if len(b) < l {
+ l = len(b)
+ }
+ for i := 0; i < l; i++ {
+ c1, c2 := a[i], b[i]
+ if c1 < c2 {
+ return -1
+ }
+ if c1 > c2 {
+ return +1
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ }
+ if len(a) > len(b) {
+ return +1
+ }
+ return 0
+}
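Illustrative note (not part of the patch): outside the runtime this code is reached through the public bytes.Compare, and the runtime.cmpstring linkname serves the built-in string comparison operators, so the -1/0/1 contract can be checked with ordinary code:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        // bytes.Compare is backed by the bytealg Compare shown in this patch
        // (assembly on supported architectures, this generic fallback elsewhere).
        fmt.Println(bytes.Compare([]byte("abc"), []byte("abd")))  // -1
        fmt.Println(bytes.Compare([]byte("abc"), []byte("abc")))  // 0
        fmt.Println(bytes.Compare([]byte("abcd"), []byte("abc"))) // 1: longer wins on a shared prefix
    }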
diff --git a/src/internal/bytealg/compare_loong64.s b/src/internal/bytealg/compare_loong64.s
new file mode 100644
index 0000000..c89c5a9
--- /dev/null
+++ b/src/internal/bytealg/compare_loong64.s
@@ -0,0 +1,87 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Compare(SB),NOSPLIT,$0-56
+ MOVV a_base+0(FP), R6
+ MOVV b_base+24(FP), R7
+ MOVV a_len+8(FP), R4
+ MOVV b_len+32(FP), R5
+ MOVV $ret+48(FP), R13
+ JMP cmpbody<>(SB)
+
+TEXT runtime·cmpstring(SB),NOSPLIT,$0-40
+ MOVV a_base+0(FP), R6
+ MOVV b_base+16(FP), R7
+ MOVV a_len+8(FP), R4
+ MOVV b_len+24(FP), R5
+ MOVV $ret+32(FP), R13
+ JMP cmpbody<>(SB)
+
+// On entry:
+// R4 length of a
+// R5 length of b
+// R6 points to the start of a
+// R7 points to the start of b
+// R13 points to the return value (-1/0/1)
+TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0
+ BEQ R6, R7, samebytes // same start of a and b
+
+ SGTU R4, R5, R9
+ BNE R0, R9, r2_lt_r1
+ MOVV R4, R14
+ JMP entry
+r2_lt_r1:
+ MOVV R5, R14 // R14 is min(R4, R5)
+entry:
+ ADDV R6, R14, R12 // R6 start of a, R14 end of a
+ BEQ R6, R12, samebytes // length is 0
+
+ SRLV $4, R14 // R14 is number of chunks
+ BEQ R0, R14, byte_loop
+
+ // make sure both a and b are aligned.
+ OR R6, R7, R15
+ AND $7, R15
+ BNE R0, R15, byte_loop
+
+ PCALIGN $16
+chunk16_loop:
+ BEQ R0, R14, byte_loop
+ MOVV (R6), R8
+ MOVV (R7), R9
+ BNE R8, R9, byte_loop
+ MOVV 8(R6), R16
+ MOVV 8(R7), R17
+ ADDV $16, R6
+ ADDV $16, R7
+ SUBVU $1, R14
+ BEQ R16, R17, chunk16_loop
+ SUBV $8, R6
+ SUBV $8, R7
+
+byte_loop:
+ BEQ R6, R12, samebytes
+ MOVBU (R6), R8
+ ADDVU $1, R6
+ MOVBU (R7), R9
+ ADDVU $1, R7
+ BEQ R8, R9, byte_loop
+
+byte_cmp:
+ SGTU R8, R9, R12 // R12 = 1 if (R8 > R9)
+ BNE R0, R12, ret
+ MOVV $-1, R12
+ JMP ret
+
+samebytes:
+ SGTU R4, R5, R8
+ SGTU R5, R4, R9
+ SUBV R9, R8, R12
+
+ret:
+ MOVV R12, (R13)
+ RET
diff --git a/src/internal/bytealg/compare_mips64x.s b/src/internal/bytealg/compare_mips64x.s
new file mode 100644
index 0000000..117a9ef
--- /dev/null
+++ b/src/internal/bytealg/compare_mips64x.s
@@ -0,0 +1,88 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips64 || mips64le
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Compare(SB),NOSPLIT,$0-56
+ MOVV a_base+0(FP), R3
+ MOVV b_base+24(FP), R4
+ MOVV a_len+8(FP), R1
+ MOVV b_len+32(FP), R2
+ MOVV $ret+48(FP), R9
+ JMP cmpbody<>(SB)
+
+TEXT runtime·cmpstring(SB),NOSPLIT,$0-40
+ MOVV a_base+0(FP), R3
+ MOVV b_base+16(FP), R4
+ MOVV a_len+8(FP), R1
+ MOVV b_len+24(FP), R2
+ MOVV $ret+32(FP), R9
+ JMP cmpbody<>(SB)
+
+// On entry:
+// R1 length of a
+// R2 length of b
+// R3 points to the start of a
+// R4 points to the start of b
+// R9 points to the return value (-1/0/1)
+TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0
+ BEQ R3, R4, samebytes // same start of a and b
+
+ SGTU R1, R2, R7
+ BNE R0, R7, r2_lt_r1
+ MOVV R1, R10
+ JMP entry
+r2_lt_r1:
+ MOVV R2, R10 // R10 is min(R1, R2)
+entry:
+ ADDV R3, R10, R8 // R3 start of a, R8 end of a
+ BEQ R3, R8, samebytes // length is 0
+
+ SRLV $4, R10 // R10 is number of chunks
+ BEQ R0, R10, byte_loop
+
+ // make sure both a and b are aligned.
+ OR R3, R4, R11
+ AND $7, R11
+ BNE R0, R11, byte_loop
+
+chunk16_loop:
+ BEQ R0, R10, byte_loop
+ MOVV (R3), R6
+ MOVV (R4), R7
+ BNE R6, R7, byte_loop
+ MOVV 8(R3), R13
+ MOVV 8(R4), R14
+ ADDV $16, R3
+ ADDV $16, R4
+ SUBVU $1, R10
+ BEQ R13, R14, chunk16_loop
+ SUBV $8, R3
+ SUBV $8, R4
+
+byte_loop:
+ BEQ R3, R8, samebytes
+ MOVBU (R3), R6
+ ADDVU $1, R3
+ MOVBU (R4), R7
+ ADDVU $1, R4
+ BEQ R6, R7, byte_loop
+
+byte_cmp:
+ SGTU R6, R7, R8 // R8 = 1 if (R6 > R7)
+ BNE R0, R8, ret
+ MOVV $-1, R8
+ JMP ret
+
+samebytes:
+ SGTU R1, R2, R6
+ SGTU R2, R1, R7
+ SUBV R7, R6, R8
+
+ret:
+ MOVV R8, (R9)
+ RET
diff --git a/src/internal/bytealg/compare_mipsx.s b/src/internal/bytealg/compare_mipsx.s
new file mode 100644
index 0000000..857ac13
--- /dev/null
+++ b/src/internal/bytealg/compare_mipsx.s
@@ -0,0 +1,72 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips || mipsle
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Compare(SB),NOSPLIT,$0-28
+ MOVW a_base+0(FP), R3
+ MOVW b_base+12(FP), R4
+ MOVW a_len+4(FP), R1
+ MOVW b_len+16(FP), R2
+ BEQ R3, R4, samebytes
+ SGTU R1, R2, R7
+ MOVW R1, R8
+ CMOVN R7, R2, R8 // R8 is min(R1, R2)
+
+ ADDU R3, R8 // R3 is current byte in a, R8 is last byte in a to compare
+loop:
+ BEQ R3, R8, samebytes
+
+ MOVBU (R3), R6
+ ADDU $1, R3
+ MOVBU (R4), R7
+ ADDU $1, R4
+ BEQ R6, R7 , loop
+
+ SGTU R6, R7, R8
+ MOVW $-1, R6
+ CMOVZ R8, R6, R8
+ JMP cmp_ret
+samebytes:
+ SGTU R1, R2, R6
+ SGTU R2, R1, R7
+ SUBU R7, R6, R8
+cmp_ret:
+ MOVW R8, ret+24(FP)
+ RET
+
+TEXT runtime·cmpstring(SB),NOSPLIT,$0-20
+ MOVW a_base+0(FP), R3
+ MOVW a_len+4(FP), R1
+ MOVW b_base+8(FP), R4
+ MOVW b_len+12(FP), R2
+ BEQ R3, R4, samebytes
+ SGTU R1, R2, R7
+ MOVW R1, R8
+ CMOVN R7, R2, R8 // R8 is min(R1, R2)
+
+ ADDU R3, R8 // R3 is current byte in a, R8 is last byte in a to compare
+loop:
+ BEQ R3, R8, samebytes // all compared bytes were the same; compare lengths
+
+ MOVBU (R3), R6
+ ADDU $1, R3
+ MOVBU (R4), R7
+ ADDU $1, R4
+ BEQ R6, R7 , loop
+ // bytes differed
+ SGTU R6, R7, R8
+ MOVW $-1, R6
+ CMOVZ R8, R6, R8
+ JMP cmp_ret
+samebytes:
+ SGTU R1, R2, R6
+ SGTU R2, R1, R7
+ SUBU R7, R6, R8
+cmp_ret:
+ MOVW R8, ret+16(FP)
+ RET
diff --git a/src/internal/bytealg/compare_native.go b/src/internal/bytealg/compare_native.go
new file mode 100644
index 0000000..34964e2
--- /dev/null
+++ b/src/internal/bytealg/compare_native.go
@@ -0,0 +1,19 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64 || s390x || arm || arm64 || loong64 || ppc64 || ppc64le || mips || mipsle || wasm || mips64 || mips64le || riscv64
+
+package bytealg
+
+import _ "unsafe" // For go:linkname
+
+//go:noescape
+func Compare(a, b []byte) int
+
+// The declaration below generates ABI wrappers for functions
+// implemented in assembly in this package but declared in another
+// package.
+
+//go:linkname abigen_runtime_cmpstring runtime.cmpstring
+func abigen_runtime_cmpstring(a, b string) int
diff --git a/src/internal/bytealg/compare_ppc64x.s b/src/internal/bytealg/compare_ppc64x.s
new file mode 100644
index 0000000..63c33ee
--- /dev/null
+++ b/src/internal/bytealg/compare_ppc64x.s
@@ -0,0 +1,332 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Helper names for x-form loads in BE ordering.
+#ifdef GOARCH_ppc64le
+#define _LDBEX MOVDBR
+#define _LWBEX MOVWBR
+#define _LHBEX MOVHBR
+#else
+#define _LDBEX MOVD
+#define _LWBEX MOVW
+#define _LHBEX MOVH
+#endif
+
+#ifdef GOPPC64_power9
+#define SETB_CR0(rout) SETB CR0, rout
+#define SETB_CR1(rout) SETB CR1, rout
+#define SETB_INIT()
+#define SETB_CR0_NE(rout) SETB_CR0(rout)
+#else
+// A helper macro to emulate SETB on P8. This assumes
+// -1 is in R20, and 1 is in R21. crxlt and crxeq must
+// also be the same CR field.
+#define _SETB(crxlt, crxeq, rout) \
+ ISEL crxeq,R0,R21,rout \
+ ISEL crxlt,R20,rout,rout
+
+// A special case when it is known the comparison
+// will always be not equal. The result must be -1 or 1.
+#define SETB_CR0_NE(rout) \
+ ISEL CR0LT,R20,R21,rout
+
+#define SETB_CR0(rout) _SETB(CR0LT, CR0EQ, rout)
+#define SETB_CR1(rout) _SETB(CR1LT, CR1EQ, rout)
+#define SETB_INIT() \
+ MOVD $-1,R20 \
+ MOVD $1,R21
+#endif
+
+TEXT ·Compare<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-56
+ // incoming:
+ // R3 a addr
+ // R4 a len
+ // R6 b addr
+ // R7 b len
+ //
+ // on entry to cmpbody:
+ // R3 return value if len(a) == len(b)
+ // R5 a addr
+ // R6 b addr
+ // R9 min(len(a),len(b))
+ SETB_INIT()
+ MOVD R3,R5
+ CMP R4,R7,CR0
+ CMP R3,R6,CR7
+ ISEL CR0LT,R4,R7,R9
+ SETB_CR0(R3)
+ BC $12,30,LR // beqlr cr7
+ BR cmpbody<>(SB)
+
+TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
+ // incoming:
+ // R3 a addr -> R5
+ // R4 a len -> R3
+ // R5 b addr -> R6
+ // R6 b len -> R4
+ //
+ // on entry to cmpbody:
+	// R3 return value if the compared bytes are equal.
+ // R5 a addr
+ // R6 b addr
+ // R9 min(len(a),len(b))
+ SETB_INIT()
+ CMP R4,R6,CR0
+ CMP R3,R5,CR7
+ ISEL CR0LT,R4,R6,R9
+ MOVD R5,R6
+ MOVD R3,R5
+ SETB_CR0(R3)
+ BC $12,30,LR // beqlr cr7
+ BR cmpbody<>(SB)
+
+#ifdef GOARCH_ppc64le
+DATA byteswap<>+0(SB)/8, $0x0706050403020100
+DATA byteswap<>+8(SB)/8, $0x0f0e0d0c0b0a0908
+GLOBL byteswap<>+0(SB), RODATA, $16
+#define SWAP V21
+#endif
+
+TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0-0
+start:
+ CMP R9,$16,CR0
+ CMP R9,$32,CR1
+ CMP R9,$64,CR2
+ MOVD $16,R10
+ BLT cmp8
+ BLT CR1,cmp16
+ BLT CR2,cmp32
+
+cmp64: // >= 64B
+ DCBT (R5) // optimize for size>=64
+ DCBT (R6) // cache hint
+
+ SRD $6,R9,R14 // There is at least one iteration.
+ MOVD R14,CTR
+ ANDCC $63,R9,R9
+ CMP R9,$16,CR1 // Do setup for tail check early on.
+ CMP R9,$32,CR2
+ CMP R9,$48,CR3
+ ADD $-16,R9,R9
+
+ MOVD $32,R11 // set offsets to load into vector
+ MOVD $48,R12 // set offsets to load into vector
+
+ PCALIGN $16
+cmp64_loop:
+ LXVD2X (R5)(R0),V3 // load bytes of A at offset 0 into vector
+ LXVD2X (R6)(R0),V4 // load bytes of B at offset 0 into vector
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different // jump out if its different
+
+ LXVD2X (R5)(R10),V3 // load bytes of A at offset 16 into vector
+ LXVD2X (R6)(R10),V4 // load bytes of B at offset 16 into vector
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ LXVD2X (R5)(R11),V3 // load bytes of A at offset 32 into vector
+ LXVD2X (R6)(R11),V4 // load bytes of B at offset 32 into vector
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+	LXVD2X	(R5)(R12),V3	// load bytes of A at offset 48 into vector
+	LXVD2X	(R6)(R12),V4	// load bytes of B at offset 48 into vector
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ ADD $64,R5,R5 // increment to next 64 bytes of A
+ ADD $64,R6,R6 // increment to next 64 bytes of B
+ BDNZ cmp64_loop
+ BC $12,2,LR // beqlr
+
+ // Finish out tail with minimal overlapped checking.
+ // Note, 0 tail is handled by beqlr above.
+ BLE CR1,cmp64_tail_gt0
+ BLE CR2,cmp64_tail_gt16
+ BLE CR3,cmp64_tail_gt32
+
+cmp64_tail_gt48: // 49 - 63 B
+ LXVD2X (R0)(R5),V3
+ LXVD2X (R0)(R6),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ LXVD2X (R5)(R10),V3
+ LXVD2X (R6)(R10),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ LXVD2X (R5)(R11),V3
+ LXVD2X (R6)(R11),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ BR cmp64_tail_gt0
+
+ PCALIGN $16
+cmp64_tail_gt32: // 33 - 48B
+ LXVD2X (R0)(R5),V3
+ LXVD2X (R0)(R6),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ LXVD2X (R5)(R10),V3
+ LXVD2X (R6)(R10),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ BR cmp64_tail_gt0
+
+ PCALIGN $16
+cmp64_tail_gt16: // 17 - 32B
+ LXVD2X (R0)(R5),V3
+ LXVD2X (R0)(R6),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ BR cmp64_tail_gt0
+
+ PCALIGN $16
+cmp64_tail_gt0: // 1 - 16B
+ LXVD2X (R5)(R9),V3
+ LXVD2X (R6)(R9),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ RET
+
+ PCALIGN $16
+cmp32: // 32 - 63B
+ ANDCC $31,R9,R9
+
+ LXVD2X (R0)(R5),V3
+ LXVD2X (R0)(R6),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ LXVD2X (R10)(R5),V3
+ LXVD2X (R10)(R6),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ BC $12,2,LR // beqlr
+ ADD R9,R10,R10
+
+ LXVD2X (R9)(R5),V3
+ LXVD2X (R9)(R6),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+
+ LXVD2X (R10)(R5),V3
+ LXVD2X (R10)(R6),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+ RET
+
+ PCALIGN $16
+cmp16: // 16 - 31B
+ ANDCC $15,R9,R9
+ LXVD2X (R0)(R5),V3
+ LXVD2X (R0)(R6),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+ BC $12,2,LR // beqlr
+
+ LXVD2X (R9)(R5),V3
+ LXVD2X (R9)(R6),V4
+ VCMPEQUDCC V3,V4,V1
+ BGE CR6,different
+ RET
+
+ PCALIGN $16
+different:
+#ifdef GOARCH_ppc64le
+ MOVD $byteswap<>+00(SB),R16
+ LXVD2X (R16)(R0),SWAP // Set up swap string
+
+ VPERM V3,V3,SWAP,V3
+ VPERM V4,V4,SWAP,V4
+#endif
+
+ MFVSRD VS35,R16 // move upper doublewords of A and B into GPR for comparison
+ MFVSRD VS36,R10
+
+ CMPU R16,R10
+ BEQ lower
+ SETB_CR0_NE(R3)
+ RET
+
+ PCALIGN $16
+lower:
+ VSLDOI $8,V3,V3,V3 // move lower doublewords of A and B into GPR for comparison
+ MFVSRD VS35,R16
+ VSLDOI $8,V4,V4,V4
+ MFVSRD VS36,R10
+
+ CMPU R16,R10
+ SETB_CR0_NE(R3)
+ RET
+
+ PCALIGN $16
+cmp8: // 8 - 15B
+ CMP R9,$8
+ BLT cmp4
+ ANDCC $7,R9,R9
+ _LDBEX (R0)(R5),R10
+ _LDBEX (R0)(R6),R11
+ _LDBEX (R9)(R5),R12
+ _LDBEX (R9)(R6),R14
+ CMPU R10,R11,CR0
+ SETB_CR0(R5)
+ CMPU R12,R14,CR1
+ SETB_CR1(R6)
+ CRAND CR0EQ,CR1EQ,CR1EQ // If both equal, length determines return value.
+ ISEL CR0EQ,R6,R5,R4
+ ISEL CR1EQ,R3,R4,R3
+ RET
+
+ PCALIGN $16
+cmp4: // 4 - 7B
+ CMP R9,$4
+ BLT cmp2
+ ANDCC $3,R9,R9
+ _LWBEX (R0)(R5),R10
+ _LWBEX (R0)(R6),R11
+ _LWBEX (R9)(R5),R12
+ _LWBEX (R9)(R6),R14
+ RLDIMI $32,R10,$0,R12
+ RLDIMI $32,R11,$0,R14
+ CMPU R12,R14
+ BR cmp0
+
+ PCALIGN $16
+cmp2: // 2 - 3B
+ CMP R9,$2
+ BLT cmp1
+ ANDCC $1,R9,R9
+ _LHBEX (R0)(R5),R10
+ _LHBEX (R0)(R6),R11
+ _LHBEX (R9)(R5),R12
+ _LHBEX (R9)(R6),R14
+ RLDIMI $32,R10,$0,R12
+ RLDIMI $32,R11,$0,R14
+ CMPU R12,R14
+ BR cmp0
+
+ PCALIGN $16
+cmp1:
+ CMP R9,$0
+ BEQ cmp0
+ MOVBZ (R5),R10
+ MOVBZ (R6),R11
+ CMPU R10,R11
+cmp0:
+ SETB_CR0(R6)
+ ISEL CR0EQ,R3,R6,R3
+ RET
diff --git a/src/internal/bytealg/compare_riscv64.s b/src/internal/bytealg/compare_riscv64.s
new file mode 100644
index 0000000..a4164a2
--- /dev/null
+++ b/src/internal/bytealg/compare_riscv64.s
@@ -0,0 +1,222 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Compare<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-56
+ // X10 = a_base
+ // X11 = a_len
+ // X12 = a_cap (unused)
+ // X13 = b_base (want in X12)
+ // X14 = b_len (want in X13)
+ // X15 = b_cap (unused)
+ MOV X13, X12
+ MOV X14, X13
+ JMP compare<>(SB)
+
+TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
+ // X10 = a_base
+ // X11 = a_len
+ // X12 = b_base
+ // X13 = b_len
+ JMP compare<>(SB)
+
+// On entry:
+// X10 points to start of a
+// X11 length of a
+// X12 points to start of b
+// X13 length of b
+// for non-regabi X14 points to the address to store the return value (-1/0/1)
+// for regabi the return value is in X10
+TEXT compare<>(SB),NOSPLIT|NOFRAME,$0
+ BEQ X10, X12, cmp_len
+
+ MOV X11, X5
+ BGE X13, X5, use_a_len // X5 = min(len(a), len(b))
+ MOV X13, X5
+use_a_len:
+ BEQZ X5, cmp_len
+
+ MOV $32, X6
+ BLT X5, X6, check8_unaligned
+
+ // Check alignment - if alignment differs we have to do one byte at a time.
+ AND $7, X10, X7
+ AND $7, X12, X8
+ BNE X7, X8, check8_unaligned
+ BEQZ X7, compare32
+
+ // Check one byte at a time until we reach 8 byte alignment.
+ SUB X7, X0, X7
+ ADD $8, X7, X7
+ SUB X7, X5, X5
+align:
+ ADD $-1, X7
+ MOVBU 0(X10), X8
+ MOVBU 0(X12), X9
+ BNE X8, X9, cmp
+ ADD $1, X10
+ ADD $1, X12
+ BNEZ X7, align
+
+check32:
+ // X6 contains $32
+ BLT X5, X6, compare16
+compare32:
+ MOV 0(X10), X15
+ MOV 0(X12), X16
+ MOV 8(X10), X17
+ MOV 8(X12), X18
+ BNE X15, X16, cmp8a
+ BNE X17, X18, cmp8b
+ MOV 16(X10), X15
+ MOV 16(X12), X16
+ MOV 24(X10), X17
+ MOV 24(X12), X18
+ BNE X15, X16, cmp8a
+ BNE X17, X18, cmp8b
+ ADD $32, X10
+ ADD $32, X12
+ ADD $-32, X5
+ BGE X5, X6, compare32
+ BEQZ X5, cmp_len
+
+check16:
+ MOV $16, X6
+ BLT X5, X6, check8_unaligned
+compare16:
+ MOV 0(X10), X15
+ MOV 0(X12), X16
+ MOV 8(X10), X17
+ MOV 8(X12), X18
+ BNE X15, X16, cmp8a
+ BNE X17, X18, cmp8b
+ ADD $16, X10
+ ADD $16, X12
+ ADD $-16, X5
+ BEQZ X5, cmp_len
+
+check8_unaligned:
+ MOV $8, X6
+ BLT X5, X6, check4_unaligned
+compare8_unaligned:
+ MOVBU 0(X10), X8
+ MOVBU 1(X10), X15
+ MOVBU 2(X10), X17
+ MOVBU 3(X10), X19
+ MOVBU 4(X10), X21
+ MOVBU 5(X10), X23
+ MOVBU 6(X10), X25
+ MOVBU 7(X10), X29
+ MOVBU 0(X12), X9
+ MOVBU 1(X12), X16
+ MOVBU 2(X12), X18
+ MOVBU 3(X12), X20
+ MOVBU 4(X12), X22
+ MOVBU 5(X12), X24
+ MOVBU 6(X12), X28
+ MOVBU 7(X12), X30
+ BNE X8, X9, cmp1a
+ BNE X15, X16, cmp1b
+ BNE X17, X18, cmp1c
+ BNE X19, X20, cmp1d
+ BNE X21, X22, cmp1e
+ BNE X23, X24, cmp1f
+ BNE X25, X28, cmp1g
+ BNE X29, X30, cmp1h
+ ADD $8, X10
+ ADD $8, X12
+ ADD $-8, X5
+ BGE X5, X6, compare8_unaligned
+ BEQZ X5, cmp_len
+
+check4_unaligned:
+ MOV $4, X6
+ BLT X5, X6, compare1
+compare4_unaligned:
+ MOVBU 0(X10), X8
+ MOVBU 1(X10), X15
+ MOVBU 2(X10), X17
+ MOVBU 3(X10), X19
+ MOVBU 0(X12), X9
+ MOVBU 1(X12), X16
+ MOVBU 2(X12), X18
+ MOVBU 3(X12), X20
+ BNE X8, X9, cmp1a
+ BNE X15, X16, cmp1b
+ BNE X17, X18, cmp1c
+ BNE X19, X20, cmp1d
+ ADD $4, X10
+ ADD $4, X12
+ ADD $-4, X5
+ BGE X5, X6, compare4_unaligned
+
+compare1:
+ BEQZ X5, cmp_len
+ MOVBU 0(X10), X8
+ MOVBU 0(X12), X9
+ BNE X8, X9, cmp
+ ADD $1, X10
+ ADD $1, X12
+ ADD $-1, X5
+ JMP compare1
+
+ // Compare 8 bytes of memory in X15/X16 that are known to differ.
+cmp8a:
+ MOV X15, X17
+ MOV X16, X18
+
+ // Compare 8 bytes of memory in X17/X18 that are known to differ.
+cmp8b:
+ MOV $0xff, X19
+cmp8_loop:
+ AND X17, X19, X8
+ AND X18, X19, X9
+ BNE X8, X9, cmp
+ SLLI $8, X19
+ JMP cmp8_loop
+
+cmp1a:
+ SLTU X9, X8, X5
+ SLTU X8, X9, X6
+ JMP cmp_ret
+cmp1b:
+ SLTU X16, X15, X5
+ SLTU X15, X16, X6
+ JMP cmp_ret
+cmp1c:
+ SLTU X18, X17, X5
+ SLTU X17, X18, X6
+ JMP cmp_ret
+cmp1d:
+ SLTU X20, X19, X5
+ SLTU X19, X20, X6
+ JMP cmp_ret
+cmp1e:
+ SLTU X22, X21, X5
+ SLTU X21, X22, X6
+ JMP cmp_ret
+cmp1f:
+ SLTU X24, X23, X5
+ SLTU X23, X24, X6
+ JMP cmp_ret
+cmp1g:
+ SLTU X28, X25, X5
+ SLTU X25, X28, X6
+ JMP cmp_ret
+cmp1h:
+ SLTU X30, X29, X5
+ SLTU X29, X30, X6
+ JMP cmp_ret
+
+cmp_len:
+ MOV X11, X8
+ MOV X13, X9
+cmp:
+ SLTU X9, X8, X5
+ SLTU X8, X9, X6
+cmp_ret:
+ SUB X5, X6, X10
+ RET
diff --git a/src/internal/bytealg/compare_s390x.s b/src/internal/bytealg/compare_s390x.s
new file mode 100644
index 0000000..5394548
--- /dev/null
+++ b/src/internal/bytealg/compare_s390x.s
@@ -0,0 +1,69 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Compare(SB),NOSPLIT|NOFRAME,$0-56
+ MOVD a_base+0(FP), R3
+ MOVD a_len+8(FP), R4
+ MOVD b_base+24(FP), R5
+ MOVD b_len+32(FP), R6
+ LA ret+48(FP), R7
+ BR cmpbody<>(SB)
+
+TEXT runtime·cmpstring(SB),NOSPLIT|NOFRAME,$0-40
+ MOVD a_base+0(FP), R3
+ MOVD a_len+8(FP), R4
+ MOVD b_base+16(FP), R5
+ MOVD b_len+24(FP), R6
+ LA ret+32(FP), R7
+ BR cmpbody<>(SB)
+
+// input:
+// R3 = a
+// R4 = alen
+// R5 = b
+// R6 = blen
+// R7 = address of output word (stores -1/0/1 here)
+TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0-0
+ CMPBEQ R3, R5, cmplengths
+ MOVD R4, R8
+ CMPBLE R4, R6, amin
+ MOVD R6, R8
+amin:
+ CMPBEQ R8, $0, cmplengths
+ CMP R8, $256
+ BLE tail
+loop:
+ CLC $256, 0(R3), 0(R5)
+ BGT gt
+ BLT lt
+ SUB $256, R8
+ MOVD $256(R3), R3
+ MOVD $256(R5), R5
+ CMP R8, $256
+ BGT loop
+tail:
+ SUB $1, R8
+ EXRL $cmpbodyclc<>(SB), R8
+ BGT gt
+ BLT lt
+cmplengths:
+ CMP R4, R6
+ BEQ eq
+ BLT lt
+gt:
+ MOVD $1, 0(R7)
+ RET
+lt:
+ MOVD $-1, 0(R7)
+ RET
+eq:
+ MOVD $0, 0(R7)
+ RET
+
+TEXT cmpbodyclc<>(SB),NOSPLIT|NOFRAME,$0-0
+ CLC $1, 0(R3), 0(R5)
+ RET
diff --git a/src/internal/bytealg/compare_wasm.s b/src/internal/bytealg/compare_wasm.s
new file mode 100644
index 0000000..dc8fb33
--- /dev/null
+++ b/src/internal/bytealg/compare_wasm.s
@@ -0,0 +1,115 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Compare(SB), NOSPLIT, $0-56
+ Get SP
+ I64Load a_base+0(FP)
+ I64Load a_len+8(FP)
+ I64Load b_base+24(FP)
+ I64Load b_len+32(FP)
+ Call cmpbody<>(SB)
+ I64Store ret+48(FP)
+ RET
+
+TEXT runtime·cmpstring(SB), NOSPLIT, $0-40
+ Get SP
+ I64Load a_base+0(FP)
+ I64Load a_len+8(FP)
+ I64Load b_base+16(FP)
+ I64Load b_len+24(FP)
+ Call cmpbody<>(SB)
+ I64Store ret+32(FP)
+ RET
+
+// params: a, alen, b, blen
+// ret: -1/0/1
+TEXT cmpbody<>(SB), NOSPLIT, $0-0
+ // len = min(alen, blen)
+ Get R1
+ Get R3
+ Get R1
+ Get R3
+ I64LtU
+ Select
+ Set R4
+
+ Get R0
+ I32WrapI64
+ Get R2
+ I32WrapI64
+ Get R4
+ I32WrapI64
+ Call memcmp<>(SB)
+ I64ExtendI32S
+ Tee R5
+
+ I64Eqz
+ If
+ // check length
+ Get R1
+ Get R3
+ I64Sub
+ Set R5
+ End
+
+ I64Const $0
+ I64Const $-1
+ I64Const $1
+ Get R5
+ I64Const $0
+ I64LtS
+ Select
+ Get R5
+ I64Eqz
+ Select
+ Return
+
+// compiled with emscripten
+// params: a, b, len
+// ret: <0/0/>0
+TEXT memcmp<>(SB), NOSPLIT, $0-0
+ Get R2
+ If $1
+ Loop
+ Get R0
+ I32Load8S $0
+ Tee R3
+ Get R1
+ I32Load8S $0
+ Tee R4
+ I32Eq
+ If
+ Get R0
+ I32Const $1
+ I32Add
+ Set R0
+ Get R1
+ I32Const $1
+ I32Add
+ Set R1
+ I32Const $0
+ Get R2
+ I32Const $-1
+ I32Add
+ Tee R2
+ I32Eqz
+ BrIf $3
+ Drop
+ Br $1
+ End
+ End
+ Get R3
+ I32Const $255
+ I32And
+ Get R4
+ I32Const $255
+ I32And
+ I32Sub
+ Else
+ I32Const $0
+ End
+ Return
diff --git a/src/internal/bytealg/count_amd64.s b/src/internal/bytealg/count_amd64.s
new file mode 100644
index 0000000..efb17f8
--- /dev/null
+++ b/src/internal/bytealg/count_amd64.s
@@ -0,0 +1,208 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "asm_amd64.h"
+#include "textflag.h"
+
+TEXT ·Count(SB),NOSPLIT,$0-40
+#ifndef hasPOPCNT
+ CMPB internal∕cpu·X86+const_offsetX86HasPOPCNT(SB), $1
+ JEQ 2(PC)
+ JMP ·countGeneric(SB)
+#endif
+ MOVQ b_base+0(FP), SI
+ MOVQ b_len+8(FP), BX
+ MOVB c+24(FP), AL
+ LEAQ ret+32(FP), R8
+ JMP countbody<>(SB)
+
+TEXT ·CountString(SB),NOSPLIT,$0-32
+#ifndef hasPOPCNT
+ CMPB internal∕cpu·X86+const_offsetX86HasPOPCNT(SB), $1
+ JEQ 2(PC)
+ JMP ·countGenericString(SB)
+#endif
+ MOVQ s_base+0(FP), SI
+ MOVQ s_len+8(FP), BX
+ MOVB c+16(FP), AL
+ LEAQ ret+24(FP), R8
+ JMP countbody<>(SB)
+
+// input:
+// SI: data
+// BX: data len
+// AL: byte sought
+// R8: address to put result
+// This function requires the POPCNT instruction.
+TEXT countbody<>(SB),NOSPLIT,$0
+ // Shuffle X0 around so that each byte contains
+ // the character we're looking for.
+ MOVD AX, X0
+ PUNPCKLBW X0, X0
+ PUNPCKLBW X0, X0
+ PSHUFL $0, X0, X0
+
+ CMPQ BX, $16
+ JLT small
+
+ MOVQ $0, R12 // Accumulator
+
+ MOVQ SI, DI
+
+ CMPQ BX, $32
+ JA avx2
+sse:
+ LEAQ -16(SI)(BX*1), AX // AX = address of last 16 bytes
+ JMP sseloopentry
+
+sseloop:
+ // Move the next 16-byte chunk of the data into X1.
+ MOVOU (DI), X1
+ // Compare bytes in X0 to X1.
+ PCMPEQB X0, X1
+ // Take the top bit of each byte in X1 and put the result in DX.
+ PMOVMSKB X1, DX
+ // Count number of matching bytes
+ POPCNTL DX, DX
+ // Accumulate into R12
+ ADDQ DX, R12
+ // Advance to next block.
+ ADDQ $16, DI
+sseloopentry:
+ CMPQ DI, AX
+ JBE sseloop
+
+ // Get the number of bytes to consider in the last 16 bytes
+ ANDQ $15, BX
+ JZ end
+
+ // Create mask to ignore overlap between previous 16 byte block
+ // and the next.
+ MOVQ $16,CX
+ SUBQ BX, CX
+ MOVQ $0xFFFF, R10
+ SARQ CL, R10
+ SALQ CL, R10
+
+ // Process the last 16-byte chunk. This chunk may overlap with the
+ // chunks we've already searched so we need to mask part of it.
+ MOVOU (AX), X1
+ PCMPEQB X0, X1
+ PMOVMSKB X1, DX
+ // Apply mask
+ ANDQ R10, DX
+ POPCNTL DX, DX
+ ADDQ DX, R12
+end:
+ MOVQ R12, (R8)
+ RET
+
+// handle lengths < 16
+small:
+ TESTQ BX, BX
+ JEQ endzero
+
+ // Check if we'll load across a page boundary.
+ LEAQ 16(SI), AX
+ TESTW $0xff0, AX
+ JEQ endofpage
+
+ // We must ignore high bytes as they aren't part of our slice.
+ // Create mask.
+ MOVB BX, CX
+ MOVQ $1, R10
+ SALQ CL, R10
+ SUBQ $1, R10
+
+ // Load data
+ MOVOU (SI), X1
+ // Compare target byte with each byte in data.
+ PCMPEQB X0, X1
+ // Move result bits to integer register.
+ PMOVMSKB X1, DX
+ // Apply mask
+ ANDQ R10, DX
+ POPCNTL DX, DX
+ // Directly return DX, we don't need to accumulate
+ // since we have <16 bytes.
+ MOVQ DX, (R8)
+ RET
+endzero:
+ MOVQ $0, (R8)
+ RET
+
+endofpage:
+ // We must ignore low bytes as they aren't part of our slice.
+ MOVQ $16,CX
+ SUBQ BX, CX
+ MOVQ $0xFFFF, R10
+ SARQ CL, R10
+ SALQ CL, R10
+
+ // Load data into the high end of X1.
+ MOVOU -16(SI)(BX*1), X1
+ // Compare target byte with each byte in data.
+ PCMPEQB X0, X1
+ // Move result bits to integer register.
+ PMOVMSKB X1, DX
+ // Apply mask
+ ANDQ R10, DX
+ // Directly return DX, we don't need to accumulate
+ // since we have <16 bytes.
+ POPCNTL DX, DX
+ MOVQ DX, (R8)
+ RET
+
+avx2:
+#ifndef hasAVX2
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
+ JNE sse
+#endif
+ MOVD AX, X0
+ LEAQ -32(SI)(BX*1), R11
+ VPBROADCASTB X0, Y1
+avx2_loop:
+ VMOVDQU (DI), Y2
+ VPCMPEQB Y1, Y2, Y3
+ VPMOVMSKB Y3, DX
+ POPCNTL DX, DX
+ ADDQ DX, R12
+ ADDQ $32, DI
+ CMPQ DI, R11
+ JLE avx2_loop
+
+ // If last block is already processed,
+ // skip to the end.
+ CMPQ DI, R11
+ JEQ endavx
+
+ // Load address of the last 32 bytes.
+ // There is an overlap with the previous block.
+ MOVQ R11, DI
+ VMOVDQU (DI), Y2
+ VPCMPEQB Y1, Y2, Y3
+ VPMOVMSKB Y3, DX
+ // Exit AVX mode.
+ VZEROUPPER
+
+ // Create mask to ignore overlap between previous 32 byte block
+ // and the next.
+ ANDQ $31, BX
+ MOVQ $32,CX
+ SUBQ BX, CX
+ MOVQ $0xFFFFFFFF, R10
+ SARQ CL, R10
+ SALQ CL, R10
+ // Apply mask
+ ANDQ R10, DX
+ POPCNTL DX, DX
+ ADDQ DX, R12
+ MOVQ R12, (R8)
+ RET
+endavx:
+ // Exit AVX mode.
+ VZEROUPPER
+ MOVQ R12, (R8)
+ RET
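
The SSE path above turns each 16-byte chunk into a 16-bit match bitmap (PCMPEQB + PMOVMSKB) and adds its population count to the accumulator; the final, possibly overlapping chunk first masks off the lanes that were already counted. A hedged Go sketch of that masking step (scalar stand-in with illustrative names; the real code builds the bitmap with SIMD):

package main

import (
	"fmt"
	"math/bits"
)

// countChunk16 counts occurrences of c in a 16-byte chunk, ignoring the
// first overlap bytes, which a previous chunk already counted. The mask
// plays the role of the SARQ/SALQ-built mask in the assembly.
func countChunk16(chunk [16]byte, c byte, overlap uint) int {
	var bitmap uint16
	for i, b := range chunk {
		if b == c {
			bitmap |= 1 << i // PCMPEQB + PMOVMSKB
		}
	}
	mask := uint16(0xFFFF) << overlap // clear the already-counted low lanes
	return bits.OnesCount16(bitmap & mask)
}

func main() {
	chunk := [16]byte{'a', 'b', 'a', 'a'}
	fmt.Println(countChunk16(chunk, 'a', 0)) // 3
	fmt.Println(countChunk16(chunk, 'a', 2)) // 2: the first two lanes are masked off
}
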
diff --git a/src/internal/bytealg/count_arm.s b/src/internal/bytealg/count_arm.s
new file mode 100644
index 0000000..f704ea0
--- /dev/null
+++ b/src/internal/bytealg/count_arm.s
@@ -0,0 +1,43 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Count(SB),NOSPLIT,$0-20
+ MOVW b_base+0(FP), R0
+ MOVW b_len+4(FP), R1
+ MOVBU c+12(FP), R2
+ MOVW $ret+16(FP), R7
+ B countbytebody<>(SB)
+
+TEXT ·CountString(SB),NOSPLIT,$0-16
+ MOVW s_base+0(FP), R0
+ MOVW s_len+4(FP), R1
+ MOVBU c+8(FP), R2
+ MOVW $ret+12(FP), R7
+ B countbytebody<>(SB)
+
+// Input:
+// R0: data
+// R1: data length
+// R2: byte to find
+// R7: address to put result
+//
+// On exit:
+// R4 and R8 are clobbered
+TEXT countbytebody<>(SB),NOSPLIT,$0
+ MOVW $0, R8 // R8 = count of byte to search
+ CMP $0, R1
+ B.EQ done // short path to handle 0-byte case
+ ADD R0, R1 // R1 is the end of the range
+byte_loop:
+ MOVBU.P 1(R0), R4
+ CMP R4, R2
+ ADD.EQ $1, R8
+ CMP R0, R1
+ B.NE byte_loop
+done:
+ MOVW R8, (R7)
+ RET
diff --git a/src/internal/bytealg/count_arm64.s b/src/internal/bytealg/count_arm64.s
new file mode 100644
index 0000000..8cd703d
--- /dev/null
+++ b/src/internal/bytealg/count_arm64.s
@@ -0,0 +1,90 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Count(SB),NOSPLIT,$0-40
+ MOVD b_base+0(FP), R0
+ MOVD b_len+8(FP), R2
+ MOVBU c+24(FP), R1
+ MOVD $ret+32(FP), R8
+ B countbytebody<>(SB)
+
+TEXT ·CountString(SB),NOSPLIT,$0-32
+ MOVD s_base+0(FP), R0
+ MOVD s_len+8(FP), R2
+ MOVBU c+16(FP), R1
+ MOVD $ret+24(FP), R8
+ B countbytebody<>(SB)
+
+// input:
+// R0: data
+// R2: data len
+// R1: byte to find
+// R8: address to put result
+TEXT countbytebody<>(SB),NOSPLIT,$0
+ // R11 = count of byte to search
+ MOVD $0, R11
+ // short path to handle 0-byte case
+ CBZ R2, done
+ CMP $0x20, R2
+ // jump directly to tail if length < 32
+ BLO tail
+ ANDS $0x1f, R0, R9
+ BEQ chunk
+ // Work with not 32-byte aligned head
+ BIC $0x1f, R0, R3
+ ADD $0x20, R3
+head_loop:
+ MOVBU.P 1(R0), R5
+ CMP R5, R1
+ CINC EQ, R11, R11
+ SUB $1, R2, R2
+ CMP R0, R3
+ BNE head_loop
+ // Work with 32-byte aligned chunks
+chunk:
+ BIC $0x1f, R2, R9
+ // The first chunk can also be the last
+ CBZ R9, tail
+ // R3 = end of 32-byte chunks
+ ADD R0, R9, R3
+ MOVD $1, R5
+ VMOV R5, V5.B16
+ // R2 = length of tail
+ SUB R9, R2, R2
+ // Duplicate R1 (byte to search) to 16 1-byte elements of V0
+ VMOV R1, V0.B16
+ // Clear the low 64-bit element of V7 and V8
+ VEOR V7.B8, V7.B8, V7.B8
+ VEOR V8.B8, V8.B8, V8.B8
+ // Count the target byte in 32-byte chunk
+chunk_loop:
+ VLD1.P (R0), [V1.B16, V2.B16]
+ CMP R0, R3
+ VCMEQ V0.B16, V1.B16, V3.B16
+ VCMEQ V0.B16, V2.B16, V4.B16
+ // Clear the higher 7 bits
+ VAND V5.B16, V3.B16, V3.B16
+ VAND V5.B16, V4.B16, V4.B16
+ // Count lanes that match the requested byte
+ VADDP V4.B16, V3.B16, V6.B16 // 32B->16B
+ VUADDLV V6.B16, V7
+ // Accumulate the count in low 64-bit element of V8 when inside the loop
+ VADD V7, V8
+ BNE chunk_loop
+ VMOV V8.D[0], R6
+ ADD R6, R11, R11
+ CBZ R2, done
+tail:
+ // Work with tail shorter than 32 bytes
+ MOVBU.P 1(R0), R5
+ SUB $1, R2, R2
+ CMP R5, R1
+ CINC EQ, R11, R11
+ CBNZ R2, tail
+done:
+ MOVD R11, (R8)
+ RET
diff --git a/src/internal/bytealg/count_generic.go b/src/internal/bytealg/count_generic.go
new file mode 100644
index 0000000..932a7c5
--- /dev/null
+++ b/src/internal/bytealg/count_generic.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 && !arm && !arm64 && !ppc64le && !ppc64 && !riscv64 && !s390x
+
+package bytealg
+
+func Count(b []byte, c byte) int {
+ n := 0
+ for _, x := range b {
+ if x == c {
+ n++
+ }
+ }
+ return n
+}
+
+func CountString(s string, c byte) int {
+ n := 0
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ n++
+ }
+ }
+ return n
+}
diff --git a/src/internal/bytealg/count_native.go b/src/internal/bytealg/count_native.go
new file mode 100644
index 0000000..90189c9
--- /dev/null
+++ b/src/internal/bytealg/count_native.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm || arm64 || ppc64le || ppc64 || riscv64 || s390x
+
+package bytealg
+
+//go:noescape
+func Count(b []byte, c byte) int
+
+//go:noescape
+func CountString(s string, c byte) int
+
+// A backup implementation for use by assembly.
+func countGeneric(b []byte, c byte) int {
+ n := 0
+ for _, x := range b {
+ if x == c {
+ n++
+ }
+ }
+ return n
+}
+func countGenericString(s string, c byte) int {
+ n := 0
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ n++
+ }
+ }
+ return n
+}
diff --git a/src/internal/bytealg/count_ppc64x.s b/src/internal/bytealg/count_ppc64x.s
new file mode 100644
index 0000000..2d2490b
--- /dev/null
+++ b/src/internal/bytealg/count_ppc64x.s
@@ -0,0 +1,96 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64le || ppc64
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Count<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
+ // R3 = byte array pointer
+ // R4 = length
+ MOVBZ R6, R5 // R5 = byte
+ BR countbytebody<>(SB)
+
+TEXT ·CountString<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-32
+ // R3 = byte array pointer
+ // R4 = length
+ MOVBZ R5, R5 // R5 = byte
+ BR countbytebody<>(SB)
+
+// R3: addr of string
+// R4: len of string
+// R5: byte to count
+// On exit:
+// R3: return value
+// endianness shouldn't matter since we are just counting and order
+// is irrelevant
+TEXT countbytebody<>(SB), NOSPLIT|NOFRAME, $0-0
+ DCBT (R3) // Prepare cache line.
+ MOVD R0, R18 // byte count
+ MOVD R3, R19 // Save base address for calculating the index later.
+ MOVD R4, R16
+
+ MOVD R5, R6
+ RLDIMI $8, R6, $48, R6
+ RLDIMI $16, R6, $32, R6
+ RLDIMI $32, R6, $0, R6 // fill reg with the byte to count
+
+ VSPLTISW $3, V4 // used for shift
+ MTVRD R6, V1 // move compare byte
+ VSPLTB $7, V1, V1 // replicate byte across V1
+
+ CMPU R4, $32 // Check if it's a small string (<32 bytes)
+ BLT tail // Jump to the small string case
+ XXLXOR VS37, VS37, VS37 // clear V5 (aka VS37) to use as accumulator
+
+cmploop:
+ LXVW4X (R3), VS32 // load bytes from string
+
+ // when the bytes match, the corresponding byte contains all 1s
+ VCMPEQUB V1, V0, V2 // compare bytes
+ VPOPCNTD V2, V3 // each double word contains its count
+ VADDUDM V3, V5, V5 // accumulate bit count in each double word
+ ADD $16, R3, R3 // increment pointer
+ SUB $16, R16, R16 // remaining bytes
+ CMP R16, $16 // at least 16 remaining?
+ BGE cmploop
+ VSRD V5, V4, V5 // shift by 3 to convert bits to bytes
+ VSLDOI $8, V5, V5, V6 // get the double word values from vector
+ MFVSRD V5, R9
+ MFVSRD V6, R10
+ ADD R9, R10, R9
+ ADD R9, R18, R18
+
+tail:
+ CMP R16, $8 // 8 bytes left?
+ BLT small
+
+ MOVD (R3), R12 // load 8 bytes
+ CMPB R12, R6, R17 // compare bytes
+ POPCNTD R17, R15 // bit count
+ SRD $3, R15, R15 // byte count
+ ADD R15, R18, R18 // add to byte count
+
+next1:
+ ADD $8, R3, R3
+ SUB $8, R16, R16 // remaining bytes
+ BR tail
+
+small:
+ CMP $0, R16 // any remaining
+ BEQ done
+ MOVBZ (R3), R12 // check each remaining byte
+ CMP R12, R5
+ BNE next2
+ ADD $1, R18
+
+next2:
+ SUB $1, R16
+ ADD $1, R3 // inc address
+ BR small
+
+done:
+ MOVD R18, R3 // return count
+ RET
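
The 8-byte tail step above relies on CMPB producing 0xFF in every byte lane that matches, so POPCNTD followed by a right shift by 3 turns the bit count into a byte count. A small illustrative Go stand-in for that sequence (the per-lane loop replaces the single CMPB instruction):

package main

import (
	"fmt"
	"math/bits"
)

// countWord8 counts how many bytes of w equal c: a matching lane
// contributes 8 set bits, so popcount/8 is the number of matches.
func countWord8(w uint64, c byte) int {
	var cmpb uint64
	for i := 0; i < 64; i += 8 {
		if byte(w>>i) == c {
			cmpb |= uint64(0xFF) << i // CMPB sets the whole lane
		}
	}
	return bits.OnesCount64(cmpb) >> 3 // POPCNTD, then SRD $3
}

func main() {
	fmt.Println(countWord8(0x0061626161626161, 0x61)) // 5
}
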
diff --git a/src/internal/bytealg/count_riscv64.s b/src/internal/bytealg/count_riscv64.s
new file mode 100644
index 0000000..d123cbd
--- /dev/null
+++ b/src/internal/bytealg/count_riscv64.s
@@ -0,0 +1,47 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Count<ABIInternal>(SB),NOSPLIT,$0-40
+ // X10 = b_base
+ // X11 = b_len
+ // X12 = b_cap (unused)
+ // X13 = byte to count (want in X12)
+ AND $0xff, X13, X12
+ MOV ZERO, X14 // count
+ ADD X10, X11 // end
+
+loop:
+ BEQ X10, X11, done
+ MOVBU (X10), X15
+ ADD $1, X10
+ BNE X12, X15, loop
+ ADD $1, X14
+ JMP loop
+
+done:
+ MOV X14, X10
+ RET
+
+TEXT ·CountString<ABIInternal>(SB),NOSPLIT,$0-32
+ // X10 = s_base
+ // X11 = s_len
+ // X12 = byte to count
+ AND $0xff, X12
+ MOV ZERO, X14 // count
+ ADD X10, X11 // end
+
+loop:
+ BEQ X10, X11, done
+ MOVBU (X10), X15
+ ADD $1, X10
+ BNE X12, X15, loop
+ ADD $1, X14
+ JMP loop
+
+done:
+ MOV X14, X10
+ RET
diff --git a/src/internal/bytealg/count_s390x.s b/src/internal/bytealg/count_s390x.s
new file mode 100644
index 0000000..2a3b5c0
--- /dev/null
+++ b/src/internal/bytealg/count_s390x.s
@@ -0,0 +1,169 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// condition code masks
+#define EQ 8
+#define NE 7
+
+// register assignments
+#define R_ZERO R0
+#define R_VAL R1
+#define R_TMP R2
+#define R_PTR R3
+#define R_LEN R4
+#define R_CHAR R5
+#define R_RET R6
+#define R_ITER R7
+#define R_CNT R8
+#define R_MPTR R9
+
+// vector register assignments
+#define V_ZERO V0
+#define V_CHAR V1
+#define V_MASK V2
+#define V_VAL V3
+#define V_CNT V4
+
+// mask for trailing bytes in vector implementation
+GLOBL countbytemask<>(SB), RODATA, $16
+DATA countbytemask<>+0(SB)/8, $0x0101010101010101
+DATA countbytemask<>+8(SB)/8, $0x0101010101010101
+
+// func Count(b []byte, c byte) int
+TEXT ·Count(SB), NOSPLIT|NOFRAME, $0-40
+ LMG b+0(FP), R_PTR, R_LEN
+ MOVBZ c+24(FP), R_CHAR
+ MOVD $ret+32(FP), R_RET
+ BR countbytebody<>(SB)
+
+// func CountString(s string, c byte) int
+TEXT ·CountString(SB), NOSPLIT|NOFRAME, $0-32
+ LMG s+0(FP), R_PTR, R_LEN
+ MOVBZ c+16(FP), R_CHAR
+ MOVD $ret+24(FP), R_RET
+ BR countbytebody<>(SB)
+
+// input:
+// R_PTR = address of array of bytes
+// R_LEN = number of bytes in array
+// R_CHAR = byte value to count (zero extended to register width)
+// R_RET = address of return value
+TEXT countbytebody<>(SB), NOSPLIT|NOFRAME, $0-0
+ MOVD $internal∕cpu·S390X+const_offsetS390xHasVX(SB), R_TMP
+ MOVD $countbytemask<>(SB), R_MPTR
+ CGIJ $EQ, R_LEN, $0, ret0 // return if length is 0.
+ SRD $4, R_LEN, R_ITER // R_ITER is the number of 16-byte chunks
+ MOVBZ (R_TMP), R_TMP // load bool indicating support for vector facility
+ CGIJ $EQ, R_TMP, $0, novx // jump to scalar code if the vector facility is not available
+
+ // Start of vector code (have vector facility).
+ //
+ // Set R_LEN to be the length mod 16 minus 1 to use as an index for
+ // vector 'load with length' (VLL). It will be in the range [-1,14].
+ // Also replicate c across a 16-byte vector and initialize V_ZERO.
+ ANDW $0xf, R_LEN
+ VLVGB $0, R_CHAR, V_CHAR // V_CHAR = [16]byte{c, 0, ..., 0, 0}
+ VZERO V_ZERO // V_ZERO = [1]uint128{0}
+ ADDW $-1, R_LEN
+ VREPB $0, V_CHAR, V_CHAR // V_CHAR = [16]byte{c, c, ..., c, c}
+
+ // Jump to loop if we have more than 15 bytes to process.
+ CGIJ $NE, R_ITER, $0, vxchunks
+
+ // Load 1-15 bytes and corresponding mask.
+ // Note: only the low 32-bits of R_LEN are used for the index.
+ VLL R_LEN, (R_PTR), V_VAL
+ VLL R_LEN, (R_MPTR), V_MASK
+
+ // Compare each byte in input chunk against byte to be counted.
+ // Each byte element will be set to either 0 (no match) or 1 (match).
+ VCEQB V_CHAR, V_VAL, V_VAL // each byte will be either 0xff or 0x00
+ VN V_MASK, V_VAL, V_VAL // mask out most significant 7 bits
+
+ // Accumulate matched byte count in 128-bit integer value.
+ VSUMB V_VAL, V_ZERO, V_VAL // [16]byte{x0, x1, ..., x14, x15} → [4]uint32{x0+x1+x2+x3, ..., x12+x13+x14+x15}
+ VSUMQF V_VAL, V_ZERO, V_CNT // [4]uint32{x0, x1, x2, x3} → [1]uint128{x0+x1+x2+x3}
+
+ // Return rightmost (lowest) 64-bit part of accumulator.
+ VSTEG $1, V_CNT, (R_RET)
+ RET
+
+vxchunks:
+ // Load 0x01 into every byte element in the 16-byte mask vector.
+ VREPIB $1, V_MASK // V_MASK = [16]byte{1, 1, ..., 1, 1}
+ VZERO V_CNT // initial uint128 count of 0
+
+vxloop:
+ // Load input bytes in 16-byte chunks.
+ VL (R_PTR), V_VAL
+
+ // Compare each byte in input chunk against byte to be counted.
+ // Each byte element will be set to either 0 (no match) or 1 (match).
+ VCEQB V_CHAR, V_VAL, V_VAL // each byte will be either 0xff or 0x00
+ VN V_MASK, V_VAL, V_VAL // mask out most significant 7 bits
+
+ // Increment input string address.
+ MOVD $16(R_PTR), R_PTR
+
+ // Accumulate matched byte count in 128-bit integer value.
+ VSUMB V_VAL, V_ZERO, V_VAL // [16]byte{x0, x1, ..., x14, x15} → [4]uint32{x0+x1+x2+x3, ..., x12+x13+x14+x15}
+ VSUMQF V_VAL, V_ZERO, V_VAL // [4]uint32{x0, x1, x2, x3} → [1]uint128{x0+x1+x2+x3}
+ VAQ V_VAL, V_CNT, V_CNT // accumulate
+
+ // Repeat until all 16-byte chunks are done.
+ BRCTG R_ITER, vxloop
+
+ // Skip to end if there are no trailing bytes.
+ CIJ $EQ, R_LEN, $-1, vxret
+
+ // Load 1-15 bytes and corresponding mask.
+ // Note: only the low 32-bits of R_LEN are used for the index.
+ VLL R_LEN, (R_PTR), V_VAL
+ VLL R_LEN, (R_MPTR), V_MASK
+
+ // Compare each byte in input chunk against byte to be counted.
+ // Each byte element will be set to either 0 (no match) or 1 (match).
+ VCEQB V_CHAR, V_VAL, V_VAL
+ VN V_MASK, V_VAL, V_VAL
+
+ // Accumulate matched byte count in 128-bit integer value.
+ VSUMB V_VAL, V_ZERO, V_VAL // [16]byte{x0, x1, ..., x14, x15} → [4]uint32{x0+x1+x2+x3, ..., x12+x13+x14+x15}
+ VSUMQF V_VAL, V_ZERO, V_VAL // [4]uint32{x0, x1, x2, x3} → [1]uint128{x0+x1+x2+x3}
+ VAQ V_VAL, V_CNT, V_CNT // accumulate
+
+vxret:
+ // Return rightmost (lowest) 64-bit part of accumulator.
+ VSTEG $1, V_CNT, (R_RET)
+ RET
+
+novx:
+ // Start of non-vector code (the vector facility not available).
+ //
+ // Initialise counter and constant zero.
+ MOVD $0, R_CNT
+ MOVD $0, R_ZERO
+
+loop:
+ // Read 1-byte from input and compare.
+ // Note: avoid putting LOCGR in critical path.
+ MOVBZ (R_PTR), R_VAL
+ MOVD $1, R_TMP
+ MOVD $1(R_PTR), R_PTR
+ CMPW R_VAL, R_CHAR
+ LOCGR $NE, R_ZERO, R_TMP // select 0 if no match (1 if there is a match)
+ ADD R_TMP, R_CNT // accumulate 64-bit result
+
+ // Repeat until all bytes have been checked.
+ BRCTG R_LEN, loop
+
+ret:
+ MOVD R_CNT, (R_RET)
+ RET
+
+ret0:
+ MOVD $0, (R_RET)
+ RET
diff --git a/src/internal/bytealg/equal_386.s b/src/internal/bytealg/equal_386.s
new file mode 100644
index 0000000..58b3cbe
--- /dev/null
+++ b/src/internal/bytealg/equal_386.s
@@ -0,0 +1,130 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// memequal(a, b unsafe.Pointer, size uintptr) bool
+TEXT runtime·memequal(SB),NOSPLIT,$0-13
+ MOVL a+0(FP), SI
+ MOVL b+4(FP), DI
+ CMPL SI, DI
+ JEQ eq
+ MOVL size+8(FP), BX
+ LEAL ret+12(FP), AX
+ JMP memeqbody<>(SB)
+eq:
+ MOVB $1, ret+12(FP)
+ RET
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT,$0-9
+ MOVL a+0(FP), SI
+ MOVL b+4(FP), DI
+ CMPL SI, DI
+ JEQ eq
+ MOVL 4(DX), BX // compiler stores size at offset 4 in the closure
+ LEAL ret+8(FP), AX
+ JMP memeqbody<>(SB)
+eq:
+ MOVB $1, ret+8(FP)
+ RET
+
+// a in SI
+// b in DI
+// count in BX
+// address of result byte in AX
+TEXT memeqbody<>(SB),NOSPLIT,$0-0
+ CMPL BX, $4
+ JB small
+
+ // 64 bytes at a time using xmm registers
+hugeloop:
+ CMPL BX, $64
+ JB bigloop
+#ifdef GO386_softfloat
+ JMP bigloop
+#endif
+ MOVOU (SI), X0
+ MOVOU (DI), X1
+ MOVOU 16(SI), X2
+ MOVOU 16(DI), X3
+ MOVOU 32(SI), X4
+ MOVOU 32(DI), X5
+ MOVOU 48(SI), X6
+ MOVOU 48(DI), X7
+ PCMPEQB X1, X0
+ PCMPEQB X3, X2
+ PCMPEQB X5, X4
+ PCMPEQB X7, X6
+ PAND X2, X0
+ PAND X6, X4
+ PAND X4, X0
+ PMOVMSKB X0, DX
+ ADDL $64, SI
+ ADDL $64, DI
+ SUBL $64, BX
+ CMPL DX, $0xffff
+ JEQ hugeloop
+ MOVB $0, (AX)
+ RET
+
+ // 4 bytes at a time using 32-bit register
+bigloop:
+ CMPL BX, $4
+ JBE leftover
+ MOVL (SI), CX
+ MOVL (DI), DX
+ ADDL $4, SI
+ ADDL $4, DI
+ SUBL $4, BX
+ CMPL CX, DX
+ JEQ bigloop
+ MOVB $0, (AX)
+ RET
+
+ // remaining 0-4 bytes
+leftover:
+ MOVL -4(SI)(BX*1), CX
+ MOVL -4(DI)(BX*1), DX
+ CMPL CX, DX
+ SETEQ (AX)
+ RET
+
+small:
+ CMPL BX, $0
+ JEQ equal
+
+ LEAL 0(BX*8), CX
+ NEGL CX
+
+ MOVL SI, DX
+ CMPB DX, $0xfc
+ JA si_high
+
+ // load at SI won't cross a page boundary.
+ MOVL (SI), SI
+ JMP si_finish
+si_high:
+ // address ends in 111111xx. Load up to bytes we want, move to correct position.
+ MOVL -4(SI)(BX*1), SI
+ SHRL CX, SI
+si_finish:
+
+ // same for DI.
+ MOVL DI, DX
+ CMPB DX, $0xfc
+ JA di_high
+ MOVL (DI), DI
+ JMP di_finish
+di_high:
+ MOVL -4(DI)(BX*1), DI
+ SHRL CX, DI
+di_finish:
+
+ SUBL SI, DI
+ SHLL CX, DI
+equal:
+ SETEQ (AX)
+ RET
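
memeqbody above never needs a byte loop once the operands are at least 4 bytes long: it compares word-sized chunks and then re-compares the last word, which may overlap bytes already checked. The n < 4 case instead does a single partial load chosen so it cannot cross a page boundary. A hedged Go sketch of the overlapping-tail structure (a plain byte loop stands in for the page-boundary trick):

package main

import (
	"encoding/binary"
	"fmt"
)

// equalWords sketches the structure of memeqbody: word-at-a-time compares
// followed by one overlapping compare of the final word. The real n < 4
// path uses a masked, page-boundary-safe load instead of this byte loop.
func equalWords(a, b []byte) bool {
	if len(a) != len(b) {
		return false
	}
	n := len(a)
	if n < 4 {
		for i := 0; i < n; i++ {
			if a[i] != b[i] {
				return false
			}
		}
		return true
	}
	i := 0
	for n-i > 4 {
		if binary.LittleEndian.Uint32(a[i:]) != binary.LittleEndian.Uint32(b[i:]) {
			return false
		}
		i += 4
	}
	// The final word may overlap bytes that were already compared; that is
	// harmless, since re-comparing equal bytes cannot change the answer.
	return binary.LittleEndian.Uint32(a[n-4:]) == binary.LittleEndian.Uint32(b[n-4:])
}

func main() {
	fmt.Println(equalWords([]byte("hello, world"), []byte("hello, world"))) // true
	fmt.Println(equalWords([]byte("hello, world"), []byte("hello, World"))) // false
}
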
diff --git a/src/internal/bytealg/equal_amd64.s b/src/internal/bytealg/equal_amd64.s
new file mode 100644
index 0000000..d178a33
--- /dev/null
+++ b/src/internal/bytealg/equal_amd64.s
@@ -0,0 +1,162 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "asm_amd64.h"
+#include "textflag.h"
+
+// memequal(a, b unsafe.Pointer, size uintptr) bool
+TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT,$0-25
+ // AX = a (want in SI)
+ // BX = b (want in DI)
+ // CX = size (want in BX)
+ CMPQ AX, BX
+ JNE neq
+ MOVQ $1, AX // return 1
+ RET
+neq:
+ MOVQ AX, SI
+ MOVQ BX, DI
+ MOVQ CX, BX
+ JMP memeqbody<>(SB)
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT,$0-17
+ // AX = a (want in SI)
+ // BX = b (want in DI)
+ // 8(DX) = size (want in BX)
+ CMPQ AX, BX
+ JNE neq
+ MOVQ $1, AX // return 1
+ RET
+neq:
+ MOVQ AX, SI
+ MOVQ BX, DI
+ MOVQ 8(DX), BX // compiler stores size at offset 8 in the closure
+ JMP memeqbody<>(SB)
+
+// Input:
+// a in SI
+// b in DI
+// count in BX
+// Output:
+// result in AX
+TEXT memeqbody<>(SB),NOSPLIT,$0-0
+ CMPQ BX, $8
+ JB small
+ CMPQ BX, $64
+ JB bigloop
+#ifndef hasAVX2
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
+ JE hugeloop_avx2
+
+ // 64 bytes at a time using xmm registers
+hugeloop:
+ CMPQ BX, $64
+ JB bigloop
+ MOVOU (SI), X0
+ MOVOU (DI), X1
+ MOVOU 16(SI), X2
+ MOVOU 16(DI), X3
+ MOVOU 32(SI), X4
+ MOVOU 32(DI), X5
+ MOVOU 48(SI), X6
+ MOVOU 48(DI), X7
+ PCMPEQB X1, X0
+ PCMPEQB X3, X2
+ PCMPEQB X5, X4
+ PCMPEQB X7, X6
+ PAND X2, X0
+ PAND X6, X4
+ PAND X4, X0
+ PMOVMSKB X0, DX
+ ADDQ $64, SI
+ ADDQ $64, DI
+ SUBQ $64, BX
+ CMPL DX, $0xffff
+ JEQ hugeloop
+ XORQ AX, AX // return 0
+ RET
+#endif
+
+ // 64 bytes at a time using ymm registers
+hugeloop_avx2:
+ CMPQ BX, $64
+ JB bigloop_avx2
+ VMOVDQU (SI), Y0
+ VMOVDQU (DI), Y1
+ VMOVDQU 32(SI), Y2
+ VMOVDQU 32(DI), Y3
+ VPCMPEQB Y1, Y0, Y4
+ VPCMPEQB Y2, Y3, Y5
+ VPAND Y4, Y5, Y6
+ VPMOVMSKB Y6, DX
+ ADDQ $64, SI
+ ADDQ $64, DI
+ SUBQ $64, BX
+ CMPL DX, $0xffffffff
+ JEQ hugeloop_avx2
+ VZEROUPPER
+ XORQ AX, AX // return 0
+ RET
+
+bigloop_avx2:
+ VZEROUPPER
+
+ // 8 bytes at a time using 64-bit register
+bigloop:
+ CMPQ BX, $8
+ JBE leftover
+ MOVQ (SI), CX
+ MOVQ (DI), DX
+ ADDQ $8, SI
+ ADDQ $8, DI
+ SUBQ $8, BX
+ CMPQ CX, DX
+ JEQ bigloop
+ XORQ AX, AX // return 0
+ RET
+
+ // remaining 0-8 bytes
+leftover:
+ MOVQ -8(SI)(BX*1), CX
+ MOVQ -8(DI)(BX*1), DX
+ CMPQ CX, DX
+ SETEQ AX
+ RET
+
+small:
+ CMPQ BX, $0
+ JEQ equal
+
+ LEAQ 0(BX*8), CX
+ NEGQ CX
+
+ CMPB SI, $0xf8
+ JA si_high
+
+ // load at SI won't cross a page boundary.
+ MOVQ (SI), SI
+ JMP si_finish
+si_high:
+ // address ends in 11111xxx. Load up to bytes we want, move to correct position.
+ MOVQ -8(SI)(BX*1), SI
+ SHRQ CX, SI
+si_finish:
+
+ // same for DI.
+ CMPB DI, $0xf8
+ JA di_high
+ MOVQ (DI), DI
+ JMP di_finish
+di_high:
+ MOVQ -8(DI)(BX*1), DI
+ SHRQ CX, DI
+di_finish:
+
+ SUBQ SI, DI
+ SHLQ CX, DI
+equal:
+ SETEQ AX
+ RET
diff --git a/src/internal/bytealg/equal_arm.s b/src/internal/bytealg/equal_arm.s
new file mode 100644
index 0000000..a6c4369
--- /dev/null
+++ b/src/internal/bytealg/equal_arm.s
@@ -0,0 +1,91 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// memequal(a, b unsafe.Pointer, size uintptr) bool
+TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-13
+ MOVW a+0(FP), R0
+ MOVW b+4(FP), R2
+ CMP R0, R2
+ B.EQ eq
+ MOVW size+8(FP), R1
+ CMP $0, R1
+ B.EQ eq // short path to handle 0-byte case
+ MOVW $ret+12(FP), R7
+ B memeqbody<>(SB)
+eq:
+ MOVW $1, R0
+ MOVB R0, ret+12(FP)
+ RET
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT|NOFRAME,$0-9
+ MOVW a+0(FP), R0
+ MOVW b+4(FP), R2
+ CMP R0, R2
+ B.EQ eq
+ MOVW 4(R7), R1 // compiler stores size at offset 4 in the closure
+ CMP $0, R1
+ B.EQ eq // short path to handle 0-byte case
+ MOVW $ret+8(FP), R7
+ B memeqbody<>(SB)
+eq:
+ MOVW $1, R0
+ MOVB R0, ret+8(FP)
+ RET
+
+// Input:
+// R0: data of a
+// R1: length
+// R2: data of b
+// R7: points to return value
+//
+// On exit:
+// R4, R5 and R6 are clobbered
+TEXT memeqbody<>(SB),NOSPLIT|NOFRAME,$0-0
+ CMP $1, R1
+ B.EQ one // 1-byte special case for better performance
+
+ CMP $4, R1
+ ADD R0, R1 // R1 is the end of the range to compare
+ B.LT byte_loop // length < 4
+ AND $3, R0, R6
+ CMP $0, R6
+ B.NE byte_loop // unaligned a, use byte-wise compare (TODO: try to align a)
+ AND $3, R2, R6
+ CMP $0, R6
+ B.NE byte_loop // unaligned b, use byte-wise compare
+ AND $0xfffffffc, R1, R6
+ // length >= 4
+chunk4_loop:
+ MOVW.P 4(R0), R4
+ MOVW.P 4(R2), R5
+ CMP R4, R5
+ B.NE notequal
+ CMP R0, R6
+ B.NE chunk4_loop
+ CMP R0, R1
+ B.EQ equal // reached the end
+byte_loop:
+ MOVBU.P 1(R0), R4
+ MOVBU.P 1(R2), R5
+ CMP R4, R5
+ B.NE notequal
+ CMP R0, R1
+ B.NE byte_loop
+equal:
+ MOVW $1, R0
+ MOVB R0, (R7)
+ RET
+one:
+ MOVBU (R0), R4
+ MOVBU (R2), R5
+ CMP R4, R5
+ B.EQ equal
+notequal:
+ MOVW $0, R0
+ MOVB R0, (R7)
+ RET
diff --git a/src/internal/bytealg/equal_arm64.s b/src/internal/bytealg/equal_arm64.s
new file mode 100644
index 0000000..d3aabba
--- /dev/null
+++ b/src/internal/bytealg/equal_arm64.s
@@ -0,0 +1,121 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// memequal(a, b unsafe.Pointer, size uintptr) bool
+TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-25
+ // short path to handle 0-byte case
+ CBZ R2, equal
+ B memeqbody<>(SB)
+equal:
+ MOVD $1, R0
+ RET
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT,$0-17
+ CMP R0, R1
+ BEQ eq
+ MOVD 8(R26), R2 // compiler stores size at offset 8 in the closure
+ CBZ R2, eq
+ B memeqbody<>(SB)
+eq:
+ MOVD $1, R0
+ RET
+
+// input:
+// R0: pointer a
+// R1: pointer b
+// R2: data len
+// at return: result in R0
+TEXT memeqbody<>(SB),NOSPLIT,$0
+ CMP $1, R2
+ // handle 1-byte special case for better performance
+ BEQ one
+ CMP $16, R2
+ // handle specially if length < 16
+ BLO tail
+ BIC $0x3f, R2, R3
+ CBZ R3, chunk16
+ // work with 64-byte chunks
+ ADD R3, R0, R6 // end of chunks
+chunk64_loop:
+ VLD1.P (R0), [V0.D2, V1.D2, V2.D2, V3.D2]
+ VLD1.P (R1), [V4.D2, V5.D2, V6.D2, V7.D2]
+ VCMEQ V0.D2, V4.D2, V8.D2
+ VCMEQ V1.D2, V5.D2, V9.D2
+ VCMEQ V2.D2, V6.D2, V10.D2
+ VCMEQ V3.D2, V7.D2, V11.D2
+ VAND V8.B16, V9.B16, V8.B16
+ VAND V8.B16, V10.B16, V8.B16
+ VAND V8.B16, V11.B16, V8.B16
+ CMP R0, R6
+ VMOV V8.D[0], R4
+ VMOV V8.D[1], R5
+ CBZ R4, not_equal
+ CBZ R5, not_equal
+ BNE chunk64_loop
+ AND $0x3f, R2, R2
+ CBZ R2, equal
+chunk16:
+ // work with 16-byte chunks
+ BIC $0xf, R2, R3
+ CBZ R3, tail
+ ADD R3, R0, R6 // end of chunks
+chunk16_loop:
+ LDP.P 16(R0), (R4, R5)
+ LDP.P 16(R1), (R7, R9)
+ EOR R4, R7
+ CBNZ R7, not_equal
+ EOR R5, R9
+ CBNZ R9, not_equal
+ CMP R0, R6
+ BNE chunk16_loop
+ AND $0xf, R2, R2
+ CBZ R2, equal
+tail:
+ // special compare of tail with length < 16
+ TBZ $3, R2, lt_8
+ MOVD (R0), R4
+ MOVD (R1), R5
+ EOR R4, R5
+ CBNZ R5, not_equal
+ SUB $8, R2, R6 // offset of the last 8 bytes
+ MOVD (R0)(R6), R4
+ MOVD (R1)(R6), R5
+ EOR R4, R5
+ CBNZ R5, not_equal
+ B equal
+lt_8:
+ TBZ $2, R2, lt_4
+ MOVWU (R0), R4
+ MOVWU (R1), R5
+ EOR R4, R5
+ CBNZ R5, not_equal
+ SUB $4, R2, R6 // offset of the last 4 bytes
+ MOVWU (R0)(R6), R4
+ MOVWU (R1)(R6), R5
+ EOR R4, R5
+ CBNZ R5, not_equal
+ B equal
+lt_4:
+ TBZ $1, R2, lt_2
+ MOVHU.P 2(R0), R4
+ MOVHU.P 2(R1), R5
+ CMP R4, R5
+ BNE not_equal
+lt_2:
+ TBZ $0, R2, equal
+one:
+ MOVBU (R0), R4
+ MOVBU (R1), R5
+ CMP R4, R5
+ BNE not_equal
+equal:
+ MOVD $1, R0
+ RET
+not_equal:
+ MOVB ZR, R0
+ RET
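
The tail handling above covers a remainder of 1 to 15 bytes without a byte loop: each set bit of the remaining length triggers at most two overlapping fixed-width compares (8, then 4, then 2, then 1 bytes). A hedged pure-Go sketch of that decomposition (assuming equal-length inputs shorter than 16 bytes):

package main

import (
	"encoding/binary"
	"fmt"
)

// equalTail mirrors the TBZ-driven tail: test bit 3, 2, 1, 0 of the
// length and compare overlapping windows of the corresponding width.
func equalTail(a, b []byte) bool {
	n := len(a) // assumed: len(a) == len(b) and 0 <= n < 16
	if n&8 != 0 {
		return binary.LittleEndian.Uint64(a) == binary.LittleEndian.Uint64(b) &&
			binary.LittleEndian.Uint64(a[n-8:]) == binary.LittleEndian.Uint64(b[n-8:])
	}
	if n&4 != 0 {
		return binary.LittleEndian.Uint32(a) == binary.LittleEndian.Uint32(b) &&
			binary.LittleEndian.Uint32(a[n-4:]) == binary.LittleEndian.Uint32(b[n-4:])
	}
	if n&2 != 0 {
		if binary.LittleEndian.Uint16(a) != binary.LittleEndian.Uint16(b) {
			return false
		}
		a, b = a[2:], b[2:]
	}
	if len(a)&1 != 0 {
		return a[0] == b[0]
	}
	return true
}

func main() {
	a := []byte("0123456789abcd") // 14 bytes: one 8-byte and one overlapping 8-byte compare
	b := []byte("0123456789abcd")
	fmt.Println(equalTail(a, b)) // true
	b[13] = 'x'
	fmt.Println(equalTail(a, b)) // false
}
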
diff --git a/src/internal/bytealg/equal_generic.go b/src/internal/bytealg/equal_generic.go
new file mode 100644
index 0000000..59bdf8f
--- /dev/null
+++ b/src/internal/bytealg/equal_generic.go
@@ -0,0 +1,18 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytealg
+
+// Equal reports whether a and b
+// are the same length and contain the same bytes.
+// A nil argument is equivalent to an empty slice.
+//
+// Equal is equivalent to bytes.Equal.
+// It is provided here for convenience,
+// because some packages cannot depend on bytes.
+func Equal(a, b []byte) bool {
+ // Neither cmd/compile nor gccgo allocates for these string conversions.
+ // There is a test for this in package bytes.
+ return string(a) == string(b)
+}
diff --git a/src/internal/bytealg/equal_loong64.s b/src/internal/bytealg/equal_loong64.s
new file mode 100644
index 0000000..ba2a557
--- /dev/null
+++ b/src/internal/bytealg/equal_loong64.s
@@ -0,0 +1,53 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+#define REGCTXT R29
+
+// memequal(a, b unsafe.Pointer, size uintptr) bool
+TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25
+ MOVV a+0(FP), R4
+ MOVV b+8(FP), R5
+ BEQ R4, R5, eq
+ MOVV size+16(FP), R6
+ ADDV R4, R6, R7
+ PCALIGN $16
+loop:
+ BNE R4, R7, test
+ MOVV $1, R4
+ MOVB R4, ret+24(FP)
+ RET
+test:
+ MOVBU (R4), R9
+ ADDV $1, R4
+ MOVBU (R5), R10
+ ADDV $1, R5
+ BEQ R9, R10, loop
+
+ MOVB R0, ret+24(FP)
+ RET
+eq:
+ MOVV $1, R4
+ MOVB R4, ret+24(FP)
+ RET
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17
+ MOVV a+0(FP), R4
+ MOVV b+8(FP), R5
+ BEQ R4, R5, eq
+ MOVV 8(REGCTXT), R6 // compiler stores size at offset 8 in the closure
+ MOVV R4, 8(R3)
+ MOVV R5, 16(R3)
+ MOVV R6, 24(R3)
+ JAL runtime·memequal(SB)
+ MOVBU 32(R3), R4
+ MOVB R4, ret+16(FP)
+ RET
+eq:
+ MOVV $1, R4
+ MOVB R4, ret+16(FP)
+ RET
diff --git a/src/internal/bytealg/equal_mips64x.s b/src/internal/bytealg/equal_mips64x.s
new file mode 100644
index 0000000..d92f225
--- /dev/null
+++ b/src/internal/bytealg/equal_mips64x.s
@@ -0,0 +1,118 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips64 || mips64le
+
+#include "go_asm.h"
+#include "textflag.h"
+
+#define REGCTXT R22
+
+// memequal(a, b unsafe.Pointer, size uintptr) bool
+TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25
+ MOVV a+0(FP), R1
+ MOVV b+8(FP), R2
+ BEQ R1, R2, eq
+ MOVV size+16(FP), R3
+ ADDV R1, R3, R4
+
+ // chunk size is 16
+ SGTU $16, R3, R8
+ BEQ R0, R8, chunk_entry
+
+byte_loop:
+ BNE R1, R4, byte_test
+ MOVV $1, R1
+ MOVB R1, ret+24(FP)
+ RET
+byte_test:
+ MOVBU (R1), R6
+ ADDV $1, R1
+ MOVBU (R2), R7
+ ADDV $1, R2
+ BEQ R6, R7, byte_loop
+ JMP not_eq
+
+chunk_entry:
+ // make sure both a and b are aligned
+ OR R1, R2, R9
+ AND $0x7, R9
+ BNE R0, R9, byte_loop
+ JMP chunk_loop_1
+
+chunk_loop:
+ // chunk size is 16
+ SGTU $16, R3, R8
+ BNE R0, R8, chunk_tail_8
+chunk_loop_1:
+ MOVV (R1), R6
+ MOVV (R2), R7
+ BNE R6, R7, not_eq
+ MOVV 8(R1), R12
+ MOVV 8(R2), R13
+ ADDV $16, R1
+ ADDV $16, R2
+ SUBV $16, R3
+ BEQ R12, R13, chunk_loop
+ JMP not_eq
+
+chunk_tail_8:
+ AND $8, R3, R14
+ BEQ R0, R14, chunk_tail_4
+ MOVV (R1), R6
+ MOVV (R2), R7
+ BNE R6, R7, not_eq
+ ADDV $8, R1
+ ADDV $8, R2
+
+chunk_tail_4:
+ AND $4, R3, R14
+ BEQ R0, R14, chunk_tail_2
+ MOVWU (R1), R6
+ MOVWU (R2), R7
+ BNE R6, R7, not_eq
+ ADDV $4, R1
+ ADDV $4, R2
+
+chunk_tail_2:
+ AND $2, R3, R14
+ BEQ R0, R14, chunk_tail_1
+ MOVHU (R1), R6
+ MOVHU (R2), R7
+ BNE R6, R7, not_eq
+ ADDV $2, R1
+ ADDV $2, R2
+
+chunk_tail_1:
+ AND $1, R3, R14
+ BEQ R0, R14, eq
+ MOVBU (R1), R6
+ MOVBU (R2), R7
+ BEQ R6, R7, eq
+
+not_eq:
+ MOVB R0, ret+24(FP)
+ RET
+eq:
+ MOVV $1, R1
+ MOVB R1, ret+24(FP)
+ RET
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17
+ MOVV a+0(FP), R1
+ MOVV b+8(FP), R2
+ BEQ R1, R2, eq
+ MOVV 8(REGCTXT), R3 // compiler stores size at offset 8 in the closure
+ MOVV R1, 8(R29)
+ MOVV R2, 16(R29)
+ MOVV R3, 24(R29)
+ JAL runtime·memequal(SB)
+ MOVBU 32(R29), R1
+ MOVB R1, ret+16(FP)
+ RET
+eq:
+ MOVV $1, R1
+ MOVB R1, ret+16(FP)
+ RET
diff --git a/src/internal/bytealg/equal_mipsx.s b/src/internal/bytealg/equal_mipsx.s
new file mode 100644
index 0000000..4c46dd4
--- /dev/null
+++ b/src/internal/bytealg/equal_mipsx.s
@@ -0,0 +1,62 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips || mipsle
+
+#include "go_asm.h"
+#include "textflag.h"
+
+#define REGCTXT R22
+
+// memequal(a, b unsafe.Pointer, size uintptr) bool
+TEXT runtime·memequal(SB),NOSPLIT,$0-13
+ MOVW a+0(FP), R1
+ MOVW b+4(FP), R2
+ BEQ R1, R2, eq
+ MOVW size+8(FP), R3
+ ADDU R1, R3, R4
+loop:
+ BNE R1, R4, test
+ MOVW $1, R1
+ MOVB R1, ret+12(FP)
+ RET
+test:
+ MOVBU (R1), R6
+ ADDU $1, R1
+ MOVBU (R2), R7
+ ADDU $1, R2
+ BEQ R6, R7, loop
+
+ MOVB R0, ret+12(FP)
+ RET
+eq:
+ MOVW $1, R1
+ MOVB R1, ret+12(FP)
+ RET
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT,$0-9
+ MOVW a+0(FP), R1
+ MOVW b+4(FP), R2
+ BEQ R1, R2, eq
+ MOVW 4(REGCTXT), R3 // compiler stores size at offset 4 in the closure
+ ADDU R1, R3, R4
+loop:
+ BNE R1, R4, test
+ MOVW $1, R1
+ MOVB R1, ret+8(FP)
+ RET
+test:
+ MOVBU (R1), R6
+ ADDU $1, R1
+ MOVBU (R2), R7
+ ADDU $1, R2
+ BEQ R6, R7, loop
+
+ MOVB R0, ret+8(FP)
+ RET
+eq:
+ MOVW $1, R1
+ MOVB R1, ret+8(FP)
+ RET
diff --git a/src/internal/bytealg/equal_native.go b/src/internal/bytealg/equal_native.go
new file mode 100644
index 0000000..cf3a245
--- /dev/null
+++ b/src/internal/bytealg/equal_native.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytealg
+
+import "unsafe"
+
+// The declarations below generate ABI wrappers for functions
+// implemented in assembly in this package but declared in another
+// package.
+
+// The compiler generates calls to runtime.memequal and runtime.memequal_varlen.
+// In addition, the runtime calls runtime.memequal explicitly.
+// Those functions are implemented in this package.
+
+//go:linkname abigen_runtime_memequal runtime.memequal
+func abigen_runtime_memequal(a, b unsafe.Pointer, size uintptr) bool
+
+//go:linkname abigen_runtime_memequal_varlen runtime.memequal_varlen
+func abigen_runtime_memequal_varlen(a, b unsafe.Pointer) bool
diff --git a/src/internal/bytealg/equal_ppc64x.s b/src/internal/bytealg/equal_ppc64x.s
new file mode 100644
index 0000000..07dce80
--- /dev/null
+++ b/src/internal/bytealg/equal_ppc64x.s
@@ -0,0 +1,207 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// 4K (smallest case) page size offset mask for PPC64.
+#define PAGE_OFFSET 4095
+
+// Likewise, the BC opcode is hard to read, and no extended
+// mnemonics are offered for these forms.
+#define BGELR_CR6 BC 4, CR6LT, (LR)
+#define BEQLR BC 12, CR0EQ, (LR)
+
+// memequal(a, b unsafe.Pointer, size uintptr) bool
+TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-25
+ // R3 = a
+ // R4 = b
+ // R5 = size
+ BR memeqbody<>(SB)
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-17
+ // R3 = a
+ // R4 = b
+ CMP R3, R4
+ BEQ eq
+ MOVD 8(R11), R5 // compiler stores size at offset 8 in the closure
+ BR memeqbody<>(SB)
+eq:
+ MOVD $1, R3
+ RET
+
+// Do an efficient memequal for ppc64
+// R3 = s1
+// R4 = s2
+// R5 = len
+// On exit:
+// R3 = return value
+TEXT memeqbody<>(SB),NOSPLIT|NOFRAME,$0-0
+ MOVD R3, R8 // Move s1 into R8
+ ADD R5, R3, R9 // &s1[len(s1)]
+ ADD R5, R4, R10 // &s2[len(s2)]
+ MOVD $1, R11
+ CMP R5, $16 // Use GPR checks for len <= 16
+ BLE check0_16
+ MOVD $0, R3 // Assume no-match in case BGELR CR6 returns
+ CMP R5, $32 // Use overlapping VSX loads for len <= 32
+ BLE check17_32 // Do a pair of overlapping VSR compares
+ CMP R5, $64
+ BLE check33_64 // Hybrid check + overlap compare.
+
+setup64:
+ SRD $6, R5, R6 // number of 64 byte chunks to compare
+ MOVD R6, CTR
+ MOVD $16, R14 // index for VSX loads and stores
+ MOVD $32, R15
+ MOVD $48, R16
+ ANDCC $0x3F, R5, R5 // len%64==0?
+
+ PCALIGN $16
+loop64:
+ LXVD2X (R8+R0), V0
+ LXVD2X (R4+R0), V1
+ VCMPEQUBCC V0, V1, V2 // compare, setting CR6
+ BGELR_CR6
+ LXVD2X (R8+R14), V0
+ LXVD2X (R4+R14), V1
+ VCMPEQUBCC V0, V1, V2
+ BGELR_CR6
+ LXVD2X (R8+R15), V0
+ LXVD2X (R4+R15), V1
+ VCMPEQUBCC V0, V1, V2
+ BGELR_CR6
+ LXVD2X (R8+R16), V0
+ LXVD2X (R4+R16), V1
+ VCMPEQUBCC V0, V1, V2
+ BGELR_CR6
+ ADD $64,R8 // bump up to next 64
+ ADD $64,R4
+ BDNZ loop64
+
+ ISEL CR0EQ, R11, R3, R3 // If no tail, return 1, otherwise R3 remains 0.
+ BEQLR // return if no tail.
+
+ ADD $-64, R9, R8
+ ADD $-64, R10, R4
+ LXVD2X (R8+R0), V0
+ LXVD2X (R4+R0), V1
+ VCMPEQUBCC V0, V1, V2
+ BGELR_CR6
+ LXVD2X (R8+R14), V0
+ LXVD2X (R4+R14), V1
+ VCMPEQUBCC V0, V1, V2
+ BGELR_CR6
+ LXVD2X (R8+R15), V0
+ LXVD2X (R4+R15), V1
+ VCMPEQUBCC V0, V1, V2
+ BGELR_CR6
+ LXVD2X (R8+R16), V0
+ LXVD2X (R4+R16), V1
+ VCMPEQUBCC V0, V1, V2
+ ISEL CR6LT, R11, R0, R3
+ RET
+
+check33_64:
+ // Bytes 0-15
+ LXVD2X (R8+R0), V0
+ LXVD2X (R4+R0), V1
+ VCMPEQUBCC V0, V1, V2
+ BGELR_CR6
+ ADD $16, R8
+ ADD $16, R4
+
+ // Bytes 16-31
+ LXVD2X (R8+R0), V0
+ LXVD2X (R4+R0), V1
+ VCMPEQUBCC V0, V1, V2
+ BGELR_CR6
+
+ // A little tricky, but point R4,R8 to &sx[len-32],
+ // and reuse check17_32 to check the next 1-31 bytes (with some overlap)
+ ADD $-32, R9, R8
+ ADD $-32, R10, R4
+ // Fallthrough
+
+check17_32:
+ LXVD2X (R8+R0), V0
+ LXVD2X (R4+R0), V1
+ VCMPEQUBCC V0, V1, V2
+ ISEL CR6LT, R11, R0, R5
+
+ // Load sX[len(sX)-16:len(sX)] and compare.
+ ADD $-16, R9
+ ADD $-16, R10
+ LXVD2X (R9+R0), V0
+ LXVD2X (R10+R0), V1
+ VCMPEQUBCC V0, V1, V2
+ ISEL CR6LT, R5, R0, R3
+ RET
+
+check0_16:
+#ifdef GOPPC64_power10
+ SLD $56, R5, R7
+ LXVL R8, R7, V0
+ LXVL R4, R7, V1
+ VCMPEQUDCC V0, V1, V2
+ ISEL CR6LT, R11, R0, R3
+ RET
+#else
+ CMP R5, $8
+ BLT check0_7
+ // Load sX[0:7] and compare.
+ MOVD (R8), R6
+ MOVD (R4), R7
+ CMP R6, R7
+ ISEL CR0EQ, R11, R0, R5
+ // Load sX[len(sX)-8:len(sX)] and compare.
+ MOVD -8(R9), R6
+ MOVD -8(R10), R7
+ CMP R6, R7
+ ISEL CR0EQ, R5, R0, R3
+ RET
+
+check0_7:
+ CMP R5,$0
+ MOVD $1, R3
+ BEQLR // return if len == 0
+
+ // Check < 8B loads with a single compare, but select the load address
+ // such that it cannot cross a page boundary. Load a few bytes from the
+ // lower address if that does not cross the lower page. Or, load a few
+ // extra bytes from the higher addresses. And align those values
+ // consistently in register as either address may have differing
+ // alignment requirements.
+ ANDCC $PAGE_OFFSET, R8, R6 // &sX & PAGE_OFFSET
+ ANDCC $PAGE_OFFSET, R4, R9
+ SUBC R5, $8, R12 // 8-len
+ SLD $3, R12, R14 // (8-len)*8
+ CMPU R6, R12, CR1 // Enough bytes lower in the page to load lower?
+ CMPU R9, R12, CR0
+ SUB R12, R8, R6 // compute lower load address
+ SUB R12, R4, R9
+ ISEL CR1LT, R8, R6, R8 // R8 = R6 < 0 ? R8 (&s1) : R6 (&s1 - (8-len))
+ ISEL CR0LT, R4, R9, R4 // Similar for s2
+ MOVD (R8), R15
+ MOVD (R4), R16
+ SLD R14, R15, R7
+ SLD R14, R16, R17
+ SRD R14, R7, R7 // Clear the upper (8-len) bytes (with 2 shifts)
+ SRD R14, R17, R17
+ SRD R14, R15, R6 // Clear the lower (8-len) bytes
+ SRD R14, R16, R9
+#ifdef GOARCH_ppc64le
+ ISEL CR1LT, R7, R6, R8 // Choose the correct len bytes to compare based on alignment
+ ISEL CR0LT, R17, R9, R4
+#else
+ ISEL CR1LT, R6, R7, R8
+ ISEL CR0LT, R9, R17, R4
+#endif
+ CMP R4, R8
+ ISEL CR0EQ, R11, R0, R3
+ RET
+#endif // tail processing if !defined(GOPPC64_power10)
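
The pre-POWER10 check0_7 path wants one 8-byte load for a 1..7-byte operand without ever faulting, so it loads from p-(8-len) whenever that address is still inside p's page and from p otherwise, then shifts the unwanted bytes away. A tiny hedged sketch of just the page-boundary check (the constant and helper name are illustrative):

package main

import "fmt"

// pageOffset matches the PAGE_OFFSET mask above (4KiB pages, the
// smallest case on PPC64).
const pageOffset = 4095

// canLoadBelow reports whether an 8-byte load starting (8-n) bytes below
// p stays within p's page, so it cannot fault even though it begins
// before the operand. n is the operand length, 1..7.
func canLoadBelow(p, n uintptr) bool {
	return p&pageOffset >= 8-n
}

func main() {
	fmt.Println(canLoadBelow(0x1003, 7)) // true: 0x1002 is still in p's page
	fmt.Println(canLoadBelow(0x1000, 3)) // false: 0x0ffb is in the previous page
}
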
diff --git a/src/internal/bytealg/equal_riscv64.s b/src/internal/bytealg/equal_riscv64.s
new file mode 100644
index 0000000..503aac5
--- /dev/null
+++ b/src/internal/bytealg/equal_riscv64.s
@@ -0,0 +1,126 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+#define CTXT S10
+
+// func memequal(a, b unsafe.Pointer, size uintptr) bool
+TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-25
+ // X10 = a_base
+ // X11 = b_base
+ // X12 = size
+ JMP memequal<>(SB)
+
+// func memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-17
+ MOV 8(CTXT), X12 // compiler stores size at offset 8 in the closure
+ // X10 = a_base
+ // X11 = b_base
+ JMP memequal<>(SB)
+
+// On entry X10 and X11 contain pointers, X12 contains length.
+// For non-regabi X13 contains address for return value.
+// For regabi return value in X10.
+TEXT memequal<>(SB),NOSPLIT|NOFRAME,$0
+ BEQ X10, X11, eq
+
+ MOV $32, X23
+ BLT X12, X23, loop4_check
+
+ // Check alignment - if alignment differs we have to do one byte at a time.
+ AND $7, X10, X9
+ AND $7, X11, X19
+ BNE X9, X19, loop4_check
+ BEQZ X9, loop32_check
+
+ // Check one byte at a time until we reach 8 byte alignment.
+ SUB X9, X0, X9
+ ADD $8, X9, X9
+ SUB X9, X12, X12
+align:
+ ADD $-1, X9
+ MOVBU 0(X10), X19
+ MOVBU 0(X11), X20
+ BNE X19, X20, not_eq
+ ADD $1, X10
+ ADD $1, X11
+ BNEZ X9, align
+
+loop32_check:
+ MOV $32, X9
+ BLT X12, X9, loop16_check
+loop32:
+ MOV 0(X10), X19
+ MOV 0(X11), X20
+ MOV 8(X10), X21
+ MOV 8(X11), X22
+ BNE X19, X20, not_eq
+ BNE X21, X22, not_eq
+ MOV 16(X10), X14
+ MOV 16(X11), X15
+ MOV 24(X10), X16
+ MOV 24(X11), X17
+ BNE X14, X15, not_eq
+ BNE X16, X17, not_eq
+ ADD $32, X10
+ ADD $32, X11
+ ADD $-32, X12
+ BGE X12, X9, loop32
+ BEQZ X12, eq
+
+loop16_check:
+ MOV $16, X23
+ BLT X12, X23, loop4_check
+loop16:
+ MOV 0(X10), X19
+ MOV 0(X11), X20
+ MOV 8(X10), X21
+ MOV 8(X11), X22
+ BNE X19, X20, not_eq
+ BNE X21, X22, not_eq
+ ADD $16, X10
+ ADD $16, X11
+ ADD $-16, X12
+ BGE X12, X23, loop16
+ BEQZ X12, eq
+
+loop4_check:
+ MOV $4, X23
+ BLT X12, X23, loop1
+loop4:
+ MOVBU 0(X10), X19
+ MOVBU 0(X11), X20
+ MOVBU 1(X10), X21
+ MOVBU 1(X11), X22
+ BNE X19, X20, not_eq
+ BNE X21, X22, not_eq
+ MOVBU 2(X10), X14
+ MOVBU 2(X11), X15
+ MOVBU 3(X10), X16
+ MOVBU 3(X11), X17
+ BNE X14, X15, not_eq
+ BNE X16, X17, not_eq
+ ADD $4, X10
+ ADD $4, X11
+ ADD $-4, X12
+ BGE X12, X23, loop4
+
+loop1:
+ BEQZ X12, eq
+ MOVBU 0(X10), X19
+ MOVBU 0(X11), X20
+ BNE X19, X20, not_eq
+ ADD $1, X10
+ ADD $1, X11
+ ADD $-1, X12
+ JMP loop1
+
+not_eq:
+ MOVB ZERO, X10
+ RET
+eq:
+ MOV $1, X10
+ RET
diff --git a/src/internal/bytealg/equal_s390x.s b/src/internal/bytealg/equal_s390x.s
new file mode 100644
index 0000000..67f814d
--- /dev/null
+++ b/src/internal/bytealg/equal_s390x.s
@@ -0,0 +1,92 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// memequal(a, b unsafe.Pointer, size uintptr) bool
+TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25
+ MOVD a+0(FP), R3
+ MOVD b+8(FP), R5
+ MOVD size+16(FP), R6
+ LA ret+24(FP), R7
+ BR memeqbody<>(SB)
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT|NOFRAME,$0-17
+ MOVD a+0(FP), R3
+ MOVD b+8(FP), R5
+ MOVD 8(R12), R6 // compiler stores size at offset 8 in the closure
+ LA ret+16(FP), R7
+ BR memeqbody<>(SB)
+
+// input:
+// R3 = a
+// R5 = b
+// R6 = len
+// R7 = address of output byte (stores 0 or 1 here)
+// a and b have the same length
+TEXT memeqbody<>(SB),NOSPLIT|NOFRAME,$0-0
+ CMPBEQ R3, R5, equal
+loop:
+ CMPBEQ R6, $0, equal
+ CMPBLT R6, $32, tiny
+ CMP R6, $256
+ BLT tail
+ CLC $256, 0(R3), 0(R5)
+ BNE notequal
+ SUB $256, R6
+ LA 256(R3), R3
+ LA 256(R5), R5
+ BR loop
+tail:
+ SUB $1, R6, R8
+ EXRL $memeqbodyclc<>(SB), R8
+ BEQ equal
+notequal:
+ MOVB $0, 0(R7)
+ RET
+equal:
+ MOVB $1, 0(R7)
+ RET
+tiny:
+ MOVD $0, R2
+ CMPBLT R6, $16, lt16
+ MOVD 0(R3), R8
+ MOVD 0(R5), R9
+ CMPBNE R8, R9, notequal
+ MOVD 8(R3), R8
+ MOVD 8(R5), R9
+ CMPBNE R8, R9, notequal
+ LA 16(R2), R2
+ SUB $16, R6
+lt16:
+ CMPBLT R6, $8, lt8
+ MOVD 0(R3)(R2*1), R8
+ MOVD 0(R5)(R2*1), R9
+ CMPBNE R8, R9, notequal
+ LA 8(R2), R2
+ SUB $8, R6
+lt8:
+ CMPBLT R6, $4, lt4
+ MOVWZ 0(R3)(R2*1), R8
+ MOVWZ 0(R5)(R2*1), R9
+ CMPBNE R8, R9, notequal
+ LA 4(R2), R2
+ SUB $4, R6
+lt4:
+#define CHECK(n) \
+ CMPBEQ R6, $n, equal \
+ MOVB n(R3)(R2*1), R8 \
+ MOVB n(R5)(R2*1), R9 \
+ CMPBNE R8, R9, notequal
+ CHECK(0)
+ CHECK(1)
+ CHECK(2)
+ CHECK(3)
+ BR equal
+
+TEXT memeqbodyclc<>(SB),NOSPLIT|NOFRAME,$0-0
+ CLC $1, 0(R3), 0(R5)
+ RET
diff --git a/src/internal/bytealg/equal_wasm.s b/src/internal/bytealg/equal_wasm.s
new file mode 100644
index 0000000..a2b76c1
--- /dev/null
+++ b/src/internal/bytealg/equal_wasm.s
@@ -0,0 +1,77 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// memequal(p, q unsafe.Pointer, size uintptr) bool
+TEXT runtime·memequal(SB), NOSPLIT, $0-25
+ Get SP
+ I64Load a+0(FP)
+ I64Load b+8(FP)
+ I64Load size+16(FP)
+ Call memeqbody<>(SB)
+ I64Store8 ret+24(FP)
+ RET
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB), NOSPLIT, $0-17
+ Get SP
+ I64Load a+0(FP)
+ I64Load b+8(FP)
+ I64Load 8(CTXT) // compiler stores size at offset 8 in the closure
+ Call memeqbody<>(SB)
+ I64Store8 ret+16(FP)
+ RET
+
+// params: a, b, len
+// ret: 0/1
+TEXT memeqbody<>(SB), NOSPLIT, $0-0
+ Get R0
+ Get R1
+ I64Eq
+ If
+ I64Const $1
+ Return
+ End
+
+loop:
+ Loop
+ Get R2
+ I64Eqz
+ If
+ I64Const $1
+ Return
+ End
+
+ Get R0
+ I32WrapI64
+ I64Load8U $0
+ Get R1
+ I32WrapI64
+ I64Load8U $0
+ I64Ne
+ If
+ I64Const $0
+ Return
+ End
+
+ Get R0
+ I64Const $1
+ I64Add
+ Set R0
+
+ Get R1
+ I64Const $1
+ I64Add
+ Set R1
+
+ Get R2
+ I64Const $1
+ I64Sub
+ Set R2
+
+ Br loop
+ End
+ UNDEF
diff --git a/src/internal/bytealg/index_amd64.go b/src/internal/bytealg/index_amd64.go
new file mode 100644
index 0000000..c7a1941
--- /dev/null
+++ b/src/internal/bytealg/index_amd64.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytealg
+
+import "internal/cpu"
+
+const MaxBruteForce = 64
+
+func init() {
+ if cpu.X86.HasAVX2 {
+ MaxLen = 63
+ } else {
+ MaxLen = 31
+ }
+}
+
+// Cutover reports the number of failures of IndexByte we should tolerate
+// before switching over to Index.
+// n is the number of bytes processed so far.
+// See the bytes.Index implementation for details.
+func Cutover(n int) int {
+ // 1 error per 8 characters, plus a few slop to start.
+ return (n + 16) / 8
+}
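
Cutover encodes the policy "tolerate roughly one failed IndexByte candidate per 8 bytes scanned, plus a little slack at the start". A hedged sketch of how a caller in the spirit of bytes.Index or strings.Index might consult such a cutover (the local cutover mirrors the formula above; the strings.Index fallback merely stands in for the switch to the assembly Index):

package main

import (
	"fmt"
	"strings"
)

// cutover mirrors the amd64 formula above: one tolerated failure per 8
// bytes scanned, plus a little slack at the start.
func cutover(n int) int { return (n + 16) / 8 }

// indexSketch scans with IndexByte and falls back once too many
// candidate positions have failed to match the full needle.
func indexSketch(s, sep string) int {
	if len(sep) < 2 || len(sep) > len(s) {
		return strings.Index(s, sep)
	}
	fails, i := 0, 0
	t := len(s) - len(sep) + 1
	for i < t {
		if s[i] != sep[0] {
			o := strings.IndexByte(s[i:t], sep[0])
			if o < 0 {
				return -1
			}
			i += o
		}
		if s[i:i+len(sep)] == sep {
			return i
		}
		fails++
		i++
		if fails >= cutover(i) {
			j := strings.Index(s[i:], sep) // stand-in for the switch-over
			if j < 0 {
				return -1
			}
			return i + j
		}
	}
	return -1
}

func main() {
	fmt.Println(indexSketch("abababababababry", "abry")) // 12
}
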
diff --git a/src/internal/bytealg/index_amd64.s b/src/internal/bytealg/index_amd64.s
new file mode 100644
index 0000000..0431491
--- /dev/null
+++ b/src/internal/bytealg/index_amd64.s
@@ -0,0 +1,276 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Index(SB),NOSPLIT,$0-56
+ MOVQ a_base+0(FP), DI
+ MOVQ a_len+8(FP), DX
+ MOVQ b_base+24(FP), R8
+ MOVQ b_len+32(FP), AX
+ MOVQ DI, R10
+ LEAQ ret+48(FP), R11
+ JMP indexbody<>(SB)
+
+TEXT ·IndexString(SB),NOSPLIT,$0-40
+ MOVQ a_base+0(FP), DI
+ MOVQ a_len+8(FP), DX
+ MOVQ b_base+16(FP), R8
+ MOVQ b_len+24(FP), AX
+ MOVQ DI, R10
+ LEAQ ret+32(FP), R11
+ JMP indexbody<>(SB)
+
+// AX: length of string, that we are searching for
+// DX: length of string, in which we are searching
+// DI: pointer to string, in which we are searching
+// R8: pointer to string, that we are searching for
+// R11: address, where to put return value
+// Note: We want len in DX and AX, because PCMPESTRI implicitly consumes them
+TEXT indexbody<>(SB),NOSPLIT,$0
+ CMPQ AX, DX
+ JA fail
+ CMPQ DX, $16
+ JAE sse42
+no_sse42:
+ CMPQ AX, $2
+ JA _3_or_more
+ MOVW (R8), R8
+ LEAQ -1(DI)(DX*1), DX
+loop2:
+ MOVW (DI), SI
+ CMPW SI,R8
+ JZ success
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop2
+ JMP fail
+_3_or_more:
+ CMPQ AX, $3
+ JA _4_or_more
+ MOVW 1(R8), BX
+ MOVW (R8), R8
+ LEAQ -2(DI)(DX*1), DX
+loop3:
+ MOVW (DI), SI
+ CMPW SI,R8
+ JZ partial_success3
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop3
+ JMP fail
+partial_success3:
+ MOVW 1(DI), SI
+ CMPW SI,BX
+ JZ success
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop3
+ JMP fail
+_4_or_more:
+ CMPQ AX, $4
+ JA _5_or_more
+ MOVL (R8), R8
+ LEAQ -3(DI)(DX*1), DX
+loop4:
+ MOVL (DI), SI
+ CMPL SI,R8
+ JZ success
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop4
+ JMP fail
+_5_or_more:
+ CMPQ AX, $7
+ JA _8_or_more
+ LEAQ 1(DI)(DX*1), DX
+ SUBQ AX, DX
+ MOVL -4(R8)(AX*1), BX
+ MOVL (R8), R8
+loop5to7:
+ MOVL (DI), SI
+ CMPL SI,R8
+ JZ partial_success5to7
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop5to7
+ JMP fail
+partial_success5to7:
+ MOVL -4(AX)(DI*1), SI
+ CMPL SI,BX
+ JZ success
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop5to7
+ JMP fail
+_8_or_more:
+ CMPQ AX, $8
+ JA _9_or_more
+ MOVQ (R8), R8
+ LEAQ -7(DI)(DX*1), DX
+loop8:
+ MOVQ (DI), SI
+ CMPQ SI,R8
+ JZ success
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop8
+ JMP fail
+_9_or_more:
+ CMPQ AX, $15
+ JA _16_or_more
+ LEAQ 1(DI)(DX*1), DX
+ SUBQ AX, DX
+ MOVQ -8(R8)(AX*1), BX
+ MOVQ (R8), R8
+loop9to15:
+ MOVQ (DI), SI
+ CMPQ SI,R8
+ JZ partial_success9to15
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop9to15
+ JMP fail
+partial_success9to15:
+ MOVQ -8(AX)(DI*1), SI
+ CMPQ SI,BX
+ JZ success
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop9to15
+ JMP fail
+_16_or_more:
+ CMPQ AX, $16
+ JA _17_or_more
+ MOVOU (R8), X1
+ LEAQ -15(DI)(DX*1), DX
+loop16:
+ MOVOU (DI), X2
+ PCMPEQB X1, X2
+ PMOVMSKB X2, SI
+ CMPQ SI, $0xffff
+ JE success
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop16
+ JMP fail
+_17_or_more:
+ CMPQ AX, $31
+ JA _32_or_more
+ LEAQ 1(DI)(DX*1), DX
+ SUBQ AX, DX
+ MOVOU -16(R8)(AX*1), X0
+ MOVOU (R8), X1
+loop17to31:
+ MOVOU (DI), X2
+ PCMPEQB X1,X2
+ PMOVMSKB X2, SI
+ CMPQ SI, $0xffff
+ JE partial_success17to31
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop17to31
+ JMP fail
+partial_success17to31:
+ MOVOU -16(AX)(DI*1), X3
+ PCMPEQB X0, X3
+ PMOVMSKB X3, SI
+ CMPQ SI, $0xffff
+ JE success
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop17to31
+ JMP fail
+// We can get here only when AVX2 is enabled and cutoff for indexShortStr is set to 63
+// So no need to check cpuid
+_32_or_more:
+ CMPQ AX, $32
+ JA _33_to_63
+ VMOVDQU (R8), Y1
+ LEAQ -31(DI)(DX*1), DX
+loop32:
+ VMOVDQU (DI), Y2
+ VPCMPEQB Y1, Y2, Y3
+ VPMOVMSKB Y3, SI
+ CMPL SI, $0xffffffff
+ JE success_avx2
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop32
+ JMP fail_avx2
+_33_to_63:
+ LEAQ 1(DI)(DX*1), DX
+ SUBQ AX, DX
+ VMOVDQU -32(R8)(AX*1), Y0
+ VMOVDQU (R8), Y1
+loop33to63:
+ VMOVDQU (DI), Y2
+ VPCMPEQB Y1, Y2, Y3
+ VPMOVMSKB Y3, SI
+ CMPL SI, $0xffffffff
+ JE partial_success33to63
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop33to63
+ JMP fail_avx2
+partial_success33to63:
+ VMOVDQU -32(AX)(DI*1), Y3
+ VPCMPEQB Y0, Y3, Y4
+ VPMOVMSKB Y4, SI
+ CMPL SI, $0xffffffff
+ JE success_avx2
+ ADDQ $1,DI
+ CMPQ DI,DX
+ JB loop33to63
+fail_avx2:
+ VZEROUPPER
+fail:
+ MOVQ $-1, (R11)
+ RET
+success_avx2:
+ VZEROUPPER
+ JMP success
+sse42:
+#ifndef hasSSE42
+ CMPB internal∕cpu·X86+const_offsetX86HasSSE42(SB), $1
+ JNE no_sse42
+#endif
+ CMPQ AX, $12
+ // PCMPESTRI is slower than normal compare,
+ // so using it makes sense only if we advance 4+ bytes per compare
+ // This value was determined experimentally and is the ~same
+ // on Nehalem (first with SSE42) and Haswell.
+ JAE _9_or_more
+ LEAQ 16(R8), SI
+ TESTW $0xff0, SI
+ JEQ no_sse42
+ MOVOU (R8), X1
+ LEAQ -15(DI)(DX*1), SI
+ MOVQ $16, R9
+ SUBQ AX, R9 // We advance by 16-len(sep) each iteration, so precalculate it into R9
+loop_sse42:
+ // 0x0c means: unsigned byte compare (bits 0,1 are 00)
+ // for equality (bits 2,3 are 11)
+ // result is not masked or inverted (bits 4,5 are 00)
+ // and corresponds to first matching byte (bit 6 is 0)
+ PCMPESTRI $0x0c, (DI), X1
+ // CX == 16 means no match,
+ // CX > R9 means partial match at the end of the string,
+ // otherwise sep is at offset CX from X1 start
+ CMPQ CX, R9
+ JBE sse42_success
+ ADDQ R9, DI
+ CMPQ DI, SI
+ JB loop_sse42
+ PCMPESTRI $0x0c, -1(SI), X1
+ CMPQ CX, R9
+ JA fail
+ LEAQ -1(SI), DI
+sse42_success:
+ ADDQ CX, DI
+success:
+ SUBQ R10, DI
+ MOVQ DI, (R11)
+ RET
diff --git a/src/internal/bytealg/index_arm64.go b/src/internal/bytealg/index_arm64.go
new file mode 100644
index 0000000..e87c109
--- /dev/null
+++ b/src/internal/bytealg/index_arm64.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytealg
+
+// Empirical data shows that using Index can get better
+// performance when len(s) <= 16.
+const MaxBruteForce = 16
+
+func init() {
+ // Optimize cases where the length of the substring is less than 32 bytes
+ MaxLen = 32
+}
+
+// Cutover reports the number of failures of IndexByte we should tolerate
+// before switching over to Index.
+// n is the number of bytes processed so far.
+// See the bytes.Index implementation for details.
+func Cutover(n int) int {
+ // 1 error per 16 characters, plus a few slop to start.
+ return 4 + n>>4
+}
diff --git a/src/internal/bytealg/index_arm64.s b/src/internal/bytealg/index_arm64.s
new file mode 100644
index 0000000..3a551a7
--- /dev/null
+++ b/src/internal/bytealg/index_arm64.s
@@ -0,0 +1,206 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Index(SB),NOSPLIT,$0-56
+ MOVD a_base+0(FP), R0
+ MOVD a_len+8(FP), R1
+ MOVD b_base+24(FP), R2
+ MOVD b_len+32(FP), R3
+ MOVD $ret+48(FP), R9
+ B indexbody<>(SB)
+
+TEXT ·IndexString(SB),NOSPLIT,$0-40
+ MOVD a_base+0(FP), R0
+ MOVD a_len+8(FP), R1
+ MOVD b_base+16(FP), R2
+ MOVD b_len+24(FP), R3
+ MOVD $ret+32(FP), R9
+ B indexbody<>(SB)
+
+// input:
+// R0: haystack
+// R1: length of haystack
+// R2: needle
+// R3: length of needle (2 <= len <= 32)
+// R9: address to put result
+TEXT indexbody<>(SB),NOSPLIT,$0-56
+ // main idea is to load 'sep' into separate register(s)
+ // to avoid repeatedly re-loading it
+ // for subsequent substring comparisons
+ SUB R3, R1, R4
+ // R4 contains the start of last substring for comparison
+ ADD R0, R4, R4
+ ADD $1, R0, R8
+
+ CMP $8, R3
+ BHI greater_8
+ TBZ $3, R3, len_2_7
+len_8:
+ // R5 contains 8-byte of sep
+ MOVD (R2), R5
+loop_8:
+ // R6 contains substring for comparison
+ CMP R4, R0
+ BHI not_found
+ MOVD.P 1(R0), R6
+ CMP R5, R6
+ BNE loop_8
+ B found
+len_2_7:
+ TBZ $2, R3, len_2_3
+ TBZ $1, R3, len_4_5
+ TBZ $0, R3, len_6
+len_7:
+ // R5 and R6 contain the 7 bytes of sep
+ MOVWU (R2), R5
+ // 1-byte overlap with R5
+ MOVWU 3(R2), R6
+loop_7:
+ CMP R4, R0
+ BHI not_found
+ MOVWU.P 1(R0), R3
+ CMP R5, R3
+ BNE loop_7
+ MOVWU 2(R0), R3
+ CMP R6, R3
+ BNE loop_7
+ B found
+len_6:
+ // R5 and R6 contain the 6 bytes of sep
+ MOVWU (R2), R5
+ MOVHU 4(R2), R6
+loop_6:
+ CMP R4, R0
+ BHI not_found
+ MOVWU.P 1(R0), R3
+ CMP R5, R3
+ BNE loop_6
+ MOVHU 3(R0), R3
+ CMP R6, R3
+ BNE loop_6
+ B found
+len_4_5:
+ TBZ $0, R3, len_4
+len_5:
+ // R5 and R7 contain the 5 bytes of sep
+ MOVWU (R2), R5
+ MOVBU 4(R2), R7
+loop_5:
+ CMP R4, R0
+ BHI not_found
+ MOVWU.P 1(R0), R3
+ CMP R5, R3
+ BNE loop_5
+ MOVBU 3(R0), R3
+ CMP R7, R3
+ BNE loop_5
+ B found
+len_4:
+ // R5 contains the 4 bytes of sep
+ MOVWU (R2), R5
+loop_4:
+ CMP R4, R0
+ BHI not_found
+ MOVWU.P 1(R0), R6
+ CMP R5, R6
+ BNE loop_4
+ B found
+len_2_3:
+ TBZ $0, R3, len_2
+len_3:
+ // R6 and R7 contain the 3 bytes of sep
+ MOVHU (R2), R6
+ MOVBU 2(R2), R7
+loop_3:
+ CMP R4, R0
+ BHI not_found
+ MOVHU.P 1(R0), R3
+ CMP R6, R3
+ BNE loop_3
+ MOVBU 1(R0), R3
+ CMP R7, R3
+ BNE loop_3
+ B found
+len_2:
+ // R5 contains the 2 bytes of sep
+ MOVHU (R2), R5
+loop_2:
+ CMP R4, R0
+ BHI not_found
+ MOVHU.P 1(R0), R6
+ CMP R5, R6
+ BNE loop_2
+found:
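+ // R0 was post-incremented one past the matching start byte, and R8
+ // holds base+1, so R0-R8 is the byte index of the match.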
+ SUB R8, R0, R0
+ MOVD R0, (R9)
+ RET
+not_found:
+ MOVD $-1, R0
+ MOVD R0, (R9)
+ RET
+greater_8:
+ SUB $9, R3, R11 // len(sep) - 9, offset of R0 for last 8 bytes
+ CMP $16, R3
+ BHI greater_16
+len_9_16:
+ MOVD.P 8(R2), R5 // R5 contains the first 8-byte of sep
+ SUB $16, R3, R7 // len(sep) - 16, offset of R2 for last 8 bytes
+ MOVD (R2)(R7), R6 // R6 contains the last 8-byte of sep
+loop_9_16:
+ // search the first 8 bytes first
+ CMP R4, R0
+ BHI not_found
+ MOVD.P 1(R0), R7
+ CMP R5, R7
+ BNE loop_9_16
+ MOVD (R0)(R11), R7
+ CMP R6, R7 // compare the last 8 bytes
+ BNE loop_9_16
+ B found
+greater_16:
+ CMP $24, R3
+ BHI len_25_32
+len_17_24:
+ LDP.P 16(R2), (R5, R6) // R5 and R6 contain the first 16-byte of sep
+ SUB $24, R3, R10 // len(sep) - 24
+ MOVD (R2)(R10), R7 // R7 contains the last 8-byte of sep
+loop_17_24:
+ // search the first 16 bytes first
+ CMP R4, R0
+ BHI not_found
+ MOVD.P 1(R0), R10
+ CMP R5, R10
+ BNE loop_17_24
+ MOVD 7(R0), R10
+ CMP R6, R10
+ BNE loop_17_24
+ MOVD (R0)(R11), R10
+ CMP R7, R10 // compare the last 8 bytes
+ BNE loop_17_24
+ B found
+len_25_32:
+ LDP.P 16(R2), (R5, R6)
+ MOVD.P 8(R2), R7 // R5, R6 and R7 contain the first 24-byte of sep
+ SUB $32, R3, R12 // len(sep) - 32
+ MOVD (R2)(R12), R10 // R10 contains the last 8-byte of sep
+loop_25_32:
+ // search the first 24 bytes first
+ CMP R4, R0
+ BHI not_found
+ MOVD.P 1(R0), R12
+ CMP R5, R12
+ BNE loop_25_32
+ MOVD 7(R0), R12
+ CMP R6, R12
+ BNE loop_25_32
+ MOVD 15(R0), R12
+ CMP R7, R12
+ BNE loop_25_32
+ MOVD (R0)(R11), R12
+ CMP R10, R12 // compare the last 8 bytes
+ BNE loop_25_32
+ B found
diff --git a/src/internal/bytealg/index_generic.go b/src/internal/bytealg/index_generic.go
new file mode 100644
index 0000000..a59e329
--- /dev/null
+++ b/src/internal/bytealg/index_generic.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 && !arm64 && !s390x && !ppc64le && !ppc64
+
+package bytealg
+
+const MaxBruteForce = 0
+
+// Index returns the index of the first instance of b in a, or -1 if b is not present in a.
+// Requires 2 <= len(b) <= MaxLen.
+func Index(a, b []byte) int {
+ panic("unimplemented")
+}
+
+// IndexString returns the index of the first instance of b in a, or -1 if b is not present in a.
+// Requires 2 <= len(b) <= MaxLen.
+func IndexString(a, b string) int {
+ panic("unimplemented")
+}
+
+// Cutover reports the number of failures of IndexByte we should tolerate
+// before switching over to Index.
+// n is the number of bytes processed so far.
+// See the bytes.Index implementation for details.
+func Cutover(n int) int {
+ panic("unimplemented")
+}
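+
+// For reference, a straightforward portable equivalent would look like the
+// sketch below (indexGeneric is a hypothetical helper, not part of this
+// package); since nothing raises MaxLen on these builds, bytes.Index should
+// never reach the stubs above, which is why they simply panic.
+//
+//	func indexGeneric(a, b []byte) int {
+//		for i := 0; i+len(b) <= len(a); i++ {
+//			if string(a[i:i+len(b)]) == string(b) {
+//				return i
+//			}
+//		}
+//		return -1
+//	}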
diff --git a/src/internal/bytealg/index_native.go b/src/internal/bytealg/index_native.go
new file mode 100644
index 0000000..59c93f9
--- /dev/null
+++ b/src/internal/bytealg/index_native.go
@@ -0,0 +1,19 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64 || s390x || ppc64le || ppc64
+
+package bytealg
+
+// Index returns the index of the first instance of b in a, or -1 if b is not present in a.
+// Requires 2 <= len(b) <= MaxLen.
+//
+//go:noescape
+func Index(a, b []byte) int
+
+// IndexString returns the index of the first instance of b in a, or -1 if b is not present in a.
+// Requires 2 <= len(b) <= MaxLen.
+//
+//go:noescape
+func IndexString(a, b string) int
diff --git a/src/internal/bytealg/index_ppc64x.go b/src/internal/bytealg/index_ppc64x.go
new file mode 100644
index 0000000..720d517
--- /dev/null
+++ b/src/internal/bytealg/index_ppc64x.go
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+package bytealg
+
+import "internal/cpu"
+
+const MaxBruteForce = 16
+
+var SupportsPower9 = cpu.PPC64.IsPOWER9
+
+func init() {
+ MaxLen = 32
+}
+
+// Cutover reports the number of failures of IndexByte we should tolerate
+// before switching over to Index.
+// n is the number of bytes processed so far.
+// See the bytes.Index implementation for details.
+func Cutover(n int) int {
+ // 1 error per 8 characters, plus a few slop to start.
+ return (n + 16) / 8
+}
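+
+// For illustration, Cutover(0) = 2 and Cutover(64) = 10: the tolerance grows
+// by one miss per 8 bytes scanned, twice as quickly as the arm64 variant.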
diff --git a/src/internal/bytealg/index_ppc64x.s b/src/internal/bytealg/index_ppc64x.s
new file mode 100644
index 0000000..80a1f85
--- /dev/null
+++ b/src/internal/bytealg/index_ppc64x.s
@@ -0,0 +1,841 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is an implementation based on the s390x
+// implementation.
+
+// Find a separator with 2 <= len <= 32 within a string.
+// Separators with lengths of 2, 3 or 4 are handled
+// specially.
+
+// This works on power8 and above. The loads and
+// compares are done in big endian order
+// since that allows the use of VCLZD, and allows
+// the same implementation to work on big and little
+// endian platforms with minimal conditional changes.
+
+// NOTE: There is a power9 implementation that
+// improves performance by 10-15% on little
+// endian for some of the benchmarks; its
+// index2to16 loop is unrolled by 4 on ppc64le/power9.
+// Work is still needed for a big endian
+// implementation on power9.
+
+//go:build ppc64 || ppc64le
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Needed to swap LXVD2X loads to the correct
+// byte order to work on POWER8.
+
+#ifdef GOARCH_ppc64
+DATA byteswap<>+0(SB)/8, $0x0001020304050607
+DATA byteswap<>+8(SB)/8, $0x08090a0b0c0d0e0f
+#else
+DATA byteswap<>+0(SB)/8, $0x0706050403020100
+DATA byteswap<>+8(SB)/8, $0x0f0e0d0c0b0a0908
+#endif
+
+// Load bytes in big endian order. Address
+// alignment does not need checking.
+#define VLOADSWAP(base, index, vreg, vsreg) \
+ LXVD2X (base)(index), vsreg; \
+ VPERM vreg, vreg, SWAP, vreg
+
+GLOBL byteswap<>+0(SB), RODATA, $16
+
+TEXT ·Index<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-56
+ // R3 = byte array pointer
+ // R4 = length
+ MOVD R6, R5 // R5 = separator pointer
+ MOVD R7, R6 // R6 = separator length
+
+#ifdef GOARCH_ppc64le
+ MOVBZ internal∕cpu·PPC64+const_offsetPPC64HasPOWER9(SB), R7
+ CMP R7, $1
+ BNE power8
+ BR indexbodyp9<>(SB)
+#endif
+power8:
+ BR indexbody<>(SB)
+
+TEXT ·IndexString<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
+ // R3 = string
+ // R4 = length
+ // R5 = separator pointer
+ // R6 = separator length
+
+#ifdef GOARCH_ppc64le
+ MOVBZ internal∕cpu·PPC64+const_offsetPPC64HasPOWER9(SB), R7
+ CMP R7, $1
+ BNE power8
+ BR indexbodyp9<>(SB)
+
+#endif
+power8:
+ BR indexbody<>(SB)
+
+ // s: string we are searching
+ // sep: string to search for
+ // R3=&s[0], R4=len(s)
+ // R5=&sep[0], R6=len(sep)
+ // R14=&ret (index where sep found)
+ // R7=working addr of string
+ // R16=index value 16
+ // R17=index value 17
+ // R18=index value 18
+ // R19=index value 1
+ // R26=LASTBYTE of string
+ // R27=LASTSTR last start byte to compare with sep
+ // R8, R9 scratch
+ // V0=sep left justified zero fill
+ // CR4=sep length >= 16
+
+#define SEPMASK V17
+#define LASTBYTE R26
+#define LASTSTR R27
+#define ONES V20
+#define SWAP V21
+#define SWAP_ VS53
+TEXT indexbody<>(SB), NOSPLIT|NOFRAME, $0
+ CMP R6, R4 // Compare lengths
+ BGT notfound // If sep len is > string, notfound
+ ADD R4, R3, LASTBYTE // find last byte addr
+ SUB R6, LASTBYTE, LASTSTR // LAST=&s[len(s)-len(sep)] (last valid start index)
+ CMP R6, $0 // Check sep len
+ BEQ notfound // sep len 0 -- not found
+ MOVD R3, R7 // Copy of string addr
+ MOVD $16, R16 // Index value 16
+ MOVD $17, R17 // Index value 17
+ MOVD $18, R18 // Index value 18
+ MOVD $1, R19 // Index value 1
+ MOVD $byteswap<>+00(SB), R8
+ VSPLTISB $0xFF, ONES // splat all 1s
+ LXVD2X (R8)(R0), SWAP_ // Set up swap string
+
+ CMP R6, $16, CR4 // CR4 for len(sep) >= 16
+ VOR ONES, ONES, SEPMASK // Set up full SEPMASK
+ BGE CR4, loadge16 // Load for len(sep) >= 16
+ SUB R6, R16, R9 // 16-len of sep
+ SLD $3, R9 // Set up for VSLO
+ MTVSRD R9, V9 // Set up for VSLO
+ VSLDOI $8, V9, V9, V9 // Set up for VSLO
+ VSLO ONES, V9, SEPMASK // Mask for separator len(sep) < 16
+
+loadge16:
+ ANDCC $15, R5, R9 // Find byte offset of sep
+ ADD R9, R6, R10 // Add sep len
+ CMP R10, $16 // Check if sep len+offset > 16
+ BGT sepcross16 // Sep crosses 16 byte boundary
+
+ RLDICR $0, R5, $59, R8 // Adjust addr to 16 byte container
+ VLOADSWAP(R8, R0, V0, V0) // Load 16 bytes @R8 into V0
+ SLD $3, R9 // Set up shift count for VSLO
+ MTVSRD R9, V8 // Set up shift count for VSLO
+ VSLDOI $8, V8, V8, V8
+ VSLO V0, V8, V0 // Shift by start byte
+
+ VAND V0, SEPMASK, V0 // Mask separator (< 16)
+ BR index2plus
+
+sepcross16:
+ VLOADSWAP(R5, R0, V0, V0) // Load 16 bytes @R5 into V0
+
+ VAND V0, SEPMASK, V0 // mask out separator
+ BLE CR4, index2to16
+ BR index17plus // Handle sep > 16
+
+index2plus:
+ CMP R6, $2 // Check length of sep
+ BNE index3plus // If not 2, check for 3
+ ADD $16, R7, R9 // Check if next 16 bytes past last
+ CMP R9, LASTBYTE // compare with last
+ BGE index2to16 // 2 <= len(string) <= 16
+ MOVD $0xff00, R21 // Mask for later
+ MTVSRD R21, V25 // Move to Vreg
+ VSPLTH $3, V25, V31 // Splat mask
+ VSPLTH $0, V0, V1 // Splat 1st 2 bytes of sep
+ VSPLTISB $0, V10 // Clear V10
+
+ // First case: 2 byte separator
+ // V1: 2 byte separator splatted
+ // V2: 16 bytes at addr
+ // V4: 16 bytes at addr+1
+ // Compare 2 byte separator at start
+ // and at start+1. Use VSEL to combine
+ // those results to find the first
+ // matching start byte, returning
+ // that value when found. Loop as
+ // long as len(string) > 16
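+ // For example, with a 2-byte sep, V2 holds the halfwords that start at
+ // even offsets 0,2,4,... of the block and V3 (loaded at addr+1) holds
+ // the halfwords that start at odd offsets 1,3,5,..., so the two
+ // VCMPEQUH results together cover every possible start position.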
+index2loop2:
+ VLOADSWAP(R7, R19, V3, V3) // Load 16 bytes @R7+1 into V3
+
+index2loop:
+ VLOADSWAP(R7, R0, V2, V2) // Load 16 bytes @R7 into V2
+ VCMPEQUH V1, V2, V5 // Search for sep
+ VCMPEQUH V1, V3, V6 // Search for sep offset by 1
+ VSEL V6, V5, V31, V7 // merge even and odd indices
+ VCLZD V7, V18 // find index of first match
+ MFVSRD V18, R25 // get first value
+ CMP R25, $64 // Found if < 64
+ BLT foundR25 // Return byte index where found
+ VSLDOI $8, V18, V18, V18 // Adjust 2nd value
+ MFVSRD V18, R25 // get second value
+ CMP R25, $64 // Found if < 64
+ ADD $64, R25 // Update byte offset
+ BLT foundR25 // Return value
+ ADD $16, R7 // R7+=16 Update string pointer
+ ADD $17, R7, R9 // R9=R7+17 since loop unrolled
+ CMP R9, LASTBYTE // Compare addr+17 against last byte
+ BLT index2loop2 // If < last, continue loop
+ CMP R7, LASTBYTE // Compare addr+16 against last byte
+ BLT index2to16 // If < 16 handle specially
+ VLOADSWAP(R7, R0, V3, V3) // Load 16 bytes @R7 into V3
+ VSLDOI $1, V3, V10, V3 // Shift left by 1 byte
+ BR index2loop
+
+index3plus:
+ CMP R6, $3 // Check if sep == 3
+ BNE index4plus // If not check larger
+ ADD $19, R7, R9 // Find bytes for use in this loop
+ CMP R9, LASTBYTE // Compare against last byte
+ BGE index2to16 // Remaining string 2<=len<=16
+ MOVD $0xff00, R21 // Set up mask for upcoming loop
+ MTVSRD R21, V25 // Move mask to Vreg
+ VSPLTH $3, V25, V31 // Splat mask
+ VSPLTH $0, V0, V1 // Splat 1st two bytes of sep
+ VSPLTB $2, V0, V8 // Splat 3rd byte of sep
+
+ // Loop to process 3 byte separator.
+ // string[0:16] is in V2
+ // string[2:18] is in V3
+ // sep[0:2] splatted in V1
+ // sep[2] (3rd byte) splatted in V8
+ // Load vectors at string, string+1
+ // and string+2. Compare string, string+1
+ // against first 2 bytes of separator
+ // splatted, and string+2 against 3rd
+ // byte splatted. Merge the results with
+ // VSEL to find the first byte of a match.
+
+ // Special handling for last 16 bytes if the
+ // string fits in 16 byte multiple.
+index3loop2:
+ MOVD $2, R21 // Set up index for 2
+ VSPLTISB $0, V10 // Clear V10
+ VLOADSWAP(R7, R21, V3, V3)// Load 16 bytes @R7+2 into V3
+ VSLDOI $14, V3, V10, V3 // Left justify next 2 bytes
+
+index3loop:
+ VLOADSWAP(R7, R0, V2, V2) // Load with correct order
+ VSLDOI $1, V2, V3, V4 // string[1:17]
+ VSLDOI $2, V2, V3, V9 // string[2:18]
+ VCMPEQUH V1, V2, V5 // compare hw even indices
+ VCMPEQUH V1, V4, V6 // compare hw odd indices
+ VCMPEQUB V8, V9, V10 // compare 3rd to last byte
+ VSEL V6, V5, V31, V7 // Find 1st matching byte using mask
+ VAND V7, V10, V7 // AND matched bytes with matched 3rd byte
+ VCLZD V7, V18 // Find first nonzero indexes
+ MFVSRD V18, R25 // Move 1st doubleword
+ CMP R25, $64 // If < 64 found
+ BLT foundR25 // Return matching index
+ VSLDOI $8, V18, V18, V18 // Move value
+ MFVSRD V18, R25 // Move 2nd doubleword
+ CMP R25, $64 // If < 64 found
+ ADD $64, R25 // Update byte index
+ BLT foundR25 // Return matching index
+ ADD $16, R7 // R7+=16 string ptr
+ ADD $19, R7, R9 // Number of string bytes for loop
+ CMP R9, LASTBYTE // Compare against last byte of string
+ BLT index3loop2 // If within, continue this loop
+ CMP R7, LASTSTR // Compare against last start byte
+ BLT index2to16 // Process remainder
+ VSPLTISB $0, V3 // Special case for last 16 bytes
+ BR index3loop // Continue this loop
+
+ // Loop to process 4 byte separator
+ // string[0:16] in V2
+ // string[3:19] in V3
+ // sep[0:4] splatted in V1
+ // Set up vectors with strings at offsets
+ // 0, 1, 2, 3 and compare against the 4 byte
+ // separator also splatted. Use VSEL with the
+ // compare results to find the first byte where
+ // a separator match is found.
+index4plus:
+ CMP R6, $4 // Check if 4 byte separator
+ BNE index5plus // If not next higher
+ ADD $20, R7, R9 // Check string size to load
+ CMP R9, LASTBYTE // Verify string length
+ BGE index2to16 // If not large enough, process remaining
+ MOVD $2, R15 // Set up index
+
+ // Set up masks for use with VSEL
+ MOVD $0xff, R21 // Set up mask 0xff000000ff000000...
+ SLD $24, R21
+ MTVSRD R21, V10
+ VSPLTW $1, V10, V29
+ VSLDOI $2, V29, V29, V30 // Mask 0x0000ff000000ff00...
+ MOVD $0xffff, R21
+ SLD $16, R21
+ MTVSRD R21, V10
+ VSPLTW $1, V10, V31 // Mask 0xffff0000ffff0000...
+ VSPLTW $0, V0, V1 // Splat 1st word of separator
+
+index4loop:
+ VLOADSWAP(R7, R0, V2, V2) // Load 16 bytes @R7 into V2
+
+next4:
+ VSPLTISB $0, V10 // Clear
+ MOVD $3, R9 // Number of bytes beyond 16
+ VLOADSWAP(R7, R9, V3, V3) // Load 16 bytes @R7+3 into V3
+ VSLDOI $13, V3, V10, V3 // Shift left last 3 bytes
+ VSLDOI $1, V2, V3, V4 // V4=(V2:V3)<<1
+ VSLDOI $2, V2, V3, V9 // V9=(V2:V3)<<2
+ VSLDOI $3, V2, V3, V10 // V10=(V2:v3)<<3
+ VCMPEQUW V1, V2, V5 // compare index 0, 4, ... with sep
+ VCMPEQUW V1, V4, V6 // compare index 1, 5, ... with sep
+ VCMPEQUW V1, V9, V11 // compare index 2, 6, ... with sep
+ VCMPEQUW V1, V10, V12 // compare index 3, 7, ... with sep
+ VSEL V6, V5, V29, V13 // merge index 0, 1, 4, 5, using mask
+ VSEL V12, V11, V30, V14 // merge index 2, 3, 6, 7, using mask
+ VSEL V14, V13, V31, V7 // final merge
+ VCLZD V7, V18 // Find first index for each half
+ MFVSRD V18, R25 // Isolate value
+ CMP R25, $64 // If < 64, found
+ BLT foundR25 // Return found index
+ VSLDOI $8, V18, V18, V18 // Move for MFVSRD
+ MFVSRD V18, R25 // Isolate other value
+ CMP R25, $64 // If < 64, found
+ ADD $64, R25 // Update index for high doubleword
+ BLT foundR25 // Return found index
+ ADD $16, R7 // R7+=16 for next string
+ ADD $20, R7, R9 // R+20 for all bytes to load
+ CMP R9, LASTBYTE // Past end? Maybe check for extra?
+ BLT index4loop // If not, continue loop
+ CMP R7, LASTSTR // Check remainder
+ BLE index2to16 // Process remainder
+ BR notfound // Not found
+
+index5plus:
+ CMP R6, $16 // Check for sep > 16
+ BGT index17plus // Handle large sep
+
+ // Assumption is that the separator is smaller than the string at this point
+index2to16:
+ CMP R7, LASTSTR // Compare last start byte
+ BGT notfound // last takes len(sep) into account
+
+ ADD $16, R7, R9 // Check for last byte of string
+ CMP R9, LASTBYTE
+ BGT index2to16tail
+
+ // At least 16 bytes of string left
+ // Mask the number of bytes in sep
+index2to16loop:
+ VLOADSWAP(R7, R0, V1, V1) // Load 16 bytes @R7 into V1
+
+compare:
+ VAND V1, SEPMASK, V2 // Mask out sep size
+ VCMPEQUBCC V0, V2, V3 // Compare masked string
+ BLT CR6, found // All equal
+ ADD $1, R7 // Update ptr to next byte
+ CMP R7, LASTSTR // Still less than last start byte
+ BGT notfound // Not found
+ ADD $16, R7, R9 // Verify remaining bytes
+ CMP R9, LASTBYTE // At least 16
+ BLT index2to16loop // Try again
+
+ // Less than 16 bytes remaining in string
+ // Separator >= 2
+index2to16tail:
+ ADD R3, R4, R9 // End of string
+ SUB R7, R9, R9 // Number of bytes left
+ ANDCC $15, R7, R10 // 16 byte offset
+ ADD R10, R9, R11 // offset + len
+ CMP R11, $16 // >= 16?
+ BLE short // Does not cross 16 bytes
+ VLOADSWAP(R7, R0, V1, V1) // Load 16 bytes @R7 into V1
+ BR index2to16next // Continue on
+
+short:
+ RLDICR $0, R7, $59, R9 // Adjust addr to 16 byte container
+ VLOADSWAP(R9, R0, V1, V1)// Load 16 bytes @R9 into V1
+ SLD $3, R10 // Set up shift
+ MTVSRD R10, V8 // Set up shift
+ VSLDOI $8, V8, V8, V8
+ VSLO V1, V8, V1 // Shift by start byte
+ VSPLTISB $0, V25 // Clear for later use
+
+index2to16next:
+ VAND V1, SEPMASK, V2 // Just compare size of sep
+ VCMPEQUBCC V0, V2, V3 // Compare sep and partial string
+ BLT CR6, found // Found
+ ADD $1, R7 // Not found, try next partial string
+ CMP R7, LASTSTR // Check for end of string
+ BGT notfound // If at end, then not found
+ VSLDOI $1, V1, V25, V1 // Shift string left by 1 byte
+ BR index2to16next // Check the next partial string
+
+index17plus:
+ CMP R6, $32 // Check if 17 < len(sep) <= 32
+ BGT index33plus
+ SUB $16, R6, R9 // Extra > 16
+ SLD $56, R9, R10 // Shift to use in VSLO
+ MTVSRD R10, V9 // Set up for VSLO
+ VLOADSWAP(R5, R9, V1, V1)// Load 16 bytes @R5+R9 into V1
+ VSLO V1, V9, V1 // Shift left
+ VSPLTISB $0xff, V7 // Splat 1s
+ VSPLTISB $0, V27 // Splat 0
+
+index17to32loop:
+ VLOADSWAP(R7, R0, V2, V2) // Load 16 bytes @R7 into V2
+
+next17:
+ VLOADSWAP(R7, R9, V3, V3) // Load 16 bytes @R7+R9 into V3
+ VSLO V3, V9, V3 // Shift left
+ VCMPEQUB V0, V2, V4 // Compare first 16 bytes
+ VCMPEQUB V1, V3, V5 // Compare extra over 16 bytes
+ VAND V4, V5, V6 // Check if both equal
+ VCMPEQUBCC V6, V7, V8 // All equal?
+ BLT CR6, found // Yes
+ ADD $1, R7 // On to next byte
+ CMP R7, LASTSTR // Check if last start byte
+ BGT notfound // If too high, not found
+ BR index17to32loop // Continue
+
+notfound:
+ MOVD $-1, R3 // Return -1 if not found
+ RET
+
+index33plus:
+ MOVD $0, (R0) // Case not implemented
+ RET // Crash before return
+
+foundR25:
+ SRD $3, R25 // Convert from bits to bytes
+ ADD R25, R7 // Add to current string address
+ SUB R3, R7 // Subtract from start of string
+ MOVD R7, R3 // Return byte where found
+ RET
+
+found:
+ SUB R3, R7 // Return byte where found
+ MOVD R7, R3
+ RET
+
+TEXT indexbodyp9<>(SB), NOSPLIT|NOFRAME, $0
+ CMP R6, R4 // Compare lengths
+ BGT notfound // If sep len is > string, notfound
+ ADD R4, R3, LASTBYTE // find last byte addr
+ SUB R6, LASTBYTE, LASTSTR // LAST=&s[len(s)-len(sep)] (last valid start index)
+ CMP R6, $0 // Check sep len
+ BEQ notfound // sep len 0 -- not found
+ MOVD R3, R7 // Copy of string addr
+#ifndef GOPPC64_power10
+ MOVD $16, R16 // Index value 16
+ MOVD $17, R17 // Index value 17
+ MOVD $18, R18 // Index value 18
+ VSPLTISB $0xFF, ONES // splat all 1s
+ VOR ONES, ONES, SEPMASK // Set up full SEPMASK
+#else
+ SLD $56, R6, R14 // Set up separator length for LXVLL
+#endif
+ MOVD $1, R19 // Index value 1
+ CMP R6, $16, CR4 // CR4 for len(sep) >= 16
+ BGE CR4, loadge16 // Load for len(sep) >= 16
+#ifndef GOPPC64_power10
+ SUB R6, R16, R9 // 16-len of sep
+ SLD $3, R9 // Set up for VSLO
+ MTVSRD R9, V9 // Set up for VSLO
+ VSLDOI $8, V9, V9, V9 // Set up for VSLO
+ VSLO ONES, V9, SEPMASK // Mask for separator len(sep) < 16
+#endif
+loadge16:
+ ANDCC $15, R5, R9 // Find byte offset of sep
+ ADD R9, R6, R10 // Add sep len
+ CMP R10, $16 // Check if sep len+offset > 16
+ BGT sepcross16 // Sep crosses 16 byte boundary
+#ifdef GOPPC64_power10
+ LXVLL R5, R14, V0 // Load separator
+#else
+ RLDICR $0, R5, $59, R8 // Adjust addr to 16 byte container
+ LXVB16X (R8)(R0), V0 // Load 16 bytes @R8 into V0
+ SLD $3, R9 // Set up shift count for VSLO
+ MTVSRD R9, V8 // Set up shift count for VSLO
+ VSLDOI $8, V8, V8, V8
+ VSLO V0, V8, V0 // Shift by start byte
+ VAND V0, SEPMASK, V0 // Mask separator (< 16)
+#endif
+ BR index2plus
+sepcross16:
+#ifdef GOPPC64_power10
+ LXVLL R5, R14, V0 // Load separator
+#else
+ LXVB16X (R5)(R0), V0 // Load 16 bytes @R5 into V0
+ VAND V0, SEPMASK, V0 // mask out separator
+#endif
+ BLE CR4, index2to16
+ BR index17plus // Handle sep > 16
+
+index2plus:
+ CMP R6, $2 // Check length of sep
+ BNE index3plus // If not 2, check for 3
+ ADD $16, R7, R9 // Check if next 16 bytes past last
+ CMP R9, LASTBYTE // compare with last
+ BGE index2to16 // 2 <= len(string) <= 16
+ MOVD $0xff00, R21 // Mask for later
+ MTVSRD R21, V25 // Move to Vreg
+ VSPLTH $3, V25, V31 // Splat mask
+ VSPLTH $0, V0, V1 // Splat 1st 2 bytes of sep
+ VSPLTISB $0, V10 // Clear V10
+
+ // First case: 2 byte separator
+ // V1: 2 byte separator splatted
+ // V2: 16 bytes at addr
+ // V4: 16 bytes at addr+1
+ // Compare 2 byte separator at start
+ // and at start+1. Use VSEL to combine
+ // those results to find the first
+ // matching start byte, returning
+ // that value when found. Loop as
+ // long as len(string) > 16
+index2loop2:
+ LXVB16X (R7)(R19), V3 // Load 16 bytes @R7+1 into V3
+
+index2loop:
+ LXVB16X (R7)(R0), V2 // Load 16 bytes @R7 into V2
+ VCMPEQUH V1, V2, V5 // Search for sep
+ VCMPEQUH V1, V3, V6 // Search for sep offset by 1
+ VSEL V6, V5, V31, V7 // merge even and odd indices
+ VCLZD V7, V18 // find index of first match
+ MFVSRD V18, R25 // get first value
+ CMP R25, $64 // Found if < 64
+ BLT foundR25 // Return byte index where found
+
+ MFVSRLD V18, R25 // get second value
+ CMP R25, $64 // Found if < 64
+ ADD $64, R25 // Update byte offset
+ BLT foundR25 // Return value
+ ADD $16, R7 // R7+=16 Update string pointer
+ ADD $17, R7, R9 // R9=R7+17 since loop unrolled
+ CMP R9, LASTBYTE // Compare addr+17 against last byte
+ BLT index2loop2 // If < last, continue loop
+ CMP R7, LASTBYTE // Compare addr+16 against last byte
+ BLT index2to16 // If < 16 handle specially
+ LXVB16X (R7)(R0), V3 // Load 16 bytes @R7 into V3
+ VSLDOI $1, V3, V10, V3 // Shift left by 1 byte
+ BR index2loop
+
+index3plus:
+ CMP R6, $3 // Check if sep == 3
+ BNE index4plus // If not check larger
+ ADD $19, R7, R9 // Find bytes for use in this loop
+ CMP R9, LASTBYTE // Compare against last byte
+ BGE index2to16 // Remaining string 2<=len<=16
+ MOVD $0xff00, R21 // Set up mask for upcoming loop
+ MTVSRD R21, V25 // Move mask to Vreg
+ VSPLTH $3, V25, V31 // Splat mask
+ VSPLTH $0, V0, V1 // Splat 1st two bytes of sep
+ VSPLTB $2, V0, V8 // Splat 3rd byte of sep
+
+ // Loop to process 3 byte separator.
+ // string[0:16] is in V2
+ // string[2:18] is in V3
+ // sep[0:2] splatted in V1
+ // sep[2] (3rd byte) splatted in V8
+ // Load vectors at string, string+1
+ // and string+2. Compare string, string+1
+ // against first 2 bytes of separator
+ // splatted, and string+2 against 3rd
+ // byte splatted. Merge the results with
+ // VSEL to find the first byte of a match.
+
+ // Special handling for last 16 bytes if the
+ // string fits in 16 byte multiple.
+index3loop2:
+ MOVD $2, R21 // Set up index for 2
+ VSPLTISB $0, V10 // Clear V10
+ LXVB16X (R7)(R21), V3 // Load 16 bytes @R7+2 into V3
+ VSLDOI $14, V3, V10, V3 // Left justify next 2 bytes
+
+index3loop:
+ LXVB16X (R7)(R0), V2 // Load 16 bytes @R7
+ VSLDOI $1, V2, V3, V4 // string[1:17]
+ VSLDOI $2, V2, V3, V9 // string[2:18]
+ VCMPEQUH V1, V2, V5 // compare hw even indices
+ VCMPEQUH V1, V4, V6 // compare hw odd indices
+ VCMPEQUB V8, V9, V10 // compare 3rd to last byte
+ VSEL V6, V5, V31, V7 // Find 1st matching byte using mask
+ VAND V7, V10, V7 // AND matched bytes with matched 3rd byte
+ VCLZD V7, V18 // Find first nonzero indexes
+ MFVSRD V18, R25 // Move 1st doubleword
+ CMP R25, $64 // If < 64 found
+ BLT foundR25 // Return matching index
+
+ MFVSRLD V18, R25 // Move 2nd doubleword
+ CMP R25, $64 // If < 64 found
+ ADD $64, R25 // Update byte index
+ BLT foundR25 // Return matching index
+ ADD $16, R7 // R7+=16 string ptr
+ ADD $19, R7, R9 // Number of string bytes for loop
+ CMP R9, LASTBYTE // Compare against last byte of string
+ BLT index3loop2 // If within, continue this loop
+ CMP R7, LASTSTR // Compare against last start byte
+ BLT index2to16 // Process remainder
+ VSPLTISB $0, V3 // Special case for last 16 bytes
+ BR index3loop // Continue this loop
+
+ // Loop to process 4 byte separator
+ // string[0:16] in V2
+ // string[3:19] in V3
+ // sep[0:4] splatted in V1
+ // Set up vectors with strings at offsets
+ // 0, 1, 2, 3 and compare against the 4 byte
+ // separator also splatted. Use VSEL with the
+ // compare results to find the first byte where
+ // a separator match is found.
+index4plus:
+ CMP R6, $4 // Check if 4 byte separator
+ BNE index5plus // If not next higher
+ ADD $20, R7, R9 // Check string size to load
+ CMP R9, LASTBYTE // Verify string length
+ BGE index2to16 // If not large enough, process remaining
+
+ // Set up masks for use with VSEL
+ MOVD $0xff, R21 // Set up mask 0xff000000ff000000...
+ SLD $24, R21
+ MTVSRWS R21, V29
+
+ VSLDOI $2, V29, V29, V30 // Mask 0x0000ff000000ff00...
+ MOVD $0xffff, R21
+ SLD $16, R21
+ MTVSRWS R21, V31
+
+ VSPLTW $0, V0, V1 // Splat 1st word of separator
+
+index4loop:
+ LXVB16X (R7)(R0), V2 // Load 16 bytes @R7 into V2
+
+next4:
+ VSPLTISB $0, V10 // Clear
+ MOVD $3, R9 // Number of bytes beyond 16
+ LXVB16X (R7)(R9), V3 // Load 16 bytes @R7+3 into V3
+ VSLDOI $13, V3, V10, V3 // Shift left last 3 bytes
+ VSLDOI $1, V2, V3, V4 // V4=(V2:V3)<<1
+ VSLDOI $2, V2, V3, V9 // V9=(V2:V3)<<2
+ VSLDOI $3, V2, V3, V10 // V10=(V2:v3)<<3
+ VCMPEQUW V1, V2, V5 // compare index 0, 4, ... with sep
+ VCMPEQUW V1, V4, V6 // compare index 1, 5, ... with sep
+ VCMPEQUW V1, V9, V11 // compare index 2, 6, ... with sep
+ VCMPEQUW V1, V10, V12 // compare index 3, 7, ... with sep
+ VSEL V6, V5, V29, V13 // merge index 0, 1, 4, 5, using mask
+ VSEL V12, V11, V30, V14 // merge index 2, 3, 6, 7, using mask
+ VSEL V14, V13, V31, V7 // final merge
+ VCLZD V7, V18 // Find first index for each half
+ MFVSRD V18, R25 // Isolate value
+ CMP R25, $64 // If < 64, found
+ BLT foundR25 // Return found index
+
+ MFVSRLD V18, R25 // Isolate other value
+ CMP R25, $64 // If < 64, found
+ ADD $64, R25 // Update index for high doubleword
+ BLT foundR25 // Return found index
+ ADD $16, R7 // R7+=16 for next string
+ ADD $20, R7, R9 // R+20 for all bytes to load
+ CMP R9, LASTBYTE // Past end? Maybe check for extra?
+ BLT index4loop // If not, continue loop
+ CMP R7, LASTSTR // Check remainder
+ BLE index2to16 // Process remainder
+ BR notfound // Not found
+
+index5plus:
+ CMP R6, $16 // Check for sep > 16
+ BGT index17plus // Handle large sep
+
+ // Assumption is that the separator is smaller than the string at this point
+index2to16:
+ CMP R7, LASTSTR // Compare last start byte
+ BGT notfound // last takes len(sep) into account
+
+ ADD $19, R7, R9 // To check 4 indices per iteration, need at least 16+3 bytes
+ CMP R9, LASTBYTE
+ // At least 16 bytes of string left
+ // Mask the number of bytes in sep
+ VSPLTISB $0, V10 // Clear
+ BGT index2to16tail
+
+#ifdef GOPPC64_power10
+ ADD $3,R7, R17 // Base+3
+ ADD $2,R7, R8 // Base+2
+ ADD $1,R7, R10 // Base+1
+#else
+ MOVD $3, R17 // Number of bytes beyond 16
+#endif
+ PCALIGN $16
+
+index2to16loop:
+
+#ifdef GOPPC64_power10
+ LXVLL R7, R14, V8 // Load next 16 bytes of string from Base
+ LXVLL R10, R14, V9 // Load next 16 bytes of string from Base+1
+ LXVLL R8, R14, V11 // Load next 16 bytes of string from Base+2
+ LXVLL R17,R14, V12 // Load next 16 bytes of string from Base+3
+#else
+ LXVB16X (R7)(R0), V1 // Load next 16 bytes of string into V1 from R7
+ LXVB16X (R7)(R17), V5 // Load next 16 bytes of string into V5 from R7+3
+
+ VSLDOI $13, V5, V10, V2 // Shift left last 3 bytes
+ VSLDOI $1, V1, V2, V3 // V3=(V1:V2)<<1
+ VSLDOI $2, V1, V2, V4 // V4=(V1:V2)<<2
+ VAND V1, SEPMASK, V8 // Mask out sep size 0th index
+ VAND V3, SEPMASK, V9 // Mask out sep size 1st index
+ VAND V4, SEPMASK, V11 // Mask out sep size 2nd index
+ VAND V5, SEPMASK, V12 // Mask out sep size 3rd index
+#endif
+ VCMPEQUBCC V0, V8, V8 // compare masked string
+ BLT CR6, found // All equal while comparing 0th index
+ VCMPEQUBCC V0, V9, V9 // compare masked string
+ BLT CR6, found2 // All equal while comparing 1st index
+ VCMPEQUBCC V0, V11, V11 // compare masked string
+ BLT CR6, found3 // All equal while comparing 2nd index
+ VCMPEQUBCC V0, V12, V12 // compare masked string
+ BLT CR6, found4 // All equal while comparing 3rd index
+
+ ADD $4, R7 // Update ptr to next 4 bytes
+#ifdef GOPPC64_power10
+ ADD $4, R17 // Update ptr to next 4 bytes
+ ADD $4, R8 // Update ptr to next 4 bytes
+ ADD $4, R10 // Update ptr to next 4 bytes
+#endif
+ CMP R7, LASTSTR // Still less than last start byte
+ BGT notfound // Not found
+ ADD $19, R7, R9 // Verify remaining bytes
+ CMP R9, LASTBYTE // length of string at least 19
+ BLE index2to16loop // Try again, else do post processing and jump to index2to16next
+ PCALIGN $32
+ // <19 bytes left, post process the remaining string
+index2to16tail:
+#ifdef GOPPC64_power10
+index2to16next_p10:
+ LXVLL R7,R14, V1 // Load 16 bytes @R7 into V1
+ VCMPEQUBCC V1, V0, V3 // Compare sep and partial string
+ BLT CR6, found // Found
+ ADD $1, R7 // Not found, try next partial string
+ CMP R7, LASTSTR // Check for end of string
+ BLE index2to16next_p10 // If not past the end, check the next partial string
+ BR notfound // Otherwise not found
+#else
+ ADD R3, R4, R9 // End of string
+ SUB R7, R9, R9 // Number of bytes left
+ ANDCC $15, R7, R10 // 16 byte offset
+ ADD R10, R9, R11 // offset + len
+ CMP R11, $16 // >= 16?
+ BLE short // Does not cross 16 bytes
+ LXVB16X (R7)(R0), V1 // Load 16 bytes @R7 into V1
+ CMP R9, $16 // Post-processing of unrolled loop
+ BLE index2to16next // continue to index2to16next if <= 16 bytes
+ SUB R16, R9, R10 // R9 should be 18 or 17 hence R10 is 1 or 2
+ LXVB16X (R7)(R10), V9
+ CMP R10, $1 // string length is 17, compare 1 more byte
+ BNE extra2 // string length is 18, compare 2 more bytes
+ VSLDOI $15, V9, V10, V25
+ VAND V1, SEPMASK, V2 // Just compare size of sep
+ VCMPEQUBCC V0, V2, V3 // Compare sep and partial string
+ BLT CR6, found // Found
+ ADD $1, R7 // Not found, try next partial string
+ CMP R7, LASTSTR // Check for end of string
+ BGT notfound // If at end, then not found
+ VSLDOI $1, V1, V25, V1 // Shift string left by 1 byte
+ BR index2to16next // go to remainder loop
+extra2:
+ VSLDOI $14, V9, V10, V25
+ VAND V1, SEPMASK, V2 // Just compare size of sep
+ VCMPEQUBCC V0, V2, V3 // Compare sep and partial string
+ BLT CR6, found // Found
+ ADD $1, R7 // Not found, try next partial string
+ CMP R7, LASTSTR // Check for end of string
+ BGT notfound // If at end, then not found
+ VOR V1, V1, V4 // save remaining string
+ VSLDOI $1, V1, V25, V1 // Shift string left by 1 byte for 17th byte
+ VAND V1, SEPMASK, V2 // Just compare size of sep
+ VCMPEQUBCC V0, V2, V3 // Compare sep and partial string
+ BLT CR6, found // Found
+ ADD $1, R7 // Not found, try next partial string
+ CMP R7, LASTSTR // Check for end of string
+ BGT notfound // If at end, then not found
+ VSLDOI $2, V4, V25, V1 // Shift saved string left by 2 bytes for 18th byte
+ BR index2to16next // Check the remaining partial string in index2to16next
+
+short:
+ RLDICR $0, R7, $59, R9 // Adjust addr to 16 byte container
+ LXVB16X (R9)(R0), V1 // Load 16 bytes @R9 into V1
+ SLD $3, R10 // Set up shift
+ MTVSRD R10, V8 // Set up shift
+ VSLDOI $8, V8, V8, V8
+ VSLO V1, V8, V1 // Shift by start byte
+ PCALIGN $16
+index2to16next:
+ VAND V1, SEPMASK, V2 // Just compare size of sep
+ VCMPEQUBCC V0, V2, V3 // Compare sep and partial string
+ BLT CR6, found // Found
+ ADD $1, R7 // Not found, try next partial string
+ CMP R7, LASTSTR // Check for end of string
+ BGT notfound // If at end, then not found
+ VSLDOI $1, V1, V10, V1 // Shift string left by 1 byte
+ BR index2to16next // Check the next partial string
+#endif // Tail processing if GOPPC64!=power10
+
+index17plus:
+ CMP R6, $32 // Check if 17 < len(sep) <= 32
+ BGT index33plus
+ SUB $16, R6, R9 // Extra > 16
+ SLD $56, R9, R10 // Shift to use in VSLO
+ MTVSRD R10, V9 // Set up for VSLO
+ LXVB16X (R5)(R9), V1 // Load 16 bytes @R5+R9 into V1
+ VSLO V1, V9, V1 // Shift left
+ VSPLTISB $0xff, V7 // Splat 1s
+ VSPLTISB $0, V27 // Splat 0
+
+index17to32loop:
+ LXVB16X (R7)(R0), V2 // Load 16 bytes @R7 into V2
+
+next17:
+ LXVB16X (R7)(R9), V3 // Load 16 bytes @R7+R9 into V3
+ VSLO V3, V9, V3 // Shift left
+ VCMPEQUB V0, V2, V4 // Compare first 16 bytes
+ VCMPEQUB V1, V3, V5 // Compare extra over 16 bytes
+ VAND V4, V5, V6 // Check if both equal
+ VCMPEQUBCC V6, V7, V8 // All equal?
+ BLT CR6, found // Yes
+ ADD $1, R7 // On to next byte
+ CMP R7, LASTSTR // Check if last start byte
+ BGT notfound // If too high, not found
+ BR index17to32loop // Continue
+
+notfound:
+ MOVD $-1, R3 // Return -1 if not found
+ RET
+
+index33plus:
+ MOVD $0, (R0) // Case not implemented
+ RET // Crash before return
+
+foundR25:
+ SRD $3, R25 // Convert from bits to bytes
+ ADD R25, R7 // Add to current string address
+ SUB R3, R7 // Subtract from start of string
+ MOVD R7, R3 // Return byte where found
+ RET
+found4:
+ ADD $1, R7 // found from unrolled loop at index 3
+found3:
+ ADD $1, R7 // found from unrolled loop at index 2
+found2:
+ ADD $1, R7 // found from unrolled loop at index 1
+found: // found at index 0
+ SUB R3, R7 // Return byte where found
+ MOVD R7, R3
+ RET
diff --git a/src/internal/bytealg/index_s390x.go b/src/internal/bytealg/index_s390x.go
new file mode 100644
index 0000000..9340cf1
--- /dev/null
+++ b/src/internal/bytealg/index_s390x.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytealg
+
+import "internal/cpu"
+
+const MaxBruteForce = 64
+
+func init() {
+ // Note: we're kind of lucky that this flag is available at this point.
+ // The runtime sets HasVX when processing auxv records, and that happens
+ // to happen *before* running the init functions of packages that
+ // the runtime depends on.
+ // TODO: it would really be nicer for internal/cpu to figure out this
+ // flag by itself. Then we wouldn't need to depend on quirks of
+ // early startup initialization order.
+ if cpu.S390X.HasVX {
+ MaxLen = 64
+ }
+}
+
+// Cutover reports the number of failures of IndexByte we should tolerate
+// before switching over to Index.
+// n is the number of bytes processed so far.
+// See the bytes.Index implementation for details.
+func Cutover(n int) int {
+ // 1 error per 8 characters, plus a few slop to start.
+ return (n + 16) / 8
+}
diff --git a/src/internal/bytealg/index_s390x.s b/src/internal/bytealg/index_s390x.s
new file mode 100644
index 0000000..491d5bc
--- /dev/null
+++ b/src/internal/bytealg/index_s390x.s
@@ -0,0 +1,216 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Caller must confirm availability of vx facility before calling.
+TEXT ·Index(SB),NOSPLIT|NOFRAME,$0-56
+ LMG a_base+0(FP), R1, R2 // R1=&s[0], R2=len(s)
+ LMG b_base+24(FP), R3, R4 // R3=&sep[0], R4=len(sep)
+ MOVD $ret+48(FP), R5
+ BR indexbody<>(SB)
+
+// Caller must confirm availability of vx facility before calling.
+TEXT ·IndexString(SB),NOSPLIT|NOFRAME,$0-40
+ LMG a_base+0(FP), R1, R2 // R1=&s[0], R2=len(s)
+ LMG b_base+16(FP), R3, R4 // R3=&sep[0], R4=len(sep)
+ MOVD $ret+32(FP), R5
+ BR indexbody<>(SB)
+
+// s: string we are searching
+// sep: string to search for
+// R1=&s[0], R2=len(s)
+// R3=&sep[0], R4=len(sep)
+// R5=&ret (int)
+// Caller must confirm availability of vx facility before calling.
+TEXT indexbody<>(SB),NOSPLIT|NOFRAME,$0
+ CMPBGT R4, R2, notfound
+ ADD R1, R2
+ SUB R4, R2 // R2=&s[len(s)-len(sep)] (last valid index)
+ CMPBEQ R4, $0, notfound
+ SUB $1, R4 // R4=len(sep)-1 for use as VLL index
+ VLL R4, (R3), V0 // contains first 16 bytes of sep
+ MOVD R1, R7
+index2plus:
+ CMPBNE R4, $1, index3plus
+ MOVD $15(R7), R9
+ CMPBGE R9, R2, index2to16
+ VGBM $0xaaaa, V31 // 0xff00ff00ff00ff00...
+ VONE V16
+ VREPH $0, V0, V1
+ CMPBGE R9, R2, index2to16
+index2loop:
+ VL 0(R7), V2 // 16 bytes, even indices
+ VL 1(R7), V4 // 16 bytes, odd indices
+ VCEQH V1, V2, V5 // compare even indices
+ VCEQH V1, V4, V6 // compare odd indices
+ VSEL V5, V6, V31, V7 // merge even and odd indices
+ VFEEBS V16, V7, V17 // find leftmost index, set condition to 1 if found
+ BLT foundV17
+ MOVD $16(R7), R7 // R7+=16
+ ADD $15, R7, R9
+ CMPBLE R9, R2, index2loop // continue if (R7+15) <= R2 (last index to search)
+ CMPBLE R7, R2, index2to16
+ BR notfound
+
+index3plus:
+ CMPBNE R4, $2, index4plus
+ ADD $15, R7, R9
+ CMPBGE R9, R2, index2to16
+ MOVD $1, R0
+ VGBM $0xaaaa, V31 // 0xff00ff00ff00ff00...
+ VONE V16
+ VREPH $0, V0, V1
+ VREPB $2, V0, V8
+index3loop:
+ VL (R7), V2 // load 16-bytes into V2
+ VLL R0, 16(R7), V3 // load 2-bytes into V3
+ VSLDB $1, V2, V3, V4 // V4=(V2:V3)<<1
+ VSLDB $2, V2, V3, V9 // V9=(V2:V3)<<2
+ VCEQH V1, V2, V5 // compare 2-byte even indices
+ VCEQH V1, V4, V6 // compare 2-byte odd indices
+ VCEQB V8, V9, V10 // compare last bytes
+ VSEL V5, V6, V31, V7 // merge even and odd indices
+ VN V7, V10, V7 // AND indices with last byte
+ VFEEBS V16, V7, V17 // find leftmost index, set condition to 1 if found
+ BLT foundV17
+ MOVD $16(R7), R7 // R7+=16
+ ADD $15, R7, R9
+ CMPBLE R9, R2, index3loop // continue if (R7+15) <= R2 (last index to search)
+ CMPBLE R7, R2, index2to16
+ BR notfound
+
+index4plus:
+ CMPBNE R4, $3, index5plus
+ ADD $15, R7, R9
+ CMPBGE R9, R2, index2to16
+ MOVD $2, R0
+ VGBM $0x8888, V29 // 0xff000000ff000000...
+ VGBM $0x2222, V30 // 0x0000ff000000ff00...
+ VGBM $0xcccc, V31 // 0xffff0000ffff0000...
+ VONE V16
+ VREPF $0, V0, V1
+index4loop:
+ VL (R7), V2 // load 16-bytes into V2
+ VLL R0, 16(R7), V3 // load 3-bytes into V3
+ VSLDB $1, V2, V3, V4 // V4=(V2:V3)<<1
+ VSLDB $2, V2, V3, V9 // V9=(V2:V3)<<2
+ VSLDB $3, V2, V3, V10 // V10=(V2:V3)<<3
+ VCEQF V1, V2, V5 // compare index 0, 4, ...
+ VCEQF V1, V4, V6 // compare index 1, 5, ...
+ VCEQF V1, V9, V11 // compare index 2, 6, ...
+ VCEQF V1, V10, V12 // compare index 3, 7, ...
+ VSEL V5, V6, V29, V13 // merge index 0, 1, 4, 5, ...
+ VSEL V11, V12, V30, V14 // merge index 2, 3, 6, 7, ...
+ VSEL V13, V14, V31, V7 // final merge
+ VFEEBS V16, V7, V17 // find leftmost index, set condition to 1 if found
+ BLT foundV17
+ MOVD $16(R7), R7 // R7+=16
+ ADD $15, R7, R9
+ CMPBLE R9, R2, index4loop // continue if (R7+15) <= R2 (last index to search)
+ CMPBLE R7, R2, index2to16
+ BR notfound
+
+index5plus:
+ CMPBGT R4, $15, index17plus
+index2to16:
+ CMPBGT R7, R2, notfound
+ MOVD $1(R7), R8
+ CMPBGT R8, R2, index2to16tail
+index2to16loop:
+ // unrolled 2x
+ VLL R4, (R7), V1
+ VLL R4, 1(R7), V2
+ VCEQGS V0, V1, V3
+ BEQ found
+ MOVD $1(R7), R7
+ VCEQGS V0, V2, V4
+ BEQ found
+ MOVD $1(R7), R7
+ CMPBLT R7, R2, index2to16loop
+ CMPBGT R7, R2, notfound
+index2to16tail:
+ VLL R4, (R7), V1
+ VCEQGS V0, V1, V2
+ BEQ found
+ BR notfound
+
+index17plus:
+ CMPBGT R4, $31, index33plus
+ SUB $16, R4, R0
+ VLL R0, 16(R3), V1
+ VONE V7
+index17to32loop:
+ VL (R7), V2
+ VLL R0, 16(R7), V3
+ VCEQG V0, V2, V4
+ VCEQG V1, V3, V5
+ VN V4, V5, V6
+ VCEQGS V6, V7, V8
+ BEQ found
+ MOVD $1(R7), R7
+ CMPBLE R7, R2, index17to32loop
+ BR notfound
+
+index33plus:
+ CMPBGT R4, $47, index49plus
+ SUB $32, R4, R0
+ VL 16(R3), V1
+ VLL R0, 32(R3), V2
+ VONE V11
+index33to48loop:
+ VL (R7), V3
+ VL 16(R7), V4
+ VLL R0, 32(R7), V5
+ VCEQG V0, V3, V6
+ VCEQG V1, V4, V7
+ VCEQG V2, V5, V8
+ VN V6, V7, V9
+ VN V8, V9, V10
+ VCEQGS V10, V11, V12
+ BEQ found
+ MOVD $1(R7), R7
+ CMPBLE R7, R2, index33to48loop
+ BR notfound
+
+index49plus:
+ CMPBGT R4, $63, index65plus
+ SUB $48, R4, R0
+ VL 16(R3), V1
+ VL 32(R3), V2
+ VLL R0, 48(R3), V3
+ VONE V15
+index49to64loop:
+ VL (R7), V4
+ VL 16(R7), V5
+ VL 32(R7), V6
+ VLL R0, 48(R7), V7
+ VCEQG V0, V4, V8
+ VCEQG V1, V5, V9
+ VCEQG V2, V6, V10
+ VCEQG V3, V7, V11
+ VN V8, V9, V12
+ VN V10, V11, V13
+ VN V12, V13, V14
+ VCEQGS V14, V15, V16
+ BEQ found
+ MOVD $1(R7), R7
+ CMPBLE R7, R2, index49to64loop
+notfound:
+ MOVD $-1, (R5)
+ RET
+
+index65plus:
+ // not implemented
+ MOVD $0, (R0)
+ RET
+
+foundV17: // index is in doubleword V17[0]
+ VLGVG $0, V17, R8
+ ADD R8, R7
+found:
+ SUB R1, R7
+ MOVD R7, (R5)
+ RET
diff --git a/src/internal/bytealg/indexbyte_386.s b/src/internal/bytealg/indexbyte_386.s
new file mode 100644
index 0000000..8a03054
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_386.s
@@ -0,0 +1,34 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·IndexByte(SB),NOSPLIT,$0-20
+ MOVL b_base+0(FP), SI
+ MOVL b_len+4(FP), CX
+ MOVB c+12(FP), AL
+ MOVL SI, DI
+ CLD; REPN; SCASB
+ JZ 3(PC)
+ MOVL $-1, ret+16(FP)
+ RET
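+ // Found: REPNE SCASB leaves DI one byte past the match, so the index
+ // is DI-SI-1.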
+ SUBL SI, DI
+ SUBL $1, DI
+ MOVL DI, ret+16(FP)
+ RET
+
+TEXT ·IndexByteString(SB),NOSPLIT,$0-16
+ MOVL s_base+0(FP), SI
+ MOVL s_len+4(FP), CX
+ MOVB c+8(FP), AL
+ MOVL SI, DI
+ CLD; REPN; SCASB
+ JZ 3(PC)
+ MOVL $-1, ret+12(FP)
+ RET
+ SUBL SI, DI
+ SUBL $1, DI
+ MOVL DI, ret+12(FP)
+ RET
diff --git a/src/internal/bytealg/indexbyte_amd64.s b/src/internal/bytealg/indexbyte_amd64.s
new file mode 100644
index 0000000..1ca70e3
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_amd64.s
@@ -0,0 +1,149 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·IndexByte(SB), NOSPLIT, $0-40
+ MOVQ b_base+0(FP), SI
+ MOVQ b_len+8(FP), BX
+ MOVB c+24(FP), AL
+ LEAQ ret+32(FP), R8
+ JMP indexbytebody<>(SB)
+
+TEXT ·IndexByteString(SB), NOSPLIT, $0-32
+ MOVQ s_base+0(FP), SI
+ MOVQ s_len+8(FP), BX
+ MOVB c+16(FP), AL
+ LEAQ ret+24(FP), R8
+ JMP indexbytebody<>(SB)
+
+// input:
+// SI: data
+// BX: data len
+// AL: byte sought
+// R8: address to put result
+TEXT indexbytebody<>(SB), NOSPLIT, $0
+ // Shuffle X0 around so that each byte contains
+ // the character we're looking for.
+ MOVD AX, X0
+ PUNPCKLBW X0, X0
+ PUNPCKLBW X0, X0
+ PSHUFL $0, X0, X0
+
+ CMPQ BX, $16
+ JLT small
+
+ MOVQ SI, DI
+
+ CMPQ BX, $32
+ JA avx2
+sse:
+ LEAQ -16(SI)(BX*1), AX // AX = address of last 16 bytes
+ JMP sseloopentry
+
+sseloop:
+ // Move the next 16-byte chunk of the data into X1.
+ MOVOU (DI), X1
+ // Compare bytes in X0 to X1.
+ PCMPEQB X0, X1
+ // Take the top bit of each byte in X1 and put the result in DX.
+ PMOVMSKB X1, DX
+ // Find first set bit, if any.
+ BSFL DX, DX
+ JNZ ssesuccess
+ // Advance to next block.
+ ADDQ $16, DI
+sseloopentry:
+ CMPQ DI, AX
+ JB sseloop
+
+ // Search the last 16-byte chunk. This chunk may overlap with the
+ // chunks we've already searched, but that's ok.
+ MOVQ AX, DI
+ MOVOU (AX), X1
+ PCMPEQB X0, X1
+ PMOVMSKB X1, DX
+ BSFL DX, DX
+ JNZ ssesuccess
+
+failure:
+ MOVQ $-1, (R8)
+ RET
+
+// We've found a chunk containing the byte.
+// The chunk was loaded from DI.
+// The index of the matching byte in the chunk is DX.
+// The start of the data is SI.
+ssesuccess:
+ SUBQ SI, DI // Compute offset of chunk within data.
+ ADDQ DX, DI // Add offset of byte within chunk.
+ MOVQ DI, (R8)
+ RET
+
+// Handle lengths < 16.
+small:
+ TESTQ BX, BX
+ JEQ failure
+
+ // Check if we'll load across a page boundary.
+ LEAQ 16(SI), AX
+ TESTW $0xff0, AX
+ JEQ endofpage
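+ // (If the 16-byte load from SI would cross a 4KB page boundary, the
+ // bytes past the data might live on an unmapped page, so take the
+ // endofpage path, which loads the 16 bytes ending at SI+BX instead and
+ // shifts the match mask to compensate.)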
+
+ MOVOU (SI), X1 // Load data
+ PCMPEQB X0, X1 // Compare target byte with each byte in data.
+ PMOVMSKB X1, DX // Move result bits to integer register.
+ BSFL DX, DX // Find first set bit.
+ JZ failure // No set bit, failure.
+ CMPL DX, BX
+ JAE failure // Match is past end of data.
+ MOVQ DX, (R8)
+ RET
+
+endofpage:
+ MOVOU -16(SI)(BX*1), X1 // Load data into the high end of X1.
+ PCMPEQB X0, X1 // Compare target byte with each byte in data.
+ PMOVMSKB X1, DX // Move result bits to integer register.
+ MOVL BX, CX
+ SHLL CX, DX
+ SHRL $16, DX // Shift desired bits down to bottom of register.
+ BSFL DX, DX // Find first set bit.
+ JZ failure // No set bit, failure.
+ MOVQ DX, (R8)
+ RET
+
+avx2:
+#ifndef hasAVX2
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
+ JNE sse
+#endif
+ MOVD AX, X0
+ LEAQ -32(SI)(BX*1), R11
+ VPBROADCASTB X0, Y1
+avx2_loop:
+ VMOVDQU (DI), Y2
+ VPCMPEQB Y1, Y2, Y3
+ VPTEST Y3, Y3
+ JNZ avx2success
+ ADDQ $32, DI
+ CMPQ DI, R11
+ JLT avx2_loop
+ MOVQ R11, DI
+ VMOVDQU (DI), Y2
+ VPCMPEQB Y1, Y2, Y3
+ VPTEST Y3, Y3
+ JNZ avx2success
+ VZEROUPPER
+ MOVQ $-1, (R8)
+ RET
+
+avx2success:
+ VPMOVMSKB Y3, DX
+ BSFL DX, DX
+ SUBQ SI, DI
+ ADDQ DI, DX
+ MOVQ DX, (R8)
+ VZEROUPPER
+ RET
diff --git a/src/internal/bytealg/indexbyte_arm.s b/src/internal/bytealg/indexbyte_arm.s
new file mode 100644
index 0000000..faf9797
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_arm.s
@@ -0,0 +1,46 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·IndexByte(SB),NOSPLIT,$0-20
+ MOVW b_base+0(FP), R0
+ MOVW b_len+4(FP), R1
+ MOVBU c+12(FP), R2 // byte to find
+ MOVW $ret+16(FP), R5
+ B indexbytebody<>(SB)
+
+TEXT ·IndexByteString(SB),NOSPLIT,$0-16
+ MOVW s_base+0(FP), R0
+ MOVW s_len+4(FP), R1
+ MOVBU c+8(FP), R2 // byte to find
+ MOVW $ret+12(FP), R5
+ B indexbytebody<>(SB)
+
+// input:
+// R0: data
+// R1: data length
+// R2: byte to find
+// R5: address to put result
+TEXT indexbytebody<>(SB),NOSPLIT,$0-0
+ MOVW R0, R4 // store base for later
+ ADD R0, R1 // end
+
+loop:
+ CMP R0, R1
+ B.EQ notfound
+ MOVBU.P 1(R0), R3
+ CMP R2, R3
+ B.NE loop
+
+ SUB $1, R0 // R0 will be one beyond the position we want
+ SUB R4, R0 // remove base
+ MOVW R0, (R5)
+ RET
+
+notfound:
+ MOVW $-1, R0
+ MOVW R0, (R5)
+ RET
diff --git a/src/internal/bytealg/indexbyte_arm64.s b/src/internal/bytealg/indexbyte_arm64.s
new file mode 100644
index 0000000..40843fb
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_arm64.s
@@ -0,0 +1,126 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·IndexByte(SB),NOSPLIT,$0-40
+ MOVD b_base+0(FP), R0
+ MOVD b_len+8(FP), R2
+ MOVBU c+24(FP), R1
+ MOVD $ret+32(FP), R8
+ B indexbytebody<>(SB)
+
+TEXT ·IndexByteString(SB),NOSPLIT,$0-32
+ MOVD s_base+0(FP), R0
+ MOVD s_len+8(FP), R2
+ MOVBU c+16(FP), R1
+ MOVD $ret+24(FP), R8
+ B indexbytebody<>(SB)
+
+// input:
+// R0: data
+// R1: byte to search
+// R2: data len
+// R8: address to put result
+TEXT indexbytebody<>(SB),NOSPLIT,$0
+ // Core algorithm:
+ // For each 32-byte chunk we calculate a 64-bit syndrome value,
+ // with two bits per byte. For each tuple, bit 0 is set if the
+ // relevant byte matched the requested character and bit 1 is
+ // not used (this is faster than using a 32-bit syndrome). Since
+ // the bits in the syndrome reflect exactly the order in which
+ // things occur in the original string, counting trailing zeros
+ // allows us to identify exactly which byte has matched.
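+ // For example, if the first match is at byte 5 of the chunk, bit 10 of
+ // the syndrome is the lowest set bit; RBIT+CLZ below yields 10, and
+ // R6>>1 recovers the byte offset 5.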
+
+ CBZ R2, fail
+ MOVD R0, R11
+ // Magic constant 0x40100401 allows us to identify
+ // which lane matches the requested byte.
+ // 0x40100401 = ((1<<0) + (4<<8) + (16<<16) + (64<<24))
+ // Different bytes have different bit masks (i.e: 1, 4, 16, 64)
+ MOVD $0x40100401, R5
+ VMOV R1, V0.B16
+ // Work with aligned 32-byte chunks
+ BIC $0x1f, R0, R3
+ VMOV R5, V5.S4
+ ANDS $0x1f, R0, R9
+ AND $0x1f, R2, R10
+ BEQ loop
+
+ // Input string is not 32-byte aligned. We calculate the
+ // syndrome value for the aligned 32 bytes block containing
+ // the first bytes and mask off the irrelevant part.
+ VLD1.P (R3), [V1.B16, V2.B16]
+ SUB $0x20, R9, R4
+ ADDS R4, R2, R2
+ VCMEQ V0.B16, V1.B16, V3.B16
+ VCMEQ V0.B16, V2.B16, V4.B16
+ VAND V5.B16, V3.B16, V3.B16
+ VAND V5.B16, V4.B16, V4.B16
+ VADDP V4.B16, V3.B16, V6.B16 // 256->128
+ VADDP V6.B16, V6.B16, V6.B16 // 128->64
+ VMOV V6.D[0], R6
+ // Clear the irrelevant lower bits
+ LSL $1, R9, R4
+ LSR R4, R6, R6
+ LSL R4, R6, R6
+ // The first block can also be the last
+ BLS masklast
+ // Have we found something already?
+ CBNZ R6, tail
+
+loop:
+ VLD1.P (R3), [V1.B16, V2.B16]
+ SUBS $0x20, R2, R2
+ VCMEQ V0.B16, V1.B16, V3.B16
+ VCMEQ V0.B16, V2.B16, V4.B16
+ // If we're out of data we finish regardless of the result
+ BLS end
+ // Use a fast check for the termination condition
+ VORR V4.B16, V3.B16, V6.B16
+ VADDP V6.D2, V6.D2, V6.D2
+ VMOV V6.D[0], R6
+ // We're not out of data, loop if we haven't found the character
+ CBZ R6, loop
+
+end:
+ // Termination condition found, let's calculate the syndrome value
+ VAND V5.B16, V3.B16, V3.B16
+ VAND V5.B16, V4.B16, V4.B16
+ VADDP V4.B16, V3.B16, V6.B16
+ VADDP V6.B16, V6.B16, V6.B16
+ VMOV V6.D[0], R6
+ // Only do the clear for the last possible block with less than 32 bytes
+ // Condition flags come from SUBS in the loop
+ BHS tail
+
+masklast:
+ // Clear the irrelevant upper bits
+ ADD R9, R10, R4
+ AND $0x1f, R4, R4
+ SUB $0x20, R4, R4
+ NEG R4<<1, R4
+ LSL R4, R6, R6
+ LSR R4, R6, R6
+
+tail:
+ // Check that we have found a character
+ CBZ R6, fail
+ // Count the trailing zeros using bit reversing
+ RBIT R6, R6
+ // Compensate the last post-increment
+ SUB $0x20, R3, R3
+ // And count the leading zeros
+ CLZ R6, R6
+ // R6 is twice the offset into the fragment
+ ADD R6>>1, R3, R0
+ // Compute the offset result
+ SUB R11, R0, R0
+ MOVD R0, (R8)
+ RET
+
+fail:
+ MOVD $-1, R0
+ MOVD R0, (R8)
+ RET
diff --git a/src/internal/bytealg/indexbyte_generic.go b/src/internal/bytealg/indexbyte_generic.go
new file mode 100644
index 0000000..b89d34f
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_generic.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !386 && !amd64 && !s390x && !arm && !arm64 && !loong64 && !ppc64 && !ppc64le && !mips && !mipsle && !mips64 && !mips64le && !riscv64 && !wasm
+
+package bytealg
+
+func IndexByte(b []byte, c byte) int {
+ for i, x := range b {
+ if x == c {
+ return i
+ }
+ }
+ return -1
+}
+
+func IndexByteString(s string, c byte) int {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return i
+ }
+ }
+ return -1
+}
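+
+// For example, IndexByte([]byte("hello"), 'l') returns 2 and
+// IndexByteString("hello", 'x') returns -1.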
diff --git a/src/internal/bytealg/indexbyte_loong64.s b/src/internal/bytealg/indexbyte_loong64.s
new file mode 100644
index 0000000..6049705
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_loong64.s
@@ -0,0 +1,54 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·IndexByte(SB),NOSPLIT,$0-40
+ MOVV b_base+0(FP), R4
+ MOVV b_len+8(FP), R5
+ MOVBU c+24(FP), R6 // byte to find
+ MOVV R4, R7 // store base for later
+ ADDV R4, R5 // end
+ ADDV $-1, R4
+
+ PCALIGN $16
+loop:
+ ADDV $1, R4
+ BEQ R4, R5, notfound
+ MOVBU (R4), R8
+ BNE R6, R8, loop
+
+ SUBV R7, R4 // remove base
+ MOVV R4, ret+32(FP)
+ RET
+
+notfound:
+ MOVV $-1, R4
+ MOVV R4, ret+32(FP)
+ RET
+
+TEXT ·IndexByteString(SB),NOSPLIT,$0-32
+ MOVV s_base+0(FP), R4
+ MOVV s_len+8(FP), R5
+ MOVBU c+16(FP), R6 // byte to find
+ MOVV R4, R7 // store base for later
+ ADDV R4, R5 // end
+ ADDV $-1, R4
+
+ PCALIGN $16
+loop:
+ ADDV $1, R4
+ BEQ R4, R5, notfound
+ MOVBU (R4), R8
+ BNE R6, R8, loop
+
+ SUBV R7, R4 // remove base
+ MOVV R4, ret+24(FP)
+ RET
+
+notfound:
+ MOVV $-1, R4
+ MOVV R4, ret+24(FP)
+ RET
diff --git a/src/internal/bytealg/indexbyte_mips64x.s b/src/internal/bytealg/indexbyte_mips64x.s
new file mode 100644
index 0000000..5689f84
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_mips64x.s
@@ -0,0 +1,54 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips64 || mips64le
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·IndexByte(SB),NOSPLIT,$0-40
+ MOVV b_base+0(FP), R1
+ MOVV b_len+8(FP), R2
+ MOVBU c+24(FP), R3 // byte to find
+ MOVV R1, R4 // store base for later
+ ADDV R1, R2 // end
+ ADDV $-1, R1
+
+loop:
+ ADDV $1, R1
+ BEQ R1, R2, notfound
+ MOVBU (R1), R5
+ BNE R3, R5, loop
+
+ SUBV R4, R1 // remove base
+ MOVV R1, ret+32(FP)
+ RET
+
+notfound:
+ MOVV $-1, R1
+ MOVV R1, ret+32(FP)
+ RET
+
+TEXT ·IndexByteString(SB),NOSPLIT,$0-32
+ MOVV s_base+0(FP), R1
+ MOVV s_len+8(FP), R2
+ MOVBU c+16(FP), R3 // byte to find
+ MOVV R1, R4 // store base for later
+ ADDV R1, R2 // end
+ ADDV $-1, R1
+
+loop:
+ ADDV $1, R1
+ BEQ R1, R2, notfound
+ MOVBU (R1), R5
+ BNE R3, R5, loop
+
+ SUBV R4, R1 // remove base
+ MOVV R1, ret+24(FP)
+ RET
+
+notfound:
+ MOVV $-1, R1
+ MOVV R1, ret+24(FP)
+ RET
diff --git a/src/internal/bytealg/indexbyte_mipsx.s b/src/internal/bytealg/indexbyte_mipsx.s
new file mode 100644
index 0000000..1c2b104
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_mipsx.s
@@ -0,0 +1,52 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips || mipsle
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·IndexByte(SB),NOSPLIT,$0-20
+ MOVW b_base+0(FP), R1
+ MOVW b_len+4(FP), R2
+ MOVBU c+12(FP), R3 // byte to find
+ ADDU $1, R1, R4 // store base+1 for later
+ ADDU R1, R2 // end
+
+loop:
+ BEQ R1, R2, notfound
+ MOVBU (R1), R5
+ ADDU $1, R1
+ BNE R3, R5, loop
+
+ SUBU R4, R1 // R1 will be one beyond the position we want so remove (base+1)
+ MOVW R1, ret+16(FP)
+ RET
+
+notfound:
+ MOVW $-1, R1
+ MOVW R1, ret+16(FP)
+ RET
+
+TEXT ·IndexByteString(SB),NOSPLIT,$0-16
+ MOVW s_base+0(FP), R1
+ MOVW s_len+4(FP), R2
+ MOVBU c+8(FP), R3 // byte to find
+ ADDU $1, R1, R4 // store base+1 for later
+ ADDU R1, R2 // end
+
+loop:
+ BEQ R1, R2, notfound
+ MOVBU (R1), R5
+ ADDU $1, R1
+ BNE R3, R5, loop
+
+ SUBU R4, R1 // remove (base+1)
+ MOVW R1, ret+12(FP)
+ RET
+
+notfound:
+ MOVW $-1, R1
+ MOVW R1, ret+12(FP)
+ RET
diff --git a/src/internal/bytealg/indexbyte_native.go b/src/internal/bytealg/indexbyte_native.go
new file mode 100644
index 0000000..c5bb2df
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_native.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64 || s390x || arm || arm64 || loong64 || ppc64 || ppc64le || mips || mipsle || mips64 || mips64le || riscv64 || wasm
+
+package bytealg
+
+//go:noescape
+func IndexByte(b []byte, c byte) int
+
+//go:noescape
+func IndexByteString(s string, c byte) int
diff --git a/src/internal/bytealg/indexbyte_ppc64x.s b/src/internal/bytealg/indexbyte_ppc64x.s
new file mode 100644
index 0000000..b6714f4
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_ppc64x.s
@@ -0,0 +1,314 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·IndexByte<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
+ // R3 = byte array pointer
+ // R4 = length
+ MOVD R6, R5 // R5 = byte
+ BR indexbytebody<>(SB)
+
+TEXT ·IndexByteString<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-32
+ // R3 = string
+ // R4 = length
+ // R5 = byte
+ BR indexbytebody<>(SB)
+
+#ifndef GOPPC64_power9
+#ifdef GOARCH_ppc64le
+DATA indexbytevbperm<>+0(SB)/8, $0x3830282018100800
+DATA indexbytevbperm<>+8(SB)/8, $0x7870686058504840
+#else
+DATA indexbytevbperm<>+0(SB)/8, $0x0008101820283038
+DATA indexbytevbperm<>+8(SB)/8, $0x4048505860687078
+#endif
+GLOBL indexbytevbperm<>+0(SB), RODATA, $16
+#endif
+
+// Some operations are endian specific; choose the correct opcode based on GOARCH.
+// Note, _VCZBEBB is only available on power9 and newer.
+#ifdef GOARCH_ppc64le
+#define _LDBEX MOVDBR
+#define _LWBEX MOVWBR
+#define _LHBEX MOVHBR
+#define _VCZBEBB VCTZLSBB
+#else
+#define _LDBEX MOVD
+#define _LWBEX MOVW
+#define _LHBEX MOVH
+#define _VCZBEBB VCLZLSBB
+#endif
+
+// R3 = addr of string
+// R4 = len of string
+// R5 = byte to find
+// On exit:
+// R3 = return value
+TEXT indexbytebody<>(SB),NOSPLIT|NOFRAME,$0-0
+ CMPU R4,$32
+
+#ifndef GOPPC64_power9
+ // Load VBPERMQ constant to reduce compare into an ordered bit mask.
+ MOVD $indexbytevbperm<>+00(SB),R16
+ LXVD2X (R16),V0 // Set up swap string
+#endif
+
+ MTVRD R5,V1
+ VSPLTB $7,V1,V1 // Replicate byte across V1
+
+ BLT cmp16 // Jump to the small string case if it's <32 bytes.
+
+ CMP R4,$64,CR1
+ MOVD $16,R11
+ MOVD R3,R8
+ BLT CR1,cmp32 // Special case for length 32 - 63
+ MOVD $32,R12
+ MOVD $48,R6
+
+ RLDICR $0,R4,$63-6,R9 // R9 = len &^ 63
+ ADD R3,R9,R9 // R9 = &s[len &^ 63]
+ ANDCC $63,R4 // (len &= 63) cmp 0.
+
+ PCALIGN $16
+loop64:
+ LXVD2X (R0)(R8),V2 // Scan 64 bytes at a time, starting at &s[0]
+ VCMPEQUBCC V2,V1,V6
+ BNE CR6,foundat0 // Match found at R8, jump out
+
+ LXVD2X (R11)(R8),V2
+ VCMPEQUBCC V2,V1,V6
+ BNE CR6,foundat1 // Match found at R8+16 bytes, jump out
+
+ LXVD2X (R12)(R8),V2
+ VCMPEQUBCC V2,V1,V6
+ BNE CR6,foundat2 // Match found at R8+32 bytes, jump out
+
+ LXVD2X (R6)(R8),V2
+ VCMPEQUBCC V2,V1,V6
+ BNE CR6,foundat3 // Match found at R8+48 bytes, jump out
+
+ ADD $64,R8
+ CMPU R8,R9,CR1
+ BNE CR1,loop64 // R8 != &s[len &^ 63]?
+
+ PCALIGN $32
+ BEQ notfound // Is tail length 0? CR0 is set before entering loop64.
+
+ CMP R4,$32 // Tail length >= 32, use cmp32 path.
+ CMP R4,$16,CR1
+ BGE cmp32
+
+ ADD R8,R4,R9
+ ADD $-16,R9
+ BLE CR1,cmp64_tail_gt0
+
+cmp64_tail_gt16: // Tail length 17 - 32
+ LXVD2X (R0)(R8),V2
+ VCMPEQUBCC V2,V1,V6
+ BNE CR6,foundat0
+
+cmp64_tail_gt0: // Tail length 1 - 16
+ MOVD R9,R8
+ LXVD2X (R0)(R9),V2
+ VCMPEQUBCC V2,V1,V6
+ BNE CR6,foundat0
+
+ BR notfound
+
+cmp32: // Length 32 - 63
+
+ // Bytes 0 - 15
+ LXVD2X (R0)(R8),V2
+ VCMPEQUBCC V2,V1,V6
+ BNE CR6,foundat0
+
+ // Bytes 16 - 31
+ LXVD2X (R8)(R11),V2
+ VCMPEQUBCC V2,V1,V6
+ BNE CR6,foundat1 // Match found at R8+16 bytes, jump out
+
+ BEQ notfound // Is length <= 32? (CR0 holds this comparison on entry to cmp32)
+ CMP R4,$48
+
+ ADD R4,R8,R9 // Compute &s[len(s)-16]
+ ADD $32,R8,R8
+ ADD $-16,R9,R9
+ ISEL CR0GT,R8,R9,R8 // R8 = len(s) <= 48 ? R9 : R8
+
+ // Bytes 33 - 47
+ LXVD2X (R0)(R8),V2
+ VCMPEQUBCC V2,V1,V6
+ BNE CR6,foundat0 // match found at R8+32 bytes, jump out
+
+ BLE notfound
+
+ // Bytes 48 - 63
+ MOVD R9,R8 // R9 holds the final check.
+ LXVD2X (R0)(R9),V2
+ VCMPEQUBCC V2,V1,V6
+ BNE CR6,foundat0 // Match found at R8+48 bytes, jump out
+
+ BR notfound
+
+// If ISA 3.0 instructions are unavailable, we need to account for the extra 16 added by CNTLZW.
+#ifndef GOPPC64_power9
+#define ADJUST_FOR_CNTLZW -16
+#else
+#define ADJUST_FOR_CNTLZW 0
+#endif
+
+// Now, find the index of the 16B vector the match was discovered in. If CNTLZW is used
+// to determine the offset into the 16B vector, it will overcount by 16. Account for it here.
+foundat3:
+ SUB R3,R8,R3
+ ADD $48+ADJUST_FOR_CNTLZW,R3
+ BR vfound
+foundat2:
+ SUB R3,R8,R3
+ ADD $32+ADJUST_FOR_CNTLZW,R3
+ BR vfound
+foundat1:
+ SUB R3,R8,R3
+ ADD $16+ADJUST_FOR_CNTLZW,R3
+ BR vfound
+foundat0:
+ SUB R3,R8,R3
+ ADD $0+ADJUST_FOR_CNTLZW,R3
+vfound:
+ // Map equal values into a 16 bit value with earlier matches setting higher bits.
+#ifndef GOPPC64_power9
+ VBPERMQ V6,V0,V6
+ MFVRD V6,R4
+ CNTLZW R4,R4
+#else
+#ifdef GOARCH_ppc64le
+ // Put the value back into LE ordering by swapping doublewords.
+ XXPERMDI V6,V6,$2,V6
+#endif
+ _VCZBEBB V6,R4
+#endif
+ ADD R3,R4,R3
+ RET
+
+cmp16: // Length 16 - 31
+ CMPU R4,$16
+ ADD R4,R3,R9
+ BLT cmp8
+
+ ADD $-16,R9,R9 // &s[len(s)-16]
+
+ // Bytes 0 - 15
+ LXVD2X (R0)(R3),V2
+ VCMPEQUBCC V2,V1,V6
+ MOVD R3,R8
+	BNE	CR6,foundat0	// Match found at R8, jump out
+
+ BEQ notfound
+
+ // Bytes 16 - 30
+ MOVD R9,R8 // R9 holds the final check.
+ LXVD2X (R0)(R9),V2
+ VCMPEQUBCC V2,V1,V6
+	BNE	CR6,foundat0	// Match found in final 16 bytes, jump out
+
+ BR notfound
+
+
+cmp8: // Length 8 - 15
+#ifdef GOPPC64_power10
+ // Load all the bytes into a single VSR in BE order.
+ SLD $56,R4,R5
+ LXVLL R3,R5,V2
+ // Compare and count the number which don't match.
+ VCMPEQUB V2,V1,V6
+ VCLZLSBB V6,R3
+	// If the count is greater than or equal to the number of bytes, no match was found.
+ CMPU R3,R4
+ MOVD $-1,R5
+ // Otherwise, the count is the index of the first match.
+ ISEL CR0LT,R3,R5,R3
+ RET
+#else
+ RLDIMI $8,R5,$48,R5 // Replicating the byte across the register.
+ RLDIMI $16,R5,$32,R5
+ RLDIMI $32,R5,$0,R5
+ CMPU R4,$8
+ BLT cmp4
+ MOVD $-8,R11
+ ADD $-8,R4,R4
+
+ _LDBEX (R0)(R3),R10
+ _LDBEX (R11)(R9),R11
+ CMPB R10,R5,R10
+ CMPB R11,R5,R11
+ CMPU R10,$0
+ CMPU R11,$0,CR1
+ CNTLZD R10,R10
+ CNTLZD R11,R11
+ SRD $3,R10,R3
+ SRD $3,R11,R11
+ BNE found
+
+ ADD R4,R11,R4
+ MOVD $-1,R3
+ ISEL CR1EQ,R3,R4,R3
+ RET
+
+cmp4: // Length 4 - 7
+ CMPU R4,$4
+ BLT cmp2
+ MOVD $-4,R11
+ ADD $-4,R4,R4
+
+ _LWBEX (R0)(R3),R10
+ _LWBEX (R11)(R9),R11
+ CMPB R10,R5,R10
+ CMPB R11,R5,R11
+ CNTLZW R10,R10
+ CNTLZW R11,R11
+ CMPU R10,$32
+ CMPU R11,$32,CR1
+ SRD $3,R10,R3
+ SRD $3,R11,R11
+ BNE found
+
+ ADD R4,R11,R4
+ MOVD $-1,R3
+ ISEL CR1EQ,R3,R4,R3
+ RET
+
+cmp2: // Length 2 - 3
+ CMPU R4,$2
+ BLT cmp1
+
+ _LHBEX (R0)(R3),R10
+ CMPB R10,R5,R10
+ SLDCC $48,R10,R10
+ CNTLZD R10,R10
+ SRD $3,R10,R3
+ BNE found
+
+cmp1: // Length 1
+ MOVD $-1,R3
+ ANDCC $1,R4,R31
+ BEQ found
+
+ MOVBZ -1(R9),R10
+ CMPB R10,R5,R10
+ ANDCC $1,R10
+ ADD $-1,R4
+ ISEL CR0EQ,R3,R4,R3
+
+found:
+ RET
+#endif
+
+notfound:
+ MOVD $-1,R3
+ RET
+
diff --git a/src/internal/bytealg/indexbyte_riscv64.s b/src/internal/bytealg/indexbyte_riscv64.s
new file mode 100644
index 0000000..8be78ed
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_riscv64.s
@@ -0,0 +1,51 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·IndexByte<ABIInternal>(SB),NOSPLIT,$0-40
+ // X10 = b_base
+ // X11 = b_len
+ // X12 = b_cap (unused)
+ // X13 = byte to find
+ AND $0xff, X13
+ MOV X10, X12 // store base for later
+ ADD X10, X11 // end
+ ADD $-1, X10
+
+loop:
+ ADD $1, X10
+ BEQ X10, X11, notfound
+ MOVBU (X10), X14
+ BNE X13, X14, loop
+
+ SUB X12, X10 // remove base
+ RET
+
+notfound:
+ MOV $-1, X10
+ RET
+
+TEXT ·IndexByteString<ABIInternal>(SB),NOSPLIT,$0-32
+ // X10 = b_base
+ // X11 = b_len
+ // X12 = byte to find
+ AND $0xff, X12
+ MOV X10, X13 // store base for later
+ ADD X10, X11 // end
+ ADD $-1, X10
+
+loop:
+ ADD $1, X10
+ BEQ X10, X11, notfound
+ MOVBU (X10), X14
+ BNE X12, X14, loop
+
+ SUB X13, X10 // remove base
+ RET
+
+notfound:
+ MOV $-1, X10
+ RET
diff --git a/src/internal/bytealg/indexbyte_s390x.s b/src/internal/bytealg/indexbyte_s390x.s
new file mode 100644
index 0000000..cf88d92
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_s390x.s
@@ -0,0 +1,108 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·IndexByte(SB),NOSPLIT|NOFRAME,$0-40
+	MOVD	b_base+0(FP), R3 // b_base => R3
+	MOVD	b_len+8(FP), R4  // b_len => R4
+	MOVBZ	c+24(FP), R5     // c => R5
+	MOVD	$ret+32(FP), R2  // &ret => R2
+ BR indexbytebody<>(SB)
+
+TEXT ·IndexByteString(SB),NOSPLIT|NOFRAME,$0-32
+	MOVD	s_base+0(FP), R3 // s_base => R3
+	MOVD	s_len+8(FP), R4  // s_len => R4
+	MOVBZ	c+16(FP), R5     // c => R5
+	MOVD	$ret+24(FP), R2  // &ret => R2
+ BR indexbytebody<>(SB)
+
+// input:
+// R3: s
+// R4: s_len
+// R5: c -- byte sought
+// R2: &ret -- address to put index into
+TEXT indexbytebody<>(SB),NOSPLIT|NOFRAME,$0
+ CMPBEQ R4, $0, notfound
+ MOVD R3, R6 // store base for later
+ ADD R3, R4, R8 // the address after the end of the string
+	// If the length is small, use a simple loop; otherwise, use the vector or SRST search.
+ CMPBGE R4, $16, large
+
+residual:
+ CMPBEQ R3, R8, notfound
+ MOVBZ 0(R3), R7
+ LA 1(R3), R3
+ CMPBNE R7, R5, residual
+
+found:
+ SUB R6, R3
+ SUB $1, R3
+ MOVD R3, 0(R2)
+ RET
+
+notfound:
+ MOVD $-1, 0(R2)
+ RET
+
+large:
+ MOVBZ internal∕cpu·S390X+const_offsetS390xHasVX(SB), R1
+ CMPBNE R1, $0, vectorimpl
+
+srstimpl: // no vector facility
+ MOVBZ R5, R0 // c needs to be in R0, leave until last minute as currently R0 is expected to be 0
+srstloop:
+ WORD $0xB25E0083 // srst %r8, %r3 (search the range [R3, R8))
+ BVS srstloop // interrupted - continue
+ BGT notfoundr0
+foundr0:
+ XOR R0, R0 // reset R0
+ SUB R6, R8 // remove base
+ MOVD R8, 0(R2)
+ RET
+notfoundr0:
+ XOR R0, R0 // reset R0
+ MOVD $-1, 0(R2)
+ RET
+
+vectorimpl:
+	// If the address is not 16-byte aligned, use a byte loop for the unaligned header.
+ MOVD R3, R8
+ AND $15, R8
+ CMPBGT R8, $0, notaligned
+
+aligned:
+ ADD R6, R4, R8
+ MOVD R8, R7
+ AND $-16, R7
+ // replicate c across V17
+ VLVGB $0, R5, V19
+ VREPB $0, V19, V17
+
+vectorloop:
+ CMPBGE R3, R7, residual
+ VL 0(R3), V16 // load string to be searched into V16
+ ADD $16, R3
+ VFEEBS V16, V17, V18 // search V17 in V16 and set conditional code accordingly
+ BVS vectorloop
+
+ // when vector search found c in the string
+ VLGVB $7, V18, R7 // load 7th element of V18 containing index into R7
+ SUB $16, R3
+ SUB R6, R3
+ ADD R3, R7
+ MOVD R7, 0(R2)
+ RET
+
+notaligned:
+ MOVD R3, R8
+ AND $-16, R8
+ ADD $16, R8
+notalignedloop:
+ CMPBEQ R3, R8, aligned
+ MOVBZ 0(R3), R7
+ LA 1(R3), R3
+ CMPBNE R7, R5, notalignedloop
+ BR found
diff --git a/src/internal/bytealg/indexbyte_wasm.s b/src/internal/bytealg/indexbyte_wasm.s
new file mode 100644
index 0000000..ef4bd93
--- /dev/null
+++ b/src/internal/bytealg/indexbyte_wasm.s
@@ -0,0 +1,195 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·IndexByte(SB), NOSPLIT, $0-40
+ I64Load b_base+0(FP)
+ I32WrapI64
+ I32Load8U c+24(FP)
+ I64Load b_len+8(FP)
+ I32WrapI64
+ Call memchr<>(SB)
+ I64ExtendI32S
+ Set R0
+
+ Get SP
+ I64Const $-1
+ Get R0
+ I64Load b_base+0(FP)
+ I64Sub
+ Get R0
+ I64Eqz $0
+ Select
+ I64Store ret+32(FP)
+
+ RET
+
+TEXT ·IndexByteString(SB), NOSPLIT, $0-32
+ Get SP
+ I64Load s_base+0(FP)
+ I32WrapI64
+ I32Load8U c+16(FP)
+ I64Load s_len+8(FP)
+ I32WrapI64
+ Call memchr<>(SB)
+ I64ExtendI32S
+ Set R0
+
+ I64Const $-1
+ Get R0
+ I64Load s_base+0(FP)
+ I64Sub
+ Get R0
+ I64Eqz $0
+ Select
+ I64Store ret+24(FP)
+
+ RET
+
+// Initially compiled with Emscripten and then modified over time.
+// params:
+// R0: s
+// R1: c
+// R2: len
+// ret: index
+TEXT memchr<>(SB), NOSPLIT, $0
+ Get R1
+ Set R4
+ Block
+ Block
+ Get R2
+ I32Const $0
+ I32Ne
+ Tee R3
+ Get R0
+ I32Const $3
+ I32And
+ I32Const $0
+ I32Ne
+ I32And
+ If
+ Loop
+ Get R0
+ I32Load8U $0
+ Get R1
+ I32Eq
+ BrIf $2
+ Get R2
+ I32Const $-1
+ I32Add
+ Tee R2
+ I32Const $0
+ I32Ne
+ Tee R3
+ Get R0
+ I32Const $1
+ I32Add
+ Tee R0
+ I32Const $3
+ I32And
+ I32Const $0
+ I32Ne
+ I32And
+ BrIf $0
+ End
+ End
+ Get R3
+ BrIf $0
+ I32Const $0
+ Set R1
+ Br $1
+ End
+ Get R0
+ I32Load8U $0
+ Get R4
+ Tee R3
+ I32Eq
+ If
+ Get R2
+ Set R1
+ Else
+ Get R4
+ I32Const $16843009
+ I32Mul
+ Set R4
+ Block
+ Block
+ Get R2
+ I32Const $3
+ I32GtU
+ If
+ Get R2
+ Set R1
+ Loop
+ Get R0
+ I32Load $0
+ Get R4
+ I32Xor
+ Tee R2
+ I32Const $-2139062144
+ I32And
+ I32Const $-2139062144
+ I32Xor
+ Get R2
+ I32Const $-16843009
+ I32Add
+ I32And
+ I32Eqz
+ If
+ Get R0
+ I32Const $4
+ I32Add
+ Set R0
+ Get R1
+ I32Const $-4
+ I32Add
+ Tee R1
+ I32Const $3
+ I32GtU
+ BrIf $1
+ Br $3
+ End
+ End
+ Else
+ Get R2
+ Set R1
+ Br $1
+ End
+ Br $1
+ End
+ Get R1
+ I32Eqz
+ If
+ I32Const $0
+ Set R1
+ Br $3
+ End
+ End
+ Loop
+ Get R0
+ I32Load8U $0
+ Get R3
+ I32Eq
+ BrIf $2
+ Get R0
+ I32Const $1
+ I32Add
+ Set R0
+ Get R1
+ I32Const $-1
+ I32Add
+ Tee R1
+ BrIf $0
+ I32Const $0
+ Set R1
+ End
+ End
+ End
+ Get R0
+ I32Const $0
+ Get R1
+ Select
+ Return
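The word-at-a-time scan in the memchr body above relies on the classic SWAR "does this word contain a zero byte" test: the constants 16843009 and -2139062144 are 0x01010101 and 0x80808080. A rough Go equivalent of that test, as a standalone sketch (not part of the package):

package main

import "fmt"

// hasZeroByte reports whether any byte of the 32-bit word v is zero,
// computing (v - 0x01010101) & ^v & 0x80808080. The memchr loop above
// applies this after XORing each loaded word with the replicated search
// byte, so a zero byte in the XORed word marks a match.
func hasZeroByte(v uint32) bool {
	return (v-0x01010101)&(^v)&0x80808080 != 0
}

func main() {
	fmt.Println(hasZeroByte(0x11002233)) // true: one byte is zero
	fmt.Println(hasZeroByte(0x11223344)) // false: no zero byte
}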
diff --git a/src/internal/cfg/cfg.go b/src/internal/cfg/cfg.go
new file mode 100644
index 0000000..2af0ec7
--- /dev/null
+++ b/src/internal/cfg/cfg.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cfg holds configuration shared by the Go command and internal/testenv.
+// Definitions that don't need to be exposed outside of cmd/go should be in
+// cmd/go/internal/cfg instead of this package.
+package cfg
+
+// KnownEnv is a list of environment variables that affect the operation
+// of the Go command.
+const KnownEnv = `
+ AR
+ CC
+ CGO_CFLAGS
+ CGO_CFLAGS_ALLOW
+ CGO_CFLAGS_DISALLOW
+ CGO_CPPFLAGS
+ CGO_CPPFLAGS_ALLOW
+ CGO_CPPFLAGS_DISALLOW
+ CGO_CXXFLAGS
+ CGO_CXXFLAGS_ALLOW
+ CGO_CXXFLAGS_DISALLOW
+ CGO_ENABLED
+ CGO_FFLAGS
+ CGO_FFLAGS_ALLOW
+ CGO_FFLAGS_DISALLOW
+ CGO_LDFLAGS
+ CGO_LDFLAGS_ALLOW
+ CGO_LDFLAGS_DISALLOW
+ CXX
+ FC
+ GCCGO
+ GO111MODULE
+ GO386
+ GOAMD64
+ GOARCH
+ GOARM
+ GOBIN
+ GOCACHE
+ GOCACHEPROG
+ GOENV
+ GOEXE
+ GOEXPERIMENT
+ GOFLAGS
+ GOGCCFLAGS
+ GOHOSTARCH
+ GOHOSTOS
+ GOINSECURE
+ GOMIPS
+ GOMIPS64
+ GOMODCACHE
+ GONOPROXY
+ GONOSUMDB
+ GOOS
+ GOPATH
+ GOPPC64
+ GOPRIVATE
+ GOPROXY
+ GOROOT
+ GOSUMDB
+ GOTMPDIR
+ GOTOOLCHAIN
+ GOTOOLDIR
+ GOVCS
+ GOWASM
+ GOWORK
+ GO_EXTLINK_ENABLED
+ PKG_CONFIG
+`
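KnownEnv is stored as a single whitespace-separated block, so consumers split it before use. A short sketch of that (internal/cfg is only importable from inside the Go source tree, so this is illustrative only):

package main

import (
	"fmt"
	"internal/cfg" // only importable from within the Go source tree
	"strings"
)

func main() {
	// Split the block into individual variable names for lookups.
	vars := strings.Fields(cfg.KnownEnv)
	known := make(map[string]bool, len(vars))
	for _, v := range vars {
		known[v] = true
	}
	fmt.Println(known["GOPATH"], known["NOT_A_GO_VAR"]) // true false
}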
diff --git a/src/internal/coverage/calloc/batchcounteralloc.go b/src/internal/coverage/calloc/batchcounteralloc.go
new file mode 100644
index 0000000..2b6495d
--- /dev/null
+++ b/src/internal/coverage/calloc/batchcounteralloc.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package calloc
+
+// This package contains a simple "batch" allocator for allocating
+// coverage counters (slices of uint32 basically), for working with
+// coverage data files. Collections of counter arrays tend to all be
+// live/dead over the same time period, so they are a good fit for
+// batch allocation.
+
+type BatchCounterAlloc struct {
+ pool []uint32
+}
+
+func (ca *BatchCounterAlloc) AllocateCounters(n int) []uint32 {
+ const chunk = 8192
+ if n > cap(ca.pool) {
+ siz := chunk
+ if n > chunk {
+ siz = n
+ }
+ ca.pool = make([]uint32, siz)
+ }
+ rv := ca.pool[:n]
+ ca.pool = ca.pool[n:]
+ return rv
+}
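A short usage sketch of the batch allocator (sizes are made up; internal/coverage/calloc is only usable within the Go tree):

package main

import (
	"fmt"
	"internal/coverage/calloc" // internal package; illustrative only
)

func main() {
	var ba calloc.BatchCounterAlloc
	// Counter slices for several functions are carved out of one shared
	// backing array, so they can all be released together later.
	c1 := ba.AllocateCounters(4)
	c2 := ba.AllocateCounters(16)
	fmt.Println(len(c1), len(c2)) // 4 16
}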
diff --git a/src/internal/coverage/cformat/fmt_test.go b/src/internal/coverage/cformat/fmt_test.go
new file mode 100644
index 0000000..f5ed01b
--- /dev/null
+++ b/src/internal/coverage/cformat/fmt_test.go
@@ -0,0 +1,155 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cformat_test
+
+import (
+ "internal/coverage"
+ "internal/coverage/cformat"
+ "slices"
+ "strings"
+ "testing"
+)
+
+func TestBasics(t *testing.T) {
+ fm := cformat.NewFormatter(coverage.CtrModeAtomic)
+
+ mku := func(stl, enl, nx uint32) coverage.CoverableUnit {
+ return coverage.CoverableUnit{
+ StLine: stl,
+ EnLine: enl,
+ NxStmts: nx,
+ }
+ }
+ fn1units := []coverage.CoverableUnit{
+ mku(10, 11, 2),
+ mku(15, 11, 1),
+ }
+ fn2units := []coverage.CoverableUnit{
+ mku(20, 25, 3),
+ mku(30, 31, 2),
+ mku(33, 40, 7),
+ }
+ fn3units := []coverage.CoverableUnit{
+ mku(99, 100, 1),
+ }
+ fm.SetPackage("my/pack1")
+ for k, u := range fn1units {
+ fm.AddUnit("p.go", "f1", false, u, uint32(k))
+ }
+ for k, u := range fn2units {
+ fm.AddUnit("q.go", "f2", false, u, 0)
+ fm.AddUnit("q.go", "f2", false, u, uint32(k))
+ }
+ fm.SetPackage("my/pack2")
+ for _, u := range fn3units {
+ fm.AddUnit("lit.go", "f3", true, u, 0)
+ }
+
+ var b1, b2, b3, b4 strings.Builder
+ if err := fm.EmitTextual(&b1); err != nil {
+ t.Fatalf("EmitTextual returned %v", err)
+ }
+ wantText := strings.TrimSpace(`
+mode: atomic
+p.go:10.0,11.0 2 0
+p.go:15.0,11.0 1 1
+q.go:20.0,25.0 3 0
+q.go:30.0,31.0 2 1
+q.go:33.0,40.0 7 2
+lit.go:99.0,100.0 1 0`)
+ gotText := strings.TrimSpace(b1.String())
+ if wantText != gotText {
+ t.Errorf("emit text: got:\n%s\nwant:\n%s\n", gotText, wantText)
+ }
+
+ // Percent output with no aggregation.
+ noCoverPkg := ""
+ if err := fm.EmitPercent(&b2, noCoverPkg, false, false); err != nil {
+ t.Fatalf("EmitPercent returned %v", err)
+ }
+ wantPercent := strings.Fields(`
+ my/pack1 coverage: 66.7% of statements
+ my/pack2 coverage: 0.0% of statements
+`)
+ gotPercent := strings.Fields(b2.String())
+ if !slices.Equal(wantPercent, gotPercent) {
+ t.Errorf("emit percent: got:\n%+v\nwant:\n%+v\n",
+ gotPercent, wantPercent)
+ }
+
+ // Percent mode with aggregation.
+ withCoverPkg := " in ./..."
+ if err := fm.EmitPercent(&b3, withCoverPkg, false, true); err != nil {
+ t.Fatalf("EmitPercent returned %v", err)
+ }
+ wantPercent = strings.Fields(`
+ coverage: 62.5% of statements in ./...
+`)
+ gotPercent = strings.Fields(b3.String())
+ if !slices.Equal(wantPercent, gotPercent) {
+ t.Errorf("emit percent: got:\n%+v\nwant:\n%+v\n",
+ gotPercent, wantPercent)
+ }
+
+ if err := fm.EmitFuncs(&b4); err != nil {
+ t.Fatalf("EmitFuncs returned %v", err)
+ }
+ wantFuncs := strings.TrimSpace(`
+p.go:10: f1 33.3%
+q.go:20: f2 75.0%
+total (statements) 62.5%`)
+ gotFuncs := strings.TrimSpace(b4.String())
+ if wantFuncs != gotFuncs {
+ t.Errorf("emit funcs: got:\n%s\nwant:\n%s\n", gotFuncs, wantFuncs)
+ }
+ if false {
+ t.Logf("text is %s\n", b1.String())
+ t.Logf("perc is %s\n", b2.String())
+ t.Logf("perc2 is %s\n", b3.String())
+ t.Logf("funcs is %s\n", b4.String())
+ }
+}
+
+func TestEmptyPackages(t *testing.T) {
+
+ fm := cformat.NewFormatter(coverage.CtrModeAtomic)
+ fm.SetPackage("my/pack1")
+ fm.SetPackage("my/pack2")
+
+ // No aggregation.
+ {
+ var b strings.Builder
+ noCoverPkg := ""
+ if err := fm.EmitPercent(&b, noCoverPkg, true, false); err != nil {
+ t.Fatalf("EmitPercent returned %v", err)
+ }
+ wantPercent := strings.Fields(`
+ my/pack1 coverage: [no statements]
+ my/pack2 coverage: [no statements]
+`)
+ gotPercent := strings.Fields(b.String())
+ if !slices.Equal(wantPercent, gotPercent) {
+ t.Errorf("emit percent: got:\n%+v\nwant:\n%+v\n",
+ gotPercent, wantPercent)
+ }
+ }
+
+ // With aggregation.
+ {
+ var b strings.Builder
+ noCoverPkg := ""
+ if err := fm.EmitPercent(&b, noCoverPkg, true, true); err != nil {
+ t.Fatalf("EmitPercent returned %v", err)
+ }
+ wantPercent := strings.Fields(`
+ coverage: [no statements]
+`)
+ gotPercent := strings.Fields(b.String())
+ if !slices.Equal(wantPercent, gotPercent) {
+ t.Errorf("emit percent: got:\n%+v\nwant:\n%+v\n",
+ gotPercent, wantPercent)
+ }
+ }
+}
diff --git a/src/internal/coverage/cformat/format.go b/src/internal/coverage/cformat/format.go
new file mode 100644
index 0000000..7e7a277
--- /dev/null
+++ b/src/internal/coverage/cformat/format.go
@@ -0,0 +1,352 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cformat
+
+// This package provides APIs for producing human-readable summaries
+// of coverage data (e.g. a coverage percentage for a given package or
+// set of packages) and for writing data in the legacy test format
+// emitted by "go test -coverprofile=<outfile>".
+//
+// The model for using these apis is to create a Formatter object,
+// then make a series of calls to SetPackage and AddUnit passing in
+// data read from coverage meta-data and counter-data files. E.g.
+//
+// myformatter := cformat.NewFormatter()
+// ...
+// for each package P in meta-data file: {
+// myformatter.SetPackage(P)
+// for each function F in P: {
+// for each coverable unit U in F: {
+// myformatter.AddUnit(U)
+// }
+// }
+// }
+// myformatter.EmitPercent(os.Stdout, "", true, true)
+// myformatter.EmitTextual(somefile)
+//
+// These APIs are linked into tests that are built with "-cover", and
+// called at the end of test execution to produce text output or
+// emit coverage percentages.
+
+import (
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/cmerge"
+ "io"
+ "sort"
+ "text/tabwriter"
+)
+
+type Formatter struct {
+ // Maps import path to package state.
+ pm map[string]*pstate
+ // Records current package being visited.
+ pkg string
+ // Pointer to current package state.
+ p *pstate
+ // Counter mode.
+ cm coverage.CounterMode
+}
+
+// pstate records package-level coverage data state:
+// - a table of functions (file/fname/literal)
+// - a map recording the index/ID of each func encountered so far
+// - a table storing execution count for the coverable units in each func
+type pstate struct {
+ // slice of unique functions
+ funcs []fnfile
+ // maps function to index in slice above (index acts as function ID)
+ funcTable map[fnfile]uint32
+
+ // A table storing coverage counts for each coverable unit.
+ unitTable map[extcu]uint32
+}
+
+// extcu encapsulates a coverable unit within some function.
+type extcu struct {
+ fnfid uint32 // index into p.funcs slice
+ coverage.CoverableUnit
+}
+
+// fnfile is a function-name/file-name tuple.
+type fnfile struct {
+ file string
+ fname string
+ lit bool
+}
+
+func NewFormatter(cm coverage.CounterMode) *Formatter {
+ return &Formatter{
+ pm: make(map[string]*pstate),
+ cm: cm,
+ }
+}
+
+// SetPackage tells the formatter that we're about to visit the
+// coverage data for the package with the specified import path.
+// Note that it's OK to call SetPackage more than once with the
+// same import path; counter data values will be accumulated.
+func (fm *Formatter) SetPackage(importpath string) {
+ if importpath == fm.pkg {
+ return
+ }
+ fm.pkg = importpath
+ ps, ok := fm.pm[importpath]
+ if !ok {
+ ps = new(pstate)
+ fm.pm[importpath] = ps
+ ps.unitTable = make(map[extcu]uint32)
+ ps.funcTable = make(map[fnfile]uint32)
+ }
+ fm.p = ps
+}
+
+// AddUnit passes info on a single coverable unit (file, funcname,
+// literal flag, range of lines, and counter value) to the formatter.
+// Counter values will be accumulated where appropriate.
+func (fm *Formatter) AddUnit(file string, fname string, isfnlit bool, unit coverage.CoverableUnit, count uint32) {
+ if fm.p == nil {
+ panic("AddUnit invoked before SetPackage")
+ }
+ fkey := fnfile{file: file, fname: fname, lit: isfnlit}
+ idx, ok := fm.p.funcTable[fkey]
+ if !ok {
+ idx = uint32(len(fm.p.funcs))
+ fm.p.funcs = append(fm.p.funcs, fkey)
+ fm.p.funcTable[fkey] = idx
+ }
+ ukey := extcu{fnfid: idx, CoverableUnit: unit}
+ pcount := fm.p.unitTable[ukey]
+ var result uint32
+ if fm.cm == coverage.CtrModeSet {
+ if count != 0 || pcount != 0 {
+ result = 1
+ }
+ } else {
+ // Use saturating arithmetic.
+ result, _ = cmerge.SaturatingAdd(pcount, count)
+ }
+ fm.p.unitTable[ukey] = result
+}
+
+// sortUnits sorts a slice of extcu objects in a package according to
+// source position information (e.g. file and line). Note that we don't
+// include function name as part of the sorting criteria, the thinking
+// being that it is better to provide things in the original source order.
+func (p *pstate) sortUnits(units []extcu) {
+ sort.Slice(units, func(i, j int) bool {
+ ui := units[i]
+ uj := units[j]
+ ifile := p.funcs[ui.fnfid].file
+ jfile := p.funcs[uj.fnfid].file
+ if ifile != jfile {
+ return ifile < jfile
+ }
+ // NB: not taking function literal flag into account here (no
+ // need, since other fields are guaranteed to be distinct).
+ if units[i].StLine != units[j].StLine {
+ return units[i].StLine < units[j].StLine
+ }
+ if units[i].EnLine != units[j].EnLine {
+ return units[i].EnLine < units[j].EnLine
+ }
+ if units[i].StCol != units[j].StCol {
+ return units[i].StCol < units[j].StCol
+ }
+ if units[i].EnCol != units[j].EnCol {
+ return units[i].EnCol < units[j].EnCol
+ }
+ return units[i].NxStmts < units[j].NxStmts
+ })
+}
+
+// EmitTextual writes the accumulated coverage data in the legacy
+// cmd/cover text format to the writer 'w'. We sort the data items by
+// importpath, source file, and line number before emitting (this sorting
+// is not explicitly mandated by the format, but seems like a good idea
+// for repeatable/deterministic dumps).
+func (fm *Formatter) EmitTextual(w io.Writer) error {
+ if fm.cm == coverage.CtrModeInvalid {
+ panic("internal error, counter mode unset")
+ }
+ if _, err := fmt.Fprintf(w, "mode: %s\n", fm.cm.String()); err != nil {
+ return err
+ }
+ pkgs := make([]string, 0, len(fm.pm))
+ for importpath := range fm.pm {
+ pkgs = append(pkgs, importpath)
+ }
+ sort.Strings(pkgs)
+ for _, importpath := range pkgs {
+ p := fm.pm[importpath]
+ units := make([]extcu, 0, len(p.unitTable))
+ for u := range p.unitTable {
+ units = append(units, u)
+ }
+ p.sortUnits(units)
+ for _, u := range units {
+ count := p.unitTable[u]
+ file := p.funcs[u.fnfid].file
+ if _, err := fmt.Fprintf(w, "%s:%d.%d,%d.%d %d %d\n",
+ file, u.StLine, u.StCol,
+ u.EnLine, u.EnCol, u.NxStmts, count); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// EmitPercent writes out a "percentage covered" string to the writer 'w'.
+func (fm *Formatter) EmitPercent(w io.Writer, covpkgs string, noteEmpty bool, aggregate bool) error {
+ pkgs := make([]string, 0, len(fm.pm))
+ for importpath := range fm.pm {
+ pkgs = append(pkgs, importpath)
+ }
+
+ rep := func(cov, tot uint64) error {
+ if tot != 0 {
+ if _, err := fmt.Fprintf(w, "coverage: %.1f%% of statements%s\n",
+ 100.0*float64(cov)/float64(tot), covpkgs); err != nil {
+ return err
+ }
+ } else if noteEmpty {
+ if _, err := fmt.Fprintf(w, "coverage: [no statements]\n"); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ sort.Strings(pkgs)
+ var totalStmts, coveredStmts uint64
+ for _, importpath := range pkgs {
+ p := fm.pm[importpath]
+ if !aggregate {
+ totalStmts, coveredStmts = 0, 0
+ }
+ for unit, count := range p.unitTable {
+ nx := uint64(unit.NxStmts)
+ totalStmts += nx
+ if count != 0 {
+ coveredStmts += nx
+ }
+ }
+ if !aggregate {
+ if _, err := fmt.Fprintf(w, "\t%s\t\t", importpath); err != nil {
+ return err
+ }
+ if err := rep(coveredStmts, totalStmts); err != nil {
+ return err
+ }
+ }
+ }
+ if aggregate {
+ if err := rep(coveredStmts, totalStmts); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// EmitFuncs writes out a function-level summary to the writer 'w'. A
+// note on handling function literals: although we collect coverage
+// data for unnamed literals, it probably does not make sense to
+// include them in the function summary since there isn't any good way
+// to name them (this is also consistent with the legacy cmd/cover
+// implementation). We do want to include their counts in the overall
+// summary however.
+func (fm *Formatter) EmitFuncs(w io.Writer) error {
+ if fm.cm == coverage.CtrModeInvalid {
+ panic("internal error, counter mode unset")
+ }
+ perc := func(covered, total uint64) float64 {
+ if total == 0 {
+ total = 1
+ }
+ return 100.0 * float64(covered) / float64(total)
+ }
+ tabber := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
+ defer tabber.Flush()
+ allStmts := uint64(0)
+ covStmts := uint64(0)
+
+ pkgs := make([]string, 0, len(fm.pm))
+ for importpath := range fm.pm {
+ pkgs = append(pkgs, importpath)
+ }
+ sort.Strings(pkgs)
+
+ // Emit functions for each package, sorted by import path.
+ for _, importpath := range pkgs {
+ p := fm.pm[importpath]
+ if len(p.unitTable) == 0 {
+ continue
+ }
+ units := make([]extcu, 0, len(p.unitTable))
+ for u := range p.unitTable {
+ units = append(units, u)
+ }
+
+ // Within a package, sort the units, then walk through the
+ // sorted array. Each time we hit a new function, emit the
+ // summary entry for the previous function, then make one last
+ // emit call at the end of the loop.
+ p.sortUnits(units)
+ fname := ""
+ ffile := ""
+ flit := false
+ var fline uint32
+ var cstmts, tstmts uint64
+ captureFuncStart := func(u extcu) {
+ fname = p.funcs[u.fnfid].fname
+ ffile = p.funcs[u.fnfid].file
+ flit = p.funcs[u.fnfid].lit
+ fline = u.StLine
+ }
+ emitFunc := func(u extcu) error {
+ // Don't emit entries for function literals (see discussion
+ // in function header comment above).
+ if !flit {
+ if _, err := fmt.Fprintf(tabber, "%s:%d:\t%s\t%.1f%%\n",
+ ffile, fline, fname, perc(cstmts, tstmts)); err != nil {
+ return err
+ }
+ }
+ captureFuncStart(u)
+ allStmts += tstmts
+ covStmts += cstmts
+ tstmts = 0
+ cstmts = 0
+ return nil
+ }
+ for k, u := range units {
+ if k == 0 {
+ captureFuncStart(u)
+ } else {
+ if fname != p.funcs[u.fnfid].fname {
+ // New function; emit entry for previous one.
+ if err := emitFunc(u); err != nil {
+ return err
+ }
+ }
+ }
+ tstmts += uint64(u.NxStmts)
+ count := p.unitTable[u]
+ if count != 0 {
+ cstmts += uint64(u.NxStmts)
+ }
+ }
+ if err := emitFunc(extcu{}); err != nil {
+ return err
+ }
+ }
+ if _, err := fmt.Fprintf(tabber, "%s\t%s\t%.1f%%\n",
+ "total", "(statements)", perc(covStmts, allStmts)); err != nil {
+ return err
+ }
+ return nil
+}
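Putting the Formatter API together, a minimal sketch along the lines of the package comment above (the import path, file name, and line ranges are invented; the coverage packages are internal to the Go tree):

package main

import (
	"internal/coverage"
	"internal/coverage/cformat"
	"os"
)

func main() {
	fm := cformat.NewFormatter(coverage.CtrModeSet)
	fm.SetPackage("example.com/demo") // hypothetical import path
	// One executed and one unexecuted unit in a hypothetical demo.go.
	fm.AddUnit("demo.go", "F", false,
		coverage.CoverableUnit{StLine: 10, EnLine: 12, NxStmts: 2}, 1)
	fm.AddUnit("demo.go", "F", false,
		coverage.CoverableUnit{StLine: 14, EnLine: 15, NxStmts: 1}, 0)
	if err := fm.EmitTextual(os.Stdout); err != nil {
		panic(err)
	}
	// Aggregate percent line, e.g. "coverage: 66.7% of statements".
	if err := fm.EmitPercent(os.Stdout, "", false, true); err != nil {
		panic(err)
	}
}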
diff --git a/src/internal/coverage/cmddefs.go b/src/internal/coverage/cmddefs.go
new file mode 100644
index 0000000..49376a4
--- /dev/null
+++ b/src/internal/coverage/cmddefs.go
@@ -0,0 +1,87 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package coverage
+
+// CoverPkgConfig is a bundle of information passed from the Go
+// command to the cover command during "go build -cover" runs. The
+// Go command creates and fills in a struct as below, then passes a
+// file containing the encoded JSON for the struct to the "cover"
+// tool when instrumenting the source files in a Go package.
+type CoverPkgConfig struct {
+ // File into which cmd/cover should emit summary info
+ // when instrumentation is complete.
+ OutConfig string
+
+ // Import path for the package being instrumented.
+ PkgPath string
+
+ // Package name.
+ PkgName string
+
+ // Instrumentation granularity: one of "perfunc" or "perblock" (default)
+ Granularity string
+
+ // Module path for this package (empty if no go.mod in use)
+ ModulePath string
+
+ // Local mode indicates we're doing a coverage build or test of a
+ // package selected via local import path, e.g. "./..." or
+ // "./foo/bar" as opposed to a non-relative import path. See the
+ // corresponding field in cmd/go's PackageInternal struct for more
+ // info.
+ Local bool
+}
+
+// CoverFixupConfig contains annotations/notes generated by the
+// cmd/cover tool (during instrumentation) to be passed on to the
+// compiler when the instrumented code is compiled. The cmd/cover tool
+// creates a struct of this type, JSON-encodes it, and emits the
+// result to a file, which the Go command then passes to the compiler
+// when the instrumented package is built.
+type CoverFixupConfig struct {
+ // Name of the variable (created by cmd/cover) containing the
+ // encoded meta-data for the package.
+ MetaVar string
+
+ // Length of the meta-data.
+ MetaLen int
+
+ // Hash computed by cmd/cover of the meta-data.
+ MetaHash string
+
+ // Instrumentation strategy. For now this is always set to
+ // "normal", but in the future we may add new values (for example,
+ // if panic paths are instrumented, or if the instrumenter
+ // eliminates redundant counters).
+ Strategy string
+
+ // Prefix assigned to the names of counter variables generated
+ // during instrumentation by cmd/cover.
+ CounterPrefix string
+
+ // Name chosen for the package ID variable generated during
+ // instrumentation.
+ PkgIdVar string
+
+ // Counter mode (e.g. set/count/atomic)
+ CounterMode string
+
+ // Counter granularity (perblock or perfunc).
+ CounterGranularity string
+}
+
+// MetaFileCollection contains information generated by the Go command
+// and then read in by coverage test support functions within an
+// executing "go test -cover" binary.
+type MetaFileCollection struct {
+ ImportPaths []string
+ MetaFileFragments []string
+}
+
+// Name of file within the "go test -cover" temp coverdir directory
+// containing a list of meta-data files for packages being tested
+// in a "go test -coverpkg=... ..." run. This constant is shared
+// by the Go command and by the coverage runtime.
+const MetaFilesFileName = "metafiles.txt"
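As the comments above describe, both structs travel between tools as JSON files. A rough sketch of the producing side for CoverPkgConfig (field values are invented; internal/coverage is not importable outside the Go tree):

package main

import (
	"encoding/json"
	"internal/coverage" // internal package; illustrative only
	"os"
)

func main() {
	cfg := coverage.CoverPkgConfig{
		OutConfig:   "covcfg.out",       // hypothetical output path
		PkgPath:     "example.com/demo", // hypothetical import path
		PkgName:     "demo",
		Granularity: "perblock",
	}
	b, err := json.MarshalIndent(&cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	// The Go command writes this JSON to a file and passes the file name
	// to cmd/cover; writing to stdout here keeps the sketch simple.
	os.Stdout.Write(append(b, '\n'))
}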
diff --git a/src/internal/coverage/cmerge/merge.go b/src/internal/coverage/cmerge/merge.go
new file mode 100644
index 0000000..1339803
--- /dev/null
+++ b/src/internal/coverage/cmerge/merge.go
@@ -0,0 +1,127 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmerge
+
+// package cmerge provides a few small utility APIs for helping
+// with merging of counter data for a given function.
+
+import (
+ "fmt"
+ "internal/coverage"
+ "math"
+)
+
+type ModeMergePolicy uint8
+
+const (
+ ModeMergeStrict ModeMergePolicy = iota
+ ModeMergeRelaxed
+)
+
+// Merger provides state and methods to help manage the process of
+// merging together coverage counter data for a given function, for
+// tools that need to implicitly merge counter as they read multiple
+// coverage counter data files.
+type Merger struct {
+ cmode coverage.CounterMode
+ cgran coverage.CounterGranularity
+ policy ModeMergePolicy
+ overflow bool
+}
+
+func (cm *Merger) SetModeMergePolicy(policy ModeMergePolicy) {
+ cm.policy = policy
+}
+
+// MergeCounters takes the counter values in 'src' and merges them
+// into 'dst' according to the correct counter mode.
+func (m *Merger) MergeCounters(dst, src []uint32) (error, bool) {
+ if len(src) != len(dst) {
+ return fmt.Errorf("merging counters: len(dst)=%d len(src)=%d", len(dst), len(src)), false
+ }
+ if m.cmode == coverage.CtrModeSet {
+ for i := 0; i < len(src); i++ {
+ if src[i] != 0 {
+ dst[i] = 1
+ }
+ }
+ } else {
+ for i := 0; i < len(src); i++ {
+ dst[i] = m.SaturatingAdd(dst[i], src[i])
+ }
+ }
+ ovf := m.overflow
+ m.overflow = false
+ return nil, ovf
+}
+
+// SaturatingAdd does a saturating addition of 'dst' and 'src',
+// returning the added value, or math.MaxUint32 if there is an overflow.
+// Overflows are recorded in case the client needs to track them.
+func (m *Merger) SaturatingAdd(dst, src uint32) uint32 {
+ result, overflow := SaturatingAdd(dst, src)
+ if overflow {
+ m.overflow = true
+ }
+ return result
+}
+
+// SaturatingAdd does a saturating addition of 'dst' and 'src',
+// returning the added value (or math.MaxUint32 on overflow) plus an overflow flag.
+func SaturatingAdd(dst, src uint32) (uint32, bool) {
+ d, s := uint64(dst), uint64(src)
+ sum := d + s
+ overflow := false
+ if uint64(uint32(sum)) != sum {
+ overflow = true
+ sum = math.MaxUint32
+ }
+ return uint32(sum), overflow
+}
+
+// SetModeAndGranularity records the counter mode and granularity for
+// the current merge. In the specific case of merging across coverage
+// data files from different binaries, where we're combining data from
+// more than one meta-data file, we need to check for and resolve
+// mode/granularity clashes.
+func (cm *Merger) SetModeAndGranularity(mdf string, cmode coverage.CounterMode, cgran coverage.CounterGranularity) error {
+ if cm.cmode == coverage.CtrModeInvalid {
+ // Set merger mode based on what we're seeing here.
+ cm.cmode = cmode
+ cm.cgran = cgran
+ } else {
+ // Granularity clashes are always errors.
+ if cm.cgran != cgran {
+ return fmt.Errorf("counter granularity clash while reading meta-data file %s: previous file had %s, new file has %s", mdf, cm.cgran.String(), cgran.String())
+ }
+ // Mode clashes are treated as errors if we're using the
+ // default strict policy.
+ if cm.cmode != cmode {
+ if cm.policy == ModeMergeStrict {
+ return fmt.Errorf("counter mode clash while reading meta-data file %s: previous file had %s, new file has %s", mdf, cm.cmode.String(), cmode.String())
+ }
+ // In the case of a relaxed mode merge policy, upgrade
+ // mode if needed.
+ if cm.cmode < cmode {
+ cm.cmode = cmode
+ }
+ }
+ }
+ return nil
+}
+
+func (cm *Merger) ResetModeAndGranularity() {
+ cm.cmode = coverage.CtrModeInvalid
+ cm.cgran = coverage.CtrGranularityInvalid
+ cm.overflow = false
+}
+
+func (cm *Merger) Mode() coverage.CounterMode {
+ return cm.cmode
+}
+
+func (cm *Merger) Granularity() coverage.CounterGranularity {
+ return cm.cgran
+}
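A small sketch of merging two counter slices in count mode, showing the saturating behaviour (the meta-data file name is arbitrary; these packages are internal to the Go tree):

package main

import (
	"fmt"
	"internal/coverage"
	"internal/coverage/cmerge"
)

func main() {
	var m cmerge.Merger
	// Record the mode/granularity seen in the first meta-data file.
	if err := m.SetModeAndGranularity("a.meta", coverage.CtrModeCount,
		coverage.CtrGranularityPerBlock); err != nil {
		panic(err)
	}
	dst := []uint32{1, 4294967295, 7}
	src := []uint32{2, 5, 0}
	err, overflow := m.MergeCounters(dst, src)
	// dst is now [3 4294967295 7]; the middle slot saturated at
	// math.MaxUint32, so overflow is reported as true.
	fmt.Println(dst, err, overflow)
}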
diff --git a/src/internal/coverage/cmerge/merge_test.go b/src/internal/coverage/cmerge/merge_test.go
new file mode 100644
index 0000000..0e6112a
--- /dev/null
+++ b/src/internal/coverage/cmerge/merge_test.go
@@ -0,0 +1,118 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmerge_test
+
+import (
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/cmerge"
+ "testing"
+)
+
+func TestClash(t *testing.T) {
+ m := &cmerge.Merger{}
+ err := m.SetModeAndGranularity("mdf1.data", coverage.CtrModeSet, coverage.CtrGranularityPerBlock)
+ if err != nil {
+ t.Fatalf("unexpected clash: %v", err)
+ }
+ err = m.SetModeAndGranularity("mdf1.data", coverage.CtrModeSet, coverage.CtrGranularityPerBlock)
+ if err != nil {
+ t.Fatalf("unexpected clash: %v", err)
+ }
+ err = m.SetModeAndGranularity("mdf1.data", coverage.CtrModeCount, coverage.CtrGranularityPerBlock)
+ if err == nil {
+ t.Fatalf("expected mode clash, not found")
+ }
+ err = m.SetModeAndGranularity("mdf1.data", coverage.CtrModeSet, coverage.CtrGranularityPerFunc)
+ if err == nil {
+ t.Fatalf("expected granularity clash, not found")
+ }
+ m.SetModeMergePolicy(cmerge.ModeMergeRelaxed)
+ err = m.SetModeAndGranularity("mdf1.data", coverage.CtrModeCount, coverage.CtrGranularityPerBlock)
+ if err != nil {
+ t.Fatalf("unexpected clash: %v", err)
+ }
+ err = m.SetModeAndGranularity("mdf1.data", coverage.CtrModeSet, coverage.CtrGranularityPerBlock)
+ if err != nil {
+ t.Fatalf("unexpected clash: %v", err)
+ }
+ err = m.SetModeAndGranularity("mdf1.data", coverage.CtrModeAtomic, coverage.CtrGranularityPerBlock)
+ if err != nil {
+ t.Fatalf("unexpected clash: %v", err)
+ }
+ m.ResetModeAndGranularity()
+ err = m.SetModeAndGranularity("mdf1.data", coverage.CtrModeCount, coverage.CtrGranularityPerFunc)
+ if err != nil {
+ t.Fatalf("unexpected clash after reset: %v", err)
+ }
+}
+
+func TestBasic(t *testing.T) {
+ scenarios := []struct {
+ cmode coverage.CounterMode
+ cgran coverage.CounterGranularity
+ src, dst, res []uint32
+ iters int
+ merr bool
+ overflow bool
+ }{
+ {
+ cmode: coverage.CtrModeSet,
+ cgran: coverage.CtrGranularityPerBlock,
+ src: []uint32{1, 0, 1},
+ dst: []uint32{1, 1, 0},
+ res: []uint32{1, 1, 1},
+ iters: 2,
+ overflow: false,
+ },
+ {
+ cmode: coverage.CtrModeCount,
+ cgran: coverage.CtrGranularityPerBlock,
+ src: []uint32{1, 0, 3},
+ dst: []uint32{5, 7, 0},
+ res: []uint32{6, 7, 3},
+ iters: 1,
+ overflow: false,
+ },
+ {
+ cmode: coverage.CtrModeCount,
+ cgran: coverage.CtrGranularityPerBlock,
+ src: []uint32{4294967200, 0, 3},
+ dst: []uint32{4294967001, 7, 0},
+ res: []uint32{4294967295, 7, 3},
+ iters: 1,
+ overflow: true,
+ },
+ }
+
+ for k, scenario := range scenarios {
+ var err error
+ var ovf bool
+ m := &cmerge.Merger{}
+ mdf := fmt.Sprintf("file%d", k)
+ err = m.SetModeAndGranularity(mdf, scenario.cmode, scenario.cgran)
+ if err != nil {
+ t.Fatalf("case %d SetModeAndGranularity failed: %v", k, err)
+ }
+ for i := 0; i < scenario.iters; i++ {
+ err, ovf = m.MergeCounters(scenario.dst, scenario.src)
+ if ovf != scenario.overflow {
+ t.Fatalf("case %d overflow mismatch: got %v want %v", k, ovf, scenario.overflow)
+ }
+ if !scenario.merr && err != nil {
+ t.Fatalf("case %d unexpected err %v", k, err)
+ }
+ if scenario.merr && err == nil {
+ t.Fatalf("case %d expected err, not received", k)
+ }
+ for i := range scenario.dst {
+ if scenario.dst[i] != scenario.res[i] {
+ t.Fatalf("case %d: bad merge at %d got %d want %d",
+ k, i, scenario.dst[i], scenario.res[i])
+ }
+ }
+ }
+ }
+}
diff --git a/src/internal/coverage/decodecounter/decodecounterfile.go b/src/internal/coverage/decodecounter/decodecounterfile.go
new file mode 100644
index 0000000..83934fe
--- /dev/null
+++ b/src/internal/coverage/decodecounter/decodecounterfile.go
@@ -0,0 +1,373 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package decodecounter
+
+import (
+ "encoding/binary"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/slicereader"
+ "internal/coverage/stringtab"
+ "io"
+ "os"
+ "strconv"
+ "unsafe"
+)
+
+// This file contains helpers for reading counter data files created
+// during the executions of a coverage-instrumented binary.
+
+type CounterDataReader struct {
+ stab *stringtab.Reader
+ args map[string]string
+ osargs []string
+ goarch string // GOARCH setting from run that produced counter data
+ goos string // GOOS setting from run that produced counter data
+ mr io.ReadSeeker
+ hdr coverage.CounterFileHeader
+ ftr coverage.CounterFileFooter
+ shdr coverage.CounterSegmentHeader
+ u32b []byte
+ u8b []byte
+ fcnCount uint32
+ segCount uint32
+ debug bool
+}
+
+func NewCounterDataReader(fn string, rs io.ReadSeeker) (*CounterDataReader, error) {
+ cdr := &CounterDataReader{
+ mr: rs,
+ u32b: make([]byte, 4),
+ u8b: make([]byte, 1),
+ }
+ // Read header
+ if err := binary.Read(rs, binary.LittleEndian, &cdr.hdr); err != nil {
+ return nil, err
+ }
+ if cdr.debug {
+ fmt.Fprintf(os.Stderr, "=-= counter file header: %+v\n", cdr.hdr)
+ }
+ if !checkMagic(cdr.hdr.Magic) {
+ return nil, fmt.Errorf("invalid magic string: not a counter data file")
+ }
+ if cdr.hdr.Version > coverage.CounterFileVersion {
+ return nil, fmt.Errorf("version data incompatibility: reader is %d data is %d", coverage.CounterFileVersion, cdr.hdr.Version)
+ }
+
+ // Read footer.
+ if err := cdr.readFooter(); err != nil {
+ return nil, err
+ }
+ // Seek back to just past the file header.
+ hsz := int64(unsafe.Sizeof(cdr.hdr))
+ if _, err := cdr.mr.Seek(hsz, io.SeekStart); err != nil {
+ return nil, err
+ }
+ // Read preamble for first segment.
+ if err := cdr.readSegmentPreamble(); err != nil {
+ return nil, err
+ }
+ return cdr, nil
+}
+
+func checkMagic(v [4]byte) bool {
+ g := coverage.CovCounterMagic
+ return v[0] == g[0] && v[1] == g[1] && v[2] == g[2] && v[3] == g[3]
+}
+
+func (cdr *CounterDataReader) readFooter() error {
+ ftrSize := int64(unsafe.Sizeof(cdr.ftr))
+ if _, err := cdr.mr.Seek(-ftrSize, io.SeekEnd); err != nil {
+ return err
+ }
+ if err := binary.Read(cdr.mr, binary.LittleEndian, &cdr.ftr); err != nil {
+ return err
+ }
+ if !checkMagic(cdr.ftr.Magic) {
+ return fmt.Errorf("invalid magic string (not a counter data file)")
+ }
+ if cdr.ftr.NumSegments == 0 {
+ return fmt.Errorf("invalid counter data file (no segments)")
+ }
+ return nil
+}
+
+// readSegmentPreamble reads and consumes the segment header, segment string
+// table, and segment args table.
+func (cdr *CounterDataReader) readSegmentPreamble() error {
+ // Read segment header.
+ if err := binary.Read(cdr.mr, binary.LittleEndian, &cdr.shdr); err != nil {
+ return err
+ }
+ if cdr.debug {
+ fmt.Fprintf(os.Stderr, "=-= read counter segment header: %+v", cdr.shdr)
+ fmt.Fprintf(os.Stderr, " FcnEntries=0x%x StrTabLen=0x%x ArgsLen=0x%x\n",
+ cdr.shdr.FcnEntries, cdr.shdr.StrTabLen, cdr.shdr.ArgsLen)
+ }
+
+ // Read string table and args.
+ if err := cdr.readStringTable(); err != nil {
+ return err
+ }
+ if err := cdr.readArgs(); err != nil {
+ return err
+ }
+ // Seek past any padding to bring us up to a 4-byte boundary.
+ if of, err := cdr.mr.Seek(0, io.SeekCurrent); err != nil {
+ return err
+ } else {
+ rem := of % 4
+ if rem != 0 {
+ pad := 4 - rem
+ if _, err := cdr.mr.Seek(pad, io.SeekCurrent); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (cdr *CounterDataReader) readStringTable() error {
+ b := make([]byte, cdr.shdr.StrTabLen)
+ nr, err := cdr.mr.Read(b)
+ if err != nil {
+ return err
+ }
+ if nr != int(cdr.shdr.StrTabLen) {
+ return fmt.Errorf("error: short read on string table")
+ }
+ slr := slicereader.NewReader(b, false /* not readonly */)
+ cdr.stab = stringtab.NewReader(slr)
+ cdr.stab.Read()
+ return nil
+}
+
+func (cdr *CounterDataReader) readArgs() error {
+ b := make([]byte, cdr.shdr.ArgsLen)
+ nr, err := cdr.mr.Read(b)
+ if err != nil {
+ return err
+ }
+ if nr != int(cdr.shdr.ArgsLen) {
+ return fmt.Errorf("error: short read on args table")
+ }
+ slr := slicereader.NewReader(b, false /* not readonly */)
+ sget := func() (string, error) {
+ kidx := slr.ReadULEB128()
+ if int(kidx) >= cdr.stab.Entries() {
+ return "", fmt.Errorf("malformed string table ref")
+ }
+ return cdr.stab.Get(uint32(kidx)), nil
+ }
+ nents := slr.ReadULEB128()
+ cdr.args = make(map[string]string, int(nents))
+ for i := uint64(0); i < nents; i++ {
+ k, errk := sget()
+ if errk != nil {
+ return errk
+ }
+ v, errv := sget()
+ if errv != nil {
+ return errv
+ }
+ if _, ok := cdr.args[k]; ok {
+ return fmt.Errorf("malformed args table")
+ }
+ cdr.args[k] = v
+ }
+ if argcs, ok := cdr.args["argc"]; ok {
+ argc, err := strconv.Atoi(argcs)
+ if err != nil {
+ return fmt.Errorf("malformed argc in counter data file args section")
+ }
+ cdr.osargs = make([]string, 0, argc)
+ for i := 0; i < argc; i++ {
+ arg := cdr.args[fmt.Sprintf("argv%d", i)]
+ cdr.osargs = append(cdr.osargs, arg)
+ }
+ }
+ if goos, ok := cdr.args["GOOS"]; ok {
+ cdr.goos = goos
+ }
+ if goarch, ok := cdr.args["GOARCH"]; ok {
+ cdr.goarch = goarch
+ }
+ return nil
+}
+
+// OsArgs returns the program arguments (saved from os.Args during
+// the run of the instrumented binary) read from the counter
+// data file. Not all coverage data files will have os.Args values;
+// for example, if a data file is produced by merging coverage
+// data from two distinct runs, no os args will be available (an
+// empty list is returned).
+func (cdr *CounterDataReader) OsArgs() []string {
+ return cdr.osargs
+}
+
+// Goos returns the GOOS setting in effect for the "-cover" binary
+// that produced this counter data file. The GOOS value may be
+// empty in the case where the counter data file was produced
+// from a merge in which more than one GOOS value was present.
+func (cdr *CounterDataReader) Goos() string {
+ return cdr.goos
+}
+
+// Goarch returns the GOARCH setting in effect for the "-cover" binary
+// that produced this counter data file. The GOARCH value may be
+// empty in the case where the counter data file was produced
+// from a merge in which more than one GOARCH value was present.
+func (cdr *CounterDataReader) Goarch() string {
+ return cdr.goarch
+}
+
+// FuncPayload encapsulates the counter data payload for a single
+// function as read from a counter data file.
+type FuncPayload struct {
+ PkgIdx uint32
+ FuncIdx uint32
+ Counters []uint32
+}
+
+// NumSegments returns the number of execution segments in the file.
+func (cdr *CounterDataReader) NumSegments() uint32 {
+ return cdr.ftr.NumSegments
+}
+
+// BeginNextSegment sets up the reader to read the next segment,
+// returning TRUE if we do have another segment to read, or FALSE
+// if we're done with all the segments (also an error if
+// something went wrong).
+func (cdr *CounterDataReader) BeginNextSegment() (bool, error) {
+ if cdr.segCount >= cdr.ftr.NumSegments {
+ return false, nil
+ }
+ cdr.segCount++
+ cdr.fcnCount = 0
+ // Seek past footer from last segment.
+ ftrSize := int64(unsafe.Sizeof(cdr.ftr))
+ if _, err := cdr.mr.Seek(ftrSize, io.SeekCurrent); err != nil {
+ return false, err
+ }
+ // Read preamble for this segment.
+ if err := cdr.readSegmentPreamble(); err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+// NumFunctionsInSegment returns the number of live functions
+// in the currently selected segment.
+func (cdr *CounterDataReader) NumFunctionsInSegment() uint32 {
+ return uint32(cdr.shdr.FcnEntries)
+}
+
+const supportDeadFunctionsInCounterData = false
+
+// NextFunc reads data for the next function in this current segment
+// into "p", returning TRUE if the read was successful or FALSE
+// if we've read all the functions already (also an error if
+// something went wrong with the read or we hit a premature
+// EOF).
+func (cdr *CounterDataReader) NextFunc(p *FuncPayload) (bool, error) {
+ if cdr.fcnCount >= uint32(cdr.shdr.FcnEntries) {
+ return false, nil
+ }
+ cdr.fcnCount++
+ var rdu32 func() (uint32, error)
+ if cdr.hdr.CFlavor == coverage.CtrULeb128 {
+ rdu32 = func() (uint32, error) {
+ var shift uint
+ var value uint64
+ for {
+ _, err := cdr.mr.Read(cdr.u8b)
+ if err != nil {
+ return 0, err
+ }
+ b := cdr.u8b[0]
+ value |= (uint64(b&0x7F) << shift)
+ if b&0x80 == 0 {
+ break
+ }
+ shift += 7
+ }
+ return uint32(value), nil
+ }
+ } else if cdr.hdr.CFlavor == coverage.CtrRaw {
+ if cdr.hdr.BigEndian {
+ rdu32 = func() (uint32, error) {
+ n, err := cdr.mr.Read(cdr.u32b)
+ if err != nil {
+ return 0, err
+ }
+ if n != 4 {
+ return 0, io.EOF
+ }
+ return binary.BigEndian.Uint32(cdr.u32b), nil
+ }
+ } else {
+ rdu32 = func() (uint32, error) {
+ n, err := cdr.mr.Read(cdr.u32b)
+ if err != nil {
+ return 0, err
+ }
+ if n != 4 {
+ return 0, io.EOF
+ }
+ return binary.LittleEndian.Uint32(cdr.u32b), nil
+ }
+ }
+ } else {
+ panic("internal error: unknown counter flavor")
+ }
+
+// Alternative/experimental path: one way we could handle writing
+ // out counter data would be to just memcpy the counter segment
+ // out to a file, meaning that a region in the counter memory
+ // corresponding to a dead (never-executed) function would just be
+ // zeroes. The code path below handles this case.
+ var nc uint32
+ var err error
+ if supportDeadFunctionsInCounterData {
+ for {
+ nc, err = rdu32()
+ if err == io.EOF {
+ return false, io.EOF
+ } else if err != nil {
+ break
+ }
+ if nc != 0 {
+ break
+ }
+ }
+ } else {
+ nc, err = rdu32()
+ }
+ if err != nil {
+ return false, err
+ }
+
+ // Read package and func indices.
+ p.PkgIdx, err = rdu32()
+ if err != nil {
+ return false, err
+ }
+ p.FuncIdx, err = rdu32()
+ if err != nil {
+ return false, err
+ }
+ if cap(p.Counters) < 1024 {
+ p.Counters = make([]uint32, 0, 1024)
+ }
+ p.Counters = p.Counters[:0]
+ for i := uint32(0); i < nc; i++ {
+ v, err := rdu32()
+ if err != nil {
+ return false, err
+ }
+ p.Counters = append(p.Counters, v)
+ }
+ return true, nil
+}
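A sketch of reading the first segment of a counter data file with this API (the file name is hypothetical, and NewCounterDataReader leaves the reader positioned at the first segment; internal packages are only usable within the Go tree):

package main

import (
	"fmt"
	"internal/coverage/decodecounter" // internal package; illustrative only
	"os"
)

func main() {
	const path = "covcounters.example" // hypothetical counter data file
	f, err := os.Open(path)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	cdr, err := decodecounter.NewCounterDataReader(path, f)
	if err != nil {
		panic(err)
	}
	// Walk the functions recorded in the first segment.
	var p decodecounter.FuncPayload
	for i := uint32(0); i < cdr.NumFunctionsInSegment(); i++ {
		ok, err := cdr.NextFunc(&p)
		if err != nil {
			panic(err)
		}
		if !ok {
			break
		}
		fmt.Printf("pkg %d func %d counters %v\n", p.PkgIdx, p.FuncIdx, p.Counters)
	}
	fmt.Println("segments in file:", cdr.NumSegments())
}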
diff --git a/src/internal/coverage/decodemeta/decode.go b/src/internal/coverage/decodemeta/decode.go
new file mode 100644
index 0000000..fa047c7
--- /dev/null
+++ b/src/internal/coverage/decodemeta/decode.go
@@ -0,0 +1,136 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package decodemeta
+
+// This package contains APIs and helpers for decoding a single package's
+// meta data "blob" emitted by the compiler when coverage instrumentation
+// is turned on.
+
+import (
+ "encoding/binary"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/slicereader"
+ "internal/coverage/stringtab"
+ "io"
+ "os"
+)
+
+// See comments in the encodecovmeta package for details on the format.
+
+type CoverageMetaDataDecoder struct {
+ r *slicereader.Reader
+ hdr coverage.MetaSymbolHeader
+ strtab *stringtab.Reader
+ tmp []byte
+ debug bool
+}
+
+func NewCoverageMetaDataDecoder(b []byte, readonly bool) (*CoverageMetaDataDecoder, error) {
+ slr := slicereader.NewReader(b, readonly)
+ x := &CoverageMetaDataDecoder{
+ r: slr,
+ tmp: make([]byte, 0, 256),
+ }
+ if err := x.readHeader(); err != nil {
+ return nil, err
+ }
+ if err := x.readStringTable(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+func (d *CoverageMetaDataDecoder) readHeader() error {
+ if err := binary.Read(d.r, binary.LittleEndian, &d.hdr); err != nil {
+ return err
+ }
+ if d.debug {
+ fmt.Fprintf(os.Stderr, "=-= after readHeader: %+v\n", d.hdr)
+ }
+ return nil
+}
+
+func (d *CoverageMetaDataDecoder) readStringTable() error {
+ // Seek to the correct location to read the string table.
+ stringTableLocation := int64(coverage.CovMetaHeaderSize + 4*d.hdr.NumFuncs)
+ if _, err := d.r.Seek(stringTableLocation, io.SeekStart); err != nil {
+ return err
+ }
+
+ // Read the table itself.
+ d.strtab = stringtab.NewReader(d.r)
+ d.strtab.Read()
+ return nil
+}
+
+func (d *CoverageMetaDataDecoder) PackagePath() string {
+ return d.strtab.Get(d.hdr.PkgPath)
+}
+
+func (d *CoverageMetaDataDecoder) PackageName() string {
+ return d.strtab.Get(d.hdr.PkgName)
+}
+
+func (d *CoverageMetaDataDecoder) ModulePath() string {
+ return d.strtab.Get(d.hdr.ModulePath)
+}
+
+func (d *CoverageMetaDataDecoder) NumFuncs() uint32 {
+ return d.hdr.NumFuncs
+}
+
+// ReadFunc reads the coverage meta-data for the function with index
+// 'fidx', filling it into the FuncDesc pointed to by 'f'.
+func (d *CoverageMetaDataDecoder) ReadFunc(fidx uint32, f *coverage.FuncDesc) error {
+ if fidx >= d.hdr.NumFuncs {
+ return fmt.Errorf("illegal function index")
+ }
+
+ // Seek to the correct location to read the function offset and read it.
+ funcOffsetLocation := int64(coverage.CovMetaHeaderSize + 4*fidx)
+ if _, err := d.r.Seek(funcOffsetLocation, io.SeekStart); err != nil {
+ return err
+ }
+ foff := d.r.ReadUint32()
+
+ // Check assumptions
+ if foff < uint32(funcOffsetLocation) || foff > d.hdr.Length {
+ return fmt.Errorf("malformed func offset %d", foff)
+ }
+
+ // Seek to the correct location to read the function.
+ floc := int64(foff)
+ if _, err := d.r.Seek(floc, io.SeekStart); err != nil {
+ return err
+ }
+
+ // Preamble containing number of units, file, and function.
+ numUnits := uint32(d.r.ReadULEB128())
+ fnameidx := uint32(d.r.ReadULEB128())
+ fileidx := uint32(d.r.ReadULEB128())
+
+ f.Srcfile = d.strtab.Get(fileidx)
+ f.Funcname = d.strtab.Get(fnameidx)
+
+ // Now the units
+ f.Units = f.Units[:0]
+ if cap(f.Units) < int(numUnits) {
+ f.Units = make([]coverage.CoverableUnit, 0, numUnits)
+ }
+ for k := uint32(0); k < numUnits; k++ {
+ f.Units = append(f.Units,
+ coverage.CoverableUnit{
+ StLine: uint32(d.r.ReadULEB128()),
+ StCol: uint32(d.r.ReadULEB128()),
+ EnLine: uint32(d.r.ReadULEB128()),
+ EnCol: uint32(d.r.ReadULEB128()),
+ NxStmts: uint32(d.r.ReadULEB128()),
+ })
+ }
+ lit := d.r.ReadULEB128()
+ f.Lit = lit != 0
+ return nil
+}
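A sketch of walking one package's meta-data blob with this decoder (the blob itself would come from a meta-data file, as read by the CoverageMetaFileReader in decodefile.go below; these packages are internal to the Go tree):

package main

import (
	"fmt"
	"internal/coverage"
	"internal/coverage/decodemeta" // internal package; illustrative only
)

// dumpPackageMeta prints the functions described by one package's
// meta-data blob.
func dumpPackageMeta(blob []byte) error {
	d, err := decodemeta.NewCoverageMetaDataDecoder(blob, true /* readonly */)
	if err != nil {
		return err
	}
	fmt.Println("package:", d.PackagePath())
	var fd coverage.FuncDesc
	for i := uint32(0); i < d.NumFuncs(); i++ {
		if err := d.ReadFunc(i, &fd); err != nil {
			return err
		}
		fmt.Printf("  %s: %s units=%d\n", fd.Srcfile, fd.Funcname, len(fd.Units))
	}
	return nil
}

func main() {
	// A real blob is required here; this sketch only shows the call shape.
	_ = dumpPackageMeta
}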
diff --git a/src/internal/coverage/decodemeta/decodefile.go b/src/internal/coverage/decodemeta/decodefile.go
new file mode 100644
index 0000000..6580dd5
--- /dev/null
+++ b/src/internal/coverage/decodemeta/decodefile.go
@@ -0,0 +1,223 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package decodemeta
+
+// This package contains APIs and helpers for reading and decoding
+// meta-data output files emitted by the runtime when a
+// coverage-instrumented binary executes. A meta-data file contains
+// top-level info (counter mode, number of packages) and then a
+// separate self-contained meta-data section for each Go package.
+
+import (
+ "bufio"
+ "crypto/md5"
+ "encoding/binary"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/slicereader"
+ "internal/coverage/stringtab"
+ "io"
+ "os"
+)
+
+// CoverageMetaFileReader provides state and methods for reading
+// a meta-data file from a code coverage run.
+type CoverageMetaFileReader struct {
+ f *os.File
+ hdr coverage.MetaFileHeader
+ tmp []byte
+ pkgOffsets []uint64
+ pkgLengths []uint64
+ strtab *stringtab.Reader
+ fileRdr *bufio.Reader
+ fileView []byte
+ debug bool
+}
+
+// NewCoverageMetaFileReader returns a new helper object for reading
+// the coverage meta-data output file 'f'. The param 'fileView' is a
+// read-only slice containing the contents of 'f' obtained by mmap'ing
+// the file read-only; 'fileView' may be nil, in which case the helper
+// will read the contents of the file using regular file Read
+// operations.
+func NewCoverageMetaFileReader(f *os.File, fileView []byte) (*CoverageMetaFileReader, error) {
+ r := &CoverageMetaFileReader{
+ f: f,
+ fileView: fileView,
+ tmp: make([]byte, 256),
+ }
+
+ if err := r.readFileHeader(); err != nil {
+ return nil, err
+ }
+ return r, nil
+}
+
+func (r *CoverageMetaFileReader) readFileHeader() error {
+ var err error
+
+ r.fileRdr = bufio.NewReader(r.f)
+
+ // Read file header.
+ if err := binary.Read(r.fileRdr, binary.LittleEndian, &r.hdr); err != nil {
+ return err
+ }
+
+ // Verify magic string
+ m := r.hdr.Magic
+ g := coverage.CovMetaMagic
+ if m[0] != g[0] || m[1] != g[1] || m[2] != g[2] || m[3] != g[3] {
+ return fmt.Errorf("invalid meta-data file magic string")
+ }
+
+ // Vet the version. If this is a meta-data file from the future,
+ // we won't be able to read it.
+ if r.hdr.Version > coverage.MetaFileVersion {
+ return fmt.Errorf("meta-data file withn unknown version %d (expected %d)", r.hdr.Version, coverage.MetaFileVersion)
+ }
+
+ // Read package offsets for good measure
+ r.pkgOffsets = make([]uint64, r.hdr.Entries)
+ for i := uint64(0); i < r.hdr.Entries; i++ {
+ if r.pkgOffsets[i], err = r.rdUint64(); err != nil {
+ return err
+ }
+ if r.pkgOffsets[i] > r.hdr.TotalLength {
+ return fmt.Errorf("insane pkg offset %d: %d > totlen %d",
+ i, r.pkgOffsets[i], r.hdr.TotalLength)
+ }
+ }
+ r.pkgLengths = make([]uint64, r.hdr.Entries)
+ for i := uint64(0); i < r.hdr.Entries; i++ {
+ if r.pkgLengths[i], err = r.rdUint64(); err != nil {
+ return err
+ }
+ if r.pkgLengths[i] > r.hdr.TotalLength {
+ return fmt.Errorf("insane pkg length %d: %d > totlen %d",
+ i, r.pkgLengths[i], r.hdr.TotalLength)
+ }
+ }
+
+ // Read string table.
+ b := make([]byte, r.hdr.StrTabLength)
+ nr, err := r.fileRdr.Read(b)
+ if err != nil {
+ return err
+ }
+ if nr != int(r.hdr.StrTabLength) {
+ return fmt.Errorf("error: short read on string table")
+ }
+ slr := slicereader.NewReader(b, false /* not readonly */)
+ r.strtab = stringtab.NewReader(slr)
+ r.strtab.Read()
+
+ if r.debug {
+ fmt.Fprintf(os.Stderr, "=-= read-in header is: %+v\n", *r)
+ }
+
+ return nil
+}
+
+func (r *CoverageMetaFileReader) rdUint64() (uint64, error) {
+ r.tmp = r.tmp[:0]
+ r.tmp = append(r.tmp, make([]byte, 8)...)
+ n, err := r.fileRdr.Read(r.tmp)
+ if err != nil {
+ return 0, err
+ }
+ if n != 8 {
+ return 0, fmt.Errorf("premature end of file on read")
+ }
+ v := binary.LittleEndian.Uint64(r.tmp)
+ return v, nil
+}
+
+// NumPackages returns the number of packages for which this file
+// contains meta-data.
+func (r *CoverageMetaFileReader) NumPackages() uint64 {
+ return r.hdr.Entries
+}
+
+// CounterMode returns the counter mode (set, count, atomic) used
+// when building for coverage for the program that produced this
+// meta-data file.
+func (r *CoverageMetaFileReader) CounterMode() coverage.CounterMode {
+ return r.hdr.CMode
+}
+
+// CounterGranularity returns the counter granularity (single counter per
+// function, or counter per block) selected when building for coverage
+// for the program that produced this meta-data file.
+func (r *CoverageMetaFileReader) CounterGranularity() coverage.CounterGranularity {
+ return r.hdr.CGranularity
+}
+
+// FileHash returns the hash computed for all of the package meta-data
+// blobs. Coverage counter data files refer to this hash, and the
+// hash will be encoded into the meta-data file name.
+func (r *CoverageMetaFileReader) FileHash() [16]byte {
+ return r.hdr.MetaFileHash
+}
+
+// GetPackageDecoder requests a decoder object for the package within
+// the meta-data file whose index is 'pkIdx'. If the
+// CoverageMetaFileReader was set up with a read-only file view, a
+// pointer into that file view will be returned, otherwise the buffer
+// 'payloadbuf' will be written to (or if it is not of sufficient
+// size, a new buffer will be allocated). Return value is the decoder,
+// a byte slice with the encoded meta-data, and an error.
+func (r *CoverageMetaFileReader) GetPackageDecoder(pkIdx uint32, payloadbuf []byte) (*CoverageMetaDataDecoder, []byte, error) {
+ pp, err := r.GetPackagePayload(pkIdx, payloadbuf)
+ if r.debug {
+ fmt.Fprintf(os.Stderr, "=-= pkidx=%d payload length is %d hash=%s\n",
+ pkIdx, len(pp), fmt.Sprintf("%x", md5.Sum(pp)))
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ mdd, err := NewCoverageMetaDataDecoder(pp, r.fileView != nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ return mdd, pp, nil
+}
+
+// GetPackagePayload returns the raw (encoded) meta-data payload for the
+// package with index 'pkIdx'. As with GetPackageDecoder, if the
+// CoverageMetaFileReader was set up with a read-only file view, a
+// pointer into that file view will be returned, otherwise the buffer
+// 'payloadbuf' will be written to (or if it is not of sufficient
+// size, a new buffer will be allocated). Return values are a byte slice
+// with the encoded meta-data, and an error.
+func (r *CoverageMetaFileReader) GetPackagePayload(pkIdx uint32, payloadbuf []byte) ([]byte, error) {
+
+ // Determine correct offset/length.
+ if uint64(pkIdx) >= r.hdr.Entries {
+ return nil, fmt.Errorf("GetPackagePayload: illegal pkg index %d", pkIdx)
+ }
+ off := r.pkgOffsets[pkIdx]
+ len := r.pkgLengths[pkIdx]
+
+ if r.debug {
+ fmt.Fprintf(os.Stderr, "=-= for pk %d, off=%d len=%d\n", pkIdx, off, len)
+ }
+
+ if r.fileView != nil {
+ return r.fileView[off : off+len], nil
+ }
+
+ payload := payloadbuf[:0]
+ if cap(payload) < int(len) {
+ payload = make([]byte, 0, len)
+ }
+ payload = append(payload, make([]byte, len)...)
+ if _, err := r.f.Seek(int64(off), io.SeekStart); err != nil {
+ return nil, err
+ }
+ if _, err := io.ReadFull(r.f, payload); err != nil {
+ return nil, err
+ }
+ return payload, nil
+}
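
A hedged usage sketch for the file-level reader above (package and function names are hypothetical; the internal/... imports assume code living inside the standard library tree):

package covtool

import (
	"fmt"
	"os"

	"internal/coverage/decodemeta"
)

// listPackages opens a covmeta.<hash> file and prints one line per package.
func listPackages(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	mr, err := decodemeta.NewCoverageMetaFileReader(f, nil) // nil => plain file reads, no mmap view
	if err != nil {
		return err
	}
	fmt.Printf("mode=%s granularity=%s packages=%d\n",
		mr.CounterMode(), mr.CounterGranularity(), mr.NumPackages())
	var payload []byte
	for i := uint32(0); uint64(i) < mr.NumPackages(); i++ {
		dec, p, err := mr.GetPackageDecoder(i, payload)
		if err != nil {
			return err
		}
		payload = p // reuse the backing buffer on later iterations
		fmt.Printf("  %s: %d funcs\n", dec.PackagePath(), dec.NumFuncs())
	}
	return nil
}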
diff --git a/src/internal/coverage/defs.go b/src/internal/coverage/defs.go
new file mode 100644
index 0000000..8751b9f
--- /dev/null
+++ b/src/internal/coverage/defs.go
@@ -0,0 +1,374 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package coverage
+
+// Types and constants related to the output files written
+// by code coverage tooling. When a coverage-instrumented binary
+// is run, it emits two output files: a meta-data output file, and
+// a counter data output file.
+
+//.....................................................................
+//
+// Meta-data definitions:
+//
+// The meta-data file is composed of a file header, a series of
+// meta-data blobs/sections (one per instrumented package), and an offsets
+// area storing the offsets of each section. The format of the meta-data
+// file looks like this:
+//
+// --header----------
+// | magic: [4]byte magic string
+// | version
+// | total length of meta-data file in bytes
+// | numPkgs: number of package entries in file
+// | hash: [16]byte hash of entire meta-data payload
+// | offset to string table section
+// | length of string table
+// | number of entries in string table
+// | counter mode
+// | counter granularity
+// --package offsets table------
+// <offset to pkg 0>
+// <offset to pkg 1>
+// ...
+// --package lengths table------
+// <length of pkg 0>
+// <length of pkg 1>
+// ...
+// --string table------
+// <uleb128 len> 8
+// <data> "somestring"
+// ...
+// --package payloads------
+// <meta-symbol for pkg 0>
+// <meta-symbol for pkg 1>
+// ...
+//
+// Each package payload is a stand-alone blob emitted by the compiler,
+// and does not depend on anything else in the meta-data file. In
+// particular, each blob has its own string table. Note that the
+// file-level string table is expected to be very short (most strings
+// will be in the meta-data blobs themselves).
+
+// CovMetaMagic holds the magic string for a meta-data file.
+var CovMetaMagic = [4]byte{'\x00', '\x63', '\x76', '\x6d'}
+
+// MetaFilePref is a prefix used when emitting meta-data files; these
+// files are of the form "covmeta.<hash>", where hash is a hash
+// computed from the hashes of all the package meta-data symbols in
+// the program.
+const MetaFilePref = "covmeta"
+
+// MetaFileVersion contains the current (most recent) meta-data file version.
+const MetaFileVersion = 1
+
+// MetaFileHeader stores file header information for a meta-data file.
+type MetaFileHeader struct {
+ Magic [4]byte
+ Version uint32
+ TotalLength uint64
+ Entries uint64
+ MetaFileHash [16]byte
+ StrTabOffset uint32
+ StrTabLength uint32
+ CMode CounterMode
+ CGranularity CounterGranularity
+ _ [6]byte // padding
+}
+
+// MetaSymbolHeader stores header information for a single
+// meta-data blob, e.g. the coverage meta-data payload
+// computed for a given Go package.
+type MetaSymbolHeader struct {
+ Length uint32 // size of meta-symbol payload in bytes
+ PkgName uint32 // string table index
+ PkgPath uint32 // string table index
+ ModulePath uint32 // string table index
+ MetaHash [16]byte
+ _ byte // currently unused
+ _ [3]byte // padding
+ NumFiles uint32
+ NumFuncs uint32
+}
+
+const CovMetaHeaderSize = 16 + 4 + 4 + 4 + 4 + 4 + 4 + 4 // keep in sync with above
+
+// As an example, consider the following Go package:
+//
+// 01: package p
+// 02:
+// 03: var v, w, z int
+// 04:
+// 05: func small(x, y int) int {
+// 06: v++
+// 07: // comment
+// 08: if y == 0 {
+// 09: return x
+// 10: }
+// 11: return (x << 1) ^ (9 / y)
+// 12: }
+// 13:
+// 14: func Medium(q, r int) int {
+// 15: s1 := small(q, r)
+// 16: z += s1
+// 17: s2 := small(r, q)
+// 18: w -= s2
+// 19: return w + z
+// 20: }
+//
+// The meta-data blob for the single package above might look like the
+// following:
+//
+// -- MetaSymbolHeader header----------
+// | size: size of this blob in bytes
+// | packagepath: <path to p>
+// | modulepath: <modpath for p>
+// | nfiles: 1
+// | nfunctions: 2
+// --func offsets table------
+// <offset to func 0>
+// <offset to func 1>
+// --string table (contains all files and functions)------
+// | <uleb128 len> 4
+// | <data> "p.go"
+// | <uleb128 len> 5
+// | <data> "small"
+// | <uleb128 len> 6
+// | <data> "Medium"
+// --func 0------
+// | <uleb128> num units: 3
+// | <uleb128> func name: S1 (index into string table)
+// | <uleb128> file: S0 (index into string table)
+// | <unit 0>: S0 L6 L8 2
+// | <unit 1>: S0 L9 L9 1
+// | <unit 2>: S0 L11 L11 1
+// --func 1------
+// | <uleb128> num units: 1
+// | <uleb128> func name: S2 (index into string table)
+// | <uleb128> file: S0 (index into string table)
+// | <unit 0>: S0 L15 L19 5
+// ---end-----------
+
+// The following types and constants are used by the meta-data encoder/decoder.
+
+// FuncDesc encapsulates the meta-data definitions for a single Go function.
+// This version assumes that we're looking at a function before inlining;
+// if we want to capture a post-inlining view of the world, the
+// representations of source positions would need to be a good deal more
+// complicated.
+type FuncDesc struct {
+ Funcname string
+ Srcfile string
+ Units []CoverableUnit
+ Lit bool // true if this is a function literal
+}
+
+// CoverableUnit describes the source characteristics of a single
+// program unit for which we want to gather coverage info. Coverable
+// units are either "simple" or "intraline"; a "simple" coverable unit
+// corresponds to a basic block (region of straight-line code with no
+// jumps or control transfers). An "intraline" unit corresponds to a
+// logical clause nested within some other simple unit. A simple unit
+// will have a zero Parent value; for an intraline unit NxStmts will
+// be zero and Parent will be set to 1 plus the index of the
+// containing simple statement. Example:
+//
+// L7: q := 1
+// L8: x := (y == 101 || launch() == false)
+// L9: r := x * 2
+//
+// For the code above we would have three simple units (one for each
+// line), then an intraline unit describing the "launch() == false"
+// clause in line 8, with Parent pointing to the index of the line 8
+// unit in the units array.
+//
+// Note: in the initial version of the coverage revamp, only simple
+// units will be in use.
+type CoverableUnit struct {
+ StLine, StCol uint32
+ EnLine, EnCol uint32
+ NxStmts uint32
+ Parent uint32
+}
+
+// CounterMode tracks the "flavor" of the coverage counters being
+// used in a given coverage-instrumented program.
+type CounterMode uint8
+
+const (
+ CtrModeInvalid CounterMode = iota
+ CtrModeSet // "set" mode
+ CtrModeCount // "count" mode
+ CtrModeAtomic // "atomic" mode
+ CtrModeRegOnly // registration-only pseudo-mode
+ CtrModeTestMain // testmain pseudo-mode
+)
+
+func (cm CounterMode) String() string {
+ switch cm {
+ case CtrModeSet:
+ return "set"
+ case CtrModeCount:
+ return "count"
+ case CtrModeAtomic:
+ return "atomic"
+ case CtrModeRegOnly:
+ return "regonly"
+ case CtrModeTestMain:
+ return "testmain"
+ }
+ return "<invalid>"
+}
+
+func ParseCounterMode(mode string) CounterMode {
+ var cm CounterMode
+ switch mode {
+ case "set":
+ cm = CtrModeSet
+ case "count":
+ cm = CtrModeCount
+ case "atomic":
+ cm = CtrModeAtomic
+ case "regonly":
+ cm = CtrModeRegOnly
+ case "testmain":
+ cm = CtrModeTestMain
+ default:
+ cm = CtrModeInvalid
+ }
+ return cm
+}
+
+// CounterGranularity tracks the granularity of the coverage counters being
+// used in a given coverage-instrumented program.
+type CounterGranularity uint8
+
+const (
+ CtrGranularityInvalid CounterGranularity = iota
+ CtrGranularityPerBlock
+ CtrGranularityPerFunc
+)
+
+func (cm CounterGranularity) String() string {
+ switch cm {
+ case CtrGranularityPerBlock:
+ return "perblock"
+ case CtrGranularityPerFunc:
+ return "perfunc"
+ }
+ return "<invalid>"
+}
+
+//.....................................................................
+//
+// Counter data definitions:
+//
+
+// A counter data file is composed of a file header followed by one or
+// more "segments" (each segment representing a given run or partial
+// run of a given binary) followed by a footer.
+
+// CovCounterMagic holds the magic string for a coverage counter-data file.
+var CovCounterMagic = [4]byte{'\x00', '\x63', '\x77', '\x6d'}
+
+// CounterFileVersion stores the most recent counter data file version.
+const CounterFileVersion = 1
+
+// CounterFileHeader stores file header information for a counter-data file.
+type CounterFileHeader struct {
+ Magic [4]byte
+ Version uint32
+ MetaHash [16]byte
+ CFlavor CounterFlavor
+ BigEndian bool
+ _ [6]byte // padding
+}
+
+// CounterSegmentHeader encapsulates information about a specific
+// segment in a counter data file, which at the moment contains
+// counters data from a single execution of a coverage-instrumented
+// program. Following the segment header will be the string table and
+// args table, and then (possibly) padding bytes to bring the byte
+// size of the preamble up to a multiple of 4. Immediately following
+// that will be the counter payloads.
+//
+// The "args" section of a segment is used to store annotations
+// describing where the counter data came from; this section is
+// basically a series of key-value pairs (can be thought of as an
+// encoded 'map[string]string'). At the moment we only write os.Args
+// data to this section, using pairs of the form "argc=<integer>",
+// "argv0=<os.Args[0]>", "argv1=<os.Args[1]>", and so on. In the
+// future the args table may also include things like GOOS/GOARCH
+// values, and/or tags indicating which tests were run to generate the
+// counter data.
+type CounterSegmentHeader struct {
+ FcnEntries uint64
+ StrTabLen uint32
+ ArgsLen uint32
+}
+
+// CounterFileFooter appears at the tail end of a counter data file,
+// and stores the number of segments it contains.
+type CounterFileFooter struct {
+ Magic [4]byte
+ _ [4]byte // padding
+ NumSegments uint32
+ _ [4]byte // padding
+}
+
+// CounterFilePref is the file prefix used when emitting coverage data
+// output files. CounterFileTempl describes the format of the file
+// name: prefix followed by meta-file hash followed by process ID
+// followed by emit UnixNanoTime.
+const CounterFilePref = "covcounters"
+const CounterFileTempl = "%s.%x.%d.%d"
+const CounterFileRegexp = `^%s\.(\S+)\.(\d+)\.(\d+)+$`
+
+// CounterFlavor describes how functions and counters are
+// stored/represented in the counter section of the file.
+type CounterFlavor uint8
+
+const (
+ // "Raw" representation: all values (pkg ID, func ID, num counters,
+ // and counters themselves) are stored as uint32's.
+ CtrRaw CounterFlavor = iota + 1
+
+ // "ULeb" representation: all values (pkg ID, func ID, num counters,
+ // and counters themselves) are stored with ULEB128 encoding.
+ CtrULeb128
+)
+
+func Round4(x int) int {
+ return (x + 3) &^ 3
+}
+
+//.....................................................................
+//
+// Runtime counter data definitions.
+//
+
+// At runtime within a coverage-instrumented program, the "counters"
+// object we associate with each instrumented function can be thought of
+// as a struct of the following form:
+//
+// struct {
+// numCtrs uint32
+// pkgid uint32
+// funcid uint32
+// counterArray [numBlocks]uint32
+// }
+//
+// where "numCtrs" is the number of blocks / coverable units within the
+// function, "pkgid" is the unique index assigned to this package by
+// the runtime, "funcid" is the index of this function within its containing
+// package, and "counterArray" stores the actual counters.
+//
+// The counter variable itself is created not as a struct but as a flat
+// array of uint32's; we then use the offsets below to index into it.
+
+const NumCtrsOffset = 0
+const PkgIdOffset = 1
+const FuncIdOffset = 2
+const FirstCtrOffset = 3
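
To make the flat counter layout above concrete, here is a small illustrative sketch (the function name is hypothetical) of how a per-function counter region [numCtrs, pkgid, funcid, counters...] could be interpreted using the offset constants:

package covtool

import (
	"fmt"

	"internal/coverage"
)

// describeCounterRegion interprets one function's flat counter region.
func describeCounterRegion(ctrs []uint32) {
	n := ctrs[coverage.NumCtrsOffset]
	fmt.Printf("pkgid=%d funcid=%d units=%d\n",
		ctrs[coverage.PkgIdOffset], ctrs[coverage.FuncIdOffset], n)
	for i := uint32(0); i < n; i++ {
		fmt.Printf("  unit %d executed %d time(s)\n", i, ctrs[coverage.FirstCtrOffset+i])
	}
}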
diff --git a/src/internal/coverage/encodecounter/encode.go b/src/internal/coverage/encodecounter/encode.go
new file mode 100644
index 0000000..5958673
--- /dev/null
+++ b/src/internal/coverage/encodecounter/encode.go
@@ -0,0 +1,297 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package encodecounter
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/slicewriter"
+ "internal/coverage/stringtab"
+ "internal/coverage/uleb128"
+ "io"
+ "os"
+ "sort"
+)
+
+// This package contains APIs and helpers for encoding initial portions
+// of the counter data files emitted at runtime when coverage instrumentation
+// is enabled. Counter data files may contain multiple segments; the file
+// header and first segment are written via the "Write" method below, and
+// additional segments can then be added using "AddSegment".
+
+type CoverageDataWriter struct {
+ stab *stringtab.Writer
+ w *bufio.Writer
+ csh coverage.CounterSegmentHeader
+ tmp []byte
+ cflavor coverage.CounterFlavor
+ segs uint32
+ debug bool
+}
+
+func NewCoverageDataWriter(w io.Writer, flav coverage.CounterFlavor) *CoverageDataWriter {
+ r := &CoverageDataWriter{
+ stab: &stringtab.Writer{},
+ w: bufio.NewWriter(w),
+
+ tmp: make([]byte, 64),
+ cflavor: flav,
+ }
+ r.stab.InitWriter()
+ r.stab.Lookup("")
+ return r
+}
+
+// CounterVisitor describes a helper object used during counter file
+// writing; when writing counter data files, clients pass a
+// CounterVisitor to the write/emit routines; the expectation is
+// that the VisitFuncs method will invoke the callback "f" with
+// data for each function to emit to the file.
+type CounterVisitor interface {
+ VisitFuncs(f CounterVisitorFn) error
+}
+
+// CounterVisitorFn describes a callback function invoked when writing
+// coverage counter data.
+type CounterVisitorFn func(pkid uint32, funcid uint32, counters []uint32) error
+
+// Write writes the contents of the counter data file to the writer
+// previously supplied to NewCoverageDataWriter. Returns an error
+// if something goes wrong with the write.
+func (cfw *CoverageDataWriter) Write(metaFileHash [16]byte, args map[string]string, visitor CounterVisitor) error {
+ if err := cfw.writeHeader(metaFileHash); err != nil {
+ return err
+ }
+ return cfw.AppendSegment(args, visitor)
+}
+
+func padToFourByteBoundary(ws *slicewriter.WriteSeeker) error {
+ sz := len(ws.BytesWritten())
+ zeros := []byte{0, 0, 0, 0}
+ rem := uint32(sz) % 4
+ if rem != 0 {
+ pad := zeros[:(4 - rem)]
+ if nw, err := ws.Write(pad); err != nil {
+ return err
+ } else if nw != len(pad) {
+ return fmt.Errorf("error: short write")
+ }
+ }
+ return nil
+}
+
+func (cfw *CoverageDataWriter) patchSegmentHeader(ws *slicewriter.WriteSeeker) error {
+ // record position
+ off, err := ws.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return fmt.Errorf("error seeking in patchSegmentHeader: %v", err)
+ }
+ // seek back to start so that we can update the segment header
+ if _, err := ws.Seek(0, io.SeekStart); err != nil {
+ return fmt.Errorf("error seeking in patchSegmentHeader: %v", err)
+ }
+ if cfw.debug {
+ fmt.Fprintf(os.Stderr, "=-= writing counter segment header: %+v", cfw.csh)
+ }
+ if err := binary.Write(ws, binary.LittleEndian, cfw.csh); err != nil {
+ return err
+ }
+ // ... and finally return to the original offset.
+ if _, err := ws.Seek(off, io.SeekStart); err != nil {
+ return fmt.Errorf("error seeking in patchSegmentHeader: %v", err)
+ }
+ return nil
+}
+
+func (cfw *CoverageDataWriter) writeSegmentPreamble(args map[string]string, ws *slicewriter.WriteSeeker) error {
+ if err := binary.Write(ws, binary.LittleEndian, cfw.csh); err != nil {
+ return err
+ }
+ hdrsz := uint32(len(ws.BytesWritten()))
+
+ // Write string table and args to a byte slice (since we need
+ // to capture offsets at various points), then emit the slice
+ // once we are done.
+ cfw.stab.Freeze()
+ if err := cfw.stab.Write(ws); err != nil {
+ return err
+ }
+ cfw.csh.StrTabLen = uint32(len(ws.BytesWritten())) - hdrsz
+
+ akeys := make([]string, 0, len(args))
+ for k := range args {
+ akeys = append(akeys, k)
+ }
+ sort.Strings(akeys)
+
+ wrULEB128 := func(v uint) error {
+ cfw.tmp = cfw.tmp[:0]
+ cfw.tmp = uleb128.AppendUleb128(cfw.tmp, v)
+ if _, err := ws.Write(cfw.tmp); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Count of arg pairs.
+ if err := wrULEB128(uint(len(args))); err != nil {
+ return err
+ }
+ // Arg pairs themselves.
+ for _, k := range akeys {
+ ki := uint(cfw.stab.Lookup(k))
+ if err := wrULEB128(ki); err != nil {
+ return err
+ }
+ v := args[k]
+ vi := uint(cfw.stab.Lookup(v))
+ if err := wrULEB128(vi); err != nil {
+ return err
+ }
+ }
+ if err := padToFourByteBoundary(ws); err != nil {
+ return err
+ }
+ cfw.csh.ArgsLen = uint32(len(ws.BytesWritten())) - (cfw.csh.StrTabLen + hdrsz)
+
+ return nil
+}
+
+// AppendSegment appends a new segment to a counter data file, with a new
+// args section followed by a payload of counter data clauses.
+func (cfw *CoverageDataWriter) AppendSegment(args map[string]string, visitor CounterVisitor) error {
+ cfw.stab = &stringtab.Writer{}
+ cfw.stab.InitWriter()
+ cfw.stab.Lookup("")
+
+ var err error
+ for k, v := range args {
+ cfw.stab.Lookup(k)
+ cfw.stab.Lookup(v)
+ }
+
+ ws := &slicewriter.WriteSeeker{}
+ if err = cfw.writeSegmentPreamble(args, ws); err != nil {
+ return err
+ }
+ if err = cfw.writeCounters(visitor, ws); err != nil {
+ return err
+ }
+ if err = cfw.patchSegmentHeader(ws); err != nil {
+ return err
+ }
+ if err := cfw.writeBytes(ws.BytesWritten()); err != nil {
+ return err
+ }
+ if err = cfw.writeFooter(); err != nil {
+ return err
+ }
+ if err := cfw.w.Flush(); err != nil {
+ return fmt.Errorf("write error: %v", err)
+ }
+ cfw.stab = nil
+ return nil
+}
+
+func (cfw *CoverageDataWriter) writeHeader(metaFileHash [16]byte) error {
+ // Emit file header.
+ ch := coverage.CounterFileHeader{
+ Magic: coverage.CovCounterMagic,
+ Version: coverage.CounterFileVersion,
+ MetaHash: metaFileHash,
+ CFlavor: cfw.cflavor,
+ BigEndian: false,
+ }
+ if err := binary.Write(cfw.w, binary.LittleEndian, ch); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cfw *CoverageDataWriter) writeBytes(b []byte) error {
+ if len(b) == 0 {
+ return nil
+ }
+ nw, err := cfw.w.Write(b)
+ if err != nil {
+ return fmt.Errorf("error writing counter data: %v", err)
+ }
+ if len(b) != nw {
+ return fmt.Errorf("error writing counter data: short write")
+ }
+ return nil
+}
+
+func (cfw *CoverageDataWriter) writeCounters(visitor CounterVisitor, ws *slicewriter.WriteSeeker) error {
+ // Notes:
+ // - this version writes everything little-endian, which means
+ // a call is needed to encode every value (expensive)
+ // - we may want to move to a model in which we just blast out
+ // all counters, or possibly mmap the file and do the write
+ // implicitly.
+ ctrb := make([]byte, 4)
+ wrval := func(val uint32) error {
+ var buf []byte
+ var towr int
+ if cfw.cflavor == coverage.CtrRaw {
+ binary.LittleEndian.PutUint32(ctrb, val)
+ buf = ctrb
+ towr = 4
+ } else if cfw.cflavor == coverage.CtrULeb128 {
+ cfw.tmp = cfw.tmp[:0]
+ cfw.tmp = uleb128.AppendUleb128(cfw.tmp, uint(val))
+ buf = cfw.tmp
+ towr = len(buf)
+ } else {
+ panic("internal error: bad counter flavor")
+ }
+ if sz, err := ws.Write(buf); err != nil {
+ return err
+ } else if sz != towr {
+ return fmt.Errorf("writing counters: short write")
+ }
+ return nil
+ }
+
+ // Write out entries for each live function.
+ emitter := func(pkid uint32, funcid uint32, counters []uint32) error {
+ cfw.csh.FcnEntries++
+ if err := wrval(uint32(len(counters))); err != nil {
+ return err
+ }
+
+ if err := wrval(pkid); err != nil {
+ return err
+ }
+
+ if err := wrval(funcid); err != nil {
+ return err
+ }
+ for _, val := range counters {
+ if err := wrval(val); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ if err := visitor.VisitFuncs(emitter); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cfw *CoverageDataWriter) writeFooter() error {
+ cfw.segs++
+ cf := coverage.CounterFileFooter{
+ Magic: coverage.CovCounterMagic,
+ NumSegments: cfw.segs,
+ }
+ if err := binary.Write(cfw.w, binary.LittleEndian, cf); err != nil {
+ return err
+ }
+ return nil
+}
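
A hedged sketch of the writer above in action: a toy CounterVisitor backed by an in-memory slice (one package, counters indexed by function ID) used to emit a counter data file with a single segment. Names here are illustrative, not part of the patch.

package covtool

import (
	"io"

	"internal/coverage"
	"internal/coverage/encodecounter"
)

// sliceVisitor is a toy CounterVisitor: one package, counters indexed by funcid.
type sliceVisitor [][]uint32

func (v sliceVisitor) VisitFuncs(f encodecounter.CounterVisitorFn) error {
	for funcid, ctrs := range v {
		if err := f(0 /* pkgid */, uint32(funcid), ctrs); err != nil {
			return err
		}
	}
	return nil
}

// writeCounterFile emits a counter data file containing a single segment.
func writeCounterFile(w io.Writer, metaHash [16]byte) error {
	cdw := encodecounter.NewCoverageDataWriter(w, coverage.CtrULeb128)
	args := map[string]string{"argc": "1", "argv0": "prog"}
	return cdw.Write(metaHash, args, sliceVisitor{{1, 0, 3}, {7}})
}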
diff --git a/src/internal/coverage/encodemeta/encode.go b/src/internal/coverage/encodemeta/encode.go
new file mode 100644
index 0000000..d211c7c
--- /dev/null
+++ b/src/internal/coverage/encodemeta/encode.go
@@ -0,0 +1,215 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package encodemeta
+
+// This package contains APIs and helpers for encoding the meta-data
+// "blob" for a single Go package, created when coverage
+// instrumentation is turned on.
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "internal/coverage"
+ "internal/coverage/stringtab"
+ "internal/coverage/uleb128"
+ "io"
+ "os"
+)
+
+type CoverageMetaDataBuilder struct {
+ stab stringtab.Writer
+ funcs []funcDesc
+ tmp []byte // temp work slice
+ h hash.Hash
+ pkgpath uint32
+ pkgname uint32
+ modpath uint32
+ debug bool
+ werr error
+}
+
+func NewCoverageMetaDataBuilder(pkgpath string, pkgname string, modulepath string) (*CoverageMetaDataBuilder, error) {
+ if pkgpath == "" {
+ return nil, fmt.Errorf("invalid empty package path")
+ }
+ x := &CoverageMetaDataBuilder{
+ tmp: make([]byte, 0, 256),
+ h: md5.New(),
+ }
+ x.stab.InitWriter()
+ x.stab.Lookup("")
+ x.pkgpath = x.stab.Lookup(pkgpath)
+ x.pkgname = x.stab.Lookup(pkgname)
+ x.modpath = x.stab.Lookup(modulepath)
+ io.WriteString(x.h, pkgpath)
+ io.WriteString(x.h, pkgname)
+ io.WriteString(x.h, modulepath)
+ return x, nil
+}
+
+func h32(x uint32, h hash.Hash, tmp []byte) {
+ tmp = tmp[:0]
+ tmp = append(tmp, []byte{0, 0, 0, 0}...)
+ binary.LittleEndian.PutUint32(tmp, x)
+ h.Write(tmp)
+}
+
+type funcDesc struct {
+ encoded []byte
+}
+
+// AddFunc registers a new function with the meta data builder.
+func (b *CoverageMetaDataBuilder) AddFunc(f coverage.FuncDesc) uint {
+ hashFuncDesc(b.h, &f, b.tmp)
+ fd := funcDesc{}
+ b.tmp = b.tmp[:0]
+ b.tmp = uleb128.AppendUleb128(b.tmp, uint(len(f.Units)))
+ b.tmp = uleb128.AppendUleb128(b.tmp, uint(b.stab.Lookup(f.Funcname)))
+ b.tmp = uleb128.AppendUleb128(b.tmp, uint(b.stab.Lookup(f.Srcfile)))
+ for _, u := range f.Units {
+ b.tmp = uleb128.AppendUleb128(b.tmp, uint(u.StLine))
+ b.tmp = uleb128.AppendUleb128(b.tmp, uint(u.StCol))
+ b.tmp = uleb128.AppendUleb128(b.tmp, uint(u.EnLine))
+ b.tmp = uleb128.AppendUleb128(b.tmp, uint(u.EnCol))
+ b.tmp = uleb128.AppendUleb128(b.tmp, uint(u.NxStmts))
+ }
+ lit := uint(0)
+ if f.Lit {
+ lit = 1
+ }
+ b.tmp = uleb128.AppendUleb128(b.tmp, lit)
+ fd.encoded = bytes.Clone(b.tmp)
+ rv := uint(len(b.funcs))
+ b.funcs = append(b.funcs, fd)
+ return rv
+}
+
+func (b *CoverageMetaDataBuilder) emitFuncOffsets(w io.WriteSeeker, off int64) int64 {
+ nFuncs := len(b.funcs)
+ var foff int64 = coverage.CovMetaHeaderSize + int64(b.stab.Size()) + int64(nFuncs)*4
+ for idx := 0; idx < nFuncs; idx++ {
+ b.wrUint32(w, uint32(foff))
+ foff += int64(len(b.funcs[idx].encoded))
+ }
+ return off + (int64(len(b.funcs)) * 4)
+}
+
+func (b *CoverageMetaDataBuilder) emitFunc(w io.WriteSeeker, off int64, f funcDesc) (int64, error) {
+ ew := len(f.encoded)
+ if nw, err := w.Write(f.encoded); err != nil {
+ return 0, err
+ } else if ew != nw {
+ return 0, fmt.Errorf("short write emitting coverage meta-data")
+ }
+ return off + int64(ew), nil
+}
+
+func (b *CoverageMetaDataBuilder) reportWriteError(err error) {
+ if b.werr == nil { // record only the first write error
+ b.werr = err
+ }
+}
+
+func (b *CoverageMetaDataBuilder) wrUint32(w io.WriteSeeker, v uint32) {
+ b.tmp = b.tmp[:0]
+ b.tmp = append(b.tmp, []byte{0, 0, 0, 0}...)
+ binary.LittleEndian.PutUint32(b.tmp, v)
+ if nw, err := w.Write(b.tmp); err != nil {
+ b.reportWriteError(err)
+ } else if nw != 4 {
+ b.reportWriteError(fmt.Errorf("short write"))
+ }
+}
+
+// Emit writes the meta-data accumulated so far in this builder to 'w'.
+// Returns a hash of the meta-data payload and an error.
+func (b *CoverageMetaDataBuilder) Emit(w io.WriteSeeker) ([16]byte, error) {
+ // Emit header. Length will initially be zero, we'll
+ // back-patch it later.
+ var digest [16]byte
+ copy(digest[:], b.h.Sum(nil))
+ mh := coverage.MetaSymbolHeader{
+ // hash and length initially zero, will be back-patched
+ PkgPath: uint32(b.pkgpath),
+ PkgName: uint32(b.pkgname),
+ ModulePath: uint32(b.modpath),
+ NumFiles: uint32(b.stab.Nentries()),
+ NumFuncs: uint32(len(b.funcs)),
+ MetaHash: digest,
+ }
+ if b.debug {
+ fmt.Fprintf(os.Stderr, "=-= writing header: %+v\n", mh)
+ }
+ if err := binary.Write(w, binary.LittleEndian, mh); err != nil {
+ return digest, fmt.Errorf("error writing meta-file header: %v", err)
+ }
+ off := int64(coverage.CovMetaHeaderSize)
+
+ // Write function offsets section
+ off = b.emitFuncOffsets(w, off)
+
+ // Check for any errors up to this point.
+ if b.werr != nil {
+ return digest, b.werr
+ }
+
+ // Write string table.
+ if err := b.stab.Write(w); err != nil {
+ return digest, err
+ }
+ off += int64(b.stab.Size())
+
+ // Write functions
+ for _, f := range b.funcs {
+ var err error
+ off, err = b.emitFunc(w, off, f)
+ if err != nil {
+ return digest, err
+ }
+ }
+
+ // Back-patch the length.
+ totalLength := uint32(off)
+ if _, err := w.Seek(0, io.SeekStart); err != nil {
+ return digest, err
+ }
+ b.wrUint32(w, totalLength)
+ if b.werr != nil {
+ return digest, b.werr
+ }
+ return digest, nil
+}
+
+// HashFuncDesc computes an md5 sum of a coverage.FuncDesc and returns
+// a digest for it.
+func HashFuncDesc(f *coverage.FuncDesc) [16]byte {
+ h := md5.New()
+ tmp := make([]byte, 0, 32)
+ hashFuncDesc(h, f, tmp)
+ var r [16]byte
+ copy(r[:], h.Sum(nil))
+ return r
+}
+
+// hashFuncDesc incorporates a given function 'f' into the hash 'h'.
+func hashFuncDesc(h hash.Hash, f *coverage.FuncDesc, tmp []byte) {
+ io.WriteString(h, f.Funcname)
+ io.WriteString(h, f.Srcfile)
+ for _, u := range f.Units {
+ h32(u.StLine, h, tmp)
+ h32(u.StCol, h, tmp)
+ h32(u.EnLine, h, tmp)
+ h32(u.EnCol, h, tmp)
+ h32(u.NxStmts, h, tmp)
+ }
+ lit := uint32(0)
+ if f.Lit {
+ lit = 1
+ }
+ h32(lit, h, tmp)
+}
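
For illustration, a minimal sketch (hypothetical package path and names) that builds the meta-data blob for a one-function package and captures the emitted bytes with a slicewriter.WriteSeeker:

package covtool

import (
	"internal/coverage"
	"internal/coverage/encodemeta"
	"internal/coverage/slicewriter"
)

// buildBlob assembles a meta-data blob for a hypothetical one-function package.
func buildBlob() ([]byte, [16]byte, error) {
	b, err := encodemeta.NewCoverageMetaDataBuilder("example.com/p", "p", "example.com/mod")
	if err != nil {
		return nil, [16]byte{}, err
	}
	b.AddFunc(coverage.FuncDesc{
		Funcname: "small",
		Srcfile:  "p.go",
		Units: []coverage.CoverableUnit{
			{StLine: 6, StCol: 2, EnLine: 8, EnCol: 12, NxStmts: 2},
		},
	})
	ws := &slicewriter.WriteSeeker{}
	hash, err := b.Emit(ws)
	if err != nil {
		return nil, hash, err
	}
	return ws.BytesWritten(), hash, nil
}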
diff --git a/src/internal/coverage/encodemeta/encodefile.go b/src/internal/coverage/encodemeta/encodefile.go
new file mode 100644
index 0000000..38ae46e
--- /dev/null
+++ b/src/internal/coverage/encodemeta/encodefile.go
@@ -0,0 +1,132 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package encodemeta
+
+import (
+ "bufio"
+ "crypto/md5"
+ "encoding/binary"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/stringtab"
+ "io"
+ "os"
+ "unsafe"
+)
+
+// This package contains APIs and helpers for writing out a meta-data
+// file (composed of a file header, offsets/lengths, and then a series of
+// meta-data blobs emitted by the compiler, one per Go package).
+
+type CoverageMetaFileWriter struct {
+ stab stringtab.Writer
+ mfname string
+ w *bufio.Writer
+ tmp []byte
+ debug bool
+}
+
+func NewCoverageMetaFileWriter(mfname string, w io.Writer) *CoverageMetaFileWriter {
+ r := &CoverageMetaFileWriter{
+ mfname: mfname,
+ w: bufio.NewWriter(w),
+ tmp: make([]byte, 64),
+ }
+ r.stab.InitWriter()
+ r.stab.Lookup("")
+ return r
+}
+
+func (m *CoverageMetaFileWriter) Write(finalHash [16]byte, blobs [][]byte, mode coverage.CounterMode, granularity coverage.CounterGranularity) error {
+ mhsz := uint64(unsafe.Sizeof(coverage.MetaFileHeader{}))
+ stSize := m.stab.Size()
+ stOffset := mhsz + uint64(16*len(blobs))
+ preambleLength := stOffset + uint64(stSize)
+
+ if m.debug {
+ fmt.Fprintf(os.Stderr, "=+= sizeof(MetaFileHeader)=%d\n", mhsz)
+ fmt.Fprintf(os.Stderr, "=+= preambleLength=%d stSize=%d\n", preambleLength, stSize)
+ }
+
+ // Compute total size
+ tlen := preambleLength
+ for i := 0; i < len(blobs); i++ {
+ tlen += uint64(len(blobs[i]))
+ }
+
+ // Emit header
+ mh := coverage.MetaFileHeader{
+ Magic: coverage.CovMetaMagic,
+ Version: coverage.MetaFileVersion,
+ TotalLength: tlen,
+ Entries: uint64(len(blobs)),
+ MetaFileHash: finalHash,
+ StrTabOffset: uint32(stOffset),
+ StrTabLength: stSize,
+ CMode: mode,
+ CGranularity: granularity,
+ }
+ var err error
+ if err = binary.Write(m.w, binary.LittleEndian, mh); err != nil {
+ return fmt.Errorf("error writing %s: %v", m.mfname, err)
+ }
+
+ if m.debug {
+ fmt.Fprintf(os.Stderr, "=+= len(blobs) is %d\n", mh.Entries)
+ }
+
+ // Emit package offsets section followed by package lengths section.
+ off := preambleLength
+ off2 := mhsz
+ buf := make([]byte, 8)
+ for _, blob := range blobs {
+ binary.LittleEndian.PutUint64(buf, off)
+ if _, err = m.w.Write(buf); err != nil {
+ return fmt.Errorf("error writing %s: %v", m.mfname, err)
+ }
+ if m.debug {
+ fmt.Fprintf(os.Stderr, "=+= pkg offset %d 0x%x\n", off, off)
+ }
+ off += uint64(len(blob))
+ off2 += 8
+ }
+ for _, blob := range blobs {
+ bl := uint64(len(blob))
+ binary.LittleEndian.PutUint64(buf, bl)
+ if _, err = m.w.Write(buf); err != nil {
+ return fmt.Errorf("error writing %s: %v", m.mfname, err)
+ }
+ if m.debug {
+ fmt.Fprintf(os.Stderr, "=+= pkg len %d 0x%x\n", bl, bl)
+ }
+ off2 += 8
+ }
+
+ // Emit string table
+ if err = m.stab.Write(m.w); err != nil {
+ return err
+ }
+
+ // Now emit blobs themselves.
+ for k, blob := range blobs {
+ if m.debug {
+ fmt.Fprintf(os.Stderr, "=+= writing blob %d len %d at off=%d hash %s\n", k, len(blob), off2, fmt.Sprintf("%x", md5.Sum(blob)))
+ }
+ if _, err = m.w.Write(blob); err != nil {
+ return fmt.Errorf("error writing %s: %v", m.mfname, err)
+ }
+ if m.debug {
+ fmt.Fprintf(os.Stderr, "=+= wrote package payload of %d bytes\n",
+ len(blob))
+ }
+ off2 += uint64(len(blob))
+ }
+
+ // Flush writer, and we're done.
+ if err = m.w.Flush(); err != nil {
+ return fmt.Errorf("error writing %s: %v", m.mfname, err)
+ }
+ return nil
+}
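
A corresponding sketch (illustrative only) showing how blobs produced by CoverageMetaDataBuilder could be assembled into a meta-data file with the writer above:

package covtool

import (
	"os"

	"internal/coverage"
	"internal/coverage/encodemeta"
)

// writeMetaFile writes previously built package blobs into a meta-data file.
func writeMetaFile(path string, finalHash [16]byte, blobs [][]byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	mw := encodemeta.NewCoverageMetaFileWriter(path, f)
	return mw.Write(finalHash, blobs, coverage.CtrModeCount, coverage.CtrGranularityPerBlock)
}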
diff --git a/src/internal/coverage/pkid.go b/src/internal/coverage/pkid.go
new file mode 100644
index 0000000..8ddd44d
--- /dev/null
+++ b/src/internal/coverage/pkid.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package coverage
+
+// Building the runtime package with coverage instrumentation enabled
+// is tricky. For all other packages, you can be guaranteed that
+// the package init function is run before any functions are executed,
+// but this invariant is not maintained for packages such as "runtime",
+// "internal/cpu", etc. To handle this, hard-code the package ID for
+// the set of packages whose functions may be running before the
+// init function of the package is complete.
+//
+// Hardcoding is unfortunate because it means that the tool that does
+// coverage instrumentation has to keep a list of runtime packages,
+// meaning that if someone makes changes to the pkg "runtime"
+// dependencies, unexpected behavior will result for coverage builds.
+// The coverage runtime will detect and report the unexpected
+// behavior; look for an error of this form:
+//
+// internal error in coverage meta-data tracking:
+// list of hard-coded runtime package IDs needs revising.
+// registered list:
+// slot: 0 path='internal/cpu' hard-coded id: 1
+// slot: 1 path='internal/goarch' hard-coded id: 2
+// slot: 2 path='runtime/internal/atomic' hard-coded id: 3
+// slot: 3 path='internal/goos'
+// slot: 4 path='runtime/internal/sys' hard-coded id: 5
+// slot: 5 path='internal/abi' hard-coded id: 4
+// slot: 6 path='runtime/internal/math' hard-coded id: 6
+// slot: 7 path='internal/bytealg' hard-coded id: 7
+// slot: 8 path='internal/goexperiment'
+// slot: 9 path='runtime/internal/syscall' hard-coded id: 8
+// slot: 10 path='runtime' hard-coded id: 9
+// fatal error: runtime.addCovMeta
+//
+// For the error above, the hard-coded list is missing "internal/goos"
+// and "internal/goexperiment" ; the developer in question will need
+// to copy the list above into "rtPkgs" below.
+//
+// Note: this strategy assumes that the list of dependencies of
+// package runtime is fixed, and doesn't vary depending on OS/arch. If
+// this were not the case, we would need a table of some sort below
+// as opposed to a fixed list.
+
+var rtPkgs = [...]string{
+ "internal/cpu",
+ "internal/goarch",
+ "runtime/internal/atomic",
+ "internal/goos",
+ "runtime/internal/sys",
+ "internal/abi",
+ "runtime/internal/math",
+ "internal/bytealg",
+ "internal/goexperiment",
+ "runtime/internal/syscall",
+ "runtime",
+}
+
+// Scoping note: the constants and APIs in this file are internal
+// only, not expected to ever be exposed outside of the runtime (unlike
+// other coverage file formats and APIs, which will likely be shared
+// at some point).
+
+// NotHardCoded is a package pseudo-ID indicating that a given package
+// is not part of the runtime and doesn't require a hard-coded ID.
+const NotHardCoded = -1
+
+// HardCodedPkgID returns the hard-coded ID for the specified package
+// path, or -1 if we don't use a hard-coded ID. Hard-coded IDs start
+// at -2 and decrease as we go down the list.
+func HardCodedPkgID(pkgpath string) int {
+ for k, p := range rtPkgs {
+ if p == pkgpath {
+ return (0 - k) - 2
+ }
+ }
+ return NotHardCoded
+}
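
A quick worked example of the ID assignment (the values follow directly from the rtPkgs list above; the wrapper function is illustrative):

package covtool

import (
	"fmt"

	"internal/coverage"
)

func printHardCodedIDs() {
	fmt.Println(coverage.HardCodedPkgID("internal/cpu"))    // -2: first entry in rtPkgs
	fmt.Println(coverage.HardCodedPkgID("internal/goarch")) // -3: second entry
	fmt.Println(coverage.HardCodedPkgID("fmt"))             // -1: coverage.NotHardCoded
}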
diff --git a/src/internal/coverage/pods/pods.go b/src/internal/coverage/pods/pods.go
new file mode 100644
index 0000000..e08f82e
--- /dev/null
+++ b/src/internal/coverage/pods/pods.go
@@ -0,0 +1,197 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pods
+
+import (
+ "fmt"
+ "internal/coverage"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+)
+
+// Pod encapsulates a set of files emitted during the executions of a
+// coverage-instrumented binary. Each pod contains a single meta-data
+// file, and then 0 or more counter data files that refer to that
+// meta-data file. Pods are intended to simplify processing of
+// coverage output files in the case where we have several coverage
+// output directories containing output files derived from more
+// than one instrumented executable. In the case where the files that
+// make up a pod are spread out across multiple directories, each
+// element of the "Origins" field below will be populated with the
+// index of the originating directory for the corresponding counter
+// data file (within the slice of input dirs handed to CollectPods).
+// The ProcessIDs field will be populated with the process ID of each
+// data file in the CounterDataFiles slice.
+type Pod struct {
+ MetaFile string
+ CounterDataFiles []string
+ Origins []int
+ ProcessIDs []int
+}
+
+// CollectPods visits the files contained within the directories in
+// the list 'dirs', collects any coverage-related files, partitions
+// them into pods, and returns a list of the pods to the caller, along
+// with an error if something went wrong during directory/file
+// reading.
+//
+// CollectPods skips over any file that is not related to coverage
+// (e.g. avoids looking at things that are not meta-data files or
+// counter-data files). CollectPods also skips over 'orphaned' counter
+// data files (e.g. counter data files for which we can't find the
+// corresponding meta-data file). If "warn" is true, CollectPods will
+// issue warnings to stderr when it encounters non-fatal problems (for
+// orphans or a directory with no meta-data files).
+func CollectPods(dirs []string, warn bool) ([]Pod, error) {
+ files := []string{}
+ dirIndices := []int{}
+ for k, dir := range dirs {
+ dents, err := os.ReadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+ for _, e := range dents {
+ if e.IsDir() {
+ continue
+ }
+ files = append(files, filepath.Join(dir, e.Name()))
+ dirIndices = append(dirIndices, k)
+ }
+ }
+ return collectPodsImpl(files, dirIndices, warn), nil
+}
+
+// CollectPodsFromFiles functions the same as "CollectPods" but
+// operates on an explicit list of files instead of a list of directories.
+func CollectPodsFromFiles(files []string, warn bool) []Pod {
+ return collectPodsImpl(files, nil, warn)
+}
+
+type fileWithAnnotations struct {
+ file string
+ origin int
+ pid int
+}
+
+type protoPod struct {
+ mf string
+ elements []fileWithAnnotations
+}
+
+// collectPodsImpl examines the specified list of files and picks out
+// subsets that correspond to coverage pods. The first stage in this
+// process is collecting a set { M1, M2, ... MN } where each M_k is a
+// distinct coverage meta-data file. We then create a single pod for
+// each meta-data file M_k, then find all of the counter data files
+// that refer to that meta-data file (recall that the counter data
+// file name incorporates the meta-data hash), and add the counter
+// data file to the appropriate pod.
+//
+// This process is complicated by the fact that we need to keep track
+// of directory indices for counter data files. Here is an example to
+// motivate:
+//
+// directory 1:
+//
+// M1 covmeta.9bbf1777f47b3fcacb05c38b035512d6
+// C1 covcounters.9bbf1777f47b3fcacb05c38b035512d6.1677673.1662138360208416486
+// C2 covcounters.9bbf1777f47b3fcacb05c38b035512d6.1677637.1662138359974441782
+//
+// directory 2:
+//
+// M2 covmeta.9bbf1777f47b3fcacb05c38b035512d6
+// C3 covcounters.9bbf1777f47b3fcacb05c38b035512d6.1677445.1662138360208416480
+// C4 covcounters.9bbf1777f47b3fcacb05c38b035512d6.1677677.1662138359974441781
+// M3 covmeta.a723844208cea2ae80c63482c78b2245
+// C5 covcounters.a723844208cea2ae80c63482c78b2245.3677445.1662138360208416480
+// C6 covcounters.a723844208cea2ae80c63482c78b2245.1877677.1662138359974441781
+//
+// In these two directories we have three meta-data files, but only
+// two are distinct, meaning that we'll wind up with two pods. The
+// first pod (with meta-file M1) will have four counter data files
+// (C1, C2, C3, C4) and the second pod will have two counter data files
+// (C5, C6).
+func collectPodsImpl(files []string, dirIndices []int, warn bool) []Pod {
+ metaRE := regexp.MustCompile(fmt.Sprintf(`^%s\.(\S+)$`, coverage.MetaFilePref))
+ mm := make(map[string]protoPod)
+ for _, f := range files {
+ base := filepath.Base(f)
+ if m := metaRE.FindStringSubmatch(base); m != nil {
+ tag := m[1]
+ // We need to allow for the possibility of duplicate
+ // meta-data files. If we hit this case, use the
+ // first encountered as the canonical version.
+ if _, ok := mm[tag]; !ok {
+ mm[tag] = protoPod{mf: f}
+ }
+ // FIXME: should probably check file length and hash here for
+ // the duplicate.
+ }
+ }
+ counterRE := regexp.MustCompile(fmt.Sprintf(coverage.CounterFileRegexp, coverage.CounterFilePref))
+ for k, f := range files {
+ base := filepath.Base(f)
+ if m := counterRE.FindStringSubmatch(base); m != nil {
+ tag := m[1] // meta hash
+ pid, err := strconv.Atoi(m[2])
+ if err != nil {
+ continue
+ }
+ if v, ok := mm[tag]; ok {
+ idx := -1
+ if dirIndices != nil {
+ idx = dirIndices[k]
+ }
+ fo := fileWithAnnotations{file: f, origin: idx, pid: pid}
+ v.elements = append(v.elements, fo)
+ mm[tag] = v
+ } else {
+ if warn {
+ warning("skipping orphaned counter file: %s", f)
+ }
+ }
+ }
+ }
+ if len(mm) == 0 {
+ if warn {
+ warning("no coverage data files found")
+ }
+ return nil
+ }
+ pods := make([]Pod, 0, len(mm))
+ for _, p := range mm {
+ sort.Slice(p.elements, func(i, j int) bool {
+ if p.elements[i].origin != p.elements[j].origin {
+ return p.elements[i].origin < p.elements[j].origin
+ }
+ return p.elements[i].file < p.elements[j].file
+ })
+ pod := Pod{
+ MetaFile: p.mf,
+ CounterDataFiles: make([]string, 0, len(p.elements)),
+ Origins: make([]int, 0, len(p.elements)),
+ ProcessIDs: make([]int, 0, len(p.elements)),
+ }
+ for _, e := range p.elements {
+ pod.CounterDataFiles = append(pod.CounterDataFiles, e.file)
+ pod.Origins = append(pod.Origins, e.origin)
+ pod.ProcessIDs = append(pod.ProcessIDs, e.pid)
+ }
+ pods = append(pods, pod)
+ }
+ sort.Slice(pods, func(i, j int) bool {
+ return pods[i].MetaFile < pods[j].MetaFile
+ })
+ return pods
+}
+
+func warning(s string, a ...interface{}) {
+ fmt.Fprintf(os.Stderr, "warning: ")
+ fmt.Fprintf(os.Stderr, s, a...)
+ fmt.Fprintf(os.Stderr, "\n")
+}
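
A hedged usage sketch for pod collection (names are illustrative, not part of the patch):

package covtool

import (
	"fmt"

	"internal/coverage/pods"
)

// summarizePods partitions the coverage files found in 'dirs' into pods
// and prints which counter files belong to which meta-data file.
func summarizePods(dirs []string) error {
	plist, err := pods.CollectPods(dirs, true /* warn about orphans */)
	if err != nil {
		return err
	}
	for _, p := range plist {
		fmt.Printf("meta: %s\n", p.MetaFile)
		for i, cdf := range p.CounterDataFiles {
			fmt.Printf("  counters: %s (dir %d, pid %d)\n", cdf, p.Origins[i], p.ProcessIDs[i])
		}
	}
	return nil
}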
diff --git a/src/internal/coverage/pods/pods_test.go b/src/internal/coverage/pods/pods_test.go
new file mode 100644
index 0000000..69c16e0
--- /dev/null
+++ b/src/internal/coverage/pods/pods_test.go
@@ -0,0 +1,142 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pods_test
+
+import (
+ "crypto/md5"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/pods"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+func TestPodCollection(t *testing.T) {
+ //testenv.MustHaveGoBuild(t)
+
+ mkdir := func(d string, perm os.FileMode) string {
+ dp := filepath.Join(t.TempDir(), d)
+ if err := os.Mkdir(dp, perm); err != nil {
+ t.Fatal(err)
+ }
+ return dp
+ }
+
+ mkfile := func(d string, fn string) string {
+ fp := filepath.Join(d, fn)
+ if err := os.WriteFile(fp, []byte("foo"), 0666); err != nil {
+ t.Fatal(err)
+ }
+ return fp
+ }
+
+ mkmeta := func(dir string, tag string) string {
+ hash := md5.Sum([]byte(tag))
+ fn := fmt.Sprintf("%s.%x", coverage.MetaFilePref, hash)
+ return mkfile(dir, fn)
+ }
+
+ mkcounter := func(dir string, tag string, nt int, pid int) string {
+ hash := md5.Sum([]byte(tag))
+ fn := fmt.Sprintf(coverage.CounterFileTempl, coverage.CounterFilePref, hash, pid, nt)
+ return mkfile(dir, fn)
+ }
+
+ trim := func(path string) string {
+ b := filepath.Base(path)
+ d := filepath.Dir(path)
+ db := filepath.Base(d)
+ return db + "/" + b
+ }
+
+ podToString := func(p pods.Pod) string {
+ rv := trim(p.MetaFile) + " [\n"
+ for k, df := range p.CounterDataFiles {
+ rv += trim(df)
+ if p.Origins != nil {
+ rv += fmt.Sprintf(" o:%d", p.Origins[k])
+ }
+ rv += "\n"
+ }
+ return rv + "]"
+ }
+
+ // Create a couple of directories.
+ o1 := mkdir("o1", 0777)
+ o2 := mkdir("o2", 0777)
+
+ // Add some random files (not coverage related)
+ mkfile(o1, "blah.txt")
+ mkfile(o1, "something.exe")
+
+ // Add a meta-data file with two counter files to first dir.
+ mkmeta(o1, "m1")
+ mkcounter(o1, "m1", 1, 42)
+ mkcounter(o1, "m1", 2, 41)
+ mkcounter(o1, "m1", 2, 40)
+
+ // Add a counter file with no associated meta file.
+ mkcounter(o1, "orphan", 9, 39)
+
+ // Add a meta-data file with three counter files to second dir.
+ mkmeta(o2, "m2")
+ mkcounter(o2, "m2", 1, 38)
+ mkcounter(o2, "m2", 2, 37)
+ mkcounter(o2, "m2", 3, 36)
+
+ // Add a duplicate of the first meta-file and a corresponding
+ // counter file to the second dir. This is intended to capture
+ // the scenario where we have two different runs of the same
+ // coverage-instrumented binary, but with the output files
+ // sent to separate directories.
+ mkmeta(o2, "m1")
+ mkcounter(o2, "m1", 11, 35)
+
+ // Collect pods.
+ podlist, err := pods.CollectPods([]string{o1, o2}, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify pods
+ if len(podlist) != 2 {
+ t.Fatalf("expected 2 pods got %d pods", len(podlist))
+ }
+
+ for k, p := range podlist {
+ t.Logf("%d: mf=%s\n", k, p.MetaFile)
+ }
+
+ expected := []string{
+ `o1/covmeta.ae7be26cdaa742ca148068d5ac90eaca [
+o1/covcounters.ae7be26cdaa742ca148068d5ac90eaca.40.2 o:0
+o1/covcounters.ae7be26cdaa742ca148068d5ac90eaca.41.2 o:0
+o1/covcounters.ae7be26cdaa742ca148068d5ac90eaca.42.1 o:0
+o2/covcounters.ae7be26cdaa742ca148068d5ac90eaca.35.11 o:1
+]`,
+ `o2/covmeta.aaf2f89992379705dac844c0a2a1d45f [
+o2/covcounters.aaf2f89992379705dac844c0a2a1d45f.36.3 o:1
+o2/covcounters.aaf2f89992379705dac844c0a2a1d45f.37.2 o:1
+o2/covcounters.aaf2f89992379705dac844c0a2a1d45f.38.1 o:1
+]`,
+ }
+ for k, exp := range expected {
+ got := podToString(podlist[k])
+ if exp != got {
+ t.Errorf("pod %d: expected:\n%s\ngot:\n%s", k, exp, got)
+ }
+ }
+
+ // Check handling of bad/unreadable dir.
+ if runtime.GOOS == "linux" {
+ dbad := "/dev/null"
+ _, err = pods.CollectPods([]string{dbad}, true)
+ if err == nil {
+ t.Errorf("executed error due to unreadable dir")
+ }
+ }
+}
diff --git a/src/internal/coverage/rtcov/rtcov.go b/src/internal/coverage/rtcov/rtcov.go
new file mode 100644
index 0000000..bbb93ac
--- /dev/null
+++ b/src/internal/coverage/rtcov/rtcov.go
@@ -0,0 +1,34 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rtcov
+
+// This package contains types whose structure is shared between
+// the runtime package and the "runtime/coverage" package.
+
+// CovMetaBlob is a container for holding the meta-data symbol (an
+// RODATA variable) for an instrumented Go package. Here "p" points to
+// the symbol itself, "len" is the length of the sym in bytes, and
+// "hash" is an md5sum for the sym computed by the compiler. When
+// the init function for a coverage-instrumented package executes, it
+// will make a call into the runtime which will create a covMetaBlob
+// object for the package and chain it onto a global list.
+type CovMetaBlob struct {
+ P *byte
+ Len uint32
+ Hash [16]byte
+ PkgPath string
+ PkgID int
+ CounterMode uint8 // coverage.CounterMode
+ CounterGranularity uint8 // coverage.CounterGranularity
+}
+
+// CovCounterBlob is a container for encapsulating a counter section
+// (BSS variable) for an instrumented Go module. Here "counters"
+// points to the counter payload and "len" is the number of uint32
+// entries in the section.
+type CovCounterBlob struct {
+ Counters *uint32
+ Len uint64
+}
diff --git a/src/internal/coverage/slicereader/slicereader.go b/src/internal/coverage/slicereader/slicereader.go
new file mode 100644
index 0000000..d9f2a7e
--- /dev/null
+++ b/src/internal/coverage/slicereader/slicereader.go
@@ -0,0 +1,123 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slicereader
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "unsafe"
+)
+
+// This file contains the helper "SliceReader", a utility for
+// reading values from a byte slice that may or may not be backed
+// by a read-only mmap'd region.
+
+type Reader struct {
+ b []byte
+ readonly bool
+ off int64
+}
+
+func NewReader(b []byte, readonly bool) *Reader {
+ r := Reader{
+ b: b,
+ readonly: readonly,
+ }
+ return &r
+}
+
+func (r *Reader) Read(b []byte) (int, error) {
+ amt := len(b)
+ toread := r.b[r.off:]
+ if len(toread) < amt {
+ amt = len(toread)
+ }
+ copy(b, toread)
+ r.off += int64(amt)
+ return amt, nil
+}
+
+func (r *Reader) Seek(offset int64, whence int) (ret int64, err error) {
+ switch whence {
+ case io.SeekStart:
+ if offset < 0 || offset > int64(len(r.b)) {
+ return 0, fmt.Errorf("invalid seek: new offset %d (out of range [0 %d]", offset, len(r.b))
+ }
+ r.off = offset
+ return offset, nil
+ case io.SeekCurrent:
+ newoff := r.off + offset
+ if newoff < 0 || newoff > int64(len(r.b)) {
+ return 0, fmt.Errorf("invalid seek: new offset %d (out of range [0 %d]", newoff, len(r.b))
+ }
+ r.off = newoff
+ return r.off, nil
+ case io.SeekEnd:
+ newoff := int64(len(r.b)) + offset
+ if newoff < 0 || newoff > int64(len(r.b)) {
+ return 0, fmt.Errorf("invalid seek: new offset %d (out of range [0 %d]", newoff, len(r.b))
+ }
+ r.off = newoff
+ return r.off, nil
+ }
+ // other modes are not supported
+ return 0, fmt.Errorf("unsupported seek mode %d", whence)
+}
+
+func (r *Reader) Offset() int64 {
+ return r.off
+}
+
+func (r *Reader) ReadUint8() uint8 {
+ rv := uint8(r.b[int(r.off)])
+ r.off += 1
+ return rv
+}
+
+func (r *Reader) ReadUint32() uint32 {
+ end := int(r.off) + 4
+ rv := binary.LittleEndian.Uint32(r.b[int(r.off):end:end])
+ r.off += 4
+ return rv
+}
+
+func (r *Reader) ReadUint64() uint64 {
+ end := int(r.off) + 8
+ rv := binary.LittleEndian.Uint64(r.b[int(r.off):end:end])
+ r.off += 8
+ return rv
+}
+
+func (r *Reader) ReadULEB128() (value uint64) {
+ var shift uint
+
+ for {
+ b := r.b[r.off]
+ r.off++
+ value |= (uint64(b&0x7F) << shift)
+ if b&0x80 == 0 {
+ break
+ }
+ shift += 7
+ }
+ return
+}
+
+func (r *Reader) ReadString(len int64) string {
+ b := r.b[r.off : r.off+len]
+ r.off += len
+ if r.readonly {
+ return toString(b) // backed by RO memory, ok to make unsafe string
+ }
+ return string(b)
+}
+
+func toString(b []byte) string {
+ if len(b) == 0 {
+ return ""
+ }
+ return unsafe.String(&b[0], len(b))
+}
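
To show the reader in the context of the string-table encoding used throughout these files, a small sketch (hypothetical function name) decoding a ULEB128 length-prefixed string:

package covtool

import (
	"internal/coverage/slicereader"
)

// readLenPrefixedString decodes a ULEB128 length-prefixed string, the
// same shape used by the string-table sections described in defs.go.
func readLenPrefixedString() string {
	buf := []byte{0x06, 'f', 'o', 'o', 'b', 'a', 'r'} // <uleb128 len=6> "foobar"
	sr := slicereader.NewReader(buf, false /* not read-only */)
	n := sr.ReadULEB128()
	return sr.ReadString(int64(n)) // "foobar" (copied, since the backing slice is writable)
}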
diff --git a/src/internal/coverage/slicereader/slr_test.go b/src/internal/coverage/slicereader/slr_test.go
new file mode 100644
index 0000000..461436d
--- /dev/null
+++ b/src/internal/coverage/slicereader/slr_test.go
@@ -0,0 +1,95 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slicereader
+
+import (
+ "encoding/binary"
+ "io"
+ "testing"
+)
+
+func TestSliceReader(t *testing.T) {
+ b := []byte{}
+
+ bt := make([]byte, 4)
+ e32 := uint32(1030507)
+ binary.LittleEndian.PutUint32(bt, e32)
+ b = append(b, bt...)
+
+ bt = make([]byte, 8)
+ e64 := uint64(907050301)
+ binary.LittleEndian.PutUint64(bt, e64)
+ b = append(b, bt...)
+
+ b = appendUleb128(b, uint(e32))
+ b = appendUleb128(b, uint(e64))
+ b = appendUleb128(b, 6)
+ s1 := "foobar"
+ s1b := []byte(s1)
+ b = append(b, s1b...)
+ b = appendUleb128(b, 9)
+ s2 := "bazbasher"
+ s2b := []byte(s2)
+ b = append(b, s2b...)
+
+ readStr := func(slr *Reader) string {
+ len := slr.ReadULEB128()
+ return slr.ReadString(int64(len))
+ }
+
+ for i := 0; i < 2; i++ {
+ slr := NewReader(b, i == 0)
+ g32 := slr.ReadUint32()
+ if g32 != e32 {
+ t.Fatalf("slr.ReadUint32() got %d want %d", g32, e32)
+ }
+ g64 := slr.ReadUint64()
+ if g64 != e64 {
+ t.Fatalf("slr.ReadUint64() got %d want %d", g64, e64)
+ }
+ g32 = uint32(slr.ReadULEB128())
+ if g32 != e32 {
+ t.Fatalf("slr.ReadULEB128() got %d want %d", g32, e32)
+ }
+ g64 = slr.ReadULEB128()
+ if g64 != e64 {
+ t.Fatalf("slr.ReadULEB128() got %d want %d", g64, e64)
+ }
+ gs1 := readStr(slr)
+ if gs1 != s1 {
+ t.Fatalf("readStr got %s want %s", gs1, s1)
+ }
+ gs2 := readStr(slr)
+ if gs2 != s2 {
+ t.Fatalf("readStr got %s want %s", gs2, s2)
+ }
+ if _, err := slr.Seek(4, io.SeekStart); err != nil {
+ t.Fatal(err)
+ }
+ off := slr.Offset()
+ if off != 4 {
+ t.Fatalf("Offset() returned %d wanted 4", off)
+ }
+ g64 = slr.ReadUint64()
+ if g64 != e64 {
+ t.Fatalf("post-seek slr.ReadUint64() got %d want %d", g64, e64)
+ }
+ }
+}
+
+func appendUleb128(b []byte, v uint) []byte {
+ for {
+ c := uint8(v & 0x7f)
+ v >>= 7
+ if v != 0 {
+ c |= 0x80
+ }
+ b = append(b, c)
+ if c&0x80 == 0 {
+ break
+ }
+ }
+ return b
+}
diff --git a/src/internal/coverage/slicewriter/slicewriter.go b/src/internal/coverage/slicewriter/slicewriter.go
new file mode 100644
index 0000000..460e9dc
--- /dev/null
+++ b/src/internal/coverage/slicewriter/slicewriter.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slicewriter
+
+import (
+ "fmt"
+ "io"
+)
+
+// WriteSeeker is a helper object that implements the io.WriteSeeker
+// interface. Clients can create a WriteSeeker, make a series of Write
+// calls to add data to it (and possibly Seek calls to update
+// previously written portions), then finally invoke BytesWritten() to
+// retrieve the constructed byte slice.
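+//
+// A minimal usage sketch (illustrative only):
+//
+//	var sws WriteSeeker
+//	sws.Write([]byte{1, 2, 3, 4}) // payload is now [1 2 3 4]
+//	sws.Seek(1, io.SeekStart)     // reposition to offset 1
+//	sws.Write([]byte{9, 9})       // overwrite: payload is now [1 9 9 4]
+//	data := sws.BytesWritten()    // data == []byte{1, 9, 9, 4}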
+type WriteSeeker struct {
+ payload []byte
+ off int64
+}
+
+func (sws *WriteSeeker) Write(p []byte) (n int, err error) {
+ amt := len(p)
+ towrite := sws.payload[sws.off:]
+ if len(towrite) < amt {
+ sws.payload = append(sws.payload, make([]byte, amt-len(towrite))...)
+ towrite = sws.payload[sws.off:]
+ }
+ copy(towrite, p)
+ sws.off += int64(amt)
+ return amt, nil
+}
+
+// Seek repositions the read/write position of the WriteSeeker within
+// its internally maintained slice. Note that seeking cannot be used to
+// expand the size of the slice; trying to seek outside the bounds of the
+// slice results in an error.
+func (sws *WriteSeeker) Seek(offset int64, whence int) (int64, error) {
+ switch whence {
+ case io.SeekStart:
+ if sws.off != offset && (offset < 0 || offset > int64(len(sws.payload))) {
+			return 0, fmt.Errorf("invalid seek: new offset %d (out of range [0 %d])", offset, len(sws.payload))
+ }
+ sws.off = offset
+ return offset, nil
+ case io.SeekCurrent:
+ newoff := sws.off + offset
+ if newoff != sws.off && (newoff < 0 || newoff > int64(len(sws.payload))) {
+			return 0, fmt.Errorf("invalid seek: new offset %d (out of range [0 %d])", newoff, len(sws.payload))
+ }
+ sws.off += offset
+ return sws.off, nil
+ case io.SeekEnd:
+ newoff := int64(len(sws.payload)) + offset
+ if newoff != sws.off && (newoff < 0 || newoff > int64(len(sws.payload))) {
+			return 0, fmt.Errorf("invalid seek: new offset %d (out of range [0 %d])", newoff, len(sws.payload))
+ }
+ sws.off = newoff
+ return sws.off, nil
+ }
+ // other modes not supported
+ return 0, fmt.Errorf("unsupported seek mode %d", whence)
+}
+
+// BytesWritten returns the underlying byte slice for the WriteSeeker,
+// containing the data written to it via Write/Seek calls.
+func (sws *WriteSeeker) BytesWritten() []byte {
+ return sws.payload
+}
+
+func (sws *WriteSeeker) Read(p []byte) (n int, err error) {
+ amt := len(p)
+ toread := sws.payload[sws.off:]
+ if len(toread) < amt {
+ amt = len(toread)
+ }
+ copy(p, toread)
+ sws.off += int64(amt)
+ return amt, nil
+}
diff --git a/src/internal/coverage/slicewriter/slw_test.go b/src/internal/coverage/slicewriter/slw_test.go
new file mode 100644
index 0000000..9e26767
--- /dev/null
+++ b/src/internal/coverage/slicewriter/slw_test.go
@@ -0,0 +1,134 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slicewriter
+
+import (
+ "io"
+ "testing"
+)
+
+func TestSliceWriter(t *testing.T) {
+
+ sleq := func(t *testing.T, got []byte, want []byte) {
+ t.Helper()
+ if len(got) != len(want) {
+ t.Fatalf("bad length got %d want %d", len(got), len(want))
+ }
+ for i := range got {
+ if got[i] != want[i] {
+ t.Fatalf("bad read at %d got %d want %d", i, got[i], want[i])
+ }
+ }
+ }
+
+ wf := func(t *testing.T, ws *WriteSeeker, p []byte) {
+ t.Helper()
+ nw, werr := ws.Write(p)
+ if werr != nil {
+ t.Fatalf("unexpected write error: %v", werr)
+ }
+ if nw != len(p) {
+ t.Fatalf("wrong amount written want %d got %d", len(p), nw)
+ }
+ }
+
+ rf := func(t *testing.T, ws *WriteSeeker, p []byte) {
+ t.Helper()
+ b := make([]byte, len(p))
+ nr, rerr := ws.Read(b)
+ if rerr != nil {
+ t.Fatalf("unexpected read error: %v", rerr)
+ }
+ if nr != len(p) {
+ t.Fatalf("wrong amount read want %d got %d", len(p), nr)
+ }
+ sleq(t, b, p)
+ }
+
+ sk := func(t *testing.T, ws *WriteSeeker, offset int64, whence int) int64 {
+ t.Helper()
+ off, err := ws.Seek(offset, whence)
+ if err != nil {
+ t.Fatalf("unexpected seek error: %v", err)
+ }
+ return off
+ }
+
+ wp1 := []byte{1, 2}
+ ws := &WriteSeeker{}
+
+ // write some stuff
+ wf(t, ws, wp1)
+ // check that BytesWritten returns what we wrote.
+ sleq(t, ws.BytesWritten(), wp1)
+ // offset is at end of slice, so reading should return zero bytes.
+ rf(t, ws, []byte{})
+
+ // write some more stuff
+ wp2 := []byte{7, 8, 9}
+ wf(t, ws, wp2)
+ // check that BytesWritten returns what we expect.
+ wpex := []byte{1, 2, 7, 8, 9}
+ sleq(t, ws.BytesWritten(), wpex)
+ rf(t, ws, []byte{})
+
+ // seeks and reads.
+ sk(t, ws, 1, io.SeekStart)
+ rf(t, ws, []byte{2, 7})
+ sk(t, ws, -2, io.SeekCurrent)
+ rf(t, ws, []byte{2, 7})
+ sk(t, ws, -4, io.SeekEnd)
+ rf(t, ws, []byte{2, 7})
+ off := sk(t, ws, 0, io.SeekEnd)
+ sk(t, ws, off, io.SeekStart)
+
+ // seek back and overwrite
+ sk(t, ws, 1, io.SeekStart)
+ wf(t, ws, []byte{9, 11})
+ wpex = []byte{1, 9, 11, 8, 9}
+ sleq(t, ws.BytesWritten(), wpex)
+
+ // seeks on empty writer.
+ ws2 := &WriteSeeker{}
+ sk(t, ws2, 0, io.SeekStart)
+ sk(t, ws2, 0, io.SeekCurrent)
+ sk(t, ws2, 0, io.SeekEnd)
+
+ // check for seek errors.
+ _, err := ws.Seek(-1, io.SeekStart)
+ if err == nil {
+ t.Fatalf("expected error on invalid -1 seek")
+ }
+ _, err = ws.Seek(int64(len(ws.BytesWritten())+1), io.SeekStart)
+ if err == nil {
+ t.Fatalf("expected error on invalid %d seek", len(ws.BytesWritten()))
+ }
+
+ ws.Seek(0, io.SeekStart)
+ _, err = ws.Seek(-1, io.SeekCurrent)
+ if err == nil {
+ t.Fatalf("expected error on invalid -1 seek")
+ }
+ _, err = ws.Seek(int64(len(ws.BytesWritten())+1), io.SeekCurrent)
+ if err == nil {
+ t.Fatalf("expected error on invalid %d seek", len(ws.BytesWritten()))
+ }
+
+ _, err = ws.Seek(1, io.SeekEnd)
+ if err == nil {
+ t.Fatalf("expected error on invalid 1 seek")
+ }
+ bsamt := int64(-1*len(ws.BytesWritten()) - 1)
+ _, err = ws.Seek(bsamt, io.SeekEnd)
+ if err == nil {
+ t.Fatalf("expected error on invalid %d seek", bsamt)
+ }
+
+ // bad seek mode
+ _, err = ws.Seek(-1, io.SeekStart+9)
+ if err == nil {
+ t.Fatalf("expected error on invalid seek mode")
+ }
+}
diff --git a/src/internal/coverage/stringtab/stringtab.go b/src/internal/coverage/stringtab/stringtab.go
new file mode 100644
index 0000000..156c8ad
--- /dev/null
+++ b/src/internal/coverage/stringtab/stringtab.go
@@ -0,0 +1,139 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stringtab
+
+import (
+ "fmt"
+ "internal/coverage/slicereader"
+ "internal/coverage/uleb128"
+ "io"
+)
+
+// This package implements string table writer and reader utilities,
+// for use in emitting and reading/decoding coverage meta-data and
+// counter-data files.
+
+// Writer implements a string table writing utility.
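+//
+// A rough usage sketch (illustrative only; w stands for any io.Writer):
+//
+//	var stw Writer
+//	stw.InitWriter()
+//	idx := stw.Lookup("main.go") // 0 on first use, stable afterwards
+//	stw.Freeze()                 // freeze before serializing
+//	err := stw.Write(w)          // emits the entry count, then each string's length and bytes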
+type Writer struct {
+ stab map[string]uint32
+ strs []string
+ tmp []byte
+ frozen bool
+}
+
+// InitWriter initializes a stringtab.Writer.
+func (stw *Writer) InitWriter() {
+ stw.stab = make(map[string]uint32)
+ stw.tmp = make([]byte, 64)
+}
+
+// Nentries returns the number of strings interned so far.
+func (stw *Writer) Nentries() uint32 {
+ return uint32(len(stw.strs))
+}
+
+// Lookup looks up string 's' in the writer's table, adding
+// a new entry if need be, and returning an index into the table.
+func (stw *Writer) Lookup(s string) uint32 {
+ if idx, ok := stw.stab[s]; ok {
+ return idx
+ }
+ if stw.frozen {
+ panic("internal error: string table previously frozen")
+ }
+ idx := uint32(len(stw.strs))
+ stw.stab[s] = idx
+ stw.strs = append(stw.strs, s)
+ return idx
+}
+
+// Size computes the memory in bytes needed for the serialized
+// version of a stringtab.Writer.
+func (stw *Writer) Size() uint32 {
+ rval := uint32(0)
+ stw.tmp = stw.tmp[:0]
+ stw.tmp = uleb128.AppendUleb128(stw.tmp, uint(len(stw.strs)))
+ rval += uint32(len(stw.tmp))
+ for _, s := range stw.strs {
+ stw.tmp = stw.tmp[:0]
+ slen := uint(len(s))
+ stw.tmp = uleb128.AppendUleb128(stw.tmp, slen)
+ rval += uint32(len(stw.tmp)) + uint32(slen)
+ }
+ return rval
+}
+
+// Write writes the string table in serialized form to the specified
+// io.Writer.
+func (stw *Writer) Write(w io.Writer) error {
+ wr128 := func(v uint) error {
+ stw.tmp = stw.tmp[:0]
+ stw.tmp = uleb128.AppendUleb128(stw.tmp, v)
+ if nw, err := w.Write(stw.tmp); err != nil {
+ return fmt.Errorf("writing string table: %v", err)
+ } else if nw != len(stw.tmp) {
+ return fmt.Errorf("short write emitting stringtab uleb")
+ }
+ return nil
+ }
+ if err := wr128(uint(len(stw.strs))); err != nil {
+ return err
+ }
+ for _, s := range stw.strs {
+ if err := wr128(uint(len(s))); err != nil {
+ return err
+ }
+ if nw, err := w.Write([]byte(s)); err != nil {
+ return fmt.Errorf("writing string table: %v", err)
+ } else if nw != len([]byte(s)) {
+ return fmt.Errorf("short write emitting stringtab")
+ }
+ }
+ return nil
+}
+
+// Freeze sends a signal to the writer that no more additions are
+// allowed, only lookups of existing strings (if a lookup triggers
+// addition, a panic will result). Useful as a mechanism for
+// "finalizing" a string table prior to writing it out.
+func (stw *Writer) Freeze() {
+ stw.frozen = true
+}
+
+// Reader is a helper for reading a string table previously
+// serialized by a Writer.Write call.
+type Reader struct {
+ r *slicereader.Reader
+ strs []string
+}
+
+// NewReader creates a stringtab.Reader to read the contents
+// of a string table from 'r'.
+func NewReader(r *slicereader.Reader) *Reader {
+ str := &Reader{
+ r: r,
+ }
+ return str
+}
+
+// Read reads/decodes a string table using the reader provided.
+func (str *Reader) Read() {
+ numEntries := int(str.r.ReadULEB128())
+ str.strs = make([]string, 0, numEntries)
+ for idx := 0; idx < numEntries; idx++ {
+ slen := str.r.ReadULEB128()
+ str.strs = append(str.strs, str.r.ReadString(int64(slen)))
+ }
+}
+
+// Entries returns the number of decoded entries in a string table.
+func (str *Reader) Entries() int {
+ return len(str.strs)
+}
+
+// Get returns string 'idx' within the string table.
+func (str *Reader) Get(idx uint32) string {
+ return str.strs[idx]
+}
diff --git a/src/internal/coverage/test/counter_test.go b/src/internal/coverage/test/counter_test.go
new file mode 100644
index 0000000..e29baed
--- /dev/null
+++ b/src/internal/coverage/test/counter_test.go
@@ -0,0 +1,237 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/decodecounter"
+ "internal/coverage/encodecounter"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+type ctrVis struct {
+ funcs []decodecounter.FuncPayload
+}
+
+func (v *ctrVis) VisitFuncs(f encodecounter.CounterVisitorFn) error {
+ for _, fn := range v.funcs {
+ if err := f(fn.PkgIdx, fn.FuncIdx, fn.Counters); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func mkfunc(p uint32, f uint32, c []uint32) decodecounter.FuncPayload {
+ return decodecounter.FuncPayload{
+ PkgIdx: p,
+ FuncIdx: f,
+ Counters: c,
+ }
+}
+
+func TestCounterDataWriterReader(t *testing.T) {
+ flavors := []coverage.CounterFlavor{
+ coverage.CtrRaw,
+ coverage.CtrULeb128,
+ }
+
+ isDead := func(fp decodecounter.FuncPayload) bool {
+ for _, v := range fp.Counters {
+ if v != 0 {
+ return false
+ }
+ }
+ return true
+ }
+
+ funcs := []decodecounter.FuncPayload{
+ mkfunc(0, 0, []uint32{13, 14, 15}),
+ mkfunc(0, 1, []uint32{16, 17}),
+ mkfunc(1, 0, []uint32{18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 976543, 7}),
+ }
+ writeVisitor := &ctrVis{funcs: funcs}
+
+ for kf, flav := range flavors {
+
+ t.Logf("testing flavor %d\n", flav)
+
+ // Open a counter data file in preparation for emitting data.
+ d := t.TempDir()
+ cfpath := filepath.Join(d, fmt.Sprintf("covcounters.hash.0.%d", kf))
+ of, err := os.OpenFile(cfpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ t.Fatalf("opening covcounters: %v", err)
+ }
+
+ // Perform the encode and write.
+ cdfw := encodecounter.NewCoverageDataWriter(of, flav)
+ if cdfw == nil {
+ t.Fatalf("NewCoverageDataWriter failed")
+ }
+ finalHash := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0}
+ args := map[string]string{"argc": "3", "argv0": "arg0", "argv1": "arg1", "argv2": "arg_________2"}
+ if err := cdfw.Write(finalHash, args, writeVisitor); err != nil {
+ t.Fatalf("counter file Write failed: %v", err)
+ }
+ if err := of.Close(); err != nil {
+ t.Fatalf("closing covcounters: %v", err)
+ }
+ cdfw = nil
+
+ // Decode the same file.
+ var cdr *decodecounter.CounterDataReader
+ inf, err := os.Open(cfpath)
+ defer func() {
+ if err := inf.Close(); err != nil {
+ t.Fatalf("close failed with: %v", err)
+ }
+ }()
+
+ if err != nil {
+ t.Fatalf("reopening covcounters file: %v", err)
+ }
+ if cdr, err = decodecounter.NewCounterDataReader(cfpath, inf); err != nil {
+ t.Fatalf("opening covcounters for read: %v", err)
+ }
+ decodedArgs := cdr.OsArgs()
+ aWant := "[arg0 arg1 arg_________2]"
+ aGot := fmt.Sprintf("%+v", decodedArgs)
+ if aWant != aGot {
+ t.Errorf("reading decoded args, got %s want %s", aGot, aWant)
+ }
+ for i := range funcs {
+ if isDead(funcs[i]) {
+ continue
+ }
+ var fp decodecounter.FuncPayload
+ if ok, err := cdr.NextFunc(&fp); err != nil {
+ t.Fatalf("reading func %d: %v", i, err)
+ } else if !ok {
+ t.Fatalf("reading func %d: bad return", i)
+ }
+ got := fmt.Sprintf("%+v", fp)
+ want := fmt.Sprintf("%+v", funcs[i])
+ if got != want {
+ t.Errorf("cdr.NextFunc iter %d\ngot %+v\nwant %+v", i, got, want)
+ }
+ }
+ var dummy decodecounter.FuncPayload
+ if ok, err := cdr.NextFunc(&dummy); err != nil {
+ t.Fatalf("reading func after loop: %v", err)
+ } else if ok {
+ t.Fatalf("reading func after loop: expected EOF")
+ }
+ }
+}
+
+func TestCounterDataAppendSegment(t *testing.T) {
+ d := t.TempDir()
+ cfpath := filepath.Join(d, "covcounters.hash2.0")
+ of, err := os.OpenFile(cfpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ t.Fatalf("opening covcounters: %v", err)
+ }
+
+ const numSegments = 2
+
+	// Write a counter data file with multiple segments.
+ args := map[string]string{"argc": "1", "argv0": "prog.exe"}
+ allfuncs := [][]decodecounter.FuncPayload{}
+ ctrs := []uint32{}
+ q := uint32(0)
+ var cdfw *encodecounter.CoverageDataWriter
+ for idx := 0; idx < numSegments; idx++ {
+ args[fmt.Sprintf("seg%d", idx)] = "x"
+ q += 7
+ ctrs = append(ctrs, q)
+ funcs := []decodecounter.FuncPayload{}
+ for k := 0; k < idx+1; k++ {
+ c := make([]uint32, len(ctrs))
+ copy(c, ctrs)
+ funcs = append(funcs, mkfunc(uint32(idx), uint32(k), c))
+ }
+ allfuncs = append(allfuncs, funcs)
+
+ writeVisitor := &ctrVis{funcs: funcs}
+
+ if idx == 0 {
+ // Perform the encode and write.
+ cdfw = encodecounter.NewCoverageDataWriter(of, coverage.CtrRaw)
+ if cdfw == nil {
+ t.Fatalf("NewCoverageDataWriter failed")
+ }
+ finalHash := [16]byte{1, 2}
+ if err := cdfw.Write(finalHash, args, writeVisitor); err != nil {
+ t.Fatalf("counter file Write failed: %v", err)
+ }
+ } else {
+ if err := cdfw.AppendSegment(args, writeVisitor); err != nil {
+ t.Fatalf("counter file AppendSegment failed: %v", err)
+ }
+ }
+ }
+ if err := of.Close(); err != nil {
+ t.Fatalf("closing covcounters: %v", err)
+ }
+
+ // Read the result file.
+ var cdr *decodecounter.CounterDataReader
+ inf, err := os.Open(cfpath)
+ defer func() {
+ if err := inf.Close(); err != nil {
+ t.Fatalf("close failed with: %v", err)
+ }
+ }()
+
+ if err != nil {
+ t.Fatalf("reopening covcounters file: %v", err)
+ }
+ if cdr, err = decodecounter.NewCounterDataReader(cfpath, inf); err != nil {
+ t.Fatalf("opening covcounters for read: %v", err)
+ }
+ ns := cdr.NumSegments()
+ if ns != numSegments {
+ t.Fatalf("got %d segments want %d", ns, numSegments)
+ }
+ if len(allfuncs) != numSegments {
+ t.Fatalf("expected %d got %d", numSegments, len(allfuncs))
+ }
+
+ for sidx := 0; sidx < int(ns); sidx++ {
+ if off, err := inf.Seek(0, io.SeekCurrent); err != nil {
+ t.Fatalf("Seek failed: %v", err)
+ } else {
+ t.Logf("sidx=%d off=%d\n", sidx, off)
+ }
+
+ if sidx != 0 {
+ if ok, err := cdr.BeginNextSegment(); err != nil {
+ t.Fatalf("BeginNextSegment failed: %v", err)
+ } else if !ok {
+ t.Fatalf("BeginNextSegment return %v on iter %d",
+ ok, sidx)
+ }
+ }
+ funcs := allfuncs[sidx]
+ for i := range funcs {
+ var fp decodecounter.FuncPayload
+ if ok, err := cdr.NextFunc(&fp); err != nil {
+ t.Fatalf("reading func %d: %v", i, err)
+ } else if !ok {
+ t.Fatalf("reading func %d: bad return", i)
+ }
+ got := fmt.Sprintf("%+v", fp)
+ want := fmt.Sprintf("%+v", funcs[i])
+ if got != want {
+ t.Errorf("cdr.NextFunc iter %d\ngot %+v\nwant %+v", i, got, want)
+ }
+ }
+ }
+}
diff --git a/src/internal/coverage/test/roundtrip_test.go b/src/internal/coverage/test/roundtrip_test.go
new file mode 100644
index 0000000..614f56e
--- /dev/null
+++ b/src/internal/coverage/test/roundtrip_test.go
@@ -0,0 +1,331 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/decodemeta"
+ "internal/coverage/encodemeta"
+ "internal/coverage/slicewriter"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func cmpFuncDesc(want, got coverage.FuncDesc) string {
+ swant := fmt.Sprintf("%+v", want)
+ sgot := fmt.Sprintf("%+v", got)
+ if swant == sgot {
+ return ""
+ }
+ return fmt.Sprintf("wanted %q got %q", swant, sgot)
+}
+
+func TestMetaDataEmptyPackage(t *testing.T) {
+ // Make sure that encoding/decoding works properly with packages
+ // that don't actually have any functions.
+ p := "empty/package"
+ pn := "package"
+ mp := "m"
+ b, err := encodemeta.NewCoverageMetaDataBuilder(p, pn, mp)
+ if err != nil {
+ t.Fatalf("making builder: %v", err)
+ }
+ drws := &slicewriter.WriteSeeker{}
+ b.Emit(drws)
+ drws.Seek(0, io.SeekStart)
+ dec, err := decodemeta.NewCoverageMetaDataDecoder(drws.BytesWritten(), false)
+ if err != nil {
+ t.Fatalf("making decoder: %v", err)
+ }
+ nf := dec.NumFuncs()
+ if nf != 0 {
+ t.Errorf("dec.NumFuncs(): got %d want %d", nf, 0)
+ }
+ pp := dec.PackagePath()
+ if pp != p {
+ t.Errorf("dec.PackagePath(): got %s want %s", pp, p)
+ }
+ ppn := dec.PackageName()
+ if ppn != pn {
+ t.Errorf("dec.PackageName(): got %s want %s", ppn, pn)
+ }
+ pmp := dec.ModulePath()
+ if pmp != mp {
+ t.Errorf("dec.ModulePath(): got %s want %s", pmp, mp)
+ }
+}
+
+func TestMetaDataEncoderDecoder(t *testing.T) {
+ // Test encode path.
+ pp := "foo/bar/pkg"
+ pn := "pkg"
+ mp := "barmod"
+ b, err := encodemeta.NewCoverageMetaDataBuilder(pp, pn, mp)
+ if err != nil {
+ t.Fatalf("making builder: %v", err)
+ }
+ f1 := coverage.FuncDesc{
+ Funcname: "func",
+ Srcfile: "foo.go",
+ Units: []coverage.CoverableUnit{
+ coverage.CoverableUnit{StLine: 1, StCol: 2, EnLine: 3, EnCol: 4, NxStmts: 5},
+ coverage.CoverableUnit{StLine: 6, StCol: 7, EnLine: 8, EnCol: 9, NxStmts: 10},
+ },
+ }
+ idx := b.AddFunc(f1)
+ if idx != 0 {
+ t.Errorf("b.AddFunc(f1) got %d want %d", idx, 0)
+ }
+
+ f2 := coverage.FuncDesc{
+ Funcname: "xfunc",
+ Srcfile: "bar.go",
+ Units: []coverage.CoverableUnit{
+ coverage.CoverableUnit{StLine: 1, StCol: 2, EnLine: 3, EnCol: 4, NxStmts: 5},
+ coverage.CoverableUnit{StLine: 6, StCol: 7, EnLine: 8, EnCol: 9, NxStmts: 10},
+ coverage.CoverableUnit{StLine: 11, StCol: 12, EnLine: 13, EnCol: 14, NxStmts: 15},
+ },
+ }
+ idx = b.AddFunc(f2)
+ if idx != 1 {
+		t.Errorf("b.AddFunc(f2) got %d want %d", idx, 1)
+ }
+
+ // Emit into a writer.
+ drws := &slicewriter.WriteSeeker{}
+ b.Emit(drws)
+
+ // Test decode path.
+ drws.Seek(0, io.SeekStart)
+ dec, err := decodemeta.NewCoverageMetaDataDecoder(drws.BytesWritten(), false)
+ if err != nil {
+ t.Fatalf("NewCoverageMetaDataDecoder error: %v", err)
+ }
+ nf := dec.NumFuncs()
+ if nf != 2 {
+ t.Errorf("dec.NumFuncs(): got %d want %d", nf, 2)
+ }
+
+ gotpp := dec.PackagePath()
+ if gotpp != pp {
+ t.Errorf("packagepath: got %s want %s", gotpp, pp)
+ }
+ gotpn := dec.PackageName()
+ if gotpn != pn {
+ t.Errorf("packagename: got %s want %s", gotpn, pn)
+ }
+
+ cases := []coverage.FuncDesc{f1, f2}
+ for i := uint32(0); i < uint32(len(cases)); i++ {
+ var fn coverage.FuncDesc
+ if err := dec.ReadFunc(i, &fn); err != nil {
+ t.Fatalf("err reading function %d: %v", i, err)
+ }
+ res := cmpFuncDesc(cases[i], fn)
+ if res != "" {
+ t.Errorf("ReadFunc(%d): %s", i, res)
+ }
+ }
+}
+
+func createFuncs(i int) []coverage.FuncDesc {
+ res := []coverage.FuncDesc{}
+ lc := uint32(1)
+ for fi := 0; fi < i+1; fi++ {
+ units := []coverage.CoverableUnit{}
+ for ui := 0; ui < (fi+1)*(i+1); ui++ {
+ units = append(units,
+ coverage.CoverableUnit{StLine: lc, StCol: lc + 1,
+ EnLine: lc + 2, EnCol: lc + 3, NxStmts: lc + 4,
+ })
+ lc += 5
+ }
+ f := coverage.FuncDesc{
+ Funcname: fmt.Sprintf("func_%d_%d", i, fi),
+ Srcfile: fmt.Sprintf("foo_%d.go", i),
+ Units: units,
+ }
+ res = append(res, f)
+ }
+ return res
+}
+
+func createBlob(t *testing.T, i int) []byte {
+ nomodule := ""
+ b, err := encodemeta.NewCoverageMetaDataBuilder("foo/pkg", "pkg", nomodule)
+ if err != nil {
+ t.Fatalf("making builder: %v", err)
+ }
+
+ funcs := createFuncs(i)
+ for _, f := range funcs {
+ b.AddFunc(f)
+ }
+ drws := &slicewriter.WriteSeeker{}
+ b.Emit(drws)
+ return drws.BytesWritten()
+}
+
+func createMetaDataBlobs(t *testing.T, nb int) [][]byte {
+ res := [][]byte{}
+ for i := 0; i < nb; i++ {
+ res = append(res, createBlob(t, i))
+ }
+ return res
+}
+
+func TestMetaDataWriterReader(t *testing.T) {
+ d := t.TempDir()
+
+ // Emit a meta-file...
+ mfpath := filepath.Join(d, "covmeta.hash.0")
+ of, err := os.OpenFile(mfpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ t.Fatalf("opening covmeta: %v", err)
+ }
+ //t.Logf("meta-file path is %s", mfpath)
+ blobs := createMetaDataBlobs(t, 7)
+ gran := coverage.CtrGranularityPerBlock
+ mfw := encodemeta.NewCoverageMetaFileWriter(mfpath, of)
+ finalHash := [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+ err = mfw.Write(finalHash, blobs, coverage.CtrModeAtomic, gran)
+ if err != nil {
+ t.Fatalf("writing meta-file: %v", err)
+ }
+ if err = of.Close(); err != nil {
+ t.Fatalf("closing meta-file: %v", err)
+ }
+
+ // ... then read it back in, first time without setting fileView,
+ // second time setting it.
+ for k := 0; k < 2; k++ {
+ var fileView []byte
+
+ inf, err := os.Open(mfpath)
+ if err != nil {
+ t.Fatalf("open() on meta-file: %v", err)
+ }
+
+ if k != 0 {
+ // Use fileview to exercise different paths in reader.
+ fi, err := os.Stat(mfpath)
+ if err != nil {
+ t.Fatalf("stat() on meta-file: %v", err)
+ }
+ fileView = make([]byte, fi.Size())
+ if _, err := inf.Read(fileView); err != nil {
+ t.Fatalf("read() on meta-file: %v", err)
+ }
+ if _, err := inf.Seek(int64(0), io.SeekStart); err != nil {
+ t.Fatalf("seek() on meta-file: %v", err)
+ }
+ }
+
+ mfr, err := decodemeta.NewCoverageMetaFileReader(inf, fileView)
+ if err != nil {
+ t.Fatalf("k=%d NewCoverageMetaFileReader failed with: %v", k, err)
+ }
+ np := mfr.NumPackages()
+ if np != 7 {
+ t.Fatalf("k=%d wanted 7 packages got %d", k, np)
+ }
+ md := mfr.CounterMode()
+ wmd := coverage.CtrModeAtomic
+ if md != wmd {
+ t.Fatalf("k=%d wanted mode %d got %d", k, wmd, md)
+ }
+ gran := mfr.CounterGranularity()
+ wgran := coverage.CtrGranularityPerBlock
+ if gran != wgran {
+ t.Fatalf("k=%d wanted gran %d got %d", k, wgran, gran)
+ }
+
+ payload := []byte{}
+ for pi := 0; pi < int(np); pi++ {
+ var pd *decodemeta.CoverageMetaDataDecoder
+ var err error
+ pd, payload, err = mfr.GetPackageDecoder(uint32(pi), payload)
+ if err != nil {
+ t.Fatalf("GetPackageDecoder(%d) failed with: %v", pi, err)
+ }
+ efuncs := createFuncs(pi)
+ nf := pd.NumFuncs()
+ if len(efuncs) != int(nf) {
+ t.Fatalf("decoding pk %d wanted %d funcs got %d",
+ pi, len(efuncs), nf)
+ }
+ var f coverage.FuncDesc
+ for fi := 0; fi < int(nf); fi++ {
+ if err := pd.ReadFunc(uint32(fi), &f); err != nil {
+ t.Fatalf("ReadFunc(%d) pk %d got error %v",
+ fi, pi, err)
+ }
+ res := cmpFuncDesc(efuncs[fi], f)
+ if res != "" {
+ t.Errorf("ReadFunc(%d) pk %d: %s", fi, pi, res)
+ }
+ }
+ }
+ inf.Close()
+ }
+}
+
+func TestMetaDataDecodeLitFlagIssue57942(t *testing.T) {
+
+ // Encode a package with a few functions. The funcs alternate
+ // between regular functions and function literals.
+ pp := "foo/bar/pkg"
+ pn := "pkg"
+ mp := "barmod"
+ b, err := encodemeta.NewCoverageMetaDataBuilder(pp, pn, mp)
+ if err != nil {
+ t.Fatalf("making builder: %v", err)
+ }
+ const NF = 6
+ const NCU = 1
+ ln := uint32(10)
+ wantfds := []coverage.FuncDesc{}
+ for fi := uint32(0); fi < NF; fi++ {
+ fis := fmt.Sprintf("%d", fi)
+ fd := coverage.FuncDesc{
+ Funcname: "func" + fis,
+ Srcfile: "foo" + fis + ".go",
+ Units: []coverage.CoverableUnit{
+ coverage.CoverableUnit{StLine: ln + 1, StCol: 2, EnLine: ln + 3, EnCol: 4, NxStmts: fi + 2},
+ },
+ Lit: (fi % 2) == 0,
+ }
+ wantfds = append(wantfds, fd)
+ b.AddFunc(fd)
+ }
+
+ // Emit into a writer.
+ drws := &slicewriter.WriteSeeker{}
+ b.Emit(drws)
+
+ // Decode the result.
+ drws.Seek(0, io.SeekStart)
+ dec, err := decodemeta.NewCoverageMetaDataDecoder(drws.BytesWritten(), false)
+ if err != nil {
+ t.Fatalf("making decoder: %v", err)
+ }
+ nf := dec.NumFuncs()
+ if nf != NF {
+ t.Fatalf("decoder number of functions: got %d want %d", nf, NF)
+ }
+ var fn coverage.FuncDesc
+ for i := uint32(0); i < uint32(NF); i++ {
+ if err := dec.ReadFunc(i, &fn); err != nil {
+ t.Fatalf("err reading function %d: %v", i, err)
+ }
+ res := cmpFuncDesc(wantfds[i], fn)
+ if res != "" {
+ t.Errorf("ReadFunc(%d): %s", i, res)
+ }
+ }
+}
diff --git a/src/internal/coverage/uleb128/uleb128.go b/src/internal/coverage/uleb128/uleb128.go
new file mode 100644
index 0000000..e5cd92a
--- /dev/null
+++ b/src/internal/coverage/uleb128/uleb128.go
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uleb128
+
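+// AppendUleb128 appends the unsigned LEB128 encoding of v to b and returns
+// the extended slice: seven value bits per byte, least-significant group
+// first, with the high bit of each byte set when more bytes follow. As an
+// illustrative example, v = 300 (binary 1_0010_1100) encodes to the two
+// bytes 0xAC, 0x02.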
+func AppendUleb128(b []byte, v uint) []byte {
+ for {
+ c := uint8(v & 0x7f)
+ v >>= 7
+ if v != 0 {
+ c |= 0x80
+ }
+ b = append(b, c)
+ if c&0x80 == 0 {
+ break
+ }
+ }
+ return b
+}
diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go
new file mode 100644
index 0000000..1352810
--- /dev/null
+++ b/src/internal/cpu/cpu.go
@@ -0,0 +1,222 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cpu implements processor feature detection
+// used by the Go standard library.
+package cpu
+
+// DebugOptions is set to true by the runtime if the OS supports reading
+// GODEBUG early in runtime startup.
+// This should not be changed after it is initialized.
+var DebugOptions bool
+
+// CacheLinePad is used to pad structs to avoid false sharing.
+type CacheLinePad struct{ _ [CacheLinePadSize]byte }
+
+// CacheLineSize is the CPU's assumed cache line size.
+// There is currently no runtime detection of the real cache line size
+// so we use the constant per GOARCH CacheLinePadSize as an approximation.
+var CacheLineSize uintptr = CacheLinePadSize
+
+// The booleans in X86 contain the correspondingly named cpuid feature bit.
+// HasAVX and HasAVX2 are only set if the OS supports XMM and YMM registers
+// in addition to the cpuid feature bit being set.
+// The struct is padded to avoid false sharing.
+var X86 struct {
+ _ CacheLinePad
+ HasAES bool
+ HasADX bool
+ HasAVX bool
+ HasAVX2 bool
+ HasBMI1 bool
+ HasBMI2 bool
+ HasERMS bool
+ HasFMA bool
+ HasOSXSAVE bool
+ HasPCLMULQDQ bool
+ HasPOPCNT bool
+ HasRDTSCP bool
+ HasSHA bool
+ HasSSE3 bool
+ HasSSSE3 bool
+ HasSSE41 bool
+ HasSSE42 bool
+ _ CacheLinePad
+}
+
+// The booleans in ARM contain the correspondingly named cpu feature bit.
+// The struct is padded to avoid false sharing.
+var ARM struct {
+ _ CacheLinePad
+ HasVFPv4 bool
+ HasIDIVA bool
+ _ CacheLinePad
+}
+
+// The booleans in ARM64 contain the correspondingly named cpu feature bit.
+// The struct is padded to avoid false sharing.
+var ARM64 struct {
+ _ CacheLinePad
+ HasAES bool
+ HasPMULL bool
+ HasSHA1 bool
+ HasSHA2 bool
+ HasSHA512 bool
+ HasCRC32 bool
+ HasATOMICS bool
+ HasCPUID bool
+ IsNeoverse bool
+ _ CacheLinePad
+}
+
+var MIPS64X struct {
+ _ CacheLinePad
+ HasMSA bool // MIPS SIMD architecture
+ _ CacheLinePad
+}
+
+// For ppc64(le), it is safe to check only for ISA level starting on ISA v3.00,
+// since there are no optional categories. There are some exceptions that also
+// require kernel support to work (darn, scv), so there are feature bits for
+// those as well. The minimum processor requirement is POWER8 (ISA 2.07).
+// The struct is padded to avoid false sharing.
+var PPC64 struct {
+ _ CacheLinePad
+ HasDARN bool // Hardware random number generator (requires kernel enablement)
+ HasSCV bool // Syscall vectored (requires kernel enablement)
+ IsPOWER8 bool // ISA v2.07 (POWER8)
+ IsPOWER9 bool // ISA v3.00 (POWER9)
+ IsPOWER10 bool // ISA v3.1 (POWER10)
+ _ CacheLinePad
+}
+
+var S390X struct {
+ _ CacheLinePad
+ HasZARCH bool // z architecture mode is active [mandatory]
+ HasSTFLE bool // store facility list extended [mandatory]
+ HasLDISP bool // long (20-bit) displacements [mandatory]
+ HasEIMM bool // 32-bit immediates [mandatory]
+ HasDFP bool // decimal floating point
+ HasETF3EH bool // ETF-3 enhanced
+ HasMSA bool // message security assist (CPACF)
+ HasAES bool // KM-AES{128,192,256} functions
+ HasAESCBC bool // KMC-AES{128,192,256} functions
+ HasAESCTR bool // KMCTR-AES{128,192,256} functions
+ HasAESGCM bool // KMA-GCM-AES{128,192,256} functions
+ HasGHASH bool // KIMD-GHASH function
+ HasSHA1 bool // K{I,L}MD-SHA-1 functions
+ HasSHA256 bool // K{I,L}MD-SHA-256 functions
+ HasSHA512 bool // K{I,L}MD-SHA-512 functions
+ HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions
+ HasVX bool // vector facility. Note: the runtime sets this when it processes auxv records.
+ HasVXE bool // vector-enhancements facility 1
+ HasKDSA bool // elliptic curve functions
+ HasECDSA bool // NIST curves
+ HasEDDSA bool // Edwards curves
+ _ CacheLinePad
+}
+
+// Initialize examines the processor and sets the relevant variables above.
+// This is called by the runtime package early in program initialization,
+// before normal init functions are run. env is set by runtime if the OS supports
+// cpu feature options in GODEBUG.
+func Initialize(env string) {
+ doinit()
+ processOptions(env)
+}
+
+// options contains the cpu debug options that can be used in GODEBUG.
+// Options are arch dependent and are added by the arch specific doinit functions.
+// Features that are mandatory for the specific GOARCH should not be added to options
+// (e.g. SSE2 on amd64).
+var options []option
+
+// Option names should be lower case. e.g. avx instead of AVX.
+type option struct {
+ Name string
+ Feature *bool
+ Specified bool // whether feature value was specified in GODEBUG
+ Enable bool // whether feature should be enabled
+}
+
+// processOptions enables or disables CPU feature values based on the parsed env string.
+// The env string is expected to be of the form cpu.feature1=value1,cpu.feature2=value2...
+// where each feature name is one of the architecture-specific entries stored in the
+// cpu package's options variable and each value is either 'on' or 'off'.
+// If env contains cpu.all=off then all cpu features referenced through the options
+// variable are disabled. Other feature names and values result in warning messages.
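+//
+// For example (an illustrative setting, not an exhaustive list),
+//
+//	GODEBUG=cpu.avx2=off,cpu.erms=off
+//
+// turns off the AVX2 and ERMS based optimizations on amd64, while
+// GODEBUG=cpu.all=off disables every feature registered in options.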
+func processOptions(env string) {
+field:
+ for env != "" {
+ field := ""
+ i := indexByte(env, ',')
+ if i < 0 {
+ field, env = env, ""
+ } else {
+ field, env = env[:i], env[i+1:]
+ }
+ if len(field) < 4 || field[:4] != "cpu." {
+ continue
+ }
+ i = indexByte(field, '=')
+ if i < 0 {
+ print("GODEBUG: no value specified for \"", field, "\"\n")
+ continue
+ }
+ key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on"
+
+ var enable bool
+ switch value {
+ case "on":
+ enable = true
+ case "off":
+ enable = false
+ default:
+ print("GODEBUG: value \"", value, "\" not supported for cpu option \"", key, "\"\n")
+ continue field
+ }
+
+ if key == "all" {
+ for i := range options {
+ options[i].Specified = true
+ options[i].Enable = enable
+ }
+ continue field
+ }
+
+ for i := range options {
+ if options[i].Name == key {
+ options[i].Specified = true
+ options[i].Enable = enable
+ continue field
+ }
+ }
+
+ print("GODEBUG: unknown cpu feature \"", key, "\"\n")
+ }
+
+ for _, o := range options {
+ if !o.Specified {
+ continue
+ }
+
+ if o.Enable && !*o.Feature {
+ print("GODEBUG: can not enable \"", o.Name, "\", missing CPU support\n")
+ continue
+ }
+
+ *o.Feature = o.Enable
+ }
+}
+
+// indexByte returns the index of the first instance of c in s,
+// or -1 if c is not present in s.
+func indexByte(s string, c byte) int {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/src/internal/cpu/cpu.s b/src/internal/cpu/cpu.s
new file mode 100644
index 0000000..3c770c1
--- /dev/null
+++ b/src/internal/cpu/cpu.s
@@ -0,0 +1,6 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This assembly file exists to allow internal/cpu to call
+// non-exported runtime functions that use "go:linkname". \ No newline at end of file
diff --git a/src/internal/cpu/cpu_arm.go b/src/internal/cpu/cpu_arm.go
new file mode 100644
index 0000000..b624526
--- /dev/null
+++ b/src/internal/cpu/cpu_arm.go
@@ -0,0 +1,34 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+const CacheLinePadSize = 32
+
+// arm doesn't have a 'cpuid' equivalent, so we rely on HWCAP/HWCAP2.
+// These are initialized by archauxv() and should not be changed after they are
+// initialized.
+var HWCap uint
+var HWCap2 uint
+
+// HWCAP/HWCAP2 bits. These are exposed by Linux and FreeBSD.
+const (
+ hwcap_VFPv4 = 1 << 16
+ hwcap_IDIVA = 1 << 17
+)
+
+func doinit() {
+ options = []option{
+ {Name: "vfpv4", Feature: &ARM.HasVFPv4},
+ {Name: "idiva", Feature: &ARM.HasIDIVA},
+ }
+
+ // HWCAP feature bits
+ ARM.HasVFPv4 = isSet(HWCap, hwcap_VFPv4)
+ ARM.HasIDIVA = isSet(HWCap, hwcap_IDIVA)
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
diff --git a/src/internal/cpu/cpu_arm64.go b/src/internal/cpu/cpu_arm64.go
new file mode 100644
index 0000000..4a302f2
--- /dev/null
+++ b/src/internal/cpu/cpu_arm64.go
@@ -0,0 +1,69 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+// CacheLinePadSize is used to prevent false sharing of cache lines.
+// We choose 128 because Apple Silicon, a.k.a. M1, has a 128-byte cache line size.
+// It doesn't cost much and is much more future-proof.
+const CacheLinePadSize = 128
+
+func doinit() {
+ options = []option{
+ {Name: "aes", Feature: &ARM64.HasAES},
+ {Name: "pmull", Feature: &ARM64.HasPMULL},
+ {Name: "sha1", Feature: &ARM64.HasSHA1},
+ {Name: "sha2", Feature: &ARM64.HasSHA2},
+ {Name: "sha512", Feature: &ARM64.HasSHA512},
+ {Name: "crc32", Feature: &ARM64.HasCRC32},
+ {Name: "atomics", Feature: &ARM64.HasATOMICS},
+ {Name: "cpuid", Feature: &ARM64.HasCPUID},
+ {Name: "isNeoverse", Feature: &ARM64.IsNeoverse},
+ }
+
+ // arm64 uses different ways to detect CPU features at runtime depending on the operating system.
+ osInit()
+}
+
+func getisar0() uint64
+
+func getMIDR() uint64
+
+func extractBits(data uint64, start, end uint) uint {
+ return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
+}
+
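+// parseARM64SystemRegisters derives feature flags from the ID_AA64ISAR0_EL1
+// register. Each feature occupies a 4-bit field; for example the AES field
+// sits in bits [7:4], so extractBits(isar0, 4, 7) below reads 0 (no AES
+// support), 1 (AES) or 2 (AES plus PMULL).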
+func parseARM64SystemRegisters(isar0 uint64) {
+ // ID_AA64ISAR0_EL1
+ switch extractBits(isar0, 4, 7) {
+ case 1:
+ ARM64.HasAES = true
+ case 2:
+ ARM64.HasAES = true
+ ARM64.HasPMULL = true
+ }
+
+ switch extractBits(isar0, 8, 11) {
+ case 1:
+ ARM64.HasSHA1 = true
+ }
+
+ switch extractBits(isar0, 12, 15) {
+ case 1:
+ ARM64.HasSHA2 = true
+ case 2:
+ ARM64.HasSHA2 = true
+ ARM64.HasSHA512 = true
+ }
+
+ switch extractBits(isar0, 16, 19) {
+ case 1:
+ ARM64.HasCRC32 = true
+ }
+
+ switch extractBits(isar0, 20, 23) {
+ case 2:
+ ARM64.HasATOMICS = true
+ }
+}
diff --git a/src/internal/cpu/cpu_arm64.s b/src/internal/cpu/cpu_arm64.s
new file mode 100644
index 0000000..d6e7f44
--- /dev/null
+++ b/src/internal/cpu/cpu_arm64.s
@@ -0,0 +1,18 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func getisar0() uint64
+TEXT ·getisar0(SB),NOSPLIT,$0
+ // get Instruction Set Attributes 0 into R0
+ MRS ID_AA64ISAR0_EL1, R0
+ MOVD R0, ret+0(FP)
+ RET
+
+// func getMIDR() uint64
+TEXT ·getMIDR(SB), NOSPLIT, $0-8
+ MRS MIDR_EL1, R0
+ MOVD R0, ret+0(FP)
+ RET
diff --git a/src/internal/cpu/cpu_arm64_android.go b/src/internal/cpu/cpu_arm64_android.go
new file mode 100644
index 0000000..fbdf7ba
--- /dev/null
+++ b/src/internal/cpu/cpu_arm64_android.go
@@ -0,0 +1,11 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64
+
+package cpu
+
+func osInit() {
+ hwcapInit("android")
+}
diff --git a/src/internal/cpu/cpu_arm64_darwin.go b/src/internal/cpu/cpu_arm64_darwin.go
new file mode 100644
index 0000000..60beadd
--- /dev/null
+++ b/src/internal/cpu/cpu_arm64_darwin.go
@@ -0,0 +1,33 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && darwin && !ios
+
+package cpu
+
+func osInit() {
+ ARM64.HasATOMICS = sysctlEnabled([]byte("hw.optional.armv8_1_atomics\x00"))
+ ARM64.HasCRC32 = sysctlEnabled([]byte("hw.optional.armv8_crc32\x00"))
+ ARM64.HasSHA512 = sysctlEnabled([]byte("hw.optional.armv8_2_sha512\x00"))
+
+ // There are no hw.optional sysctl values for the below features on Mac OS 11.0
+ // to detect their supported state dynamically. Assume the CPU features that
+ // Apple Silicon M1 supports to be available as a minimal set of features
+ // to all Go programs running on darwin/arm64.
+ ARM64.HasAES = true
+ ARM64.HasPMULL = true
+ ARM64.HasSHA1 = true
+ ARM64.HasSHA2 = true
+}
+
+//go:noescape
+func getsysctlbyname(name []byte) (int32, int32)
+
+func sysctlEnabled(name []byte) bool {
+ ret, value := getsysctlbyname(name)
+ if ret < 0 {
+ return false
+ }
+ return value > 0
+}
diff --git a/src/internal/cpu/cpu_arm64_freebsd.go b/src/internal/cpu/cpu_arm64_freebsd.go
new file mode 100644
index 0000000..96ed359
--- /dev/null
+++ b/src/internal/cpu/cpu_arm64_freebsd.go
@@ -0,0 +1,14 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64
+
+package cpu
+
+func osInit() {
+ // Retrieve info from system register ID_AA64ISAR0_EL1.
+ isar0 := getisar0()
+
+ parseARM64SystemRegisters(isar0)
+}
diff --git a/src/internal/cpu/cpu_arm64_hwcap.go b/src/internal/cpu/cpu_arm64_hwcap.go
new file mode 100644
index 0000000..2fabbb6
--- /dev/null
+++ b/src/internal/cpu/cpu_arm64_hwcap.go
@@ -0,0 +1,66 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && linux
+
+package cpu
+
+// HWCap may be initialized by archauxv and
+// should not be changed after it is initialized.
+var HWCap uint
+
+// HWCAP bits. These are exposed by Linux.
+const (
+ hwcap_AES = 1 << 3
+ hwcap_PMULL = 1 << 4
+ hwcap_SHA1 = 1 << 5
+ hwcap_SHA2 = 1 << 6
+ hwcap_CRC32 = 1 << 7
+ hwcap_ATOMICS = 1 << 8
+ hwcap_CPUID = 1 << 11
+ hwcap_SHA512 = 1 << 21
+)
+
+func hwcapInit(os string) {
+ // HWCap was populated by the runtime from the auxiliary vector.
+ // Use HWCap information since reading aarch64 system registers
+ // is not supported in user space on older linux kernels.
+ ARM64.HasAES = isSet(HWCap, hwcap_AES)
+ ARM64.HasPMULL = isSet(HWCap, hwcap_PMULL)
+ ARM64.HasSHA1 = isSet(HWCap, hwcap_SHA1)
+ ARM64.HasSHA2 = isSet(HWCap, hwcap_SHA2)
+ ARM64.HasCRC32 = isSet(HWCap, hwcap_CRC32)
+ ARM64.HasCPUID = isSet(HWCap, hwcap_CPUID)
+ ARM64.HasSHA512 = isSet(HWCap, hwcap_SHA512)
+
+ // The Samsung S9+ kernel reports support for atomics, but not all cores
+ // actually support them, resulting in SIGILL. See issue #28431.
+ // TODO(elias.naur): Only disable the optimization on bad chipsets on android.
+ ARM64.HasATOMICS = isSet(HWCap, hwcap_ATOMICS) && os != "android"
+
+	// Check whether we are executing on a Neoverse core. To do that, first
+	// consult the AUXV for the CPUID bit: getMIDR executes an instruction
+	// that would normally be illegal, but when the CPUID bit is set the
+	// kernel traps it, sanitizes the value and returns it. Without the
+	// CPUID bit the kernel will not trap the instruction and the process
+	// would be terminated with SIGILL.
+ if ARM64.HasCPUID {
+ midr := getMIDR()
+ part_num := uint16((midr >> 4) & 0xfff)
+ implementor := byte((midr >> 24) & 0xff)
+
+ // d0c - NeoverseN1
+ // d40 - NeoverseV1
+ // d49 - NeoverseN2
+ // d4f - NeoverseV2
+ if implementor == 'A' && (part_num == 0xd0c || part_num == 0xd40 ||
+ part_num == 0xd49 || part_num == 0xd4f) {
+ ARM64.IsNeoverse = true
+ }
+ }
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
diff --git a/src/internal/cpu/cpu_arm64_linux.go b/src/internal/cpu/cpu_arm64_linux.go
new file mode 100644
index 0000000..d746bdb
--- /dev/null
+++ b/src/internal/cpu/cpu_arm64_linux.go
@@ -0,0 +1,11 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && linux && !android
+
+package cpu
+
+func osInit() {
+ hwcapInit("linux")
+}
diff --git a/src/internal/cpu/cpu_arm64_openbsd.go b/src/internal/cpu/cpu_arm64_openbsd.go
new file mode 100644
index 0000000..1259309
--- /dev/null
+++ b/src/internal/cpu/cpu_arm64_openbsd.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64
+
+package cpu
+
+const (
+ // From OpenBSD's sys/sysctl.h.
+ _CTL_MACHDEP = 7
+
+ // From OpenBSD's machine/cpu.h.
+ _CPU_ID_AA64ISAR0 = 2
+ _CPU_ID_AA64ISAR1 = 3
+)
+
+//go:noescape
+func sysctlUint64(mib []uint32) (uint64, bool)
+
+func osInit() {
+ // Get ID_AA64ISAR0 from sysctl.
+ isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0})
+ if !ok {
+ return
+ }
+ parseARM64SystemRegisters(isar0)
+}
diff --git a/src/internal/cpu/cpu_arm64_other.go b/src/internal/cpu/cpu_arm64_other.go
new file mode 100644
index 0000000..44592cf
--- /dev/null
+++ b/src/internal/cpu/cpu_arm64_other.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && !linux && !freebsd && !android && (!darwin || ios) && !openbsd
+
+package cpu
+
+func osInit() {
+ // Other operating systems do not support reading HWCap from auxiliary vector,
+ // reading privileged aarch64 system registers or sysctl in user space to detect
+ // CPU features at runtime.
+}
diff --git a/src/internal/cpu/cpu_loong64.go b/src/internal/cpu/cpu_loong64.go
new file mode 100644
index 0000000..1c90c24
--- /dev/null
+++ b/src/internal/cpu/cpu_loong64.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build loong64
+
+package cpu
+
+// CacheLinePadSize is used to prevent false sharing of cache lines.
+// We choose 64 because on the Loongson 3A5000 the L1 Dcache is 4-way, with 256 lines of 64 bytes each.
+const CacheLinePadSize = 64
+
+func doinit() {}
diff --git a/src/internal/cpu/cpu_mips.go b/src/internal/cpu/cpu_mips.go
new file mode 100644
index 0000000..14a9c97
--- /dev/null
+++ b/src/internal/cpu/cpu_mips.go
@@ -0,0 +1,10 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+const CacheLinePadSize = 32
+
+func doinit() {
+}
diff --git a/src/internal/cpu/cpu_mips64x.go b/src/internal/cpu/cpu_mips64x.go
new file mode 100644
index 0000000..c452ffd
--- /dev/null
+++ b/src/internal/cpu/cpu_mips64x.go
@@ -0,0 +1,32 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips64 || mips64le
+
+package cpu
+
+const CacheLinePadSize = 32
+
+// This is initialized by archauxv and should not be changed after it is
+// initialized.
+var HWCap uint
+
+// HWCAP bits. These are exposed by the Linux kernel 5.4.
+const (
+ // CPU features
+ hwcap_MIPS_MSA = 1 << 1
+)
+
+func doinit() {
+ options = []option{
+ {Name: "msa", Feature: &MIPS64X.HasMSA},
+ }
+
+ // HWCAP feature bits
+ MIPS64X.HasMSA = isSet(HWCap, hwcap_MIPS_MSA)
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
diff --git a/src/internal/cpu/cpu_mipsle.go b/src/internal/cpu/cpu_mipsle.go
new file mode 100644
index 0000000..14a9c97
--- /dev/null
+++ b/src/internal/cpu/cpu_mipsle.go
@@ -0,0 +1,10 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+const CacheLinePadSize = 32
+
+func doinit() {
+}
diff --git a/src/internal/cpu/cpu_no_name.go b/src/internal/cpu/cpu_no_name.go
new file mode 100644
index 0000000..2adfa1b
--- /dev/null
+++ b/src/internal/cpu/cpu_no_name.go
@@ -0,0 +1,18 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !386 && !amd64 && !ppc64 && !ppc64le
+
+package cpu
+
+// Name returns the CPU name given by the vendor
+// if it can be read directly from memory or by CPU instructions.
+// If the CPU name cannot be determined, an empty string is returned.
+//
+// Implementations that use the Operating System (e.g. sysctl or /sys/)
+// to gather CPU information for display should be placed in internal/sysinfo.
+func Name() string {
+ // "A CPU has no name".
+ return ""
+}
diff --git a/src/internal/cpu/cpu_ppc64x.go b/src/internal/cpu/cpu_ppc64x.go
new file mode 100644
index 0000000..c4a08fe
--- /dev/null
+++ b/src/internal/cpu/cpu_ppc64x.go
@@ -0,0 +1,35 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+package cpu
+
+const CacheLinePadSize = 128
+
+func doinit() {
+ options = []option{
+ {Name: "darn", Feature: &PPC64.HasDARN},
+ {Name: "scv", Feature: &PPC64.HasSCV},
+ {Name: "power9", Feature: &PPC64.IsPOWER9},
+ }
+
+ osinit()
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
+
+func Name() string {
+ switch {
+ case PPC64.IsPOWER10:
+ return "POWER10"
+ case PPC64.IsPOWER9:
+ return "POWER9"
+ case PPC64.IsPOWER8:
+ return "POWER8"
+ }
+ return ""
+}
diff --git a/src/internal/cpu/cpu_ppc64x_aix.go b/src/internal/cpu/cpu_ppc64x_aix.go
new file mode 100644
index 0000000..f05ed6f
--- /dev/null
+++ b/src/internal/cpu/cpu_ppc64x_aix.go
@@ -0,0 +1,25 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+package cpu
+
+const (
+ // getsystemcfg constants
+ _SC_IMPL = 2
+ _IMPL_POWER8 = 0x10000
+ _IMPL_POWER9 = 0x20000
+ _IMPL_POWER10 = 0x40000
+)
+
+func osinit() {
+ impl := getsystemcfg(_SC_IMPL)
+ PPC64.IsPOWER8 = isSet(impl, _IMPL_POWER8)
+ PPC64.IsPOWER9 = isSet(impl, _IMPL_POWER9)
+ PPC64.IsPOWER10 = isSet(impl, _IMPL_POWER10)
+}
+
+// getsystemcfg is defined in runtime/os2_aix.go
+func getsystemcfg(label uint) uint
diff --git a/src/internal/cpu/cpu_ppc64x_linux.go b/src/internal/cpu/cpu_ppc64x_linux.go
new file mode 100644
index 0000000..9df82ca
--- /dev/null
+++ b/src/internal/cpu/cpu_ppc64x_linux.go
@@ -0,0 +1,33 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+package cpu
+
+// ppc64 doesn't have a 'cpuid' equivalent, so we rely on HWCAP/HWCAP2.
+// These are initialized by archauxv and should not be changed after they are
+// initialized.
+var HWCap uint
+var HWCap2 uint
+
+// HWCAP bits. These are exposed by Linux.
+const (
+ // ISA Level
+ hwcap2_ARCH_2_07 = 0x80000000
+ hwcap2_ARCH_3_00 = 0x00800000
+ hwcap2_ARCH_3_1 = 0x00040000
+
+ // CPU features
+ hwcap2_DARN = 0x00200000
+ hwcap2_SCV = 0x00100000
+)
+
+func osinit() {
+ PPC64.IsPOWER8 = isSet(HWCap2, hwcap2_ARCH_2_07)
+ PPC64.IsPOWER9 = isSet(HWCap2, hwcap2_ARCH_3_00)
+ PPC64.IsPOWER10 = isSet(HWCap2, hwcap2_ARCH_3_1)
+ PPC64.HasDARN = isSet(HWCap2, hwcap2_DARN)
+ PPC64.HasSCV = isSet(HWCap2, hwcap2_SCV)
+}
diff --git a/src/internal/cpu/cpu_ppc64x_other.go b/src/internal/cpu/cpu_ppc64x_other.go
new file mode 100644
index 0000000..d5b629d
--- /dev/null
+++ b/src/internal/cpu/cpu_ppc64x_other.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (ppc64 || ppc64le) && !aix && !linux
+
+package cpu
+
+func osinit() {
+ // Other operating systems do not support reading HWCap from auxiliary vector,
+ // reading privileged system registers or sysctl in user space to detect CPU
+ // features at runtime.
+}
diff --git a/src/internal/cpu/cpu_riscv64.go b/src/internal/cpu/cpu_riscv64.go
new file mode 100644
index 0000000..54b8c33
--- /dev/null
+++ b/src/internal/cpu/cpu_riscv64.go
@@ -0,0 +1,10 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+const CacheLinePadSize = 32
+
+func doinit() {
+}
diff --git a/src/internal/cpu/cpu_s390x.go b/src/internal/cpu/cpu_s390x.go
new file mode 100644
index 0000000..45d8ed2
--- /dev/null
+++ b/src/internal/cpu/cpu_s390x.go
@@ -0,0 +1,205 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+const CacheLinePadSize = 256
+
+var HWCap uint
+
+// bitIsSet reports whether the bit at index is set. The bit index
+// is in big endian order, so bit index 0 is the leftmost bit.
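+// For example (illustrative), bitIsSet(bits, 0) tests the most significant
+// bit of bits[0] and bitIsSet(bits, 64) tests the most significant bit of
+// bits[1], matching the bit numbering used by STFLE and the KM*/K*MD query
+// instructions below.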
+func bitIsSet(bits []uint64, index uint) bool {
+ return bits[index/64]&((1<<63)>>(index%64)) != 0
+}
+
+// function is the function code for the named function.
+type function uint8
+
+const (
+ // KM{,A,C,CTR} function codes
+ aes128 function = 18 // AES-128
+ aes192 function = 19 // AES-192
+ aes256 function = 20 // AES-256
+
+ // K{I,L}MD function codes
+ sha1 function = 1 // SHA-1
+ sha256 function = 2 // SHA-256
+ sha512 function = 3 // SHA-512
+ sha3_224 function = 32 // SHA3-224
+ sha3_256 function = 33 // SHA3-256
+ sha3_384 function = 34 // SHA3-384
+ sha3_512 function = 35 // SHA3-512
+ shake128 function = 36 // SHAKE-128
+ shake256 function = 37 // SHAKE-256
+
+ // KLMD function codes
+ ghash function = 65 // GHASH
+)
+
+const (
+ // KDSA function codes
+ ecdsaVerifyP256 function = 1 // NIST P256
+ ecdsaVerifyP384 function = 2 // NIST P384
+ ecdsaVerifyP521 function = 3 // NIST P521
+ ecdsaSignP256 function = 9 // NIST P256
+ ecdsaSignP384 function = 10 // NIST P384
+ ecdsaSignP521 function = 11 // NIST P521
+ eddsaVerifyEd25519 function = 32 // Curve25519
+ eddsaVerifyEd448 function = 36 // Curve448
+ eddsaSignEd25519 function = 40 // Curve25519
+ eddsaSignEd448 function = 44 // Curve448
+)
+
+// queryResult contains the result of a Query function
+// call. Bits are numbered in big endian order so the
+// leftmost bit (the MSB) is at index 0.
+type queryResult struct {
+ bits [2]uint64
+}
+
+// Has reports whether the given functions are present.
+func (q *queryResult) Has(fns ...function) bool {
+ if len(fns) == 0 {
+ panic("no function codes provided")
+ }
+ for _, f := range fns {
+ if !bitIsSet(q.bits[:], uint(f)) {
+ return false
+ }
+ }
+ return true
+}
+
+// facility is a bit index for the named facility.
+type facility uint8
+
+const (
+ // mandatory facilities
+ zarch facility = 1 // z architecture mode is active
+ stflef facility = 7 // store-facility-list-extended
+ ldisp facility = 18 // long-displacement
+ eimm facility = 21 // extended-immediate
+
+ // miscellaneous facilities
+ dfp facility = 42 // decimal-floating-point
+ etf3eh facility = 30 // extended-translation 3 enhancement
+
+ // cryptography facilities
+ msa facility = 17 // message-security-assist
+ msa3 facility = 76 // message-security-assist extension 3
+ msa4 facility = 77 // message-security-assist extension 4
+ msa5 facility = 57 // message-security-assist extension 5
+ msa8 facility = 146 // message-security-assist extension 8
+ msa9 facility = 155 // message-security-assist extension 9
+
+ // vector facilities
+ vxe facility = 135 // vector-enhancements 1
+
+ // Note: vx requires kernel support
+ // and so must be fetched from HWCAP.
+
+ hwcap_VX = 1 << 11 // vector facility
+)
+
+// facilityList contains the result of an STFLE call.
+// Bits are numbered in big endian order so the
+// leftmost bit (the MSB) is at index 0.
+type facilityList struct {
+ bits [4]uint64
+}
+
+// Has reports whether the given facilities are present.
+func (s *facilityList) Has(fs ...facility) bool {
+ if len(fs) == 0 {
+ panic("no facility bits provided")
+ }
+ for _, f := range fs {
+ if !bitIsSet(s.bits[:], uint(f)) {
+ return false
+ }
+ }
+ return true
+}
+
+// The following feature detection functions are defined in cpu_s390x.s.
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList
+func kmQuery() queryResult
+func kmcQuery() queryResult
+func kmctrQuery() queryResult
+func kmaQuery() queryResult
+func kimdQuery() queryResult
+func klmdQuery() queryResult
+func kdsaQuery() queryResult
+
+func doinit() {
+ options = []option{
+ {Name: "zarch", Feature: &S390X.HasZARCH},
+ {Name: "stfle", Feature: &S390X.HasSTFLE},
+ {Name: "ldisp", Feature: &S390X.HasLDISP},
+ {Name: "msa", Feature: &S390X.HasMSA},
+ {Name: "eimm", Feature: &S390X.HasEIMM},
+ {Name: "dfp", Feature: &S390X.HasDFP},
+ {Name: "etf3eh", Feature: &S390X.HasETF3EH},
+ {Name: "vx", Feature: &S390X.HasVX},
+ {Name: "vxe", Feature: &S390X.HasVXE},
+ {Name: "kdsa", Feature: &S390X.HasKDSA},
+ }
+
+ aes := []function{aes128, aes192, aes256}
+ facilities := stfle()
+
+ S390X.HasZARCH = facilities.Has(zarch)
+ S390X.HasSTFLE = facilities.Has(stflef)
+ S390X.HasLDISP = facilities.Has(ldisp)
+ S390X.HasEIMM = facilities.Has(eimm)
+ S390X.HasDFP = facilities.Has(dfp)
+ S390X.HasETF3EH = facilities.Has(etf3eh)
+ S390X.HasMSA = facilities.Has(msa)
+
+ if S390X.HasMSA {
+ // cipher message
+ km, kmc := kmQuery(), kmcQuery()
+ S390X.HasAES = km.Has(aes...)
+ S390X.HasAESCBC = kmc.Has(aes...)
+ if facilities.Has(msa4) {
+ kmctr := kmctrQuery()
+ S390X.HasAESCTR = kmctr.Has(aes...)
+ }
+ if facilities.Has(msa8) {
+ kma := kmaQuery()
+ S390X.HasAESGCM = kma.Has(aes...)
+ }
+
+ // compute message digest
+ kimd := kimdQuery() // intermediate (no padding)
+ klmd := klmdQuery() // last (padding)
+ S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1)
+ S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256)
+ S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512)
+ S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist
+ sha3 := []function{
+ sha3_224, sha3_256, sha3_384, sha3_512,
+ shake128, shake256,
+ }
+ S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...)
+ S390X.HasKDSA = facilities.Has(msa9) // elliptic curves
+ if S390X.HasKDSA {
+ kdsa := kdsaQuery()
+ S390X.HasECDSA = kdsa.Has(ecdsaVerifyP256, ecdsaSignP256, ecdsaVerifyP384, ecdsaSignP384, ecdsaVerifyP521, ecdsaSignP521)
+ S390X.HasEDDSA = kdsa.Has(eddsaVerifyEd25519, eddsaSignEd25519, eddsaVerifyEd448, eddsaSignEd448)
+ }
+ }
+
+ S390X.HasVX = isSet(HWCap, hwcap_VX)
+
+ if S390X.HasVX {
+ S390X.HasVXE = facilities.Has(vxe)
+ }
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
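
Aside: the Has methods above test bits that are numbered big-endian, with bit 0 as the most significant bit of the first doubleword, via the package's bitIsSet helper (defined elsewhere in internal/cpu and not shown here). A minimal standalone sketch of that indexing scheme, using a hypothetical bitIsSetBE function rather than the package's own helper, might look like this:

package main

import "fmt"

// bitIsSetBE reports whether bit i of bits is set, with bits numbered
// big-endian: bit 0 is the most significant bit of bits[0].
func bitIsSetBE(bits []uint64, i uint) bool {
	return bits[i/64]&(1<<(63-i%64)) != 0
}

func main() {
	var facilities [4]uint64
	facilities[0] |= 1 << (63 - 18)            // mark facility 18 (long-displacement)
	fmt.Println(bitIsSetBE(facilities[:], 18)) // true
	fmt.Println(bitIsSetBE(facilities[:], 21)) // false
}
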
diff --git a/src/internal/cpu/cpu_s390x.s b/src/internal/cpu/cpu_s390x.s
new file mode 100644
index 0000000..a1243aa
--- /dev/null
+++ b/src/internal/cpu/cpu_s390x.s
@@ -0,0 +1,63 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func stfle() facilityList
+TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32
+ MOVD $ret+0(FP), R1
+ MOVD $3, R0 // last doubleword index to store
+ XC $32, (R1), (R1) // clear 4 doublewords (32 bytes)
+ WORD $0xb2b01000 // store facility list extended (STFLE)
+ RET
+
+// func kmQuery() queryResult
+TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KM-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xB92E0024 // cipher message (KM)
+ RET
+
+// func kmcQuery() queryResult
+TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KMC-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xB92F0024 // cipher message with chaining (KMC)
+ RET
+
+// func kmctrQuery() queryResult
+TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KMCTR-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xB92D4024 // cipher message with counter (KMCTR)
+ RET
+
+// func kmaQuery() queryResult
+TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KMA-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xb9296024 // cipher message with authentication (KMA)
+ RET
+
+// func kimdQuery() queryResult
+TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KIMD-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xB93E0024 // compute intermediate message digest (KIMD)
+ RET
+
+// func klmdQuery() queryResult
+TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KLMD-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xB93F0024 // compute last message digest (KLMD)
+ RET
+
+// func kdsaQuery() queryResult
+TEXT ·kdsaQuery(SB), NOSPLIT|NOFRAME, $0-16
+ MOVD $0, R0 // set function code to 0 (KDSA-Query)
+ MOVD $ret+0(FP), R1 // address of 16-byte return value
+ WORD $0xB93A0008 // compute digital signature authentication
+ RET
+
diff --git a/src/internal/cpu/cpu_s390x_test.go b/src/internal/cpu/cpu_s390x_test.go
new file mode 100644
index 0000000..ad86858
--- /dev/null
+++ b/src/internal/cpu/cpu_s390x_test.go
@@ -0,0 +1,63 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu_test
+
+import (
+ "errors"
+ . "internal/cpu"
+ "os"
+ "regexp"
+ "testing"
+)
+
+func getFeatureList() ([]string, error) {
+ cpuinfo, err := os.ReadFile("/proc/cpuinfo")
+ if err != nil {
+ return nil, err
+ }
+ r := regexp.MustCompile("features\\s*:\\s*(.*)")
+ b := r.FindSubmatch(cpuinfo)
+ if len(b) < 2 {
+ return nil, errors.New("no feature list in /proc/cpuinfo")
+ }
+ return regexp.MustCompile("\\s+").Split(string(b[1]), -1), nil
+}
+
+func TestS390XAgainstCPUInfo(t *testing.T) {
+ // mapping of linux feature strings to S390X fields
+ mapping := make(map[string]*bool)
+ for _, option := range Options {
+ mapping[option.Name] = option.Feature
+ }
+
+ // these must be true on the machines Go supports
+ mandatory := make(map[string]bool)
+ mandatory["zarch"] = false
+ mandatory["eimm"] = false
+ mandatory["ldisp"] = false
+ mandatory["stfle"] = false
+
+ features, err := getFeatureList()
+ if err != nil {
+ t.Error(err)
+ }
+ for _, feature := range features {
+ if _, ok := mandatory[feature]; ok {
+ mandatory[feature] = true
+ }
+ if flag, ok := mapping[feature]; ok {
+ if !*flag {
+ t.Errorf("feature '%v' not detected", feature)
+ }
+ } else {
+ t.Logf("no entry for '%v'", feature)
+ }
+ }
+ for k, v := range mandatory {
+ if !v {
+ t.Errorf("mandatory feature '%v' not detected", k)
+ }
+ }
+}
diff --git a/src/internal/cpu/cpu_test.go b/src/internal/cpu/cpu_test.go
new file mode 100644
index 0000000..b8c74f2
--- /dev/null
+++ b/src/internal/cpu/cpu_test.go
@@ -0,0 +1,61 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu_test
+
+import (
+ . "internal/cpu"
+ "internal/godebug"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "testing"
+)
+
+func MustHaveDebugOptionsSupport(t *testing.T) {
+ if !DebugOptions {
+ t.Skipf("skipping test: cpu feature options not supported by OS")
+ }
+}
+
+func MustSupportFeatureDetection(t *testing.T) {
+ // TODO: add platforms that do not have CPU feature detection support.
+}
+
+func runDebugOptionsTest(t *testing.T, test string, options string) {
+ MustHaveDebugOptionsSupport(t)
+
+ testenv.MustHaveExec(t)
+
+ env := "GODEBUG=" + options
+
+ cmd := exec.Command(os.Args[0], "-test.run="+test)
+ cmd.Env = append(cmd.Env, env)
+
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("%s with %s: run failed: %v output:\n%s\n",
+ test, env, err, string(output))
+ }
+}
+
+func TestDisableAllCapabilities(t *testing.T) {
+ MustSupportFeatureDetection(t)
+ runDebugOptionsTest(t, "TestAllCapabilitiesDisabled", "cpu.all=off")
+}
+
+func TestAllCapabilitiesDisabled(t *testing.T) {
+ MustHaveDebugOptionsSupport(t)
+
+ if godebug.New("#cpu.all").Value() != "off" {
+ t.Skipf("skipping test: GODEBUG=cpu.all=off not set")
+ }
+
+ for _, o := range Options {
+ want := false
+ if got := *o.Feature; got != want {
+ t.Errorf("%v: expected %v, got %v", o.Name, want, got)
+ }
+ }
+}
diff --git a/src/internal/cpu/cpu_wasm.go b/src/internal/cpu/cpu_wasm.go
new file mode 100644
index 0000000..2310ad6
--- /dev/null
+++ b/src/internal/cpu/cpu_wasm.go
@@ -0,0 +1,10 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+const CacheLinePadSize = 64
+
+func doinit() {
+}
diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go
new file mode 100644
index 0000000..96b8ef9
--- /dev/null
+++ b/src/internal/cpu/cpu_x86.go
@@ -0,0 +1,190 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64
+
+package cpu
+
+const CacheLinePadSize = 64
+
+// cpuid is implemented in cpu_x86.s.
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
+
+// xgetbv with ecx = 0 is implemented in cpu_x86.s.
+func xgetbv() (eax, edx uint32)
+
+// getGOAMD64level is implemented in cpu_x86.s. Returns number in [1,4].
+func getGOAMD64level() int32
+
+const (
+ // edx bits
+ cpuid_SSE2 = 1 << 26
+
+ // ecx bits
+ cpuid_SSE3 = 1 << 0
+ cpuid_PCLMULQDQ = 1 << 1
+ cpuid_SSSE3 = 1 << 9
+ cpuid_FMA = 1 << 12
+ cpuid_SSE41 = 1 << 19
+ cpuid_SSE42 = 1 << 20
+ cpuid_POPCNT = 1 << 23
+ cpuid_AES = 1 << 25
+ cpuid_OSXSAVE = 1 << 27
+ cpuid_AVX = 1 << 28
+
+ // ebx bits
+ cpuid_BMI1 = 1 << 3
+ cpuid_AVX2 = 1 << 5
+ cpuid_BMI2 = 1 << 8
+ cpuid_ERMS = 1 << 9
+ cpuid_ADX = 1 << 19
+ cpuid_SHA = 1 << 29
+
+ // edx bits for CPUID 0x80000001
+ cpuid_RDTSCP = 1 << 27
+)
+
+var maxExtendedFunctionInformation uint32
+
+func doinit() {
+ options = []option{
+ {Name: "adx", Feature: &X86.HasADX},
+ {Name: "aes", Feature: &X86.HasAES},
+ {Name: "erms", Feature: &X86.HasERMS},
+ {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
+ {Name: "rdtscp", Feature: &X86.HasRDTSCP},
+ {Name: "sha", Feature: &X86.HasSHA},
+ }
+ level := getGOAMD64level()
+ if level < 2 {
+ // These options are required at level 2. At lower levels
+ // they can be turned off.
+ options = append(options,
+ option{Name: "popcnt", Feature: &X86.HasPOPCNT},
+ option{Name: "sse3", Feature: &X86.HasSSE3},
+ option{Name: "sse41", Feature: &X86.HasSSE41},
+ option{Name: "sse42", Feature: &X86.HasSSE42},
+ option{Name: "ssse3", Feature: &X86.HasSSSE3})
+ }
+ if level < 3 {
+ // These options are required at level 3. At lower levels
+ // they can be turned off.
+ options = append(options,
+ option{Name: "avx", Feature: &X86.HasAVX},
+ option{Name: "avx2", Feature: &X86.HasAVX2},
+ option{Name: "bmi1", Feature: &X86.HasBMI1},
+ option{Name: "bmi2", Feature: &X86.HasBMI2},
+ option{Name: "fma", Feature: &X86.HasFMA})
+ }
+
+ maxID, _, _, _ := cpuid(0, 0)
+
+ if maxID < 1 {
+ return
+ }
+
+ maxExtendedFunctionInformation, _, _, _ = cpuid(0x80000000, 0)
+
+ _, _, ecx1, _ := cpuid(1, 0)
+
+ X86.HasSSE3 = isSet(ecx1, cpuid_SSE3)
+ X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ)
+ X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3)
+ X86.HasSSE41 = isSet(ecx1, cpuid_SSE41)
+ X86.HasSSE42 = isSet(ecx1, cpuid_SSE42)
+ X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT)
+ X86.HasAES = isSet(ecx1, cpuid_AES)
+
+ // OSXSAVE can be false when using older Operating Systems
+ // or when explicitly disabled on newer Operating Systems by
+ // e.g. setting the xsavedisable boot option on Windows 10.
+ X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE)
+
+ // The FMA instruction set extension only has VEX prefixed instructions.
+ // VEX prefixed instructions require OSXSAVE to be enabled.
+ // See Intel 64 and IA-32 Architecture Software Developer’s Manual Volume 2
+ // Section 2.4 "AVX and SSE Instruction Exception Specification"
+ X86.HasFMA = isSet(ecx1, cpuid_FMA) && X86.HasOSXSAVE
+
+ osSupportsAVX := false
+ // For XGETBV, OSXSAVE bit is required and sufficient.
+ if X86.HasOSXSAVE {
+ eax, _ := xgetbv()
+ // Check if XMM and YMM registers have OS support.
+ osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2)
+ }
+
+ X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX
+
+ if maxID < 7 {
+ return
+ }
+
+ _, ebx7, _, _ := cpuid(7, 0)
+ X86.HasBMI1 = isSet(ebx7, cpuid_BMI1)
+ X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX
+ X86.HasBMI2 = isSet(ebx7, cpuid_BMI2)
+ X86.HasERMS = isSet(ebx7, cpuid_ERMS)
+ X86.HasADX = isSet(ebx7, cpuid_ADX)
+ X86.HasSHA = isSet(ebx7, cpuid_SHA)
+
+ var maxExtendedInformation uint32
+ maxExtendedInformation, _, _, _ = cpuid(0x80000000, 0)
+
+ if maxExtendedInformation < 0x80000001 {
+ return
+ }
+
+ _, _, _, edxExt1 := cpuid(0x80000001, 0)
+ X86.HasRDTSCP = isSet(edxExt1, cpuid_RDTSCP)
+}
+
+func isSet(hwc uint32, value uint32) bool {
+ return hwc&value != 0
+}
+
+// Name returns the CPU name given by the vendor.
+// If the CPU name can not be determined an
+// empty string is returned.
+func Name() string {
+ if maxExtendedFunctionInformation < 0x80000004 {
+ return ""
+ }
+
+ data := make([]byte, 0, 3*4*4)
+
+ var eax, ebx, ecx, edx uint32
+ eax, ebx, ecx, edx = cpuid(0x80000002, 0)
+ data = appendBytes(data, eax, ebx, ecx, edx)
+ eax, ebx, ecx, edx = cpuid(0x80000003, 0)
+ data = appendBytes(data, eax, ebx, ecx, edx)
+ eax, ebx, ecx, edx = cpuid(0x80000004, 0)
+ data = appendBytes(data, eax, ebx, ecx, edx)
+
+ // Trim leading spaces.
+ for len(data) > 0 && data[0] == ' ' {
+ data = data[1:]
+ }
+
+ // Trim tail after and including the first null byte.
+ for i, c := range data {
+ if c == '\x00' {
+ data = data[:i]
+ break
+ }
+ }
+
+ return string(data)
+}
+
+func appendBytes(b []byte, args ...uint32) []byte {
+ for _, arg := range args {
+ b = append(b,
+ byte((arg >> 0)),
+ byte((arg >> 8)),
+ byte((arg >> 16)),
+ byte((arg >> 24)))
+ }
+ return b
+}
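
Aside: the AVX gating above requires both the CPUID feature bit and OS support for XMM/YMM state, read from XCR0 via XGETBV (bits 1 and 2). A small illustrative check on sample XCR0 values, not part of the package, could be:

package main

import "fmt"

// osSupportsAVX reports whether an XCR0 value indicates OS support for AVX:
// bit 1 (SSE/XMM state) and bit 2 (AVX/YMM state) must both be enabled.
func osSupportsAVX(xcr0 uint32) bool {
	const xmmState, ymmState = 1 << 1, 1 << 2
	return xcr0&xmmState != 0 && xcr0&ymmState != 0
}

func main() {
	fmt.Println(osSupportsAVX(0x7)) // true: x87, XMM and YMM state enabled
	fmt.Println(osSupportsAVX(0x3)) // false: YMM state not enabled
}
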
diff --git a/src/internal/cpu/cpu_x86.s b/src/internal/cpu/cpu_x86.s
new file mode 100644
index 0000000..2ee8eca
--- /dev/null
+++ b/src/internal/cpu/cpu_x86.s
@@ -0,0 +1,43 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64
+
+#include "textflag.h"
+
+// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·cpuid(SB), NOSPLIT, $0-24
+ MOVL eaxArg+0(FP), AX
+ MOVL ecxArg+4(FP), CX
+ CPUID
+ MOVL AX, eax+8(FP)
+ MOVL BX, ebx+12(FP)
+ MOVL CX, ecx+16(FP)
+ MOVL DX, edx+20(FP)
+ RET
+
+// func xgetbv() (eax, edx uint32)
+TEXT ·xgetbv(SB),NOSPLIT,$0-8
+ MOVL $0, CX
+ XGETBV
+ MOVL AX, eax+0(FP)
+ MOVL DX, edx+4(FP)
+ RET
+
+// func getGOAMD64level() int32
+TEXT ·getGOAMD64level(SB),NOSPLIT,$0-4
+#ifdef GOAMD64_v4
+ MOVL $4, ret+0(FP)
+#else
+#ifdef GOAMD64_v3
+ MOVL $3, ret+0(FP)
+#else
+#ifdef GOAMD64_v2
+ MOVL $2, ret+0(FP)
+#else
+ MOVL $1, ret+0(FP)
+#endif
+#endif
+#endif
+ RET
diff --git a/src/internal/cpu/cpu_x86_test.go b/src/internal/cpu/cpu_x86_test.go
new file mode 100644
index 0000000..8564ccc
--- /dev/null
+++ b/src/internal/cpu/cpu_x86_test.go
@@ -0,0 +1,39 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64
+
+package cpu_test
+
+import (
+ . "internal/cpu"
+ "internal/godebug"
+ "testing"
+)
+
+func TestX86ifAVX2hasAVX(t *testing.T) {
+ if X86.HasAVX2 && !X86.HasAVX {
+ t.Fatalf("HasAVX expected true when HasAVX2 is true, got false")
+ }
+}
+
+func TestDisableSSE3(t *testing.T) {
+ if GetGOAMD64level() > 1 {
+ t.Skip("skipping test: can't run on GOAMD64>v1 machines")
+ }
+ runDebugOptionsTest(t, "TestSSE3DebugOption", "cpu.sse3=off")
+}
+
+func TestSSE3DebugOption(t *testing.T) {
+ MustHaveDebugOptionsSupport(t)
+
+ if godebug.New("#cpu.sse3").Value() != "off" {
+ t.Skipf("skipping test: GODEBUG=cpu.sse3=off not set")
+ }
+
+ want := false
+ if got := X86.HasSSE3; got != want {
+ t.Errorf("X86.HasSSE3 expected %v, got %v", want, got)
+ }
+}
diff --git a/src/internal/cpu/export_test.go b/src/internal/cpu/export_test.go
new file mode 100644
index 0000000..91bfc1b
--- /dev/null
+++ b/src/internal/cpu/export_test.go
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+var (
+ Options = options
+)
diff --git a/src/internal/cpu/export_x86_test.go b/src/internal/cpu/export_x86_test.go
new file mode 100644
index 0000000..a12b6f2
--- /dev/null
+++ b/src/internal/cpu/export_x86_test.go
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64
+
+package cpu
+
+var (
+ GetGOAMD64level = getGOAMD64level
+)
diff --git a/src/internal/dag/alg.go b/src/internal/dag/alg.go
new file mode 100644
index 0000000..8800279
--- /dev/null
+++ b/src/internal/dag/alg.go
@@ -0,0 +1,63 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dag
+
+// Transpose reverses all edges in g.
+func (g *Graph) Transpose() {
+ old := g.edges
+
+ g.edges = make(map[string]map[string]bool)
+ for _, n := range g.Nodes {
+ g.edges[n] = make(map[string]bool)
+ }
+
+ for from, tos := range old {
+ for to := range tos {
+ g.edges[to][from] = true
+ }
+ }
+}
+
+// Topo returns a topological sort of g. This function is deterministic.
+func (g *Graph) Topo() []string {
+ topo := make([]string, 0, len(g.Nodes))
+ marks := make(map[string]bool)
+
+ var visit func(n string)
+ visit = func(n string) {
+ if marks[n] {
+ return
+ }
+ for _, to := range g.Edges(n) {
+ visit(to)
+ }
+ marks[n] = true
+ topo = append(topo, n)
+ }
+ for _, root := range g.Nodes {
+ visit(root)
+ }
+ for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
+ topo[i], topo[j] = topo[j], topo[i]
+ }
+ return topo
+}
+
+// TransitiveReduction removes edges from g that are transitively
+// reachable. g must be transitively closed.
+func (g *Graph) TransitiveReduction() {
+ // For i -> j -> k, if i -> k exists, delete it.
+ for _, i := range g.Nodes {
+ for _, j := range g.Nodes {
+ if g.HasEdge(i, j) {
+ for _, k := range g.Nodes {
+ if g.HasEdge(j, k) {
+ g.DelEdge(i, k)
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/internal/dag/alg_test.go b/src/internal/dag/alg_test.go
new file mode 100644
index 0000000..e5ea8b6
--- /dev/null
+++ b/src/internal/dag/alg_test.go
@@ -0,0 +1,46 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dag
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestTranspose(t *testing.T) {
+ g := mustParse(t, diamond)
+ g.Transpose()
+ wantEdges(t, g, "a->b a->c a->d b->d c->d")
+}
+
+func TestTopo(t *testing.T) {
+ g := mustParse(t, diamond)
+ got := g.Topo()
+ // "d" is the root, so it's first.
+ //
+ // "c" and "b" could be in either order, but Topo is
+ // deterministic in reverse node definition order.
+ //
+ // "a" is a leaf.
+ wantNodes := strings.Fields("d c b a")
+ if !reflect.DeepEqual(wantNodes, got) {
+ t.Fatalf("want topo sort %v, got %v", wantNodes, got)
+ }
+}
+
+func TestTransitiveReduction(t *testing.T) {
+ t.Run("diamond", func(t *testing.T) {
+ g := mustParse(t, diamond)
+ g.TransitiveReduction()
+ wantEdges(t, g, "b->a c->a d->b d->c")
+ })
+ t.Run("chain", func(t *testing.T) {
+ const chain = `NONE < a < b < c < d; a, d < e;`
+ g := mustParse(t, chain)
+ g.TransitiveReduction()
+ wantEdges(t, g, "e->d d->c c->b b->a")
+ })
+}
diff --git a/src/internal/dag/parse.go b/src/internal/dag/parse.go
new file mode 100644
index 0000000..9d5b918
--- /dev/null
+++ b/src/internal/dag/parse.go
@@ -0,0 +1,314 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package dag implements a language for expressing directed acyclic
+// graphs.
+//
+// The general syntax of a rule is:
+//
+// a, b < c, d;
+//
+// which means c and d come after a and b in the partial order
+// (that is, there are edges from c and d to a and b),
+// but doesn't provide a relative order between a and b or between c and d.
+//
+// The rules can chain together, as in:
+//
+// e < f, g < h;
+//
+// which is equivalent to
+//
+// e < f, g;
+// f, g < h;
+//
+// Except for the special bottom element "NONE", each name
+// must appear exactly once on the right-hand side of any rule.
+// That rule serves as the definition of the allowed successor
+// for that name. The definition must appear before any uses
+// of the name on the left-hand side of a rule. (That is, the
+// rules themselves must be ordered according to the partial
+// order, for easier reading by people.)
+//
+// Negative assertions double-check the partial order:
+//
+// i !< j
+//
+// means that it must NOT be the case that i < j.
+// Negative assertions may appear anywhere in the rules,
+// even before i and j have been defined.
+//
+// Comments begin with #.
+package dag
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+type Graph struct {
+ Nodes []string
+ byLabel map[string]int
+ edges map[string]map[string]bool
+}
+
+func newGraph() *Graph {
+ return &Graph{byLabel: map[string]int{}, edges: map[string]map[string]bool{}}
+}
+
+func (g *Graph) addNode(label string) bool {
+ if _, ok := g.byLabel[label]; ok {
+ return false
+ }
+ g.byLabel[label] = len(g.Nodes)
+ g.Nodes = append(g.Nodes, label)
+ g.edges[label] = map[string]bool{}
+ return true
+}
+
+func (g *Graph) AddEdge(from, to string) {
+ g.edges[from][to] = true
+}
+
+func (g *Graph) DelEdge(from, to string) {
+ delete(g.edges[from], to)
+}
+
+func (g *Graph) HasEdge(from, to string) bool {
+ return g.edges[from] != nil && g.edges[from][to]
+}
+
+func (g *Graph) Edges(from string) []string {
+ edges := make([]string, 0, 16)
+ for k := range g.edges[from] {
+ edges = append(edges, k)
+ }
+ sort.Slice(edges, func(i, j int) bool { return g.byLabel[edges[i]] < g.byLabel[edges[j]] })
+ return edges
+}
+
+// Parse parses the DAG language and returns the transitive closure of
+// the described graph. In the returned graph, there is an edge from "b"
+// to "a" if b < a (or a > b) in the partial order.
+func Parse(dag string) (*Graph, error) {
+ g := newGraph()
+ disallowed := []rule{}
+
+ rules, err := parseRules(dag)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: Add line numbers to errors.
+ var errors []string
+ errorf := func(format string, a ...any) {
+ errors = append(errors, fmt.Sprintf(format, a...))
+ }
+ for _, r := range rules {
+ if r.op == "!<" {
+ disallowed = append(disallowed, r)
+ continue
+ }
+ for _, def := range r.def {
+ if def == "NONE" {
+ errorf("NONE cannot be a predecessor")
+ continue
+ }
+ if !g.addNode(def) {
+ errorf("multiple definitions for %s", def)
+ }
+ for _, less := range r.less {
+ if less == "NONE" {
+ continue
+ }
+ if _, ok := g.byLabel[less]; !ok {
+ errorf("use of %s before its definition", less)
+ } else {
+ g.AddEdge(def, less)
+ }
+ }
+ }
+ }
+
+ // Check for missing definition.
+ for _, tos := range g.edges {
+ for to := range tos {
+ if g.edges[to] == nil {
+ errorf("missing definition for %s", to)
+ }
+ }
+ }
+
+ // Complete transitive closure.
+ for _, k := range g.Nodes {
+ for _, i := range g.Nodes {
+ for _, j := range g.Nodes {
+ if i != k && k != j && g.HasEdge(i, k) && g.HasEdge(k, j) {
+ if i == j {
+ // Can only happen along with a "use of X before its definition" error above,
+ // but this error is more specific - it makes clear that reordering the
+ // rules will not be enough to fix the problem.
+ errorf("graph cycle: %s < %s < %s", j, k, i)
+ }
+ g.AddEdge(i, j)
+ }
+ }
+ }
+ }
+
+ // Check negative assertions against completed allowed graph.
+ for _, bad := range disallowed {
+ for _, less := range bad.less {
+ for _, def := range bad.def {
+ if g.HasEdge(def, less) {
+ errorf("graph edge assertion failed: %s !< %s", less, def)
+ }
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return nil, fmt.Errorf("%s", strings.Join(errors, "\n"))
+ }
+
+ return g, nil
+}
+
+// A rule is a line in the DAG language where "less < def" or "less !< def".
+type rule struct {
+ less []string
+ op string // Either "<" or "!<"
+ def []string
+}
+
+type syntaxError string
+
+func (e syntaxError) Error() string {
+ return string(e)
+}
+
+// parseRules parses the rules of a DAG.
+func parseRules(rules string) (out []rule, err error) {
+ defer func() {
+ e := recover()
+ switch e := e.(type) {
+ case nil:
+ return
+ case syntaxError:
+ err = e
+ default:
+ panic(e)
+ }
+ }()
+ p := &rulesParser{lineno: 1, text: rules}
+
+ var prev []string
+ var op string
+ for {
+ list, tok := p.nextList()
+ if tok == "" {
+ if prev == nil {
+ break
+ }
+ p.syntaxError("unexpected EOF")
+ }
+ if prev != nil {
+ out = append(out, rule{prev, op, list})
+ }
+ prev = list
+ if tok == ";" {
+ prev = nil
+ op = ""
+ continue
+ }
+ if tok != "<" && tok != "!<" {
+ p.syntaxError("missing <")
+ }
+ op = tok
+ }
+
+ return out, err
+}
+
+// A rulesParser parses the depsRules syntax described above.
+type rulesParser struct {
+ lineno int
+ lastWord string
+ text string
+}
+
+// syntaxError reports a parsing error.
+func (p *rulesParser) syntaxError(msg string) {
+ panic(syntaxError(fmt.Sprintf("parsing graph: line %d: syntax error: %s near %s", p.lineno, msg, p.lastWord)))
+}
+
+// nextList parses and returns a comma-separated list of names.
+func (p *rulesParser) nextList() (list []string, token string) {
+ for {
+ tok := p.nextToken()
+ switch tok {
+ case "":
+ if len(list) == 0 {
+ return nil, ""
+ }
+ fallthrough
+ case ",", "<", "!<", ";":
+ p.syntaxError("bad list syntax")
+ }
+ list = append(list, tok)
+
+ tok = p.nextToken()
+ if tok != "," {
+ return list, tok
+ }
+ }
+}
+
+// nextToken returns the next token in the deps rules,
+// one of ";" "," "<" "!<" or a name.
+func (p *rulesParser) nextToken() string {
+ for {
+ if p.text == "" {
+ return ""
+ }
+ switch p.text[0] {
+ case ';', ',', '<':
+ t := p.text[:1]
+ p.text = p.text[1:]
+ return t
+
+ case '!':
+ if len(p.text) < 2 || p.text[1] != '<' {
+ p.syntaxError("unexpected token !")
+ }
+ p.text = p.text[2:]
+ return "!<"
+
+ case '#':
+ i := strings.Index(p.text, "\n")
+ if i < 0 {
+ i = len(p.text)
+ }
+ p.text = p.text[i:]
+ continue
+
+ case '\n':
+ p.lineno++
+ fallthrough
+ case ' ', '\t':
+ p.text = p.text[1:]
+ continue
+
+ default:
+ i := strings.IndexAny(p.text, "!;,<#\n \t")
+ if i < 0 {
+ i = len(p.text)
+ }
+ t := p.text[:i]
+ p.text = p.text[i:]
+ p.lastWord = t
+ return t
+ }
+ }
+}
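
Aside: as a usage illustration of Parse (hedged: internal/dag can only be imported from within the Go source tree), the returned graph is the transitive closure of the rules, so implied edges such as d->a in the diamond are present and Topo lists the root first:

package main

import (
	"fmt"
	"internal/dag"
)

func main() {
	// Diamond: a has no predecessors, b and c come after a, d after b and c.
	g, err := dag.Parse(`NONE < a < b, c < d;`)
	if err != nil {
		panic(err)
	}
	fmt.Println(g.Topo())            // [d c b a]: root first, leaf last
	fmt.Println(g.HasEdge("d", "a")) // true: added by the transitive closure
}
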
diff --git a/src/internal/dag/parse_test.go b/src/internal/dag/parse_test.go
new file mode 100644
index 0000000..b2520c3
--- /dev/null
+++ b/src/internal/dag/parse_test.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dag
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+const diamond = `
+NONE < a < b, c < d;
+`
+
+func mustParse(t *testing.T, dag string) *Graph {
+ t.Helper()
+ g, err := Parse(dag)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return g
+}
+
+func wantEdges(t *testing.T, g *Graph, edges string) {
+ t.Helper()
+
+ wantEdges := strings.Fields(edges)
+ wantEdgeMap := make(map[string]bool)
+ for _, e := range wantEdges {
+ wantEdgeMap[e] = true
+ }
+
+ for _, n1 := range g.Nodes {
+ for _, n2 := range g.Nodes {
+ got := g.HasEdge(n1, n2)
+ want := wantEdgeMap[n1+"->"+n2]
+ if got && want {
+ t.Logf("%s->%s", n1, n2)
+ } else if got && !want {
+ t.Errorf("%s->%s present but not expected", n1, n2)
+ } else if want && !got {
+ t.Errorf("%s->%s missing but expected", n1, n2)
+ }
+ }
+ }
+}
+
+func TestParse(t *testing.T) {
+ // Basic smoke test for graph parsing.
+ g := mustParse(t, diamond)
+
+ wantNodes := strings.Fields("a b c d")
+ if !reflect.DeepEqual(wantNodes, g.Nodes) {
+ t.Fatalf("want nodes %v, got %v", wantNodes, g.Nodes)
+ }
+
+ // Parse returns the transitive closure, so it adds d->a.
+ wantEdges(t, g, "b->a c->a d->a d->b d->c")
+}
diff --git a/src/internal/diff/diff.go b/src/internal/diff/diff.go
new file mode 100644
index 0000000..0aeeb75
--- /dev/null
+++ b/src/internal/diff/diff.go
@@ -0,0 +1,261 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A pair is a pair of values tracked for both the x and y side of a diff.
+// It is typically a pair of line indexes.
+type pair struct{ x, y int }
+
+// Diff returns an anchored diff of the two texts old and new
+// in the “unified diff” format. If old and new are identical,
+// Diff returns a nil slice (no output).
+//
+// Unix diff implementations typically look for a diff with
+// the smallest number of lines inserted and removed,
+// which can in the worst case take time quadratic in the
+// number of lines in the texts. As a result, many implementations
+// either can be made to run for a long time or cut off the search
+// after a predetermined amount of work.
+//
+// In contrast, this implementation looks for a diff with the
+// smallest number of “unique” lines inserted and removed,
+// where unique means a line that appears just once in both old and new.
+// We call this an “anchored diff” because the unique lines anchor
+// the chosen matching regions. An anchored diff is usually clearer
+// than a standard diff, because the algorithm does not try to
+// reuse unrelated blank lines or closing braces.
+// The algorithm also guarantees to run in O(n log n) time
+// instead of the standard O(n²) time.
+//
+// Some systems call this approach a “patience diff,” named for
+// the “patience sorting” algorithm, itself named for a solitaire card game.
+// We avoid that name for two reasons. First, the name has been used
+// for a few different variants of the algorithm, so it is imprecise.
+// Second, the name is frequently interpreted as meaning that you have
+// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm,
+// when in fact the algorithm is faster than the standard one.
+func Diff(oldName string, old []byte, newName string, new []byte) []byte {
+ if bytes.Equal(old, new) {
+ return nil
+ }
+ x := lines(old)
+ y := lines(new)
+
+ // Print diff header.
+ var out bytes.Buffer
+ fmt.Fprintf(&out, "diff %s %s\n", oldName, newName)
+ fmt.Fprintf(&out, "--- %s\n", oldName)
+ fmt.Fprintf(&out, "+++ %s\n", newName)
+
+ // Loop over matches to consider,
+ // expanding each match to include surrounding lines,
+ // and then printing diff chunks.
+ // To avoid setup/teardown cases outside the loop,
+ // tgs returns a leading {0,0} and trailing {len(x), len(y)} pair
+ // in the sequence of matches.
+ var (
+ done pair // printed up to x[:done.x] and y[:done.y]
+ chunk pair // start lines of current chunk
+ count pair // number of lines from each side in current chunk
+ ctext []string // lines for current chunk
+ )
+ for _, m := range tgs(x, y) {
+ if m.x < done.x {
+ // Already handled scanning forward from earlier match.
+ continue
+ }
+
+ // Expand matching lines as far as possible,
+ // establishing that x[start.x:end.x] == y[start.y:end.y].
+ // Note that on the first (or last) iteration we may (or definitely do)
+ // have an empty match: start.x==end.x and start.y==end.y.
+ start := m
+ for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] {
+ start.x--
+ start.y--
+ }
+ end := m
+ for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] {
+ end.x++
+ end.y++
+ }
+
+ // Emit the mismatched lines before start into this chunk.
+ // (No effect on first sentinel iteration, when start = {0,0}.)
+ for _, s := range x[done.x:start.x] {
+ ctext = append(ctext, "-"+s)
+ count.x++
+ }
+ for _, s := range y[done.y:start.y] {
+ ctext = append(ctext, "+"+s)
+ count.y++
+ }
+
+ // If we're not at EOF and have too few common lines,
+ // the chunk includes all the common lines and continues.
+ const C = 3 // number of context lines
+ if (end.x < len(x) || end.y < len(y)) &&
+ (end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) {
+ for _, s := range x[start.x:end.x] {
+ ctext = append(ctext, " "+s)
+ count.x++
+ count.y++
+ }
+ done = end
+ continue
+ }
+
+ // End chunk with common lines for context.
+ if len(ctext) > 0 {
+ n := end.x - start.x
+ if n > C {
+ n = C
+ }
+ for _, s := range x[start.x : start.x+n] {
+ ctext = append(ctext, " "+s)
+ count.x++
+ count.y++
+ }
+ done = pair{start.x + n, start.y + n}
+
+ // Format and emit chunk.
+ // Convert line numbers to 1-indexed.
+ // Special case: empty file shows up as 0,0 not 1,0.
+ if count.x > 0 {
+ chunk.x++
+ }
+ if count.y > 0 {
+ chunk.y++
+ }
+ fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y)
+ for _, s := range ctext {
+ out.WriteString(s)
+ }
+ count.x = 0
+ count.y = 0
+ ctext = ctext[:0]
+ }
+
+ // If we reached EOF, we're done.
+ if end.x >= len(x) && end.y >= len(y) {
+ break
+ }
+
+ // Otherwise start a new chunk.
+ chunk = pair{end.x - C, end.y - C}
+ for _, s := range x[chunk.x:end.x] {
+ ctext = append(ctext, " "+s)
+ count.x++
+ count.y++
+ }
+ done = end
+ }
+
+ return out.Bytes()
+}
+
+// lines returns the lines in the file x, including newlines.
+// If the file does not end in a newline, one is supplied
+// along with a warning about the missing newline.
+func lines(x []byte) []string {
+ l := strings.SplitAfter(string(x), "\n")
+ if l[len(l)-1] == "" {
+ l = l[:len(l)-1]
+ } else {
+ // Treat last line as having a message about the missing newline attached,
+ // using the same text as BSD/GNU diff (including the leading backslash).
+ l[len(l)-1] += "\n\\ No newline at end of file\n"
+ }
+ return l
+}
+
+// tgs returns the pairs of indexes of the longest common subsequence
+// of unique lines in x and y, where a unique line is one that appears
+// once in x and once in y.
+//
+// The longest common subsequence algorithm is as described in
+// Thomas G. Szymanski, “A Special Case of the Maximal Common
+// Subsequence Problem,” Princeton TR #170 (January 1975),
+// available at https://research.swtch.com/tgs170.pdf.
+func tgs(x, y []string) []pair {
+ // Count the number of times each string appears in x and y.
+ // We only care about 0, 1, many, counted as 0, -1, -2
+ // for the x side and 0, -4, -8 for the y side.
+ // Using negative numbers now lets us distinguish positive line numbers later.
+ m := make(map[string]int)
+ for _, s := range x {
+ if c := m[s]; c > -2 {
+ m[s] = c - 1
+ }
+ }
+ for _, s := range y {
+ if c := m[s]; c > -8 {
+ m[s] = c - 4
+ }
+ }
+
+ // Now unique strings can be identified by m[s] = -1+-4.
+ //
+ // Gather the indexes of those strings in x and y, building:
+ // xi[i] = increasing indexes of unique strings in x.
+ // yi[i] = increasing indexes of unique strings in y.
+ // inv[i] = index j such that x[xi[i]] = y[yi[j]].
+ var xi, yi, inv []int
+ for i, s := range y {
+ if m[s] == -1+-4 {
+ m[s] = len(yi)
+ yi = append(yi, i)
+ }
+ }
+ for i, s := range x {
+ if j, ok := m[s]; ok && j >= 0 {
+ xi = append(xi, i)
+ inv = append(inv, j)
+ }
+ }
+
+ // Apply Algorithm A from Szymanski's paper.
+ // In those terms, A = J = inv and B = [0, n).
+ // We add sentinel pairs {0,0}, and {len(x),len(y)}
+ // to the returned sequence, to help the processing loop.
+ J := inv
+ n := len(xi)
+ T := make([]int, n)
+ L := make([]int, n)
+ for i := range T {
+ T[i] = n + 1
+ }
+ for i := 0; i < n; i++ {
+ k := sort.Search(n, func(k int) bool {
+ return T[k] >= J[i]
+ })
+ T[k] = J[i]
+ L[i] = k + 1
+ }
+ k := 0
+ for _, v := range L {
+ if k < v {
+ k = v
+ }
+ }
+ seq := make([]pair, 2+k)
+ seq[1+k] = pair{len(x), len(y)} // sentinel at end
+ lastj := n
+ for i := n - 1; i >= 0; i-- {
+ if L[i] == k && J[i] < lastj {
+ seq[k] = pair{xi[i], yi[J[i]]}
+ k--
+ }
+ }
+ seq[0] = pair{0, 0} // sentinel at start
+ return seq
+}
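
Aside: a brief usage sketch of Diff above (illustrative only, since internal/diff is an internal package); identical inputs yield nil, otherwise a unified-style patch with "diff", "---", "+++" and "@@" headers is returned:

package main

import (
	"fmt"
	"internal/diff"
)

func main() {
	before := []byte("a\nb\nc\n")
	after := []byte("a\nB\nc\n")
	// Prints an anchored unified diff of the two inputs.
	fmt.Printf("%s", diff.Diff("old", before, "new", after))
}
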
diff --git a/src/internal/diff/diff_test.go b/src/internal/diff/diff_test.go
new file mode 100644
index 0000000..37281c5
--- /dev/null
+++ b/src/internal/diff/diff_test.go
@@ -0,0 +1,43 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+ "bytes"
+ "internal/txtar"
+ "path/filepath"
+ "testing"
+)
+
+func clean(text []byte) []byte {
+ text = bytes.ReplaceAll(text, []byte("$\n"), []byte("\n"))
+ text = bytes.TrimSuffix(text, []byte("^D\n"))
+ return text
+}
+
+func Test(t *testing.T) {
+ files, _ := filepath.Glob("testdata/*.txt")
+ if len(files) == 0 {
+ t.Fatalf("no testdata")
+ }
+
+ for _, file := range files {
+ t.Run(filepath.Base(file), func(t *testing.T) {
+ a, err := txtar.ParseFile(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(a.Files) != 3 || a.Files[2].Name != "diff" {
+ t.Fatalf("%s: want three files, third named \"diff\"", file)
+ }
+ diffs := Diff(a.Files[0].Name, clean(a.Files[0].Data), a.Files[1].Name, clean(a.Files[1].Data))
+ want := clean(a.Files[2].Data)
+ if !bytes.Equal(diffs, want) {
+ t.Fatalf("%s: have:\n%s\nwant:\n%s\n%s", file,
+ diffs, want, Diff("have", diffs, "want", want))
+ }
+ })
+ }
+}
diff --git a/src/internal/diff/testdata/allnew.txt b/src/internal/diff/testdata/allnew.txt
new file mode 100644
index 0000000..8875649
--- /dev/null
+++ b/src/internal/diff/testdata/allnew.txt
@@ -0,0 +1,13 @@
+-- old --
+-- new --
+a
+b
+c
+-- diff --
+diff old new
+--- old
++++ new
+@@ -0,0 +1,3 @@
++a
++b
++c
diff --git a/src/internal/diff/testdata/allold.txt b/src/internal/diff/testdata/allold.txt
new file mode 100644
index 0000000..bcc9ac0
--- /dev/null
+++ b/src/internal/diff/testdata/allold.txt
@@ -0,0 +1,13 @@
+-- old --
+a
+b
+c
+-- new --
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,3 +0,0 @@
+-a
+-b
+-c
diff --git a/src/internal/diff/testdata/basic.txt b/src/internal/diff/testdata/basic.txt
new file mode 100644
index 0000000..d2565b5
--- /dev/null
+++ b/src/internal/diff/testdata/basic.txt
@@ -0,0 +1,35 @@
+Example from Hunt and McIlroy, “An Algorithm for Differential File Comparison.”
+https://www.cs.dartmouth.edu/~doug/diff.pdf
+
+-- old --
+a
+b
+c
+d
+e
+f
+g
+-- new --
+w
+a
+b
+x
+y
+z
+e
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,7 +1,7 @@
++w
+ a
+ b
+-c
+-d
++x
++y
++z
+ e
+-f
+-g
diff --git a/src/internal/diff/testdata/dups.txt b/src/internal/diff/testdata/dups.txt
new file mode 100644
index 0000000..d10524d
--- /dev/null
+++ b/src/internal/diff/testdata/dups.txt
@@ -0,0 +1,40 @@
+-- old --
+a
+
+b
+
+c
+
+d
+
+e
+
+f
+-- new --
+a
+
+B
+
+C
+
+d
+
+e
+
+f
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,8 +1,8 @@
+ a
+ $
+-b
+-
+-c
++B
++
++C
+ $
+ d
+ $
diff --git a/src/internal/diff/testdata/end.txt b/src/internal/diff/testdata/end.txt
new file mode 100644
index 0000000..158637c
--- /dev/null
+++ b/src/internal/diff/testdata/end.txt
@@ -0,0 +1,38 @@
+-- old --
+1
+2
+3
+4
+5
+6
+7
+eight
+nine
+ten
+eleven
+-- new --
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+-- diff --
+diff old new
+--- old
++++ new
+@@ -5,7 +5,6 @@
+ 5
+ 6
+ 7
+-eight
+-nine
+-ten
+-eleven
++8
++9
++10
diff --git a/src/internal/diff/testdata/eof.txt b/src/internal/diff/testdata/eof.txt
new file mode 100644
index 0000000..5dc145c
--- /dev/null
+++ b/src/internal/diff/testdata/eof.txt
@@ -0,0 +1,9 @@
+-- old --
+a
+b
+c^D
+-- new --
+a
+b
+c^D
+-- diff --
diff --git a/src/internal/diff/testdata/eof1.txt b/src/internal/diff/testdata/eof1.txt
new file mode 100644
index 0000000..1ebf621
--- /dev/null
+++ b/src/internal/diff/testdata/eof1.txt
@@ -0,0 +1,18 @@
+-- old --
+a
+b
+c
+-- new --
+a
+b
+c^D
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,3 +1,3 @@
+ a
+ b
+-c
++c
+\ No newline at end of file
diff --git a/src/internal/diff/testdata/eof2.txt b/src/internal/diff/testdata/eof2.txt
new file mode 100644
index 0000000..047705e
--- /dev/null
+++ b/src/internal/diff/testdata/eof2.txt
@@ -0,0 +1,18 @@
+-- old --
+a
+b
+c^D
+-- new --
+a
+b
+c
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,3 +1,3 @@
+ a
+ b
+-c
+\ No newline at end of file
++c
diff --git a/src/internal/diff/testdata/long.txt b/src/internal/diff/testdata/long.txt
new file mode 100644
index 0000000..3fc99f7
--- /dev/null
+++ b/src/internal/diff/testdata/long.txt
@@ -0,0 +1,62 @@
+-- old --
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+14½
+15
+16
+17
+18
+19
+20
+-- new --
+1
+2
+3
+4
+5
+6
+8
+9
+10
+11
+12
+13
+14
+17
+18
+19
+20
+-- diff --
+diff old new
+--- old
++++ new
+@@ -4,7 +4,6 @@
+ 4
+ 5
+ 6
+-7
+ 8
+ 9
+ 10
+@@ -12,9 +11,6 @@
+ 12
+ 13
+ 14
+-14½
+-15
+-16
+ 17
+ 18
+ 19
diff --git a/src/internal/diff/testdata/same.txt b/src/internal/diff/testdata/same.txt
new file mode 100644
index 0000000..86b1100
--- /dev/null
+++ b/src/internal/diff/testdata/same.txt
@@ -0,0 +1,5 @@
+-- old --
+hello world
+-- new --
+hello world
+-- diff --
diff --git a/src/internal/diff/testdata/start.txt b/src/internal/diff/testdata/start.txt
new file mode 100644
index 0000000..217b2fd
--- /dev/null
+++ b/src/internal/diff/testdata/start.txt
@@ -0,0 +1,34 @@
+-- old --
+e
+pi
+4
+5
+6
+7
+8
+9
+10
+-- new --
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,5 +1,6 @@
+-e
+-pi
++1
++2
++3
+ 4
+ 5
+ 6
diff --git a/src/internal/diff/testdata/triv.txt b/src/internal/diff/testdata/triv.txt
new file mode 100644
index 0000000..ab5759f
--- /dev/null
+++ b/src/internal/diff/testdata/triv.txt
@@ -0,0 +1,40 @@
+Another example from Hunt and McIlroy,
+“An Algorithm for Differential File Comparison.”
+https://www.cs.dartmouth.edu/~doug/diff.pdf
+
+Anchored diff gives up on finding anything,
+since there are no unique lines.
+
+-- old --
+a
+b
+c
+a
+b
+b
+a
+-- new --
+c
+a
+b
+a
+b
+c
+-- diff --
+diff old new
+--- old
++++ new
+@@ -1,7 +1,6 @@
+-a
+-b
+-c
+-a
+-b
+-b
+-a
++c
++a
++b
++a
++b
++c
diff --git a/src/internal/fmtsort/export_test.go b/src/internal/fmtsort/export_test.go
new file mode 100644
index 0000000..25cbb5d
--- /dev/null
+++ b/src/internal/fmtsort/export_test.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fmtsort
+
+import "reflect"
+
+func Compare(a, b reflect.Value) int {
+ return compare(a, b)
+}
diff --git a/src/internal/fmtsort/sort.go b/src/internal/fmtsort/sort.go
new file mode 100644
index 0000000..278a89b
--- /dev/null
+++ b/src/internal/fmtsort/sort.go
@@ -0,0 +1,219 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fmtsort provides a general stable ordering mechanism
+// for maps, on behalf of the fmt and text/template packages.
+// It is not guaranteed to be efficient and works only for types
+// that are valid map keys.
+package fmtsort
+
+import (
+ "reflect"
+ "sort"
+)
+
+// Note: Throughout this package we avoid calling reflect.Value.Interface as
+// it is not always legal to do so and it's easier to avoid the issue than to face it.
+
+// SortedMap represents a map's keys and values. The keys and values are
+// aligned in index order: Value[i] is the value in the map corresponding to Key[i].
+type SortedMap struct {
+ Key []reflect.Value
+ Value []reflect.Value
+}
+
+func (o *SortedMap) Len() int { return len(o.Key) }
+func (o *SortedMap) Less(i, j int) bool { return compare(o.Key[i], o.Key[j]) < 0 }
+func (o *SortedMap) Swap(i, j int) {
+ o.Key[i], o.Key[j] = o.Key[j], o.Key[i]
+ o.Value[i], o.Value[j] = o.Value[j], o.Value[i]
+}
+
+// Sort accepts a map and returns a SortedMap that has the same keys and
+// values but in a stable sorted order according to the keys, modulo issues
+// raised by unorderable key values such as NaNs.
+//
+// The ordering rules are more general than with Go's < operator:
+//
+// - when applicable, nil compares low
+// - ints, floats, and strings order by <
+// - NaN compares less than non-NaN floats
+// - bool compares false before true
+// - complex compares real, then imag
+// - pointers compare by machine address
+// - channel values compare by machine address
+// - structs compare each field in turn
+// - arrays compare each element in turn.
+// Otherwise identical arrays compare by length.
+// - interface values compare first by reflect.Type describing the concrete type
+// and then by concrete value as described in the previous rules.
+func Sort(mapValue reflect.Value) *SortedMap {
+ if mapValue.Type().Kind() != reflect.Map {
+ return nil
+ }
+ // Note: this code is arranged to not panic even in the presence
+ // of a concurrent map update. The runtime is responsible for
+ // yelling loudly if that happens. See issue 33275.
+ n := mapValue.Len()
+ key := make([]reflect.Value, 0, n)
+ value := make([]reflect.Value, 0, n)
+ iter := mapValue.MapRange()
+ for iter.Next() {
+ key = append(key, iter.Key())
+ value = append(value, iter.Value())
+ }
+ sorted := &SortedMap{
+ Key: key,
+ Value: value,
+ }
+ sort.Stable(sorted)
+ return sorted
+}
+
+// compare compares two values of the same type. It returns
+// -1 if a < b, 0 if a == b, and 1 if a > b.
+// If the types differ, it returns -1.
+// See the comment on Sort for the comparison rules.
+func compare(aVal, bVal reflect.Value) int {
+ aType, bType := aVal.Type(), bVal.Type()
+ if aType != bType {
+ return -1 // No good answer possible, but don't return 0: they're not equal.
+ }
+ switch aVal.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ a, b := aVal.Int(), bVal.Int()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ a, b := aVal.Uint(), bVal.Uint()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.String:
+ a, b := aVal.String(), bVal.String()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Float32, reflect.Float64:
+ return floatCompare(aVal.Float(), bVal.Float())
+ case reflect.Complex64, reflect.Complex128:
+ a, b := aVal.Complex(), bVal.Complex()
+ if c := floatCompare(real(a), real(b)); c != 0 {
+ return c
+ }
+ return floatCompare(imag(a), imag(b))
+ case reflect.Bool:
+ a, b := aVal.Bool(), bVal.Bool()
+ switch {
+ case a == b:
+ return 0
+ case a:
+ return 1
+ default:
+ return -1
+ }
+ case reflect.Pointer, reflect.UnsafePointer:
+ a, b := aVal.Pointer(), bVal.Pointer()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Chan:
+ if c, ok := nilCompare(aVal, bVal); ok {
+ return c
+ }
+ ap, bp := aVal.Pointer(), bVal.Pointer()
+ switch {
+ case ap < bp:
+ return -1
+ case ap > bp:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Struct:
+ for i := 0; i < aVal.NumField(); i++ {
+ if c := compare(aVal.Field(i), bVal.Field(i)); c != 0 {
+ return c
+ }
+ }
+ return 0
+ case reflect.Array:
+ for i := 0; i < aVal.Len(); i++ {
+ if c := compare(aVal.Index(i), bVal.Index(i)); c != 0 {
+ return c
+ }
+ }
+ return 0
+ case reflect.Interface:
+ if c, ok := nilCompare(aVal, bVal); ok {
+ return c
+ }
+ c := compare(reflect.ValueOf(aVal.Elem().Type()), reflect.ValueOf(bVal.Elem().Type()))
+ if c != 0 {
+ return c
+ }
+ return compare(aVal.Elem(), bVal.Elem())
+ default:
+ // Certain types cannot appear as keys (maps, funcs, slices), but be explicit.
+ panic("bad type in compare: " + aType.String())
+ }
+}
+
+// nilCompare checks whether either value is nil. If not, the boolean is false.
+// If either value is nil, the boolean is true and the integer is the comparison
+// value. The comparison is defined to be 0 if both are nil, otherwise the one
+// nil value compares low. Both arguments must represent a chan, func,
+// interface, map, pointer, or slice.
+func nilCompare(aVal, bVal reflect.Value) (int, bool) {
+ if aVal.IsNil() {
+ if bVal.IsNil() {
+ return 0, true
+ }
+ return -1, true
+ }
+ if bVal.IsNil() {
+ return 1, true
+ }
+ return 0, false
+}
+
+// floatCompare compares two floating-point values. NaNs compare low.
+func floatCompare(a, b float64) int {
+ switch {
+ case isNaN(a):
+ return -1 // No good answer if b is a NaN so don't bother checking.
+ case isNaN(b):
+ return 1
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ }
+ return 0
+}
+
+func isNaN(a float64) bool {
+ return a != a
+}
diff --git a/src/internal/fmtsort/sort_test.go b/src/internal/fmtsort/sort_test.go
new file mode 100644
index 0000000..cddcf70
--- /dev/null
+++ b/src/internal/fmtsort/sort_test.go
@@ -0,0 +1,279 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fmtsort_test
+
+import (
+ "fmt"
+ "internal/fmtsort"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "unsafe"
+)
+
+var compareTests = [][]reflect.Value{
+ ct(reflect.TypeOf(int(0)), -1, 0, 1),
+ ct(reflect.TypeOf(int8(0)), -1, 0, 1),
+ ct(reflect.TypeOf(int16(0)), -1, 0, 1),
+ ct(reflect.TypeOf(int32(0)), -1, 0, 1),
+ ct(reflect.TypeOf(int64(0)), -1, 0, 1),
+ ct(reflect.TypeOf(uint(0)), 0, 1, 5),
+ ct(reflect.TypeOf(uint8(0)), 0, 1, 5),
+ ct(reflect.TypeOf(uint16(0)), 0, 1, 5),
+ ct(reflect.TypeOf(uint32(0)), 0, 1, 5),
+ ct(reflect.TypeOf(uint64(0)), 0, 1, 5),
+ ct(reflect.TypeOf(uintptr(0)), 0, 1, 5),
+ ct(reflect.TypeOf(string("")), "", "a", "ab"),
+ ct(reflect.TypeOf(float32(0)), math.NaN(), math.Inf(-1), -1e10, 0, 1e10, math.Inf(1)),
+ ct(reflect.TypeOf(float64(0)), math.NaN(), math.Inf(-1), -1e10, 0, 1e10, math.Inf(1)),
+ ct(reflect.TypeOf(complex64(0+1i)), -1-1i, -1+0i, -1+1i, 0-1i, 0+0i, 0+1i, 1-1i, 1+0i, 1+1i),
+ ct(reflect.TypeOf(complex128(0+1i)), -1-1i, -1+0i, -1+1i, 0-1i, 0+0i, 0+1i, 1-1i, 1+0i, 1+1i),
+ ct(reflect.TypeOf(false), false, true),
+ ct(reflect.TypeOf(&ints[0]), &ints[0], &ints[1], &ints[2]),
+ ct(reflect.TypeOf(unsafe.Pointer(&ints[0])), unsafe.Pointer(&ints[0]), unsafe.Pointer(&ints[1]), unsafe.Pointer(&ints[2])),
+ ct(reflect.TypeOf(chans[0]), chans[0], chans[1], chans[2]),
+ ct(reflect.TypeOf(toy{}), toy{0, 1}, toy{0, 2}, toy{1, -1}, toy{1, 1}),
+ ct(reflect.TypeOf([2]int{}), [2]int{1, 1}, [2]int{1, 2}, [2]int{2, 0}),
+ ct(reflect.TypeOf(any(any(0))), iFace, 1, 2, 3),
+}
+
+var iFace any
+
+func ct(typ reflect.Type, args ...any) []reflect.Value {
+ value := make([]reflect.Value, len(args))
+ for i, v := range args {
+ x := reflect.ValueOf(v)
+ if !x.IsValid() { // Make it a typed nil.
+ x = reflect.Zero(typ)
+ } else {
+ x = x.Convert(typ)
+ }
+ value[i] = x
+ }
+ return value
+}
+
+func TestCompare(t *testing.T) {
+ for _, test := range compareTests {
+ for i, v0 := range test {
+ for j, v1 := range test {
+ c := fmtsort.Compare(v0, v1)
+ var expect int
+ switch {
+ case i == j:
+ expect = 0
+ // NaNs are tricky.
+ if typ := v0.Type(); (typ.Kind() == reflect.Float32 || typ.Kind() == reflect.Float64) && math.IsNaN(v0.Float()) {
+ expect = -1
+ }
+ case i < j:
+ expect = -1
+ case i > j:
+ expect = 1
+ }
+ if c != expect {
+ t.Errorf("%s: compare(%v,%v)=%d; expect %d", v0.Type(), v0, v1, c, expect)
+ }
+ }
+ }
+ }
+}
+
+type sortTest struct {
+ data any // Always a map.
+ print string // Printed result using our custom printer.
+}
+
+var sortTests = []sortTest{
+ {
+ map[int]string{7: "bar", -3: "foo"},
+ "-3:foo 7:bar",
+ },
+ {
+ map[uint8]string{7: "bar", 3: "foo"},
+ "3:foo 7:bar",
+ },
+ {
+ map[string]string{"7": "bar", "3": "foo"},
+ "3:foo 7:bar",
+ },
+ {
+ map[float64]string{7: "bar", -3: "foo", math.NaN(): "nan", math.Inf(0): "inf"},
+ "NaN:nan -3:foo 7:bar +Inf:inf",
+ },
+ {
+ map[complex128]string{7 + 2i: "bar2", 7 + 1i: "bar", -3: "foo", complex(math.NaN(), 0i): "nan", complex(math.Inf(0), 0i): "inf"},
+ "(NaN+0i):nan (-3+0i):foo (7+1i):bar (7+2i):bar2 (+Inf+0i):inf",
+ },
+ {
+ map[bool]string{true: "true", false: "false"},
+ "false:false true:true",
+ },
+ {
+ chanMap(),
+ "CHAN0:0 CHAN1:1 CHAN2:2",
+ },
+ {
+ pointerMap(),
+ "PTR0:0 PTR1:1 PTR2:2",
+ },
+ {
+ unsafePointerMap(),
+ "UNSAFEPTR0:0 UNSAFEPTR1:1 UNSAFEPTR2:2",
+ },
+ {
+ map[toy]string{{7, 2}: "72", {7, 1}: "71", {3, 4}: "34"},
+ "{3 4}:34 {7 1}:71 {7 2}:72",
+ },
+ {
+ map[[2]int]string{{7, 2}: "72", {7, 1}: "71", {3, 4}: "34"},
+ "[3 4]:34 [7 1]:71 [7 2]:72",
+ },
+}
+
+func sprint(data any) string {
+ om := fmtsort.Sort(reflect.ValueOf(data))
+ if om == nil {
+ return "nil"
+ }
+ b := new(strings.Builder)
+ for i, key := range om.Key {
+ if i > 0 {
+ b.WriteRune(' ')
+ }
+ b.WriteString(sprintKey(key))
+ b.WriteRune(':')
+ fmt.Fprint(b, om.Value[i])
+ }
+ return b.String()
+}
+
+// sprintKey formats a reflect.Value but gives reproducible values for some
+// problematic types such as pointers. Note that it only does special handling
+// for the troublesome types used in the test cases; it is not a general
+// printer.
+func sprintKey(key reflect.Value) string {
+ switch str := key.Type().String(); str {
+ case "*int":
+ ptr := key.Interface().(*int)
+ for i := range ints {
+ if ptr == &ints[i] {
+ return fmt.Sprintf("PTR%d", i)
+ }
+ }
+ return "PTR???"
+ case "unsafe.Pointer":
+ ptr := key.Interface().(unsafe.Pointer)
+ for i := range ints {
+ if ptr == unsafe.Pointer(&ints[i]) {
+ return fmt.Sprintf("UNSAFEPTR%d", i)
+ }
+ }
+ return "UNSAFEPTR???"
+ case "chan int":
+ c := key.Interface().(chan int)
+ for i := range chans {
+ if c == chans[i] {
+ return fmt.Sprintf("CHAN%d", i)
+ }
+ }
+ return "CHAN???"
+ default:
+ return fmt.Sprint(key)
+ }
+}
+
+var (
+ ints [3]int
+ chans = makeChans()
+)
+
+func makeChans() []chan int {
+ cs := []chan int{make(chan int), make(chan int), make(chan int)}
+ // Order channels by address. See issue #49431.
+ // TODO: pin these pointers once pinning is available (#46787).
+ sort.Slice(cs, func(i, j int) bool {
+ return uintptr(reflect.ValueOf(cs[i]).UnsafePointer()) < uintptr(reflect.ValueOf(cs[j]).UnsafePointer())
+ })
+ return cs
+}
+
+func pointerMap() map[*int]string {
+ m := make(map[*int]string)
+ for i := 2; i >= 0; i-- {
+ m[&ints[i]] = fmt.Sprint(i)
+ }
+ return m
+}
+
+func unsafePointerMap() map[unsafe.Pointer]string {
+ m := make(map[unsafe.Pointer]string)
+ for i := 2; i >= 0; i-- {
+ m[unsafe.Pointer(&ints[i])] = fmt.Sprint(i)
+ }
+ return m
+}
+
+func chanMap() map[chan int]string {
+ m := make(map[chan int]string)
+ for i := 2; i >= 0; i-- {
+ m[chans[i]] = fmt.Sprint(i)
+ }
+ return m
+}
+
+type toy struct {
+ A int // Exported.
+ b int // Unexported.
+}
+
+func TestOrder(t *testing.T) {
+ for _, test := range sortTests {
+ got := sprint(test.data)
+ if got != test.print {
+ t.Errorf("%s: got %q, want %q", reflect.TypeOf(test.data), got, test.print)
+ }
+ }
+}
+
+func TestInterface(t *testing.T) {
+ // A map containing multiple concrete types should be sorted by type,
+ // then value. However, the relative ordering of types is unspecified,
+ // so test this by checking the presence of sorted subgroups.
+ m := map[any]string{
+ [2]int{1, 0}: "",
+ [2]int{0, 1}: "",
+ true: "",
+ false: "",
+ 3.1: "",
+ 2.1: "",
+ 1.1: "",
+ math.NaN(): "",
+ 3: "",
+ 2: "",
+ 1: "",
+ "c": "",
+ "b": "",
+ "a": "",
+ struct{ x, y int }{1, 0}: "",
+ struct{ x, y int }{0, 1}: "",
+ }
+ got := sprint(m)
+ typeGroups := []string{
+ "NaN: 1.1: 2.1: 3.1:", // float64
+ "false: true:", // bool
+ "1: 2: 3:", // int
+ "a: b: c:", // string
+ "[0 1]: [1 0]:", // [2]int
+ "{0 1}: {1 0}:", // struct{ x int; y int }
+ }
+ for _, g := range typeGroups {
+ if !strings.Contains(got, g) {
+ t.Errorf("sorted map should contain %q", g)
+ }
+ }
+}
diff --git a/src/internal/fuzz/counters_supported.go b/src/internal/fuzz/counters_supported.go
new file mode 100644
index 0000000..79e27d2
--- /dev/null
+++ b/src/internal/fuzz/counters_supported.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (darwin || linux || windows || freebsd) && (amd64 || arm64)
+
+package fuzz
+
+import (
+ "unsafe"
+)
+
+// coverage returns a []byte containing unique 8-bit counters for each edge of
+// the instrumented source code. This coverage data will only be generated if
+// `-d=libfuzzer` is set at build time. This can be used to understand the code
+// coverage of a test execution.
+func coverage() []byte {
+ addr := unsafe.Pointer(&_counters)
+ size := uintptr(unsafe.Pointer(&_ecounters)) - uintptr(addr)
+ return unsafe.Slice((*byte)(addr), int(size))
+}
diff --git a/src/internal/fuzz/counters_unsupported.go b/src/internal/fuzz/counters_unsupported.go
new file mode 100644
index 0000000..287bb4b
--- /dev/null
+++ b/src/internal/fuzz/counters_unsupported.go
@@ -0,0 +1,24 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: expand the set of supported platforms, with testing. Nothing about
+// the instrumentation is OS specific, but only amd64 and arm64 are
+// supported in the runtime. See src/runtime/libfuzzer*.
+//
+// If you update this constraint, also update internal/platform.FuzzInstrumented.
+//
+//go:build !((darwin || linux || windows || freebsd) && (amd64 || arm64))
+
+package fuzz
+
+// TODO(#48504): re-enable on platforms where instrumentation works.
+// In theory, we shouldn't need this file at all: if the binary was built
+// without coverage, then _counters and _ecounters should have the same address.
+// However, this caused an init failure on aix/ppc64, so it's disabled here.
+
+// coverage returns a []byte containing unique 8-bit counters for each edge of
+// the instrumented source code. This coverage data will only be generated if
+// `-d=libfuzzer` is set at build time. This can be used to understand the code
+// coverage of a test execution.
+func coverage() []byte { return nil }
diff --git a/src/internal/fuzz/coverage.go b/src/internal/fuzz/coverage.go
new file mode 100644
index 0000000..0c5e17e
--- /dev/null
+++ b/src/internal/fuzz/coverage.go
@@ -0,0 +1,107 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "fmt"
+ "math/bits"
+)
+
+// ResetCoverage sets all of the counters for each edge of the instrumented
+// source code to 0.
+func ResetCoverage() {
+ cov := coverage()
+ for i := range cov {
+ cov[i] = 0
+ }
+}
+
+// SnapshotCoverage copies the current counter values into coverageSnapshot,
+// preserving them for later inspection. SnapshotCoverage also rounds each
+// counter down to the nearest power of two. This lets the coordinator store
+// multiple values for each counter by OR'ing them together.
+func SnapshotCoverage() {
+ cov := coverage()
+ for i, b := range cov {
+ b |= b >> 1
+ b |= b >> 2
+ b |= b >> 4
+ b -= b >> 1
+ coverageSnapshot[i] = b
+ }
+}
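The four shift-and-subtract lines in SnapshotCoverage are a branch-free way of rounding an 8-bit counter down to a power of two (or to zero): the ORs smear the highest set bit into every lower position, and the final subtraction keeps only that bit. Below is a minimal standalone sketch of the same trick, outside the fuzz package and with a hypothetical helper name:

package main

import "fmt"

// roundDownPow2 mirrors the bit trick in SnapshotCoverage: it returns the
// largest power of two that is <= b, or 0 when b is 0.
func roundDownPow2(b byte) byte {
	b |= b >> 1 // after these ORs, every bit below the highest set bit is set
	b |= b >> 2
	b |= b >> 4
	return b - b>>1 // subtracting the lower half keeps only the highest bit
}

func main() {
	for _, c := range []byte{0, 1, 3, 5, 12, 100, 255} {
		fmt.Printf("%3d -> %3d\n", c, roundDownPow2(c))
	}
	// prints 0->0, 1->1, 3->2, 5->4, 12->8, 100->64, 255->128
}

Because every snapshot byte is a power of two (or zero), the coordinator can OR many snapshots together and the resulting mask still records exactly which 1<<n ranges of hit counts have been observed.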
+
+// diffCoverage returns a set of bits set in snapshot but not in base.
+// If there are no new bits set, diffCoverage returns nil.
+func diffCoverage(base, snapshot []byte) []byte {
+ if len(base) != len(snapshot) {
+ panic(fmt.Sprintf("the number of coverage bits changed: before=%d, after=%d", len(base), len(snapshot)))
+ }
+ found := false
+ for i := range snapshot {
+ if snapshot[i]&^base[i] != 0 {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return nil
+ }
+ diff := make([]byte, len(snapshot))
+ for i := range diff {
+ diff[i] = snapshot[i] &^ base[i]
+ }
+ return diff
+}
+
+// countNewCoverageBits returns the number of bits set in snapshot that are not
+// set in base.
+func countNewCoverageBits(base, snapshot []byte) int {
+ n := 0
+ for i := range snapshot {
+ n += bits.OnesCount8(snapshot[i] &^ base[i])
+ }
+ return n
+}
+
+// isCoverageSubset returns true if all the base coverage bits are set in
+// snapshot.
+func isCoverageSubset(base, snapshot []byte) bool {
+ for i, v := range base {
+ if v&snapshot[i] != v {
+ return false
+ }
+ }
+ return true
+}
+
+// hasCoverageBit returns true if snapshot has at least one bit set that is
+// also set in base.
+func hasCoverageBit(base, snapshot []byte) bool {
+ for i := range snapshot {
+ if snapshot[i]&base[i] != 0 {
+ return true
+ }
+ }
+ return false
+}
+
+func countBits(cov []byte) int {
+ n := 0
+ for _, c := range cov {
+ n += bits.OnesCount8(c)
+ }
+ return n
+}
+
+var (
+ coverageEnabled = len(coverage()) > 0
+ coverageSnapshot = make([]byte, len(coverage()))
+
+ // _counters and _ecounters mark the start and end, respectively, of where
+ // the 8-bit coverage counters reside in memory. They're known to cmd/link,
+ // which specially assigns their addresses for this purpose.
+ _counters, _ecounters [0]byte
+)
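Taken together, the helpers in this file support a reset, run, snapshot, diff cycle for each input. The following is a rough sketch of that cycle, not actual worker code; runFuzzTarget and coordinatorMask are hypothetical stand-ins, and the calls assume they run inside this package:

ResetCoverage()      // zero every 8-bit edge counter
runFuzzTarget(input) // hypothetical: call the fuzz function once with this input
SnapshotCoverage()   // round the counters down to powers of two into coverageSnapshot
if diff := diffCoverage(coordinatorMask, coverageSnapshot); diff != nil {
	// this input reached hit-count buckets that coordinatorMask had not seen;
	// countBits(diff) reports how many new bits it contributed
}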
diff --git a/src/internal/fuzz/encoding.go b/src/internal/fuzz/encoding.go
new file mode 100644
index 0000000..270ef7a
--- /dev/null
+++ b/src/internal/fuzz/encoding.go
@@ -0,0 +1,361 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "math"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// encVersion1 will be the first line of a file with version 1 encoding.
+var encVersion1 = "go test fuzz v1"
+
+// marshalCorpusFile encodes an arbitrary number of arguments into the file format for the
+// corpus.
+func marshalCorpusFile(vals ...any) []byte {
+ if len(vals) == 0 {
+ panic("must have at least one value to marshal")
+ }
+ b := bytes.NewBuffer([]byte(encVersion1 + "\n"))
+ // TODO(katiehockman): keep uint8 and int32 encoding where applicable,
+ // instead of changing to byte and rune respectively.
+ for _, val := range vals {
+ switch t := val.(type) {
+ case int, int8, int16, int64, uint, uint16, uint32, uint64, bool:
+ fmt.Fprintf(b, "%T(%v)\n", t, t)
+ case float32:
+ if math.IsNaN(float64(t)) && math.Float32bits(t) != math.Float32bits(float32(math.NaN())) {
+ // We encode unusual NaNs as hex values, because that is how users are
+ // likely to encounter them in literature about floating-point encoding.
+ // This allows us to reproduce fuzz failures that depend on the specific
+ // NaN representation (for float32 there are about 2^24 possibilities!),
+ // not just the fact that the value is *a* NaN.
+ //
+ // Note that the specific value of float32(math.NaN()) can vary based on
+ // whether the architecture represents signaling NaNs using a low bit
+ // (as is common) or a high bit (as commonly implemented on MIPS
+ // hardware before around 2012). We believe that the increase in clarity
+ // from identifying "NaN" with math.NaN() is worth the slight ambiguity
+ // from a platform-dependent value.
+ fmt.Fprintf(b, "math.Float32frombits(0x%x)\n", math.Float32bits(t))
+ } else {
+ // We encode all other values — including the NaN value that is
+				// bitwise-identical to float32(math.NaN()) — using the default
+ // formatting, which is equivalent to strconv.FormatFloat with format
+ // 'g' and can be parsed by strconv.ParseFloat.
+ //
+ // For an ordinary floating-point number this format includes
+ // sufficiently many digits to reconstruct the exact value. For positive
+ // or negative infinity it is the string "+Inf" or "-Inf". For positive
+ // or negative zero it is "0" or "-0". For NaN, it is the string "NaN".
+ fmt.Fprintf(b, "%T(%v)\n", t, t)
+ }
+ case float64:
+ if math.IsNaN(t) && math.Float64bits(t) != math.Float64bits(math.NaN()) {
+ fmt.Fprintf(b, "math.Float64frombits(0x%x)\n", math.Float64bits(t))
+ } else {
+ fmt.Fprintf(b, "%T(%v)\n", t, t)
+ }
+ case string:
+ fmt.Fprintf(b, "string(%q)\n", t)
+ case rune: // int32
+ // Although rune and int32 are represented by the same type, only a subset
+ // of valid int32 values can be expressed as rune literals. Notably,
+ // negative numbers, surrogate halves, and values above unicode.MaxRune
+ // have no quoted representation.
+ //
+ // fmt with "%q" (and the corresponding functions in the strconv package)
+ // would quote out-of-range values to the Unicode replacement character
+ // instead of the original value (see https://go.dev/issue/51526), so
+ // they must be treated as int32 instead.
+ //
+ // We arbitrarily draw the line at UTF-8 validity, which biases toward the
+ // "rune" interpretation. (However, we accept either format as input.)
+ if utf8.ValidRune(t) {
+ fmt.Fprintf(b, "rune(%q)\n", t)
+ } else {
+ fmt.Fprintf(b, "int32(%v)\n", t)
+ }
+ case byte: // uint8
+ // For bytes, we arbitrarily prefer the character interpretation.
+ // (Every byte has a valid character encoding.)
+ fmt.Fprintf(b, "byte(%q)\n", t)
+ case []byte: // []uint8
+ fmt.Fprintf(b, "[]byte(%q)\n", t)
+ default:
+ panic(fmt.Sprintf("unsupported type: %T", t))
+ }
+ }
+ return b.Bytes()
+}
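For reference, a sketch of what marshalCorpusFile produces for a few ordinary values: the version line, then one value per line. The call below would live in a test in this package, and the expected output follows from the Fprintf verbs above:

b := marshalCorpusFile(int(42), "hi", []byte("go"), byte('A'), float32(2.5))
fmt.Print(string(b))
// go test fuzz v1
// int(42)
// string("hi")
// []byte("go")
// byte('A')
// float32(2.5)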
+
+// unmarshalCorpusFile decodes corpus bytes into their respective values.
+func unmarshalCorpusFile(b []byte) ([]any, error) {
+ if len(b) == 0 {
+ return nil, fmt.Errorf("cannot unmarshal empty string")
+ }
+ lines := bytes.Split(b, []byte("\n"))
+ if len(lines) < 2 {
+ return nil, fmt.Errorf("must include version and at least one value")
+ }
+ version := strings.TrimSuffix(string(lines[0]), "\r")
+ if version != encVersion1 {
+ return nil, fmt.Errorf("unknown encoding version: %s", version)
+ }
+ var vals []any
+ for _, line := range lines[1:] {
+ line = bytes.TrimSpace(line)
+ if len(line) == 0 {
+ continue
+ }
+ v, err := parseCorpusValue(line)
+ if err != nil {
+ return nil, fmt.Errorf("malformed line %q: %v", line, err)
+ }
+ vals = append(vals, v)
+ }
+ return vals, nil
+}
+
+func parseCorpusValue(line []byte) (any, error) {
+ fs := token.NewFileSet()
+ expr, err := parser.ParseExprFrom(fs, "(test)", line, 0)
+ if err != nil {
+ return nil, err
+ }
+ call, ok := expr.(*ast.CallExpr)
+ if !ok {
+ return nil, fmt.Errorf("expected call expression")
+ }
+ if len(call.Args) != 1 {
+ return nil, fmt.Errorf("expected call expression with 1 argument; got %d", len(call.Args))
+ }
+ arg := call.Args[0]
+
+ if arrayType, ok := call.Fun.(*ast.ArrayType); ok {
+ if arrayType.Len != nil {
+ return nil, fmt.Errorf("expected []byte or primitive type")
+ }
+ elt, ok := arrayType.Elt.(*ast.Ident)
+ if !ok || elt.Name != "byte" {
+ return nil, fmt.Errorf("expected []byte")
+ }
+ lit, ok := arg.(*ast.BasicLit)
+ if !ok || lit.Kind != token.STRING {
+ return nil, fmt.Errorf("string literal required for type []byte")
+ }
+ s, err := strconv.Unquote(lit.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(s), nil
+ }
+
+ var idType *ast.Ident
+ if selector, ok := call.Fun.(*ast.SelectorExpr); ok {
+ xIdent, ok := selector.X.(*ast.Ident)
+ if !ok || xIdent.Name != "math" {
+ return nil, fmt.Errorf("invalid selector type")
+ }
+ switch selector.Sel.Name {
+ case "Float64frombits":
+ idType = &ast.Ident{Name: "float64-bits"}
+ case "Float32frombits":
+ idType = &ast.Ident{Name: "float32-bits"}
+ default:
+ return nil, fmt.Errorf("invalid selector type")
+ }
+ } else {
+ idType, ok = call.Fun.(*ast.Ident)
+ if !ok {
+ return nil, fmt.Errorf("expected []byte or primitive type")
+ }
+ if idType.Name == "bool" {
+ id, ok := arg.(*ast.Ident)
+ if !ok {
+ return nil, fmt.Errorf("malformed bool")
+ }
+ if id.Name == "true" {
+ return true, nil
+ } else if id.Name == "false" {
+ return false, nil
+ } else {
+ return nil, fmt.Errorf("true or false required for type bool")
+ }
+ }
+ }
+
+ var (
+ val string
+ kind token.Token
+ )
+ if op, ok := arg.(*ast.UnaryExpr); ok {
+ switch lit := op.X.(type) {
+ case *ast.BasicLit:
+ if op.Op != token.SUB {
+ return nil, fmt.Errorf("unsupported operation on int/float: %v", op.Op)
+ }
+ // Special case for negative numbers.
+ val = op.Op.String() + lit.Value // e.g. "-" + "124"
+ kind = lit.Kind
+ case *ast.Ident:
+ if lit.Name != "Inf" {
+ return nil, fmt.Errorf("expected operation on int or float type")
+ }
+ if op.Op == token.SUB {
+ val = "-Inf"
+ } else {
+ val = "+Inf"
+ }
+ kind = token.FLOAT
+ default:
+ return nil, fmt.Errorf("expected operation on int or float type")
+ }
+ } else {
+ switch lit := arg.(type) {
+ case *ast.BasicLit:
+ val, kind = lit.Value, lit.Kind
+ case *ast.Ident:
+ if lit.Name != "NaN" {
+ return nil, fmt.Errorf("literal value required for primitive type")
+ }
+ val, kind = "NaN", token.FLOAT
+ default:
+ return nil, fmt.Errorf("literal value required for primitive type")
+ }
+ }
+
+ switch typ := idType.Name; typ {
+ case "string":
+ if kind != token.STRING {
+ return nil, fmt.Errorf("string literal value required for type string")
+ }
+ return strconv.Unquote(val)
+ case "byte", "rune":
+ if kind == token.INT {
+ switch typ {
+ case "rune":
+ return parseInt(val, typ)
+ case "byte":
+ return parseUint(val, typ)
+ }
+ }
+ if kind != token.CHAR {
+ return nil, fmt.Errorf("character literal required for byte/rune types")
+ }
+ n := len(val)
+ if n < 2 {
+ return nil, fmt.Errorf("malformed character literal, missing single quotes")
+ }
+ code, _, _, err := strconv.UnquoteChar(val[1:n-1], '\'')
+ if err != nil {
+ return nil, err
+ }
+ if typ == "rune" {
+ return code, nil
+ }
+ if code >= 256 {
+ return nil, fmt.Errorf("can only encode single byte to a byte type")
+ }
+ return byte(code), nil
+ case "int", "int8", "int16", "int32", "int64":
+ if kind != token.INT {
+ return nil, fmt.Errorf("integer literal required for int types")
+ }
+ return parseInt(val, typ)
+ case "uint", "uint8", "uint16", "uint32", "uint64":
+ if kind != token.INT {
+ return nil, fmt.Errorf("integer literal required for uint types")
+ }
+ return parseUint(val, typ)
+ case "float32":
+ if kind != token.FLOAT && kind != token.INT {
+ return nil, fmt.Errorf("float or integer literal required for float32 type")
+ }
+ v, err := strconv.ParseFloat(val, 32)
+ return float32(v), err
+ case "float64":
+ if kind != token.FLOAT && kind != token.INT {
+ return nil, fmt.Errorf("float or integer literal required for float64 type")
+ }
+ return strconv.ParseFloat(val, 64)
+ case "float32-bits":
+ if kind != token.INT {
+ return nil, fmt.Errorf("integer literal required for math.Float32frombits type")
+ }
+ bits, err := parseUint(val, "uint32")
+ if err != nil {
+ return nil, err
+ }
+ return math.Float32frombits(bits.(uint32)), nil
+ case "float64-bits":
+ if kind != token.FLOAT && kind != token.INT {
+ return nil, fmt.Errorf("integer literal required for math.Float64frombits type")
+ }
+ bits, err := parseUint(val, "uint64")
+ if err != nil {
+ return nil, err
+ }
+ return math.Float64frombits(bits.(uint64)), nil
+ default:
+ return nil, fmt.Errorf("expected []byte or primitive type")
+ }
+}
+
+// parseInt returns an integer of value val and type typ.
+func parseInt(val, typ string) (any, error) {
+ switch typ {
+ case "int":
+ // The int type may be either 32 or 64 bits. If 32, the fuzz tests in the
+ // corpus may include 64-bit values produced by fuzzing runs on 64-bit
+ // architectures. When running those tests, we implicitly wrap the values to
+ // fit in a regular int. (The test case is still “interesting”, even if the
+ // specific values of its inputs are platform-dependent.)
+ i, err := strconv.ParseInt(val, 0, 64)
+ return int(i), err
+ case "int8":
+ i, err := strconv.ParseInt(val, 0, 8)
+ return int8(i), err
+ case "int16":
+ i, err := strconv.ParseInt(val, 0, 16)
+ return int16(i), err
+ case "int32", "rune":
+ i, err := strconv.ParseInt(val, 0, 32)
+ return int32(i), err
+ case "int64":
+ return strconv.ParseInt(val, 0, 64)
+ default:
+ panic("unreachable")
+ }
+}
+
+// parseUint returns an unsigned integer of value val and type typ.
+func parseUint(val, typ string) (any, error) {
+ switch typ {
+ case "uint":
+ i, err := strconv.ParseUint(val, 0, 64)
+ return uint(i), err
+ case "uint8", "byte":
+ i, err := strconv.ParseUint(val, 0, 8)
+ return uint8(i), err
+ case "uint16":
+ i, err := strconv.ParseUint(val, 0, 16)
+ return uint16(i), err
+ case "uint32":
+ i, err := strconv.ParseUint(val, 0, 32)
+ return uint32(i), err
+ case "uint64":
+ return strconv.ParseUint(val, 0, 64)
+ default:
+ panic("unreachable")
+ }
+}
diff --git a/src/internal/fuzz/encoding_test.go b/src/internal/fuzz/encoding_test.go
new file mode 100644
index 0000000..6f6173d
--- /dev/null
+++ b/src/internal/fuzz/encoding_test.go
@@ -0,0 +1,409 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "math"
+ "strconv"
+ "testing"
+ "unicode"
+)
+
+func TestUnmarshalMarshal(t *testing.T) {
+ var tests = []struct {
+ desc string
+ in string
+ reject bool
+ want string // if different from in
+ }{
+ {
+ desc: "missing version",
+ in: "int(1234)",
+ reject: true,
+ },
+ {
+ desc: "malformed string",
+ in: `go test fuzz v1
+string("a"bcad")`,
+ reject: true,
+ },
+ {
+ desc: "empty value",
+ in: `go test fuzz v1
+int()`,
+ reject: true,
+ },
+ {
+ desc: "negative uint",
+ in: `go test fuzz v1
+uint(-32)`,
+ reject: true,
+ },
+ {
+ desc: "int8 too large",
+ in: `go test fuzz v1
+int8(1234456)`,
+ reject: true,
+ },
+ {
+ desc: "multiplication in int value",
+ in: `go test fuzz v1
+int(20*5)`,
+ reject: true,
+ },
+ {
+ desc: "double negation",
+ in: `go test fuzz v1
+int(--5)`,
+ reject: true,
+ },
+ {
+ desc: "malformed bool",
+ in: `go test fuzz v1
+bool(0)`,
+ reject: true,
+ },
+ {
+ desc: "malformed byte",
+ in: `go test fuzz v1
+byte('aa)`,
+ reject: true,
+ },
+ {
+ desc: "byte out of range",
+ in: `go test fuzz v1
+byte('☃')`,
+ reject: true,
+ },
+ {
+ desc: "extra newline",
+ in: `go test fuzz v1
+string("has extra newline")
+`,
+ want: `go test fuzz v1
+string("has extra newline")`,
+ },
+ {
+ desc: "trailing spaces",
+ in: `go test fuzz v1
+string("extra")
+[]byte("spacing")
+ `,
+ want: `go test fuzz v1
+string("extra")
+[]byte("spacing")`,
+ },
+ {
+ desc: "float types",
+ in: `go test fuzz v1
+float64(0)
+float32(0)`,
+ },
+ {
+ desc: "various types",
+ in: `go test fuzz v1
+int(-23)
+int8(-2)
+int64(2342425)
+uint(1)
+uint16(234)
+uint32(352342)
+uint64(123)
+rune('œ')
+byte('K')
+byte('ÿ')
+[]byte("hello¿")
+[]byte("a")
+bool(true)
+string("hello\\xbd\\xb2=\\xbc ⌘")
+float64(-12.5)
+float32(2.5)`,
+ },
+ {
+ desc: "float edge cases",
+ // The two IEEE 754 bit patterns used for the math.Float{64,32}frombits
+		// encodings are non-math.NaN quiet-NaN values. Since they are not equal
+ // to math.NaN(), they should be re-encoded to their bit patterns. They
+ // are, respectively:
+ // * math.Float64bits(math.NaN())+1
+ // * math.Float32bits(float32(math.NaN()))+1
+ in: `go test fuzz v1
+float32(-0)
+float64(-0)
+float32(+Inf)
+float32(-Inf)
+float32(NaN)
+float64(+Inf)
+float64(-Inf)
+float64(NaN)
+math.Float64frombits(0x7ff8000000000002)
+math.Float32frombits(0x7fc00001)`,
+ },
+ {
+ desc: "int variations",
+ // Although we arbitrarily choose default integer bases (0 or 16), we may
+ // want to change those arbitrary choices in the future and should not
+ // break the parser. Verify that integers in the opposite bases still
+ // parse correctly.
+ in: `go test fuzz v1
+int(0x0)
+int32(0x41)
+int64(0xfffffffff)
+uint32(0xcafef00d)
+uint64(0xffffffffffffffff)
+uint8(0b0000000)
+byte(0x0)
+byte('\000')
+byte('\u0000')
+byte('\'')
+math.Float64frombits(9221120237041090562)
+math.Float32frombits(2143289345)`,
+ want: `go test fuzz v1
+int(0)
+rune('A')
+int64(68719476735)
+uint32(3405705229)
+uint64(18446744073709551615)
+byte('\x00')
+byte('\x00')
+byte('\x00')
+byte('\x00')
+byte('\'')
+math.Float64frombits(0x7ff8000000000002)
+math.Float32frombits(0x7fc00001)`,
+ },
+ {
+ desc: "rune validation",
+ in: `go test fuzz v1
+rune(0)
+rune(0x41)
+rune(-1)
+rune(0xfffd)
+rune(0xd800)
+rune(0x10ffff)
+rune(0x110000)
+`,
+ want: `go test fuzz v1
+rune('\x00')
+rune('A')
+int32(-1)
+rune('�')
+int32(55296)
+rune('\U0010ffff')
+int32(1114112)`,
+ },
+ {
+ desc: "int overflow",
+ in: `go test fuzz v1
+int(0x7fffffffffffffff)
+uint(0xffffffffffffffff)`,
+ want: func() string {
+ switch strconv.IntSize {
+ case 32:
+ return `go test fuzz v1
+int(-1)
+uint(4294967295)`
+ case 64:
+ return `go test fuzz v1
+int(9223372036854775807)
+uint(18446744073709551615)`
+ default:
+ panic("unreachable")
+ }
+ }(),
+ },
+ {
+ desc: "windows new line",
+ in: "go test fuzz v1\r\nint(0)\r\n",
+ want: "go test fuzz v1\nint(0)",
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.desc, func(t *testing.T) {
+ vals, err := unmarshalCorpusFile([]byte(test.in))
+ if test.reject {
+ if err == nil {
+ t.Fatalf("unmarshal unexpected success")
+ }
+ return
+ }
+ if err != nil {
+ t.Fatalf("unmarshal unexpected error: %v", err)
+ }
+ newB := marshalCorpusFile(vals...)
+ if err != nil {
+ t.Fatalf("marshal unexpected error: %v", err)
+ }
+ if newB[len(newB)-1] != '\n' {
+ t.Error("didn't write final newline to corpus file")
+ }
+
+ want := test.want
+ if want == "" {
+ want = test.in
+ }
+ want += "\n"
+ got := string(newB)
+ if got != want {
+ t.Errorf("unexpected marshaled value\ngot:\n%s\nwant:\n%s", got, want)
+ }
+ })
+ }
+}
+
+// BenchmarkMarshalCorpusFile measures the time it takes to serialize byte
+// slices of various sizes to a corpus file. The slice contains a repeating
+// sequence of bytes 0-255 to mix escaped and non-escaped characters.
+func BenchmarkMarshalCorpusFile(b *testing.B) {
+ buf := make([]byte, 1024*1024)
+ for i := 0; i < len(buf); i++ {
+ buf[i] = byte(i)
+ }
+
+ for sz := 1; sz <= len(buf); sz <<= 1 {
+ sz := sz
+ b.Run(strconv.Itoa(sz), func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ b.SetBytes(int64(sz))
+ marshalCorpusFile(buf[:sz])
+ }
+ })
+ }
+}
+
+// BenchmarkUnmarshalCorpusFile measures the time it takes to deserialize
+// files encoding byte slices of various sizes. The slice contains a repeating
+// sequence of bytes 0-255 to mix escaped and non-escaped characters.
+func BenchmarkUnmarshalCorpusFile(b *testing.B) {
+ buf := make([]byte, 1024*1024)
+ for i := 0; i < len(buf); i++ {
+ buf[i] = byte(i)
+ }
+
+ for sz := 1; sz <= len(buf); sz <<= 1 {
+ sz := sz
+ data := marshalCorpusFile(buf[:sz])
+ b.Run(strconv.Itoa(sz), func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ b.SetBytes(int64(sz))
+ unmarshalCorpusFile(data)
+ }
+ })
+ }
+}
+
+func TestByteRoundTrip(t *testing.T) {
+ for x := 0; x < 256; x++ {
+ b1 := byte(x)
+ buf := marshalCorpusFile(b1)
+ vs, err := unmarshalCorpusFile(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b2 := vs[0].(byte)
+ if b2 != b1 {
+ t.Fatalf("unmarshaled %v, want %v:\n%s", b2, b1, buf)
+ }
+ }
+}
+
+func TestInt8RoundTrip(t *testing.T) {
+ for x := -128; x < 128; x++ {
+ i1 := int8(x)
+ buf := marshalCorpusFile(i1)
+ vs, err := unmarshalCorpusFile(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ i2 := vs[0].(int8)
+ if i2 != i1 {
+ t.Fatalf("unmarshaled %v, want %v:\n%s", i2, i1, buf)
+ }
+ }
+}
+
+func FuzzFloat64RoundTrip(f *testing.F) {
+ f.Add(math.Float64bits(0))
+ f.Add(math.Float64bits(math.Copysign(0, -1)))
+ f.Add(math.Float64bits(math.MaxFloat64))
+ f.Add(math.Float64bits(math.SmallestNonzeroFloat64))
+ f.Add(math.Float64bits(math.NaN()))
+ f.Add(uint64(0x7FF0000000000001)) // signaling NaN
+ f.Add(math.Float64bits(math.Inf(1)))
+ f.Add(math.Float64bits(math.Inf(-1)))
+
+ f.Fuzz(func(t *testing.T, u1 uint64) {
+ x1 := math.Float64frombits(u1)
+
+ b := marshalCorpusFile(x1)
+ t.Logf("marshaled math.Float64frombits(0x%x):\n%s", u1, b)
+
+ xs, err := unmarshalCorpusFile(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(xs) != 1 {
+ t.Fatalf("unmarshaled %d values", len(xs))
+ }
+ x2 := xs[0].(float64)
+ u2 := math.Float64bits(x2)
+ if u2 != u1 {
+ t.Errorf("unmarshaled %v (bits 0x%x)", x2, u2)
+ }
+ })
+}
+
+func FuzzRuneRoundTrip(f *testing.F) {
+ f.Add(rune(-1))
+ f.Add(rune(0xd800))
+ f.Add(rune(0xdfff))
+ f.Add(rune(unicode.ReplacementChar))
+ f.Add(rune(unicode.MaxASCII))
+ f.Add(rune(unicode.MaxLatin1))
+ f.Add(rune(unicode.MaxRune))
+ f.Add(rune(unicode.MaxRune + 1))
+ f.Add(rune(-0x80000000))
+ f.Add(rune(0x7fffffff))
+
+ f.Fuzz(func(t *testing.T, r1 rune) {
+ b := marshalCorpusFile(r1)
+ t.Logf("marshaled rune(0x%x):\n%s", r1, b)
+
+ rs, err := unmarshalCorpusFile(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(rs) != 1 {
+ t.Fatalf("unmarshaled %d values", len(rs))
+ }
+ r2 := rs[0].(rune)
+ if r2 != r1 {
+ t.Errorf("unmarshaled rune(0x%x)", r2)
+ }
+ })
+}
+
+func FuzzStringRoundTrip(f *testing.F) {
+ f.Add("")
+ f.Add("\x00")
+ f.Add(string([]rune{unicode.ReplacementChar}))
+
+ f.Fuzz(func(t *testing.T, s1 string) {
+ b := marshalCorpusFile(s1)
+ t.Logf("marshaled %q:\n%s", s1, b)
+
+ rs, err := unmarshalCorpusFile(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(rs) != 1 {
+ t.Fatalf("unmarshaled %d values", len(rs))
+ }
+ s2 := rs[0].(string)
+ if s2 != s1 {
+ t.Errorf("unmarshaled %q", s2)
+ }
+ })
+}
diff --git a/src/internal/fuzz/fuzz.go b/src/internal/fuzz/fuzz.go
new file mode 100644
index 0000000..fb4e1d3
--- /dev/null
+++ b/src/internal/fuzz/fuzz.go
@@ -0,0 +1,1102 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fuzz provides common fuzzing functionality for tests built with
+// "go test" and for programs that use fuzzing functionality in the testing
+// package.
+package fuzz
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "internal/godebug"
+ "io"
+ "math/bits"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// CoordinateFuzzingOpts is a set of arguments for CoordinateFuzzing.
+// The zero value is valid for each field unless specified otherwise.
+type CoordinateFuzzingOpts struct {
+ // Log is a writer for logging progress messages and warnings.
+ // If nil, io.Discard will be used instead.
+ Log io.Writer
+
+ // Timeout is the amount of wall clock time to spend fuzzing after the corpus
+ // has loaded. If zero, there will be no time limit.
+ Timeout time.Duration
+
+ // Limit is the number of random values to generate and test. If zero,
+ // there will be no limit on the number of generated values.
+ Limit int64
+
+ // MinimizeTimeout is the amount of wall clock time to spend minimizing
+ // after discovering a crasher. If zero, there will be no time limit. If
+ // MinimizeTimeout and MinimizeLimit are both zero, then minimization will
+ // be disabled.
+ MinimizeTimeout time.Duration
+
+ // MinimizeLimit is the maximum number of calls to the fuzz function to be
+ // made while minimizing after finding a crash. If zero, there will be no
+ // limit. Calls to the fuzz function made when minimizing also count toward
+ // Limit. If MinimizeTimeout and MinimizeLimit are both zero, then
+ // minimization will be disabled.
+ MinimizeLimit int64
+
+	// Parallel is the number of worker processes to run in parallel. If zero,
+ // CoordinateFuzzing will run GOMAXPROCS workers.
+ Parallel int
+
+ // Seed is a list of seed values added by the fuzz target with testing.F.Add
+ // and in testdata.
+ Seed []CorpusEntry
+
+ // Types is the list of types which make up a corpus entry.
+ // Types must be set and must match values in Seed.
+ Types []reflect.Type
+
+ // CorpusDir is a directory where files containing values that crash the
+ // code being tested may be written. CorpusDir must be set.
+ CorpusDir string
+
+ // CacheDir is a directory containing additional "interesting" values.
+ // The fuzzer may derive new values from these, and may write new values here.
+ CacheDir string
+}
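For orientation, a sketch of how a caller might populate these options before invoking CoordinateFuzzing below. In practice the caller is the testing package, since internal/fuzz cannot be imported by user code; ctx, cacheDir, and the directory layout are illustrative assumptions, and the snippet assumes it is written inside this package with os, time, reflect, and path/filepath imported:

opts := CoordinateFuzzingOpts{
	Log:             os.Stderr,
	Timeout:         30 * time.Second, // stop fuzzing after 30 seconds of wall clock time
	Parallel:        4,                // 0 would mean GOMAXPROCS workers
	MinimizeTimeout: time.Minute,      // spend at most a minute minimizing a crasher
	Types:           []reflect.Type{reflect.TypeOf([]byte(nil))},
	CorpusDir:       "testdata/fuzz/FuzzTarget",            // where crashers are written (assumed layout)
	CacheDir:        filepath.Join(cacheDir, "FuzzTarget"), // cacheDir is a stand-in for the GOCACHE fuzz cache
}
if err := CoordinateFuzzing(ctx, opts); err != nil {
	// a crasher was found or a worker failed; err carries the details
}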
+
+// CoordinateFuzzing creates several worker processes and communicates with
+// them to test random inputs that could trigger crashes and expose bugs.
+// The worker processes run the same binary in the same directory with the
+// same environment variables as the coordinator process. Workers also run
+// with the same arguments as the coordinator, except with the -test.fuzzworker
+// flag prepended to the argument list.
+//
+// If a crash occurs, the function will return an error containing information
+// about the crash, which can be reported to the user.
+func CoordinateFuzzing(ctx context.Context, opts CoordinateFuzzingOpts) (err error) {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ if opts.Log == nil {
+ opts.Log = io.Discard
+ }
+ if opts.Parallel == 0 {
+ opts.Parallel = runtime.GOMAXPROCS(0)
+ }
+ if opts.Limit > 0 && int64(opts.Parallel) > opts.Limit {
+ // Don't start more workers than we need.
+ opts.Parallel = int(opts.Limit)
+ }
+
+ c, err := newCoordinator(opts)
+ if err != nil {
+ return err
+ }
+
+ if opts.Timeout > 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, opts.Timeout)
+ defer cancel()
+ }
+
+ // fuzzCtx is used to stop workers, for example, after finding a crasher.
+ fuzzCtx, cancelWorkers := context.WithCancel(ctx)
+ defer cancelWorkers()
+ doneC := ctx.Done()
+
+ // stop is called when a worker encounters a fatal error.
+ var fuzzErr error
+ stopping := false
+ stop := func(err error) {
+ if shouldPrintDebugInfo() {
+ _, file, line, ok := runtime.Caller(1)
+ if ok {
+ c.debugLogf("stop called at %s:%d. stopping: %t", file, line, stopping)
+ } else {
+ c.debugLogf("stop called at unknown. stopping: %t", stopping)
+ }
+ }
+
+ if err == fuzzCtx.Err() || isInterruptError(err) {
+ // Suppress cancellation errors and terminations due to SIGINT.
+ // The messages are not helpful since either the user triggered the error
+ // (with ^C) or another more helpful message will be printed (a crasher).
+ err = nil
+ }
+ if err != nil && (fuzzErr == nil || fuzzErr == ctx.Err()) {
+ fuzzErr = err
+ }
+ if stopping {
+ return
+ }
+ stopping = true
+ cancelWorkers()
+ doneC = nil
+ }
+
+ // Ensure that any crash we find is written to the corpus, even if an error
+ // or interruption occurs while minimizing it.
+ crashWritten := false
+ defer func() {
+ if c.crashMinimizing == nil || crashWritten {
+ return
+ }
+ werr := writeToCorpus(&c.crashMinimizing.entry, opts.CorpusDir)
+ if werr != nil {
+ err = fmt.Errorf("%w\n%v", err, werr)
+ return
+ }
+ if err == nil {
+ err = &crashError{
+ path: c.crashMinimizing.entry.Path,
+ err: errors.New(c.crashMinimizing.crasherMsg),
+ }
+ }
+ }()
+
+ // Start workers.
+ // TODO(jayconrod): do we want to support fuzzing different binaries?
+ dir := "" // same as self
+ binPath := os.Args[0]
+ args := append([]string{"-test.fuzzworker"}, os.Args[1:]...)
+ env := os.Environ() // same as self
+
+ errC := make(chan error)
+ workers := make([]*worker, opts.Parallel)
+ for i := range workers {
+ var err error
+ workers[i], err = newWorker(c, dir, binPath, args, env)
+ if err != nil {
+ return err
+ }
+ }
+ for i := range workers {
+ w := workers[i]
+ go func() {
+ err := w.coordinate(fuzzCtx)
+ if fuzzCtx.Err() != nil || isInterruptError(err) {
+ err = nil
+ }
+ cleanErr := w.cleanup()
+ if err == nil {
+ err = cleanErr
+ }
+ errC <- err
+ }()
+ }
+
+ // Main event loop.
+ // Do not return until all workers have terminated. We avoid a deadlock by
+ // receiving messages from workers even after ctx is cancelled.
+ activeWorkers := len(workers)
+ statTicker := time.NewTicker(3 * time.Second)
+ defer statTicker.Stop()
+ defer c.logStats()
+
+ c.logStats()
+ for {
+ // If there is an execution limit, and we've reached it, stop.
+ if c.opts.Limit > 0 && c.count >= c.opts.Limit {
+ stop(nil)
+ }
+
+ var inputC chan fuzzInput
+ input, ok := c.peekInput()
+ if ok && c.crashMinimizing == nil && !stopping {
+ inputC = c.inputC
+ }
+
+ var minimizeC chan fuzzMinimizeInput
+ minimizeInput, ok := c.peekMinimizeInput()
+ if ok && !stopping {
+ minimizeC = c.minimizeC
+ }
+
+ select {
+ case <-doneC:
+ // Interrupted, cancelled, or timed out.
+ // stop sets doneC to nil so we don't busy wait here.
+ stop(ctx.Err())
+
+ case err := <-errC:
+ // A worker terminated, possibly after encountering a fatal error.
+ stop(err)
+ activeWorkers--
+ if activeWorkers == 0 {
+ return fuzzErr
+ }
+
+ case result := <-c.resultC:
+ // Received response from worker.
+ if stopping {
+ break
+ }
+ c.updateStats(result)
+
+ if result.crasherMsg != "" {
+ if c.warmupRun() && result.entry.IsSeed {
+ target := filepath.Base(c.opts.CorpusDir)
+ fmt.Fprintf(c.opts.Log, "failure while testing seed corpus entry: %s/%s\n", target, testName(result.entry.Parent))
+ stop(errors.New(result.crasherMsg))
+ break
+ }
+ if c.canMinimize() && result.canMinimize {
+ if c.crashMinimizing != nil {
+ // This crash is not minimized, and another crash is being minimized.
+ // Ignore this one and wait for the other one to finish.
+ if shouldPrintDebugInfo() {
+ c.debugLogf("found unminimized crasher, skipping in favor of minimizable crasher")
+ }
+ break
+ }
+ // Found a crasher but haven't yet attempted to minimize it.
+ // Send it back to a worker for minimization. Disable inputC so
+ // other workers don't continue fuzzing.
+ c.crashMinimizing = &result
+ fmt.Fprintf(c.opts.Log, "fuzz: minimizing %d-byte failing input file\n", len(result.entry.Data))
+ c.queueForMinimization(result, nil)
+ } else if !crashWritten {
+ // Found a crasher that's either minimized or not minimizable.
+ // Write to corpus and stop.
+ err := writeToCorpus(&result.entry, opts.CorpusDir)
+ if err == nil {
+ crashWritten = true
+ err = &crashError{
+ path: result.entry.Path,
+ err: errors.New(result.crasherMsg),
+ }
+ }
+ if shouldPrintDebugInfo() {
+ c.debugLogf(
+ "found crasher, id: %s, parent: %s, gen: %d, size: %d, exec time: %s",
+ result.entry.Path,
+ result.entry.Parent,
+ result.entry.Generation,
+ len(result.entry.Data),
+ result.entryDuration,
+ )
+ }
+ stop(err)
+ }
+ } else if result.coverageData != nil {
+ if c.warmupRun() {
+ if shouldPrintDebugInfo() {
+ c.debugLogf(
+ "processed an initial input, id: %s, new bits: %d, size: %d, exec time: %s",
+ result.entry.Parent,
+ countBits(diffCoverage(c.coverageMask, result.coverageData)),
+ len(result.entry.Data),
+ result.entryDuration,
+ )
+ }
+ c.updateCoverage(result.coverageData)
+ c.warmupInputLeft--
+ if c.warmupInputLeft == 0 {
+ fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, gathering baseline coverage: %d/%d completed, now fuzzing with %d workers\n", c.elapsed(), c.warmupInputCount, c.warmupInputCount, c.opts.Parallel)
+ if shouldPrintDebugInfo() {
+ c.debugLogf(
+ "finished processing input corpus, entries: %d, initial coverage bits: %d",
+ len(c.corpus.entries),
+ countBits(c.coverageMask),
+ )
+ }
+ }
+ } else if keepCoverage := diffCoverage(c.coverageMask, result.coverageData); keepCoverage != nil {
+ // Found a value that expanded coverage.
+ // It's not a crasher, but we may want to add it to the on-disk
+ // corpus and prioritize it for future fuzzing.
+ // TODO(jayconrod, katiehockman): Prioritize fuzzing these
+ // values which expanded coverage, perhaps based on the
+ // number of new edges that this result expanded.
+ // TODO(jayconrod, katiehockman): Don't write a value that's already
+ // in the corpus.
+ if c.canMinimize() && result.canMinimize && c.crashMinimizing == nil {
+ // Send back to workers to find a smaller value that preserves
+ // at least one new coverage bit.
+ c.queueForMinimization(result, keepCoverage)
+ } else {
+ // Update the coordinator's coverage mask and save the value.
+ inputSize := len(result.entry.Data)
+ entryNew, err := c.addCorpusEntries(true, result.entry)
+ if err != nil {
+ stop(err)
+ break
+ }
+ if !entryNew {
+ if shouldPrintDebugInfo() {
+ c.debugLogf(
+ "ignoring duplicate input which increased coverage, id: %s",
+ result.entry.Path,
+ )
+ }
+ break
+ }
+ c.updateCoverage(keepCoverage)
+ c.inputQueue.enqueue(result.entry)
+ c.interestingCount++
+ if shouldPrintDebugInfo() {
+ c.debugLogf(
+ "new interesting input, id: %s, parent: %s, gen: %d, new bits: %d, total bits: %d, size: %d, exec time: %s",
+ result.entry.Path,
+ result.entry.Parent,
+ result.entry.Generation,
+ countBits(keepCoverage),
+ countBits(c.coverageMask),
+ inputSize,
+ result.entryDuration,
+ )
+ }
+ }
+ } else {
+ if shouldPrintDebugInfo() {
+ c.debugLogf(
+ "worker reported interesting input that doesn't expand coverage, id: %s, parent: %s, canMinimize: %t",
+ result.entry.Path,
+ result.entry.Parent,
+ result.canMinimize,
+ )
+ }
+ }
+ } else if c.warmupRun() {
+ // No error or coverage data was reported for this input during
+ // warmup, so continue processing results.
+ c.warmupInputLeft--
+ if c.warmupInputLeft == 0 {
+ fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, testing seed corpus: %d/%d completed, now fuzzing with %d workers\n", c.elapsed(), c.warmupInputCount, c.warmupInputCount, c.opts.Parallel)
+ if shouldPrintDebugInfo() {
+ c.debugLogf(
+ "finished testing-only phase, entries: %d",
+ len(c.corpus.entries),
+ )
+ }
+ }
+ }
+
+ case inputC <- input:
+ // Sent the next input to a worker.
+ c.sentInput(input)
+
+ case minimizeC <- minimizeInput:
+ // Sent the next input for minimization to a worker.
+ c.sentMinimizeInput(minimizeInput)
+
+ case <-statTicker.C:
+ c.logStats()
+ }
+ }
+
+ // TODO(jayconrod,katiehockman): if a crasher can't be written to the corpus,
+ // write to the cache instead.
+}
+
+// crashError wraps a crasher written to the seed corpus. It saves the name
+// of the file where the input causing the crasher was saved. The testing
+// framework uses this to report a command to re-run that specific input.
+type crashError struct {
+ path string
+ err error
+}
+
+func (e *crashError) Error() string {
+ return e.err.Error()
+}
+
+func (e *crashError) Unwrap() error {
+ return e.err
+}
+
+func (e *crashError) CrashPath() string {
+ return e.path
+}
+
+type corpus struct {
+ entries []CorpusEntry
+ hashes map[[sha256.Size]byte]bool
+}
+
+// addCorpusEntries adds entries to the corpus, and optionally writes the entries
+// to the cache directory. If an entry is already in the corpus it is skipped. If
+// all of the entries are unique, addCorpusEntries returns true and a nil error;
+// if at least one of the entries was a duplicate, it returns false and a nil error.
+func (c *coordinator) addCorpusEntries(addToCache bool, entries ...CorpusEntry) (bool, error) {
+ noDupes := true
+ for _, e := range entries {
+ data, err := corpusEntryData(e)
+ if err != nil {
+ return false, err
+ }
+ h := sha256.Sum256(data)
+ if c.corpus.hashes[h] {
+ noDupes = false
+ continue
+ }
+ if addToCache {
+ if err := writeToCorpus(&e, c.opts.CacheDir); err != nil {
+ return false, err
+ }
+ // For entries written to disk, we don't hold onto the bytes,
+ // since the corpus would consume a significant amount of
+ // memory.
+ e.Data = nil
+ }
+ c.corpus.hashes[h] = true
+ c.corpus.entries = append(c.corpus.entries, e)
+ }
+ return noDupes, nil
+}
+
+// CorpusEntry represents an individual input for fuzzing.
+//
+// We must use an equivalent type in the testing and testing/internal/testdeps
+// packages, but testing can't import this package directly, and we don't want
+// to export this type from testing. Instead, we use the same struct type and
+// use a type alias (not a defined type) for convenience.
+type CorpusEntry = struct {
+ Parent string
+
+ // Path is the path of the corpus file, if the entry was loaded from disk.
+ // For other entries, including seed values provided by f.Add, Path is the
+ // name of the test, e.g. seed#0 or its hash.
+ Path string
+
+ // Data is the raw input data. Data should only be populated for seed
+ // values. For on-disk corpus files, Data will be nil, as it will be loaded
+ // from disk using Path.
+ Data []byte
+
+ // Values is the unmarshaled values from a corpus file.
+ Values []any
+
+ Generation int
+
+ // IsSeed indicates whether this entry is part of the seed corpus.
+ IsSeed bool
+}
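The alias trick described in the comment above works because both packages name the same unnamed struct type, so the two declarations are identical without either package importing the other. A tiny sketch with hypothetical names:

// entryA and entryB are aliases for the same unnamed struct type, so values
// of one are directly assignable to the other without any conversion.
type entryA = struct {
	Path string
	Data []byte
}
type entryB = struct {
	Path string
	Data []byte
}

var x entryA = entryB{Path: "seed#0"} // legal: the types are identical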
+
+// corpusEntryData returns the raw input bytes, either from the data struct
+// field, or from disk.
+func corpusEntryData(ce CorpusEntry) ([]byte, error) {
+ if ce.Data != nil {
+ return ce.Data, nil
+ }
+
+ return os.ReadFile(ce.Path)
+}
+
+type fuzzInput struct {
+ // entry is the value to test initially. The worker will randomly mutate
+ // values from this starting point.
+ entry CorpusEntry
+
+ // timeout is the time to spend fuzzing variations of this input,
+ // not including starting or cleaning up.
+ timeout time.Duration
+
+ // limit is the maximum number of calls to the fuzz function the worker may
+ // make. The worker may make fewer calls, for example, if it finds an
+ // error early. If limit is zero, there is no limit on calls to the
+ // fuzz function.
+ limit int64
+
+ // warmup indicates whether this is a warmup input before fuzzing begins. If
+ // true, the input should not be fuzzed.
+ warmup bool
+
+ // coverageData reflects the coordinator's current coverageMask.
+ coverageData []byte
+}
+
+type fuzzResult struct {
+ // entry is an interesting value or a crasher.
+ entry CorpusEntry
+
+ // crasherMsg is an error message from a crash. It's "" if no crash was found.
+ crasherMsg string
+
+ // canMinimize is true if the worker should attempt to minimize this result.
+ // It may be false because an attempt has already been made.
+ canMinimize bool
+
+ // coverageData is set if the worker found new coverage.
+ coverageData []byte
+
+ // limit is the number of values the coordinator asked the worker
+ // to test. 0 if there was no limit.
+ limit int64
+
+ // count is the number of values the worker actually tested.
+ count int64
+
+ // totalDuration is the time the worker spent testing inputs.
+ totalDuration time.Duration
+
+	// entryDuration is the time the worker spent executing an interesting result.
+ entryDuration time.Duration
+}
+
+type fuzzMinimizeInput struct {
+ // entry is an interesting value or crasher to minimize.
+ entry CorpusEntry
+
+ // crasherMsg is an error message from a crash. It's "" if no crash was found.
+ // If set, the worker will attempt to find a smaller input that also produces
+ // an error, though not necessarily the same error.
+ crasherMsg string
+
+ // limit is the maximum number of calls to the fuzz function the worker may
+ // make. The worker may make fewer calls, for example, if it can't reproduce
+ // an error. If limit is zero, there is no limit on calls to the fuzz function.
+ limit int64
+
+ // timeout is the time to spend minimizing this input.
+ // A zero timeout means no limit.
+ timeout time.Duration
+
+ // keepCoverage is a set of coverage bits that entry found that were not in
+ // the coordinator's combined set. When minimizing, the worker should find an
+ // input that preserves at least one of these bits. keepCoverage is nil for
+ // crashing inputs.
+ keepCoverage []byte
+}
+
+// coordinator holds channels that workers can use to communicate with
+// the coordinator.
+type coordinator struct {
+ opts CoordinateFuzzingOpts
+
+ // startTime is the time we started the workers after loading the corpus.
+ // Used for logging.
+ startTime time.Time
+
+ // inputC is sent values to fuzz by the coordinator. Any worker may receive
+ // values from this channel. Workers send results to resultC.
+ inputC chan fuzzInput
+
+ // minimizeC is sent values to minimize by the coordinator. Any worker may
+ // receive values from this channel. Workers send results to resultC.
+ minimizeC chan fuzzMinimizeInput
+
+ // resultC is sent results of fuzzing by workers. The coordinator
+ // receives these. Multiple types of messages are allowed.
+ resultC chan fuzzResult
+
+ // count is the number of values fuzzed so far.
+ count int64
+
+ // countLastLog is the number of values fuzzed when the output was last
+ // logged.
+ countLastLog int64
+
+ // timeLastLog is the time at which the output was last logged.
+ timeLastLog time.Time
+
+ // interestingCount is the number of unique interesting values which have
+ // been found this execution.
+ interestingCount int
+
+ // warmupInputCount is the count of all entries in the corpus which will
+ // need to be received from workers to run once during warmup, but not fuzz.
+ // This could be for coverage data, or only for the purposes of verifying
+ // that the seed corpus doesn't have any crashers. See warmupRun.
+ warmupInputCount int
+
+ // warmupInputLeft is the number of entries in the corpus which still need
+ // to be received from workers to run once during warmup, but not fuzz.
+	// See warmupRun.
+ warmupInputLeft int
+
+ // duration is the time spent fuzzing inside workers, not counting time
+ // starting up or tearing down.
+ duration time.Duration
+
+ // countWaiting is the number of fuzzing executions the coordinator is
+ // waiting on workers to complete.
+ countWaiting int64
+
+ // corpus is a set of interesting values, including the seed corpus and
+ // generated values that workers reported as interesting.
+ corpus corpus
+
+ // minimizationAllowed is true if one or more of the types of fuzz
+ // function's parameters can be minimized.
+ minimizationAllowed bool
+
+ // inputQueue is a queue of inputs that workers should try fuzzing. This is
+ // initially populated from the seed corpus and cached inputs. More inputs
+ // may be added as new coverage is discovered.
+ inputQueue queue
+
+ // minimizeQueue is a queue of inputs that caused errors or exposed new
+ // coverage. Workers should attempt to find smaller inputs that do the
+ // same thing.
+ minimizeQueue queue
+
+ // crashMinimizing is the crash that is currently being minimized.
+ crashMinimizing *fuzzResult
+
+ // coverageMask aggregates coverage that was found for all inputs in the
+ // corpus. Each byte represents a single basic execution block. Each set bit
+ // within the byte indicates that an input has triggered that block at least
+ // 1 << n times, where n is the position of the bit in the byte. For example, a
+ // value of 12 indicates that separate inputs have triggered this block
+ // between 4-7 times and 8-15 times.
+ coverageMask []byte
+}
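A small worked example of the coverageMask encoding described above, using the same masking arithmetic that updateCoverage applies later in this file; the snapshot values are made up, and fmt and math/bits are already imported here:

mask := byte(0)
for _, snap := range []byte{4, 8, 64} { // rounded counters reported by three different inputs
	newBits := snap &^ mask // bits this input contributes that the mask lacks
	fmt.Printf("snapshot=%08b newBits=%d\n", snap, bits.OnesCount8(newBits))
	mask |= snap
}
// mask is now 0b01001100 (decimal 76): some input hit this block 4-7 times,
// some 8-15 times, and some 64-127 times.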
+
+func newCoordinator(opts CoordinateFuzzingOpts) (*coordinator, error) {
+ // Make sure all of the seed corpus has marshalled data.
+ for i := range opts.Seed {
+ if opts.Seed[i].Data == nil && opts.Seed[i].Values != nil {
+ opts.Seed[i].Data = marshalCorpusFile(opts.Seed[i].Values...)
+ }
+ }
+ c := &coordinator{
+ opts: opts,
+ startTime: time.Now(),
+ inputC: make(chan fuzzInput),
+ minimizeC: make(chan fuzzMinimizeInput),
+ resultC: make(chan fuzzResult),
+ timeLastLog: time.Now(),
+ corpus: corpus{hashes: make(map[[sha256.Size]byte]bool)},
+ }
+ if err := c.readCache(); err != nil {
+ return nil, err
+ }
+ if opts.MinimizeLimit > 0 || opts.MinimizeTimeout > 0 {
+ for _, t := range opts.Types {
+ if isMinimizable(t) {
+ c.minimizationAllowed = true
+ break
+ }
+ }
+ }
+
+ covSize := len(coverage())
+ if covSize == 0 {
+ fmt.Fprintf(c.opts.Log, "warning: the test binary was not built with coverage instrumentation, so fuzzing will run without coverage guidance and may be inefficient\n")
+ // Even though a coverage-only run won't occur, we should still run all
+ // of the seed corpus to make sure there are no existing failures before
+ // we start fuzzing.
+ c.warmupInputCount = len(c.opts.Seed)
+ for _, e := range c.opts.Seed {
+ c.inputQueue.enqueue(e)
+ }
+ } else {
+ c.warmupInputCount = len(c.corpus.entries)
+ for _, e := range c.corpus.entries {
+ c.inputQueue.enqueue(e)
+ }
+ // Set c.coverageMask to a clean []byte full of zeros.
+ c.coverageMask = make([]byte, covSize)
+ }
+ c.warmupInputLeft = c.warmupInputCount
+
+ if len(c.corpus.entries) == 0 {
+ fmt.Fprintf(c.opts.Log, "warning: starting with empty corpus\n")
+ var vals []any
+ for _, t := range opts.Types {
+ vals = append(vals, zeroValue(t))
+ }
+ data := marshalCorpusFile(vals...)
+ h := sha256.Sum256(data)
+ name := fmt.Sprintf("%x", h[:4])
+ c.addCorpusEntries(false, CorpusEntry{Path: name, Data: data})
+ }
+
+ return c, nil
+}
+
+func (c *coordinator) updateStats(result fuzzResult) {
+ c.count += result.count
+ c.countWaiting -= result.limit
+ c.duration += result.totalDuration
+}
+
+func (c *coordinator) logStats() {
+ now := time.Now()
+ if c.warmupRun() {
+ runSoFar := c.warmupInputCount - c.warmupInputLeft
+ if coverageEnabled {
+ fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, gathering baseline coverage: %d/%d completed\n", c.elapsed(), runSoFar, c.warmupInputCount)
+ } else {
+ fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, testing seed corpus: %d/%d completed\n", c.elapsed(), runSoFar, c.warmupInputCount)
+ }
+ } else if c.crashMinimizing != nil {
+ fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, minimizing\n", c.elapsed())
+ } else {
+ rate := float64(c.count-c.countLastLog) / now.Sub(c.timeLastLog).Seconds()
+ if coverageEnabled {
+ total := c.warmupInputCount + c.interestingCount
+ fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, execs: %d (%.0f/sec), new interesting: %d (total: %d)\n", c.elapsed(), c.count, rate, c.interestingCount, total)
+ } else {
+ fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, execs: %d (%.0f/sec)\n", c.elapsed(), c.count, rate)
+ }
+ }
+ c.countLastLog = c.count
+ c.timeLastLog = now
+}
+
+// peekInput returns the next value that should be sent to workers.
+// If the number of executions is limited, the returned value includes
+// a limit for one worker. If there are no executions left, peekInput returns
+// a zero value and false.
+//
+// peekInput doesn't actually remove the input from the queue. The caller
+// must call sentInput after sending the input.
+//
+// If the input queue is empty and the coverage/testing-only run has completed,
+// peekInput refills it from the corpus.
+func (c *coordinator) peekInput() (fuzzInput, bool) {
+ if c.opts.Limit > 0 && c.count+c.countWaiting >= c.opts.Limit {
+ // Already making the maximum number of calls to the fuzz function.
+ // Don't send more inputs right now.
+ return fuzzInput{}, false
+ }
+ if c.inputQueue.len == 0 {
+ if c.warmupRun() {
+ // Wait for coverage/testing-only run to finish before sending more
+ // inputs.
+ return fuzzInput{}, false
+ }
+ c.refillInputQueue()
+ }
+
+ entry, ok := c.inputQueue.peek()
+ if !ok {
+ panic("input queue empty after refill")
+ }
+ input := fuzzInput{
+ entry: entry.(CorpusEntry),
+ timeout: workerFuzzDuration,
+ warmup: c.warmupRun(),
+ }
+ if c.coverageMask != nil {
+ input.coverageData = bytes.Clone(c.coverageMask)
+ }
+ if input.warmup {
+ // No fuzzing will occur, but it should count toward the limit set by
+ // -fuzztime.
+ input.limit = 1
+ return input, true
+ }
+
+ if c.opts.Limit > 0 {
+ input.limit = c.opts.Limit / int64(c.opts.Parallel)
+ if c.opts.Limit%int64(c.opts.Parallel) > 0 {
+ input.limit++
+ }
+ remaining := c.opts.Limit - c.count - c.countWaiting
+ if input.limit > remaining {
+ input.limit = remaining
+ }
+ }
+ return input, true
+}
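A quick arithmetic check of the per-worker limit computed above (numbers are made up): with Limit=100 and Parallel=8, the division gives 12 with a remainder, so each input is handed out with limit 13, and the final clamp keeps the total that has been sent or executed from exceeding the overall budget.

limit := int64(100) / int64(8) // 12
if int64(100)%int64(8) > 0 {
	limit++ // 13
}
// remaining budget = Limit - count - countWaiting; limit is capped at that value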
+
+// sentInput updates internal counters after an input is sent to c.inputC.
+func (c *coordinator) sentInput(input fuzzInput) {
+ c.inputQueue.dequeue()
+ c.countWaiting += input.limit
+}
+
+// refillInputQueue refills the input queue from the corpus after it becomes
+// empty.
+func (c *coordinator) refillInputQueue() {
+ for _, e := range c.corpus.entries {
+ c.inputQueue.enqueue(e)
+ }
+}
+
+// queueForMinimization creates a fuzzMinimizeInput from result and adds it
+// to the minimization queue to be sent to workers.
+func (c *coordinator) queueForMinimization(result fuzzResult, keepCoverage []byte) {
+ if shouldPrintDebugInfo() {
+ c.debugLogf(
+ "queueing input for minimization, id: %s, parent: %s, keepCoverage: %t, crasher: %t",
+ result.entry.Path,
+ result.entry.Parent,
+ keepCoverage != nil,
+ result.crasherMsg != "",
+ )
+ }
+ if result.crasherMsg != "" {
+ c.minimizeQueue.clear()
+ }
+
+ input := fuzzMinimizeInput{
+ entry: result.entry,
+ crasherMsg: result.crasherMsg,
+ keepCoverage: keepCoverage,
+ }
+ c.minimizeQueue.enqueue(input)
+}
+
+// peekMinimizeInput returns the next input that should be sent to workers for
+// minimization.
+func (c *coordinator) peekMinimizeInput() (fuzzMinimizeInput, bool) {
+ if !c.canMinimize() {
+ // Already making the maximum number of calls to the fuzz function.
+ // Don't send more inputs right now.
+ return fuzzMinimizeInput{}, false
+ }
+ v, ok := c.minimizeQueue.peek()
+ if !ok {
+ return fuzzMinimizeInput{}, false
+ }
+ input := v.(fuzzMinimizeInput)
+
+ if c.opts.MinimizeTimeout > 0 {
+ input.timeout = c.opts.MinimizeTimeout
+ }
+ if c.opts.MinimizeLimit > 0 {
+ input.limit = c.opts.MinimizeLimit
+ } else if c.opts.Limit > 0 {
+ if input.crasherMsg != "" {
+ input.limit = c.opts.Limit
+ } else {
+ input.limit = c.opts.Limit / int64(c.opts.Parallel)
+ if c.opts.Limit%int64(c.opts.Parallel) > 0 {
+ input.limit++
+ }
+ }
+ }
+ if c.opts.Limit > 0 {
+ remaining := c.opts.Limit - c.count - c.countWaiting
+ if input.limit > remaining {
+ input.limit = remaining
+ }
+ }
+ return input, true
+}
+
+// sentMinimizeInput removes an input from the minimization queue after it's
+// sent to minimizeC.
+func (c *coordinator) sentMinimizeInput(input fuzzMinimizeInput) {
+ c.minimizeQueue.dequeue()
+ c.countWaiting += input.limit
+}
+
+// warmupRun returns true while the coordinator is running inputs without
+// mutating them as a warmup before fuzzing. This could be to gather baseline
+// coverage data for entries in the corpus, or to test all of the seed corpus
+// for errors before fuzzing begins.
+//
+// The coordinator doesn't store coverage data in the cache with each input
+// because that data would be invalid when counter offsets in the test binary
+// change.
+//
+// When gathering coverage, the coordinator sends each entry to a worker to
+// gather coverage for that entry only, without fuzzing or minimizing. This
+// phase ends when all workers have finished, and the coordinator has a combined
+// coverage map.
+func (c *coordinator) warmupRun() bool {
+ return c.warmupInputLeft > 0
+}
+
+// updateCoverage sets bits in c.coverageMask that are set in newCoverage.
+// updateCoverage returns the number of newly set bits. See the comment on
+// coverageMask for the format.
+func (c *coordinator) updateCoverage(newCoverage []byte) int {
+ if len(newCoverage) != len(c.coverageMask) {
+ panic(fmt.Sprintf("number of coverage counters changed at runtime: %d, expected %d", len(newCoverage), len(c.coverageMask)))
+ }
+ newBitCount := 0
+ for i := range newCoverage {
+ diff := newCoverage[i] &^ c.coverageMask[i]
+ newBitCount += bits.OnesCount8(diff)
+ c.coverageMask[i] |= newCoverage[i]
+ }
+ return newBitCount
+}
+
+// canMinimize returns whether the coordinator should attempt to find smaller
+// inputs that reproduce a crash or new coverage.
+func (c *coordinator) canMinimize() bool {
+ return c.minimizationAllowed &&
+ (c.opts.Limit == 0 || c.count+c.countWaiting < c.opts.Limit)
+}
+
+func (c *coordinator) elapsed() time.Duration {
+ return time.Since(c.startTime).Round(1 * time.Second)
+}
+
+// readCache creates a combined corpus from seed values and values in the cache
+// (in GOCACHE/fuzz).
+//
+// TODO(fuzzing): need a mechanism that can remove values that
+// aren't useful anymore, for example, because they have the wrong type.
+func (c *coordinator) readCache() error {
+ if _, err := c.addCorpusEntries(false, c.opts.Seed...); err != nil {
+ return err
+ }
+ entries, err := ReadCorpus(c.opts.CacheDir, c.opts.Types)
+ if err != nil {
+ if _, ok := err.(*MalformedCorpusError); !ok {
+ // It's okay if some files in the cache directory are malformed and
+ // are not included in the corpus, but fail if it's an I/O error.
+ return err
+ }
+ // TODO(jayconrod,katiehockman): consider printing some kind of warning
+ // indicating the number of files which were skipped because they are
+ // malformed.
+ }
+ if _, err := c.addCorpusEntries(false, entries...); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MalformedCorpusError is an error found while reading the corpus from the
+// filesystem. All of the errors are stored in the errs list. The testing
+// framework uses this to report malformed files in testdata.
+type MalformedCorpusError struct {
+ errs []error
+}
+
+func (e *MalformedCorpusError) Error() string {
+ var msgs []string
+ for _, s := range e.errs {
+ msgs = append(msgs, s.Error())
+ }
+ return strings.Join(msgs, "\n")
+}
+
+// ReadCorpus reads the corpus from the provided dir. The returned corpus
+// entries are guaranteed to match the given types. Malformed files are
+// skipped; their errors are collected in a MalformedCorpusError, which is
+// returned along with the valid entries.
+func ReadCorpus(dir string, types []reflect.Type) ([]CorpusEntry, error) {
+ files, err := os.ReadDir(dir)
+ if os.IsNotExist(err) {
+ return nil, nil // No corpus to read
+ } else if err != nil {
+ return nil, fmt.Errorf("reading seed corpus from testdata: %v", err)
+ }
+ var corpus []CorpusEntry
+ var errs []error
+ for _, file := range files {
+ // TODO(jayconrod,katiehockman): determine when a file is a fuzzing input
+ // based on its name. We should only read files created by writeToCorpus.
+ // If we read ALL files, we won't be able to change the file format by
+ // changing the extension. We also won't be able to add files like
+ // README.txt explaining why the directory exists.
+ if file.IsDir() {
+ continue
+ }
+ filename := filepath.Join(dir, file.Name())
+ data, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read corpus file: %v", err)
+ }
+ var vals []any
+ vals, err = readCorpusData(data, types)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("%q: %v", filename, err))
+ continue
+ }
+ corpus = append(corpus, CorpusEntry{Path: filename, Values: vals})
+ }
+ if len(errs) > 0 {
+ return corpus, &MalformedCorpusError{errs: errs}
+ }
+ return corpus, nil
+}
+
+func readCorpusData(data []byte, types []reflect.Type) ([]any, error) {
+ vals, err := unmarshalCorpusFile(data)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshal: %v", err)
+ }
+ if err = CheckCorpus(vals, types); err != nil {
+ return nil, err
+ }
+ return vals, nil
+}
+
+// CheckCorpus verifies that the types in vals match the expected types
+// provided.
+func CheckCorpus(vals []any, types []reflect.Type) error {
+ if len(vals) != len(types) {
+ return fmt.Errorf("wrong number of values in corpus entry: %d, want %d", len(vals), len(types))
+ }
+ valsT := make([]reflect.Type, len(vals))
+ for valsI, v := range vals {
+ valsT[valsI] = reflect.TypeOf(v)
+ }
+ for i := range types {
+ if valsT[i] != types[i] {
+ return fmt.Errorf("mismatched types in corpus entry: %v, want %v", valsT, types)
+ }
+ }
+ return nil
+}
+
+// writeToCorpus atomically writes the given bytes to a new file in testdata. If
+// the directory does not exist, it will create one. If the file already exists,
+// writeToCorpus will not rewrite it. writeToCorpus sets entry.Path to the new
+// file that was just written, or returns an error if writing failed.
+func writeToCorpus(entry *CorpusEntry, dir string) (err error) {
+ sum := fmt.Sprintf("%x", sha256.Sum256(entry.Data))[:16]
+ entry.Path = filepath.Join(dir, sum)
+ if err := os.MkdirAll(dir, 0777); err != nil {
+ return err
+ }
+ if err := os.WriteFile(entry.Path, entry.Data, 0666); err != nil {
+ os.Remove(entry.Path) // remove partially written file
+ return err
+ }
+ return nil
+}
+
+func testName(path string) string {
+ return filepath.Base(path)
+}
+
+func zeroValue(t reflect.Type) any {
+ for _, v := range zeroVals {
+ if reflect.TypeOf(v) == t {
+ return v
+ }
+ }
+ panic(fmt.Sprintf("unsupported type: %v", t))
+}
+
+var zeroVals []any = []any{
+ []byte(""),
+ string(""),
+ false,
+ byte(0),
+ rune(0),
+ float32(0),
+ float64(0),
+ int(0),
+ int8(0),
+ int16(0),
+ int32(0),
+ int64(0),
+ uint(0),
+ uint8(0),
+ uint16(0),
+ uint32(0),
+ uint64(0),
+}
+
+var debugInfo = godebug.New("#fuzzdebug").Value() == "1"
+
+func shouldPrintDebugInfo() bool {
+ return debugInfo
+}
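+
+// Illustrative note (not part of the upstream change): running the coordinator
+// with GODEBUG=fuzzdebug=1 is how the #fuzzdebug knob above is expected to be
+// switched on, enabling the debugLogf output below.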
+
+func (c *coordinator) debugLogf(format string, args ...any) {
+ t := time.Now().Format("2006-01-02 15:04:05.999999999")
+ fmt.Fprintf(c.opts.Log, t+" DEBUG "+format+"\n", args...)
+}
diff --git a/src/internal/fuzz/mem.go b/src/internal/fuzz/mem.go
new file mode 100644
index 0000000..4155e4e
--- /dev/null
+++ b/src/internal/fuzz/mem.go
@@ -0,0 +1,138 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "unsafe"
+)
+
+// sharedMem manages access to a region of virtual memory mapped from a file,
+// shared between multiple processes. The region includes space for a header and
+// a value of variable length.
+//
+// When fuzzing, the coordinator creates a sharedMem from a temporary file for
+// each worker. This buffer is used to pass values to fuzz between processes.
+// Care must be taken to manage access to shared memory across processes;
+// sharedMem provides no synchronization on its own. See workerComm for an
+// explanation.
+type sharedMem struct {
+ // f is the file mapped into memory.
+ f *os.File
+
+ // region is the mapped region of virtual memory for f. The content of f may
+ // be read or written through this slice.
+ region []byte
+
+ // removeOnClose is true if the file should be deleted by Close.
+ removeOnClose bool
+
+ // sys contains OS-specific information.
+ sys sharedMemSys
+}
+
+// sharedMemHeader stores metadata in shared memory.
+type sharedMemHeader struct {
+ // count is the number of times the worker has called the fuzz function.
+ // May be reset by coordinator.
+ count int64
+
+ // valueLen is the number of bytes in region which should be read.
+ valueLen int
+
+ // randState and randInc hold the state of a pseudo-random number generator.
+ randState, randInc uint64
+
+ // rawInMem is true if the region holds raw bytes, which occurs during
+ // minimization. If true after the worker fails during minimization, this
+ // indicates that an unrecoverable error occurred, and the region can be
+ // used to retrieve the raw bytes that caused the error.
+ rawInMem bool
+}
+
+// sharedMemSize returns the size needed for a shared memory buffer that can
+// contain values of the given size.
+func sharedMemSize(valueSize int) int {
+ // TODO(jayconrod): set a reasonable maximum size per platform.
+ return int(unsafe.Sizeof(sharedMemHeader{})) + valueSize
+}
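+
+// As a rough sketch (not part of the upstream change), the mapped region is a
+// fixed-size header followed immediately by the value bytes, so room for a
+// 4 KiB value costs:
+//
+//	total := sharedMemSize(4096) // int(unsafe.Sizeof(sharedMemHeader{})) + 4096 bytes
+//	_ = total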
+
+// sharedMemTempFile creates a new temporary file of the given size, then maps
+// it into memory. The file will be removed when the Close method is called.
+func sharedMemTempFile(size int) (m *sharedMem, err error) {
+ // Create a temporary file.
+ f, err := os.CreateTemp("", "fuzz-*")
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ f.Close()
+ os.Remove(f.Name())
+ }
+ }()
+
+ // Resize it to the correct size.
+ totalSize := sharedMemSize(size)
+ if err := f.Truncate(int64(totalSize)); err != nil {
+ return nil, err
+ }
+
+ // Map the file into memory.
+ removeOnClose := true
+ return sharedMemMapFile(f, totalSize, removeOnClose)
+}
+
+// header returns a pointer to metadata within the shared memory region.
+func (m *sharedMem) header() *sharedMemHeader {
+ return (*sharedMemHeader)(unsafe.Pointer(&m.region[0]))
+}
+
+// valueRef returns the value currently stored in shared memory. The returned
+// slice points to shared memory; it is not a copy.
+func (m *sharedMem) valueRef() []byte {
+ length := m.header().valueLen
+ valueOffset := int(unsafe.Sizeof(sharedMemHeader{}))
+ return m.region[valueOffset : valueOffset+length]
+}
+
+// valueCopy returns a copy of the value stored in shared memory.
+func (m *sharedMem) valueCopy() []byte {
+ ref := m.valueRef()
+ return bytes.Clone(ref)
+}
+
+// setValue copies the data in b into the shared memory buffer and sets
+// the length. len(b) must be less than or equal to the capacity of the buffer
+// (as returned by cap(m.valueRef())).
+func (m *sharedMem) setValue(b []byte) {
+ v := m.valueRef()
+ if len(b) > cap(v) {
+ panic(fmt.Sprintf("value length %d larger than shared memory capacity %d", len(b), cap(v)))
+ }
+ m.header().valueLen = len(b)
+ copy(v[:cap(v)], b)
+}
+
+// setValueLen sets the length of the shared memory buffer returned by valueRef
+// to n, which may be at most the cap of that slice.
+//
+// Note that we can only store the length in the shared memory header. The full
+// slice header contains a pointer, which is likely only valid for one process,
+// since each process can map shared memory at a different virtual address.
+func (m *sharedMem) setValueLen(n int) {
+ v := m.valueRef()
+ if n > cap(v) {
+ panic(fmt.Sprintf("length %d larger than shared memory capacity %d", n, cap(v)))
+ }
+ m.header().valueLen = n
+}
+
+// TODO(jayconrod): add method to resize the buffer. We'll need that when the
+// mutator can increase input length. Only the coordinator will be able to
+// do it, since we'll need to send a message to the worker telling it to
+// remap the file.
diff --git a/src/internal/fuzz/minimize.go b/src/internal/fuzz/minimize.go
new file mode 100644
index 0000000..0e410fb
--- /dev/null
+++ b/src/internal/fuzz/minimize.go
@@ -0,0 +1,95 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "reflect"
+)
+
+func isMinimizable(t reflect.Type) bool {
+ return t == reflect.TypeOf("") || t == reflect.TypeOf([]byte(nil))
+}
+
+func minimizeBytes(v []byte, try func([]byte) bool, shouldStop func() bool) {
+ tmp := make([]byte, len(v))
+ // If minimization was successful at any point during minimizeBytes,
+ // then the vals slice in (*workerServer).minimizeInput will point to
+ // tmp. Since tmp is altered while making new candidates, we need to
+ // make sure that it is equal to the correct value, v, before exiting
+ // this function.
+ defer copy(tmp, v)
+
+ // First, try to cut the tail.
+ for n := 1024; n != 0; n /= 2 {
+ for len(v) > n {
+ if shouldStop() {
+ return
+ }
+ candidate := v[:len(v)-n]
+ if !try(candidate) {
+ break
+ }
+ // Set v to the new value to continue iterating.
+ v = candidate
+ }
+ }
+
+ // Then, try to remove each individual byte.
+ for i := 0; i < len(v)-1; i++ {
+ if shouldStop() {
+ return
+ }
+ candidate := tmp[:len(v)-1]
+ copy(candidate[:i], v[:i])
+ copy(candidate[i:], v[i+1:])
+ if !try(candidate) {
+ continue
+ }
+ // Update v to delete the value at index i.
+ copy(v[i:], v[i+1:])
+ v = v[:len(candidate)]
+ // v[i] is now different, so decrement i to redo this iteration
+ // of the loop with the new value.
+ i--
+ }
+
+ // Then, try to remove each possible subset of bytes.
+ for i := 0; i < len(v)-1; i++ {
+ copy(tmp, v[:i])
+ for j := len(v); j > i+1; j-- {
+ if shouldStop() {
+ return
+ }
+ candidate := tmp[:len(v)-j+i]
+ copy(candidate[i:], v[j:])
+ if !try(candidate) {
+ continue
+ }
+ // Update v and reset the loop with the new length.
+ copy(v[i:], v[j:])
+ v = v[:len(candidate)]
+ j = len(v)
+ }
+ }
+
+ // Then, try to simplify the input and make it more human-readable by
+ // replacing each byte with a printable character.
+ printableChars := []byte("012789ABCXYZabcxyz !\"#$%&'()*+,.")
+ for i, b := range v {
+ if shouldStop() {
+ return
+ }
+
+ for _, pc := range printableChars {
+ v[i] = pc
+ if try(v) {
+ // Successful. Move on to the next byte in v.
+ break
+ }
+ // Unsuccessful. Revert v[i] back to original value.
+ v[i] = b
+ }
+ }
+}
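+
+// Illustrative walk-through (not part of the upstream change): suppose a crash
+// reproduces whenever the input contains at least three 0xFF bytes. The tail
+// cutting, single-byte removal, and subset removal passes typically shrink the
+// input to just those three bytes, and the printable-replacement pass then
+// cannot simplify it further:
+//
+//	try := func(b []byte) bool { return bytes.Count(b, []byte{0xFF}) >= 3 }
+//	v := []byte{0, 0xFF, 0, 0xFF, 0, 0, 0xFF, 0}
+//	minimizeBytes(v, try, func() bool { return false })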
diff --git a/src/internal/fuzz/minimize_test.go b/src/internal/fuzz/minimize_test.go
new file mode 100644
index 0000000..2db2633
--- /dev/null
+++ b/src/internal/fuzz/minimize_test.go
@@ -0,0 +1,182 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || freebsd || linux || windows
+
+package fuzz
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+func TestMinimizeInput(t *testing.T) {
+ type testcase struct {
+ name string
+ fn func(CorpusEntry) error
+ input []any
+ expected []any
+ }
+ cases := []testcase{
+ {
+ name: "ones_byte",
+ fn: func(e CorpusEntry) error {
+ b := e.Values[0].([]byte)
+ ones := 0
+ for _, v := range b {
+ if v == 1 {
+ ones++
+ }
+ }
+ if ones == 3 {
+ return fmt.Errorf("bad %v", e.Values[0])
+ }
+ return nil
+ },
+ input: []any{[]byte{0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ expected: []any{[]byte{1, 1, 1}},
+ },
+ {
+ name: "single_bytes",
+ fn: func(e CorpusEntry) error {
+ b := e.Values[0].([]byte)
+ if len(b) < 2 {
+ return nil
+ }
+ if len(b) == 2 && b[0] == 1 && b[1] == 2 {
+ return nil
+ }
+ return fmt.Errorf("bad %v", e.Values[0])
+ },
+ input: []any{[]byte{1, 2, 3, 4, 5}},
+ expected: []any{[]byte("00")},
+ },
+ {
+ name: "set_of_bytes",
+ fn: func(e CorpusEntry) error {
+ b := e.Values[0].([]byte)
+ if len(b) < 3 {
+ return nil
+ }
+ if bytes.Equal(b, []byte{0, 1, 2, 3, 4, 5}) || bytes.Equal(b, []byte{0, 4, 5}) {
+ return fmt.Errorf("bad %v", e.Values[0])
+ }
+ return nil
+ },
+ input: []any{[]byte{0, 1, 2, 3, 4, 5}},
+ expected: []any{[]byte{0, 4, 5}},
+ },
+ {
+ name: "non_ascii_bytes",
+ fn: func(e CorpusEntry) error {
+ b := e.Values[0].([]byte)
+ if len(b) == 3 {
+ return fmt.Errorf("bad %v", e.Values[0])
+ }
+ return nil
+ },
+ input: []any{[]byte("ท")}, // ท is 3 bytes
+ expected: []any{[]byte("000")},
+ },
+ {
+ name: "ones_string",
+ fn: func(e CorpusEntry) error {
+ b := e.Values[0].(string)
+ ones := 0
+ for _, v := range b {
+ if v == '1' {
+ ones++
+ }
+ }
+ if ones == 3 {
+ return fmt.Errorf("bad %v", e.Values[0])
+ }
+ return nil
+ },
+ input: []any{"001010001000000000000000000"},
+ expected: []any{"111"},
+ },
+ {
+ name: "string_length",
+ fn: func(e CorpusEntry) error {
+ b := e.Values[0].(string)
+ if len(b) == 5 {
+ return fmt.Errorf("bad %v", e.Values[0])
+ }
+ return nil
+ },
+ input: []any{"zzzzz"},
+ expected: []any{"00000"},
+ },
+ {
+ name: "string_with_letter",
+ fn: func(e CorpusEntry) error {
+ b := e.Values[0].(string)
+ r, _ := utf8.DecodeRune([]byte(b))
+ if unicode.IsLetter(r) {
+ return fmt.Errorf("bad %v", e.Values[0])
+ }
+ return nil
+ },
+ input: []any{"ZZZZZ"},
+ expected: []any{"A"},
+ },
+ }
+
+ for _, tc := range cases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ ws := &workerServer{
+ fuzzFn: func(e CorpusEntry) (time.Duration, error) {
+ return time.Second, tc.fn(e)
+ },
+ }
+ mem := &sharedMem{region: make([]byte, 100)} // big enough to hold value and header
+ vals := tc.input
+ success, err := ws.minimizeInput(context.Background(), vals, mem, minimizeArgs{})
+ if !success {
+ t.Errorf("minimizeInput did not succeed")
+ }
+ if err == nil {
+ t.Fatal("minimizeInput didn't provide an error")
+ }
+ if expected := fmt.Sprintf("bad %v", tc.expected[0]); err.Error() != expected {
+ t.Errorf("unexpected error: got %q, want %q", err, expected)
+ }
+ if !reflect.DeepEqual(vals, tc.expected) {
+ t.Errorf("unexpected results: got %v, want %v", vals, tc.expected)
+ }
+ })
+ }
+}
+
+// TestMinimizeFlaky checks that if a flaky failure occurs while minimizing an
+// interesting input, minimization is not reported as successful and the
+// error is not returned (since it's flaky).
+func TestMinimizeFlaky(t *testing.T) {
+ ws := &workerServer{fuzzFn: func(e CorpusEntry) (time.Duration, error) {
+ return time.Second, errors.New("ohno")
+ }}
+ mem := &sharedMem{region: make([]byte, 100)} // big enough to hold value and header
+ vals := []any{[]byte(nil)}
+ args := minimizeArgs{KeepCoverage: make([]byte, len(coverageSnapshot))}
+ success, err := ws.minimizeInput(context.Background(), vals, mem, args)
+ if success {
+ t.Error("unexpected success")
+ }
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ if count := mem.header().count; count != 1 {
+ t.Errorf("count: got %d, want 1", count)
+ }
+}
diff --git a/src/internal/fuzz/mutator.go b/src/internal/fuzz/mutator.go
new file mode 100644
index 0000000..bb96066
--- /dev/null
+++ b/src/internal/fuzz/mutator.go
@@ -0,0 +1,300 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "unsafe"
+)
+
+type mutator struct {
+ r mutatorRand
+ scratch []byte // scratch slice to avoid additional allocations
+}
+
+func newMutator() *mutator {
+ return &mutator{r: newPcgRand()}
+}
+
+func (m *mutator) rand(n int) int {
+ return m.r.intn(n)
+}
+
+func (m *mutator) randByteOrder() binary.ByteOrder {
+ if m.r.bool() {
+ return binary.LittleEndian
+ }
+ return binary.BigEndian
+}
+
+// chooseLen chooses the length of a range mutation in [1,n]. It gives
+// preference to shorter ranges.
+func (m *mutator) chooseLen(n int) int {
+ switch x := m.rand(100); {
+ case x < 90:
+ return m.rand(min(8, n)) + 1
+ case x < 99:
+ return m.rand(min(32, n)) + 1
+ default:
+ return m.rand(n) + 1
+ }
+}
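+
+// For intuition (illustrative, not part of the upstream change): with n = 1000,
+// roughly 90% of calls return a length in [1,8], 9% a length in [1,32], and 1%
+// a length anywhere in [1,1000]:
+//
+//	m := newMutator()
+//	n := m.chooseLen(1000) // usually small, occasionally large
+//	_ = n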
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// mutate performs a mutation on one of the provided values, chosen at random.
+func (m *mutator) mutate(vals []any, maxBytes int) {
+ // TODO(katiehockman): pull some of these functions into helper methods and
+ // test that each case is working as expected.
+ // TODO(katiehockman): perform more types of mutations for []byte.
+
+ // maxPerVal will represent the maximum number of bytes that each value is
+ // allowed to occupy after mutating, giving an equal share of the capacity
+ // to each value. Allow a little wiggle room for the encoding.
+ maxPerVal := maxBytes/len(vals) - 100
+
+ // Pick a random value to mutate.
+ // TODO: consider mutating more than one value at a time.
+ i := m.rand(len(vals))
+ switch v := vals[i].(type) {
+ case int:
+ vals[i] = int(m.mutateInt(int64(v), maxInt))
+ case int8:
+ vals[i] = int8(m.mutateInt(int64(v), math.MaxInt8))
+ case int16:
+ vals[i] = int16(m.mutateInt(int64(v), math.MaxInt16))
+ case int64:
+ vals[i] = m.mutateInt(v, maxInt)
+ case uint:
+ vals[i] = uint(m.mutateUInt(uint64(v), maxUint))
+ case uint16:
+ vals[i] = uint16(m.mutateUInt(uint64(v), math.MaxUint16))
+ case uint32:
+ vals[i] = uint32(m.mutateUInt(uint64(v), math.MaxUint32))
+ case uint64:
+ vals[i] = m.mutateUInt(uint64(v), maxUint)
+ case float32:
+ vals[i] = float32(m.mutateFloat(float64(v), math.MaxFloat32))
+ case float64:
+ vals[i] = m.mutateFloat(v, math.MaxFloat64)
+ case bool:
+ if m.rand(2) == 1 {
+ vals[i] = !v // 50% chance of flipping the bool
+ }
+ case rune: // int32
+ vals[i] = rune(m.mutateInt(int64(v), math.MaxInt32))
+ case byte: // uint8
+ vals[i] = byte(m.mutateUInt(uint64(v), math.MaxUint8))
+ case string:
+ if len(v) > maxPerVal {
+ panic(fmt.Sprintf("cannot mutate bytes of length %d", len(v)))
+ }
+ if cap(m.scratch) < maxPerVal {
+ m.scratch = append(make([]byte, 0, maxPerVal), v...)
+ } else {
+ m.scratch = m.scratch[:len(v)]
+ copy(m.scratch, v)
+ }
+ m.mutateBytes(&m.scratch)
+ vals[i] = string(m.scratch)
+ case []byte:
+ if len(v) > maxPerVal {
+ panic(fmt.Sprintf("cannot mutate bytes of length %d", len(v)))
+ }
+ if cap(m.scratch) < maxPerVal {
+ m.scratch = append(make([]byte, 0, maxPerVal), v...)
+ } else {
+ m.scratch = m.scratch[:len(v)]
+ copy(m.scratch, v)
+ }
+ m.mutateBytes(&m.scratch)
+ vals[i] = m.scratch
+ default:
+ panic(fmt.Sprintf("type not supported for mutating: %T", vals[i]))
+ }
+}
+
+func (m *mutator) mutateInt(v, maxValue int64) int64 {
+ var max int64
+ for {
+ max = 100
+ switch m.rand(2) {
+ case 0:
+ // Add a random number
+ if v >= maxValue {
+ continue
+ }
+ if v > 0 && maxValue-v < max {
+ // Don't let v exceed maxValue
+ max = maxValue - v
+ }
+ v += int64(1 + m.rand(int(max)))
+ return v
+ case 1:
+ // Subtract a random number
+ if v <= -maxValue {
+ continue
+ }
+ if v < 0 && maxValue+v < max {
+ // Don't let v drop below -maxValue
+ max = maxValue + v
+ }
+ v -= int64(1 + m.rand(int(max)))
+ return v
+ }
+ }
+}
+
+func (m *mutator) mutateUInt(v, maxValue uint64) uint64 {
+ var max uint64
+ for {
+ max = 100
+ switch m.rand(2) {
+ case 0:
+ // Add a random number
+ if v >= maxValue {
+ continue
+ }
+ if v > 0 && maxValue-v < max {
+ // Don't let v exceed maxValue
+ max = maxValue - v
+ }
+
+ v += uint64(1 + m.rand(int(max)))
+ return v
+ case 1:
+ // Subtract a random number
+ if v <= 0 {
+ continue
+ }
+ if v < max {
+ // Don't let v drop below 0
+ max = v
+ }
+ v -= uint64(1 + m.rand(int(max)))
+ return v
+ }
+ }
+}
+
+func (m *mutator) mutateFloat(v, maxValue float64) float64 {
+ var max float64
+ for {
+ switch m.rand(4) {
+ case 0:
+ // Add a random number
+ if v >= maxValue {
+ continue
+ }
+ max = 100
+ if v > 0 && maxValue-v < max {
+ // Don't let v exceed maxValue
+ max = maxValue - v
+ }
+ v += float64(1 + m.rand(int(max)))
+ return v
+ case 1:
+ // Subtract a random number
+ if v <= -maxValue {
+ continue
+ }
+ max = 100
+ if v < 0 && maxValue+v < max {
+ // Don't let v drop below -maxValue
+ max = maxValue + v
+ }
+ v -= float64(1 + m.rand(int(max)))
+ return v
+ case 2:
+ // Multiply by a random number
+ absV := math.Abs(v)
+ if v == 0 || absV >= maxValue {
+ continue
+ }
+ max = 10
+ if maxValue/absV < max {
+ // Don't let v go beyond the minimum or maximum value
+ max = maxValue / absV
+ }
+ v *= float64(1 + m.rand(int(max)))
+ return v
+ case 3:
+ // Divide by a random number
+ if v == 0 {
+ continue
+ }
+ v /= float64(1 + m.rand(10))
+ return v
+ }
+ }
+}
+
+type byteSliceMutator func(*mutator, []byte) []byte
+
+var byteSliceMutators = []byteSliceMutator{
+ byteSliceRemoveBytes,
+ byteSliceInsertRandomBytes,
+ byteSliceDuplicateBytes,
+ byteSliceOverwriteBytes,
+ byteSliceBitFlip,
+ byteSliceXORByte,
+ byteSliceSwapByte,
+ byteSliceArithmeticUint8,
+ byteSliceArithmeticUint16,
+ byteSliceArithmeticUint32,
+ byteSliceArithmeticUint64,
+ byteSliceOverwriteInterestingUint8,
+ byteSliceOverwriteInterestingUint16,
+ byteSliceOverwriteInterestingUint32,
+ byteSliceInsertConstantBytes,
+ byteSliceOverwriteConstantBytes,
+ byteSliceShuffleBytes,
+ byteSliceSwapBytes,
+}
+
+func (m *mutator) mutateBytes(ptrB *[]byte) {
+ b := *ptrB
+ defer func() {
+ if unsafe.SliceData(*ptrB) != unsafe.SliceData(b) {
+ panic("data moved to new address")
+ }
+ *ptrB = b
+ }()
+
+ for {
+ mut := byteSliceMutators[m.rand(len(byteSliceMutators))]
+ if mutated := mut(m, b); mutated != nil {
+ b = mutated
+ return
+ }
+ }
+}
+
+var (
+ interesting8 = []int8{-128, -1, 0, 1, 16, 32, 64, 100, 127}
+ interesting16 = []int16{-32768, -129, 128, 255, 256, 512, 1000, 1024, 4096, 32767}
+ interesting32 = []int32{-2147483648, -100663046, -32769, 32768, 65535, 65536, 100663045, 2147483647}
+)
+
+const (
+ maxUint = uint64(^uint(0))
+ maxInt = int64(maxUint >> 1)
+)
+
+func init() {
+ for _, v := range interesting8 {
+ interesting16 = append(interesting16, int16(v))
+ }
+ for _, v := range interesting16 {
+ interesting32 = append(interesting32, int32(v))
+ }
+}
diff --git a/src/internal/fuzz/mutator_test.go b/src/internal/fuzz/mutator_test.go
new file mode 100644
index 0000000..cea7e2e
--- /dev/null
+++ b/src/internal/fuzz/mutator_test.go
@@ -0,0 +1,117 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strconv"
+ "testing"
+)
+
+func BenchmarkMutatorBytes(b *testing.B) {
+ origEnv := os.Getenv("GODEBUG")
+ defer func() { os.Setenv("GODEBUG", origEnv) }()
+ os.Setenv("GODEBUG", fmt.Sprintf("%s,fuzzseed=123", origEnv))
+ m := newMutator()
+
+ for _, size := range []int{
+ 1,
+ 10,
+ 100,
+ 1000,
+ 10000,
+ 100000,
+ } {
+ b.Run(strconv.Itoa(size), func(b *testing.B) {
+ buf := make([]byte, size)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ // resize buffer to the correct shape and reset the PCG
+ buf = buf[0:size]
+ m.r = newPcgRand()
+ m.mutate([]any{buf}, workerSharedMemSize)
+ }
+ })
+ }
+}
+
+func BenchmarkMutatorString(b *testing.B) {
+ origEnv := os.Getenv("GODEBUG")
+ defer func() { os.Setenv("GODEBUG", origEnv) }()
+ os.Setenv("GODEBUG", fmt.Sprintf("%s,fuzzseed=123", origEnv))
+ m := newMutator()
+
+ for _, size := range []int{
+ 1,
+ 10,
+ 100,
+ 1000,
+ 10000,
+ 100000,
+ } {
+ b.Run(strconv.Itoa(size), func(b *testing.B) {
+ buf := make([]byte, size)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ // resize buffer to the correct shape and reset the PCG
+ buf = buf[0:size]
+ m.r = newPcgRand()
+ m.mutate([]any{string(buf)}, workerSharedMemSize)
+ }
+ })
+ }
+}
+
+func BenchmarkMutatorAllBasicTypes(b *testing.B) {
+ origEnv := os.Getenv("GODEBUG")
+ defer func() { os.Setenv("GODEBUG", origEnv) }()
+ os.Setenv("GODEBUG", fmt.Sprintf("%s,fuzzseed=123", origEnv))
+ m := newMutator()
+
+ types := []any{
+ []byte(""),
+ string(""),
+ false,
+ float32(0),
+ float64(0),
+ int(0),
+ int8(0),
+ int16(0),
+ int32(0),
+ int64(0),
+ uint8(0),
+ uint16(0),
+ uint32(0),
+ uint64(0),
+ }
+
+ for _, t := range types {
+ b.Run(fmt.Sprintf("%T", t), func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ m.r = newPcgRand()
+ m.mutate([]any{t}, workerSharedMemSize)
+ }
+ })
+ }
+}
+
+func TestStringImmutability(t *testing.T) {
+ v := []any{"hello"}
+ m := newMutator()
+ m.mutate(v, 1024)
+ original := v[0].(string)
+ originalCopy := make([]byte, len(original))
+ copy(originalCopy, []byte(original))
+ for i := 0; i < 25; i++ {
+ m.mutate(v, 1024)
+ }
+ if !bytes.Equal([]byte(original), originalCopy) {
+ t.Fatalf("string was mutated: got %x, want %x", []byte(original), originalCopy)
+ }
+}
diff --git a/src/internal/fuzz/mutators_byteslice.go b/src/internal/fuzz/mutators_byteslice.go
new file mode 100644
index 0000000..d9dab1d
--- /dev/null
+++ b/src/internal/fuzz/mutators_byteslice.go
@@ -0,0 +1,313 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+// byteSliceRemoveBytes removes a random chunk of bytes from b.
+func byteSliceRemoveBytes(m *mutator, b []byte) []byte {
+ if len(b) <= 1 {
+ return nil
+ }
+ pos0 := m.rand(len(b))
+ pos1 := pos0 + m.chooseLen(len(b)-pos0)
+ copy(b[pos0:], b[pos1:])
+ b = b[:len(b)-(pos1-pos0)]
+ return b
+}
+
+// byteSliceInsertRandomBytes inserts a chunk of random bytes into b at a random
+// position.
+func byteSliceInsertRandomBytes(m *mutator, b []byte) []byte {
+ pos := m.rand(len(b) + 1)
+ n := m.chooseLen(1024)
+ if len(b)+n >= cap(b) {
+ return nil
+ }
+ b = b[:len(b)+n]
+ copy(b[pos+n:], b[pos:])
+ for i := 0; i < n; i++ {
+ b[pos+i] = byte(m.rand(256))
+ }
+ return b
+}
+
+// byteSliceDuplicateBytes duplicates a chunk of bytes in b and inserts it into
+// a random position.
+func byteSliceDuplicateBytes(m *mutator, b []byte) []byte {
+ if len(b) <= 1 {
+ return nil
+ }
+ src := m.rand(len(b))
+ dst := m.rand(len(b))
+ for dst == src {
+ dst = m.rand(len(b))
+ }
+ n := m.chooseLen(len(b) - src)
+ // Use the end of the slice as scratch space to avoid doing an
+ // allocation. If the slice is too small abort and try something
+ // else.
+ if len(b)+(n*2) >= cap(b) {
+ return nil
+ }
+ end := len(b)
+ // Increase the size of b to fit the duplicated block as well as
+ // some extra working space
+ b = b[:end+(n*2)]
+ // Copy the block of bytes we want to duplicate to the end of the
+ // slice
+ copy(b[end+n:], b[src:src+n])
+ // Shift the bytes after the splice point n positions to the right
+ // to make room for the new block
+ copy(b[dst+n:end+n], b[dst:end])
+ // Insert the duplicate block into the splice point
+ copy(b[dst:], b[end+n:])
+ b = b[:end+n]
+ return b
+}
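+
+// Worked example (illustrative, not part of the upstream change): with
+// b = {1, 2, 3, 4}, src = 0, dst = 1, n = 2, and enough spare capacity, the
+// block {1, 2} is first copied into scratch space past the end of the slice,
+// the bytes from dst onward are shifted right by n, and the block is spliced
+// in at dst, yielding {1, 1, 2, 2, 3, 4}.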
+
+// byteSliceOverwriteBytes overwrites a chunk of b with another chunk of b.
+func byteSliceOverwriteBytes(m *mutator, b []byte) []byte {
+ if len(b) <= 1 {
+ return nil
+ }
+ src := m.rand(len(b))
+ dst := m.rand(len(b))
+ for dst == src {
+ dst = m.rand(len(b))
+ }
+ n := m.chooseLen(len(b) - src - 1)
+ copy(b[dst:], b[src:src+n])
+ return b
+}
+
+// byteSliceBitFlip flips a random bit in a random byte in b.
+func byteSliceBitFlip(m *mutator, b []byte) []byte {
+ if len(b) == 0 {
+ return nil
+ }
+ pos := m.rand(len(b))
+ b[pos] ^= 1 << uint(m.rand(8))
+ return b
+}
+
+// byteSliceXORByte XORs a random byte in b with a random value.
+func byteSliceXORByte(m *mutator, b []byte) []byte {
+ if len(b) == 0 {
+ return nil
+ }
+ pos := m.rand(len(b))
+ // In order to avoid a no-op (where the random value matches
+ // the existing value), use XOR instead of just setting to
+ // the random value.
+ b[pos] ^= byte(1 + m.rand(255))
+ return b
+}
+
+// byteSliceSwapByte swaps two random bytes in b.
+func byteSliceSwapByte(m *mutator, b []byte) []byte {
+ if len(b) <= 1 {
+ return nil
+ }
+ src := m.rand(len(b))
+ dst := m.rand(len(b))
+ for dst == src {
+ dst = m.rand(len(b))
+ }
+ b[src], b[dst] = b[dst], b[src]
+ return b
+}
+
+// byteSliceArithmeticUint8 adds/subtracts from a random byte in b.
+func byteSliceArithmeticUint8(m *mutator, b []byte) []byte {
+ if len(b) == 0 {
+ return nil
+ }
+ pos := m.rand(len(b))
+ v := byte(m.rand(35) + 1)
+ if m.r.bool() {
+ b[pos] += v
+ } else {
+ b[pos] -= v
+ }
+ return b
+}
+
+// byteSliceArithmeticUint16 adds/subtracts from a random uint16 in b.
+func byteSliceArithmeticUint16(m *mutator, b []byte) []byte {
+ if len(b) < 2 {
+ return nil
+ }
+ v := uint16(m.rand(35) + 1)
+ if m.r.bool() {
+ v = 0 - v
+ }
+ pos := m.rand(len(b) - 1)
+ enc := m.randByteOrder()
+ enc.PutUint16(b[pos:], enc.Uint16(b[pos:])+v)
+ return b
+}
+
+// byteSliceArithmeticUint32 adds/subtracts from a random uint32 in b.
+func byteSliceArithmeticUint32(m *mutator, b []byte) []byte {
+ if len(b) < 4 {
+ return nil
+ }
+ v := uint32(m.rand(35) + 1)
+ if m.r.bool() {
+ v = 0 - v
+ }
+ pos := m.rand(len(b) - 3)
+ enc := m.randByteOrder()
+ enc.PutUint32(b[pos:], enc.Uint32(b[pos:])+v)
+ return b
+}
+
+// byteSliceArithmeticUint64 adds/subtracts from a random uint64 in b.
+func byteSliceArithmeticUint64(m *mutator, b []byte) []byte {
+ if len(b) < 8 {
+ return nil
+ }
+ v := uint64(m.rand(35) + 1)
+ if m.r.bool() {
+ v = 0 - v
+ }
+ pos := m.rand(len(b) - 7)
+ enc := m.randByteOrder()
+ enc.PutUint64(b[pos:], enc.Uint64(b[pos:])+v)
+ return b
+}
+
+// byteSliceOverwriteInterestingUint8 overwrites a random byte in b with an interesting
+// value.
+func byteSliceOverwriteInterestingUint8(m *mutator, b []byte) []byte {
+ if len(b) == 0 {
+ return nil
+ }
+ pos := m.rand(len(b))
+ b[pos] = byte(interesting8[m.rand(len(interesting8))])
+ return b
+}
+
+// byteSliceOverwriteInterestingUint16 overwrites a random uint16 in b with an interesting
+// value.
+func byteSliceOverwriteInterestingUint16(m *mutator, b []byte) []byte {
+ if len(b) < 2 {
+ return nil
+ }
+ pos := m.rand(len(b) - 1)
+ v := uint16(interesting16[m.rand(len(interesting16))])
+ m.randByteOrder().PutUint16(b[pos:], v)
+ return b
+}
+
+// byteSliceOverwriteInterestingUint32 overwrites a random uint32 in b with an interesting
+// value.
+func byteSliceOverwriteInterestingUint32(m *mutator, b []byte) []byte {
+ if len(b) < 4 {
+ return nil
+ }
+ pos := m.rand(len(b) - 3)
+ v := uint32(interesting32[m.rand(len(interesting32))])
+ m.randByteOrder().PutUint32(b[pos:], v)
+ return b
+}
+
+// byteSliceInsertConstantBytes inserts a chunk of constant bytes into a random position in b.
+func byteSliceInsertConstantBytes(m *mutator, b []byte) []byte {
+ if len(b) <= 1 {
+ return nil
+ }
+ dst := m.rand(len(b))
+ // TODO(rolandshoemaker,katiehockman): 4096 was mainly picked
+ // randomly. We may want to either pick a much larger value
+ // (AFL uses 32768, paired with a similar impl to chooseLen
+ // which biases towards smaller lengths that grow over time),
+ // or set the max based on characteristics of the corpus
+ // (libFuzzer sets a min/max based on the min/max size of
+ // entries in the corpus and then picks uniformly from
+ // that range).
+ n := m.chooseLen(4096)
+ if len(b)+n >= cap(b) {
+ return nil
+ }
+ b = b[:len(b)+n]
+ copy(b[dst+n:], b[dst:])
+ rb := byte(m.rand(256))
+ for i := dst; i < dst+n; i++ {
+ b[i] = rb
+ }
+ return b
+}
+
+// byteSliceOverwriteConstantBytes overwrites a chunk of b with constant bytes.
+func byteSliceOverwriteConstantBytes(m *mutator, b []byte) []byte {
+ if len(b) <= 1 {
+ return nil
+ }
+ dst := m.rand(len(b))
+ n := m.chooseLen(len(b) - dst)
+ rb := byte(m.rand(256))
+ for i := dst; i < dst+n; i++ {
+ b[i] = rb
+ }
+ return b
+}
+
+// byteSliceShuffleBytes shuffles a chunk of bytes in b.
+func byteSliceShuffleBytes(m *mutator, b []byte) []byte {
+ if len(b) <= 1 {
+ return nil
+ }
+ dst := m.rand(len(b))
+ n := m.chooseLen(len(b) - dst)
+ if n <= 2 {
+ return nil
+ }
+ // Start at the end of the range, and iterate backwards
+ // to dst, swapping each element with another element in
+ // dst:dst+n (Fisher-Yates shuffle).
+ for i := n - 1; i > 0; i-- {
+ j := m.rand(i + 1)
+ b[dst+i], b[dst+j] = b[dst+j], b[dst+i]
+ }
+ return b
+}
+
+// byteSliceSwapBytes swaps two chunks of bytes in b.
+func byteSliceSwapBytes(m *mutator, b []byte) []byte {
+ if len(b) <= 1 {
+ return nil
+ }
+ src := m.rand(len(b))
+ dst := m.rand(len(b))
+ for dst == src {
+ dst = m.rand(len(b))
+ }
+ // Choose the random length as len(b) - max(src, dst)
+ // so that we don't attempt to swap a chunk that extends
+ // beyond the end of the slice
+ max := dst
+ if src > max {
+ max = src
+ }
+ n := m.chooseLen(len(b) - max - 1)
+ // Check that the two chunks don't intersect, so that we don't end up
+ // duplicating parts of the input rather than swapping them.
+ if src > dst && dst+n >= src || dst > src && src+n >= dst {
+ return nil
+ }
+ // Use the end of the slice as scratch space to avoid doing an
+ // allocation. If the slice is too small abort and try something
+ // else.
+ if len(b)+n >= cap(b) {
+ return nil
+ }
+ end := len(b)
+ b = b[:end+n]
+ copy(b[end:], b[dst:dst+n])
+ copy(b[dst:], b[src:src+n])
+ copy(b[src:], b[end:])
+ b = b[:end]
+ return b
+}
diff --git a/src/internal/fuzz/mutators_byteslice_test.go b/src/internal/fuzz/mutators_byteslice_test.go
new file mode 100644
index 0000000..7886967
--- /dev/null
+++ b/src/internal/fuzz/mutators_byteslice_test.go
@@ -0,0 +1,186 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "bytes"
+ "testing"
+)
+
+type mockRand struct {
+ values []int
+ counter int
+ b bool
+}
+
+func (mr *mockRand) uint32() uint32 {
+ c := mr.values[mr.counter]
+ mr.counter++
+ return uint32(c)
+}
+
+func (mr *mockRand) intn(n int) int {
+ c := mr.values[mr.counter]
+ mr.counter++
+ return c % n
+}
+
+func (mr *mockRand) uint32n(n uint32) uint32 {
+ c := mr.values[mr.counter]
+ mr.counter++
+ return uint32(c) % n
+}
+
+func (mr *mockRand) exp2() int {
+ c := mr.values[mr.counter]
+ mr.counter++
+ return c
+}
+
+func (mr *mockRand) bool() bool {
+ b := mr.b
+ mr.b = !mr.b
+ return b
+}
+
+func (mr *mockRand) save(*uint64, *uint64) {
+ panic("unimplemented")
+}
+
+func (mr *mockRand) restore(uint64, uint64) {
+ panic("unimplemented")
+}
+
+func TestByteSliceMutators(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ mutator func(*mutator, []byte) []byte
+ randVals []int
+ input []byte
+ expected []byte
+ }{
+ {
+ name: "byteSliceRemoveBytes",
+ mutator: byteSliceRemoveBytes,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{4},
+ },
+ {
+ name: "byteSliceInsertRandomBytes",
+ mutator: byteSliceInsertRandomBytes,
+ input: make([]byte, 4, 8),
+ expected: []byte{3, 4, 5, 0, 0, 0, 0},
+ },
+ {
+ name: "byteSliceDuplicateBytes",
+ mutator: byteSliceDuplicateBytes,
+ input: append(make([]byte, 0, 13), []byte{1, 2, 3, 4}...),
+ expected: []byte{1, 1, 2, 3, 4, 2, 3, 4},
+ },
+ {
+ name: "byteSliceOverwriteBytes",
+ mutator: byteSliceOverwriteBytes,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{1, 1, 3, 4},
+ },
+ {
+ name: "byteSliceBitFlip",
+ mutator: byteSliceBitFlip,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{3, 2, 3, 4},
+ },
+ {
+ name: "byteSliceXORByte",
+ mutator: byteSliceXORByte,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{3, 2, 3, 4},
+ },
+ {
+ name: "byteSliceSwapByte",
+ mutator: byteSliceSwapByte,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{2, 1, 3, 4},
+ },
+ {
+ name: "byteSliceArithmeticUint8",
+ mutator: byteSliceArithmeticUint8,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{255, 2, 3, 4},
+ },
+ {
+ name: "byteSliceArithmeticUint16",
+ mutator: byteSliceArithmeticUint16,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{1, 3, 3, 4},
+ },
+ {
+ name: "byteSliceArithmeticUint32",
+ mutator: byteSliceArithmeticUint32,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{2, 2, 3, 4},
+ },
+ {
+ name: "byteSliceArithmeticUint64",
+ mutator: byteSliceArithmeticUint64,
+ input: []byte{1, 2, 3, 4, 5, 6, 7, 8},
+ expected: []byte{2, 2, 3, 4, 5, 6, 7, 8},
+ },
+ {
+ name: "byteSliceOverwriteInterestingUint8",
+ mutator: byteSliceOverwriteInterestingUint8,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{255, 2, 3, 4},
+ },
+ {
+ name: "byteSliceOverwriteInterestingUint16",
+ mutator: byteSliceOverwriteInterestingUint16,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{255, 127, 3, 4},
+ },
+ {
+ name: "byteSliceOverwriteInterestingUint32",
+ mutator: byteSliceOverwriteInterestingUint32,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{250, 0, 0, 250},
+ },
+ {
+ name: "byteSliceInsertConstantBytes",
+ mutator: byteSliceInsertConstantBytes,
+ input: append(make([]byte, 0, 8), []byte{1, 2, 3, 4}...),
+ expected: []byte{3, 3, 3, 1, 2, 3, 4},
+ },
+ {
+ name: "byteSliceOverwriteConstantBytes",
+ mutator: byteSliceOverwriteConstantBytes,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{3, 3, 3, 4},
+ },
+ {
+ name: "byteSliceShuffleBytes",
+ mutator: byteSliceShuffleBytes,
+ input: []byte{1, 2, 3, 4},
+ expected: []byte{2, 3, 1, 4},
+ },
+ {
+ name: "byteSliceSwapBytes",
+ mutator: byteSliceSwapBytes,
+ randVals: []int{0, 2, 0, 2},
+ input: append(make([]byte, 0, 9), []byte{1, 2, 3, 4}...),
+ expected: []byte{3, 2, 1, 4},
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ r := &mockRand{values: []int{0, 1, 2, 3, 4, 5}}
+ if tc.randVals != nil {
+ r.values = tc.randVals
+ }
+ m := &mutator{r: r}
+ b := tc.mutator(m, tc.input)
+ if !bytes.Equal(b, tc.expected) {
+ t.Errorf("got %x, want %x", b, tc.expected)
+ }
+ })
+ }
+}
diff --git a/src/internal/fuzz/pcg.go b/src/internal/fuzz/pcg.go
new file mode 100644
index 0000000..c9ea0af
--- /dev/null
+++ b/src/internal/fuzz/pcg.go
@@ -0,0 +1,145 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "math/bits"
+ "os"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+)
+
+type mutatorRand interface {
+ uint32() uint32
+ intn(int) int
+ uint32n(uint32) uint32
+ exp2() int
+ bool() bool
+
+ save(randState, randInc *uint64)
+ restore(randState, randInc uint64)
+}
+
+// The functions in pcg implement a 32 bit PRNG with a 64 bit period: pcg xsh rr
+// 64 32. See https://www.pcg-random.org/ for more information. This
+// implementation is geared specifically towards the needs of fuzzing: Simple
+// creation and use, no reproducibility, no concurrency safety, just the
+// necessary methods, optimized for speed.
+
+var globalInc uint64 // PCG stream
+
+const multiplier uint64 = 6364136223846793005
+
+// pcgRand is a PRNG. It should not be copied or shared. No Rand methods are
+// concurrency safe.
+type pcgRand struct {
+ noCopy noCopy // help avoid mistakes: ask vet to ensure that we don't make a copy
+ state uint64
+ inc uint64
+}
+
+func godebugSeed() *int {
+ debug := strings.Split(os.Getenv("GODEBUG"), ",")
+ for _, f := range debug {
+ if strings.HasPrefix(f, "fuzzseed=") {
+ seed, err := strconv.Atoi(strings.TrimPrefix(f, "fuzzseed="))
+ if err != nil {
+ panic("malformed fuzzseed")
+ }
+ return &seed
+ }
+ }
+ return nil
+}
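+
+// Illustrative usage (not part of the upstream change): a fuzzseed setting in
+// GODEBUG seeds newPcgRand from a fixed value instead of the wall clock, e.g.
+//
+//	GODEBUG=fuzzseed=123 go test internal/fuzz
+//
+// which is what the mutator benchmarks arrange programmatically via os.Setenv.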
+
+// newPcgRand generates a new, seeded Rand, ready for use.
+func newPcgRand() *pcgRand {
+ r := new(pcgRand)
+ now := uint64(time.Now().UnixNano())
+ if seed := godebugSeed(); seed != nil {
+ now = uint64(*seed)
+ }
+ inc := atomic.AddUint64(&globalInc, 1)
+ r.state = now
+ r.inc = (inc << 1) | 1
+ r.step()
+ r.state += now
+ r.step()
+ return r
+}
+
+func (r *pcgRand) step() {
+ r.state *= multiplier
+ r.state += r.inc
+}
+
+func (r *pcgRand) save(randState, randInc *uint64) {
+ *randState = r.state
+ *randInc = r.inc
+}
+
+func (r *pcgRand) restore(randState, randInc uint64) {
+ r.state = randState
+ r.inc = randInc
+}
+
+// uint32 returns a pseudo-random uint32.
+func (r *pcgRand) uint32() uint32 {
+ x := r.state
+ r.step()
+ return bits.RotateLeft32(uint32(((x>>18)^x)>>27), -int(x>>59))
+}
+
+// intn returns a pseudo-random number in [0, n).
+// n must fit in a uint32.
+func (r *pcgRand) intn(n int) int {
+ if int(uint32(n)) != n {
+ panic("large Intn")
+ }
+ return int(r.uint32n(uint32(n)))
+}
+
+// uint32n returns a pseudo-random number in [0, n).
+//
+// For implementation details, see:
+// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction
+// https://lemire.me/blog/2016/06/30/fast-random-shuffling
+func (r *pcgRand) uint32n(n uint32) uint32 {
+ v := r.uint32()
+ prod := uint64(v) * uint64(n)
+ low := uint32(prod)
+ if low < n {
+ thresh := uint32(-int32(n)) % n
+ for low < thresh {
+ v = r.uint32()
+ prod = uint64(v) * uint64(n)
+ low = uint32(prod)
+ }
+ }
+ return uint32(prod >> 32)
+}
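+
+// Worked example (illustrative, not part of the upstream change): for n = 10
+// and a raw draw v = 3_000_000_000, prod = 30_000_000_000, whose top 32 bits
+// are 6, so uint32n returns 6. The inner loop resamples only in the rare case
+// where the low 32 bits of prod fall below the bias threshold, e.g.
+//
+//	r := newPcgRand()
+//	die := r.uint32n(6) + 1 // uniform in [1,6] without modulo bias
+//	_ = die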
+
+// exp2 generates n with probability 1/2^(n+1).
+func (r *pcgRand) exp2() int {
+ return bits.TrailingZeros32(r.uint32())
+}
+
+// bool generates a random bool.
+func (r *pcgRand) bool() bool {
+ return r.uint32()&1 == 0
+}
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) lock() {}
+func (*noCopy) unlock() {}
diff --git a/src/internal/fuzz/queue.go b/src/internal/fuzz/queue.go
new file mode 100644
index 0000000..195d6eb
--- /dev/null
+++ b/src/internal/fuzz/queue.go
@@ -0,0 +1,71 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+// queue holds a growable sequence of inputs for fuzzing and minimization.
+//
+// For now, this is a simple ring buffer
+// (https://en.wikipedia.org/wiki/Circular_buffer).
+//
+// TODO(golang.org/issue/46224): use a prioritization algorithm based on input
+// size, previous duration, coverage, and any other metrics that seem useful.
+type queue struct {
+ // elems holds a ring buffer.
+ // Element i of the queue is stored at elems[(head+i)%cap(elems)].
+ // The queue is empty when len = 0 and full (until grow is called)
+ // when len = cap(elems).
+ elems []any
+ head, len int
+}
+
+func (q *queue) cap() int {
+ return len(q.elems)
+}
+
+func (q *queue) grow() {
+ oldCap := q.cap()
+ newCap := oldCap * 2
+ if newCap == 0 {
+ newCap = 8
+ }
+ newElems := make([]any, newCap)
+ oldLen := q.len
+ for i := 0; i < oldLen; i++ {
+ newElems[i] = q.elems[(q.head+i)%oldCap]
+ }
+ q.elems = newElems
+ q.head = 0
+}
+
+func (q *queue) enqueue(e any) {
+ if q.len+1 > q.cap() {
+ q.grow()
+ }
+ i := (q.head + q.len) % q.cap()
+ q.elems[i] = e
+ q.len++
+}
+
+func (q *queue) dequeue() (any, bool) {
+ if q.len == 0 {
+ return nil, false
+ }
+ e := q.elems[q.head]
+ q.elems[q.head] = nil
+ q.head = (q.head + 1) % q.cap()
+ q.len--
+ return e, true
+}
+
+func (q *queue) peek() (any, bool) {
+ if q.len == 0 {
+ return nil, false
+ }
+ return q.elems[q.head], true
+}
+
+func (q *queue) clear() {
+ *q = queue{}
+}
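+
+// Illustrative usage (not part of the upstream change; the path is made up):
+// the ring buffer grows on demand and hands elements back in FIFO order.
+//
+//	var q queue
+//	q.enqueue(CorpusEntry{Path: "testdata/fuzz/FuzzFoo/seed"})
+//	if e, ok := q.dequeue(); ok {
+//		_ = e.(CorpusEntry)
+//	}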
diff --git a/src/internal/fuzz/queue_test.go b/src/internal/fuzz/queue_test.go
new file mode 100644
index 0000000..3b179af
--- /dev/null
+++ b/src/internal/fuzz/queue_test.go
@@ -0,0 +1,58 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import "testing"
+
+func TestQueue(t *testing.T) {
+ // Zero valued queue should have 0 length and capacity.
+ var q queue
+ if n := q.len; n != 0 {
+ t.Fatalf("empty queue has len %d; want 0", n)
+ }
+ if n := q.cap(); n != 0 {
+ t.Fatalf("empty queue has cap %d; want 0", n)
+ }
+
+ // As we add elements, len should grow.
+ N := 32
+ for i := 0; i < N; i++ {
+ q.enqueue(i)
+ if n := q.len; n != i+1 {
+ t.Fatalf("after adding %d elements, queue has len %d", i, n)
+ }
+ if v, ok := q.peek(); !ok {
+ t.Fatalf("couldn't peek after adding %d elements", i)
+ } else if v.(int) != 0 {
+ t.Fatalf("after adding %d elements, peek is %d; want 0", i, v)
+ }
+ }
+
+ // As we remove and add elements, len should shrink and grow.
+ // We should also remove elements in the same order they were added.
+ want := 0
+ for _, r := range []int{1, 2, 3, 5, 8, 13, 21} {
+ s := make([]int, 0, r)
+ for i := 0; i < r; i++ {
+ if got, ok := q.dequeue(); !ok {
+ t.Fatalf("after removing %d of %d elements, could not dequeue", i+1, r)
+ } else if got != want {
+ t.Fatalf("after removing %d of %d elements, got %d; want %d", i+1, r, got, want)
+ } else {
+ s = append(s, got.(int))
+ }
+ want = (want + 1) % N
+ if n := q.len; n != N-i-1 {
+ t.Fatalf("after removing %d of %d elements, len is %d; want %d", i+1, r, n, N-i-1)
+ }
+ }
+ for i, v := range s {
+ q.enqueue(v)
+ if n := q.len; n != N-r+i+1 {
+ t.Fatalf("after adding back %d of %d elements, len is %d; want %d", i+1, r, n, n-r+i+1)
+ }
+ }
+ }
+}
diff --git a/src/internal/fuzz/sys_posix.go b/src/internal/fuzz/sys_posix.go
new file mode 100644
index 0000000..fec6054
--- /dev/null
+++ b/src/internal/fuzz/sys_posix.go
@@ -0,0 +1,130 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || freebsd || linux
+
+package fuzz
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "syscall"
+)
+
+type sharedMemSys struct{}
+
+func sharedMemMapFile(f *os.File, size int, removeOnClose bool) (*sharedMem, error) {
+ prot := syscall.PROT_READ | syscall.PROT_WRITE
+ flags := syscall.MAP_FILE | syscall.MAP_SHARED
+ region, err := syscall.Mmap(int(f.Fd()), 0, size, prot, flags)
+ if err != nil {
+ return nil, err
+ }
+
+ return &sharedMem{f: f, region: region, removeOnClose: removeOnClose}, nil
+}
+
+// Close unmaps the shared memory and closes the temporary file. If this
+// sharedMem was created with sharedMemTempFile, Close also removes the file.
+func (m *sharedMem) Close() error {
+ // Attempt all operations, even if we get an error for an earlier operation.
+ // os.File.Close may fail due to I/O errors, but we still want to delete
+ // the temporary file.
+ var errs []error
+ errs = append(errs,
+ syscall.Munmap(m.region),
+ m.f.Close())
+ if m.removeOnClose {
+ errs = append(errs, os.Remove(m.f.Name()))
+ }
+ for _, err := range errs {
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// setWorkerComm configures communication channels on the cmd that will
+// run a worker process.
+func setWorkerComm(cmd *exec.Cmd, comm workerComm) {
+ mem := <-comm.memMu
+ memFile := mem.f
+ comm.memMu <- mem
+ cmd.ExtraFiles = []*os.File{comm.fuzzIn, comm.fuzzOut, memFile}
+}
+
+// getWorkerComm returns communication channels in the worker process.
+func getWorkerComm() (comm workerComm, err error) {
+ fuzzIn := os.NewFile(3, "fuzz_in")
+ fuzzOut := os.NewFile(4, "fuzz_out")
+ memFile := os.NewFile(5, "fuzz_mem")
+ fi, err := memFile.Stat()
+ if err != nil {
+ return workerComm{}, err
+ }
+ size := int(fi.Size())
+ if int64(size) != fi.Size() {
+ return workerComm{}, fmt.Errorf("fuzz temp file exceeds maximum size")
+ }
+ removeOnClose := false
+ mem, err := sharedMemMapFile(memFile, size, removeOnClose)
+ if err != nil {
+ return workerComm{}, err
+ }
+ memMu := make(chan *sharedMem, 1)
+ memMu <- mem
+ return workerComm{fuzzIn: fuzzIn, fuzzOut: fuzzOut, memMu: memMu}, nil
+}
+
+// isInterruptError returns whether an error was returned by a process that
+// was terminated by an interrupt signal (SIGINT).
+func isInterruptError(err error) bool {
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok || exitErr.ExitCode() >= 0 {
+ return false
+ }
+ status := exitErr.Sys().(syscall.WaitStatus)
+ return status.Signal() == syscall.SIGINT
+}
+
+// terminationSignal checks if err is an exec.ExitError with a signal status.
+// If it is, terminationSignal returns the signal and true.
+// If not, it returns -1 and false.
+func terminationSignal(err error) (os.Signal, bool) {
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok || exitErr.ExitCode() >= 0 {
+ return syscall.Signal(-1), false
+ }
+ status := exitErr.Sys().(syscall.WaitStatus)
+ return status.Signal(), status.Signaled()
+}
+
+// isCrashSignal returns whether a signal was likely to have been caused by an
+// error in the program that received it, triggered by a fuzz input. For
+// example, SIGSEGV would be received after a nil pointer dereference.
+// Other signals like SIGKILL or SIGHUP are more likely to have been sent by
+// another process, and we shouldn't record a crasher if the worker process
+// receives one of these.
+//
+// Note that Go installs its own signal handlers on startup, so some of these
+// signals may only be received if signal handlers are changed. For example,
+// SIGSEGV is normally transformed into a panic that causes the process to exit
+// with status 2 if not recovered, which we handle as a crash.
+func isCrashSignal(signal os.Signal) bool {
+ switch signal {
+ case
+ syscall.SIGILL, // illegal instruction
+ syscall.SIGTRAP, // breakpoint
+ syscall.SIGABRT, // abort() called
+ syscall.SIGBUS, // invalid memory access (e.g., misaligned address)
+ syscall.SIGFPE, // math error, e.g., integer divide by zero
+ syscall.SIGSEGV, // invalid memory access (e.g., write to read-only)
+ syscall.SIGPIPE: // sent data to closed pipe or socket
+ return true
+ default:
+ return false
+ }
+}
diff --git a/src/internal/fuzz/sys_unimplemented.go b/src/internal/fuzz/sys_unimplemented.go
new file mode 100644
index 0000000..8687c1f
--- /dev/null
+++ b/src/internal/fuzz/sys_unimplemented.go
@@ -0,0 +1,44 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// If you update this constraint, also update internal/platform.FuzzSupported.
+//
+//go:build !darwin && !freebsd && !linux && !windows
+
+package fuzz
+
+import (
+ "os"
+ "os/exec"
+)
+
+type sharedMemSys struct{}
+
+func sharedMemMapFile(f *os.File, size int, removeOnClose bool) (*sharedMem, error) {
+ panic("not implemented")
+}
+
+func (m *sharedMem) Close() error {
+ panic("not implemented")
+}
+
+func setWorkerComm(cmd *exec.Cmd, comm workerComm) {
+ panic("not implemented")
+}
+
+func getWorkerComm() (comm workerComm, err error) {
+ panic("not implemented")
+}
+
+func isInterruptError(err error) bool {
+ panic("not implemented")
+}
+
+func terminationSignal(err error) (os.Signal, bool) {
+ panic("not implemented")
+}
+
+func isCrashSignal(signal os.Signal) bool {
+ panic("not implemented")
+}
diff --git a/src/internal/fuzz/sys_windows.go b/src/internal/fuzz/sys_windows.go
new file mode 100644
index 0000000..aa85be7
--- /dev/null
+++ b/src/internal/fuzz/sys_windows.go
@@ -0,0 +1,147 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "syscall"
+ "unsafe"
+)
+
+type sharedMemSys struct {
+ mapObj syscall.Handle
+}
+
+func sharedMemMapFile(f *os.File, size int, removeOnClose bool) (mem *sharedMem, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("mapping temporary file %s: %w", f.Name(), err)
+ }
+ }()
+
+ // Create a file mapping object. The object itself is not shared.
+ mapObj, err := syscall.CreateFileMapping(
+ syscall.Handle(f.Fd()), // fhandle
+ nil, // sa
+ syscall.PAGE_READWRITE, // prot
+ 0, // maxSizeHigh
+ 0, // maxSizeLow
+ nil, // name
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ // Create a view from the file mapping object.
+ access := uint32(syscall.FILE_MAP_READ | syscall.FILE_MAP_WRITE)
+ addr, err := syscall.MapViewOfFile(
+ mapObj, // handle
+ access, // access
+ 0, // offsetHigh
+ 0, // offsetLow
+ uintptr(size), // length
+ )
+ if err != nil {
+ syscall.CloseHandle(mapObj)
+ return nil, err
+ }
+
+ region := unsafe.Slice((*byte)(unsafe.Pointer(addr)), size)
+ return &sharedMem{
+ f: f,
+ region: region,
+ removeOnClose: removeOnClose,
+ sys: sharedMemSys{mapObj: mapObj},
+ }, nil
+}
+
+// Close unmaps the shared memory and closes the temporary file. If this
+// sharedMem was created with sharedMemTempFile, Close also removes the file.
+func (m *sharedMem) Close() error {
+ // Attempt all operations, even if we get an error for an earlier operation.
+ // os.File.Close may fail due to I/O errors, but we still want to delete
+ // the temporary file.
+ var errs []error
+ errs = append(errs,
+ syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&m.region[0]))),
+ syscall.CloseHandle(m.sys.mapObj),
+ m.f.Close())
+ if m.removeOnClose {
+ errs = append(errs, os.Remove(m.f.Name()))
+ }
+ for _, err := range errs {
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// setWorkerComm configures communication channels on the cmd that will
+// run a worker process.
+func setWorkerComm(cmd *exec.Cmd, comm workerComm) {
+ mem := <-comm.memMu
+ memName := mem.f.Name()
+ comm.memMu <- mem
+ syscall.SetHandleInformation(syscall.Handle(comm.fuzzIn.Fd()), syscall.HANDLE_FLAG_INHERIT, 1)
+ syscall.SetHandleInformation(syscall.Handle(comm.fuzzOut.Fd()), syscall.HANDLE_FLAG_INHERIT, 1)
+ cmd.Env = append(cmd.Env, fmt.Sprintf("GO_TEST_FUZZ_WORKER_HANDLES=%x,%x,%q", comm.fuzzIn.Fd(), comm.fuzzOut.Fd(), memName))
+ cmd.SysProcAttr = &syscall.SysProcAttr{AdditionalInheritedHandles: []syscall.Handle{syscall.Handle(comm.fuzzIn.Fd()), syscall.Handle(comm.fuzzOut.Fd())}}
+}
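+
+// For illustration only (not part of the upstream change; the handle values
+// and path are made up): the resulting environment variable looks like
+//
+//	GO_TEST_FUZZ_WORKER_HANDLES=1a4,1b0,"C:\\Temp\\fuzz-123"
+//
+// where the first two fields are the pipe handles in hex and the third is the
+// %q-quoted shared memory file name, parsed back by getWorkerComm below.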
+
+// getWorkerComm returns communication channels in the worker process.
+func getWorkerComm() (comm workerComm, err error) {
+ v := os.Getenv("GO_TEST_FUZZ_WORKER_HANDLES")
+ if v == "" {
+ return workerComm{}, fmt.Errorf("GO_TEST_FUZZ_WORKER_HANDLES not set")
+ }
+ var fuzzInFD, fuzzOutFD uintptr
+ var memName string
+ if _, err := fmt.Sscanf(v, "%x,%x,%q", &fuzzInFD, &fuzzOutFD, &memName); err != nil {
+ return workerComm{}, fmt.Errorf("parsing GO_TEST_FUZZ_WORKER_HANDLES=%s: %v", v, err)
+ }
+
+ fuzzIn := os.NewFile(fuzzInFD, "fuzz_in")
+ fuzzOut := os.NewFile(fuzzOutFD, "fuzz_out")
+ tmpFile, err := os.OpenFile(memName, os.O_RDWR, 0)
+ if err != nil {
+ return workerComm{}, fmt.Errorf("worker opening temp file: %w", err)
+ }
+ fi, err := tmpFile.Stat()
+ if err != nil {
+ return workerComm{}, fmt.Errorf("worker checking temp file size: %w", err)
+ }
+ size := int(fi.Size())
+ if int64(size) != fi.Size() {
+ return workerComm{}, fmt.Errorf("fuzz temp file exceeds maximum size")
+ }
+ removeOnClose := false
+ mem, err := sharedMemMapFile(tmpFile, size, removeOnClose)
+ if err != nil {
+ return workerComm{}, err
+ }
+ memMu := make(chan *sharedMem, 1)
+ memMu <- mem
+
+ return workerComm{fuzzIn: fuzzIn, fuzzOut: fuzzOut, memMu: memMu}, nil
+}
+
+func isInterruptError(err error) bool {
+ // On Windows, we can't tell from the error returned by Wait whether the
+ // process was interrupted; it just looks like an ExitError with status 1.
+ return false
+}
+
+// terminationSignal returns -1 and false because Windows doesn't have signals.
+func terminationSignal(err error) (os.Signal, bool) {
+ return syscall.Signal(-1), false
+}
+
+// isCrashSignal is not implemented because Windows doesn't have signals.
+func isCrashSignal(signal os.Signal) bool {
+ panic("not implemented: no signals on windows")
+}
diff --git a/src/internal/fuzz/trace.go b/src/internal/fuzz/trace.go
new file mode 100644
index 0000000..a15c370
--- /dev/null
+++ b/src/internal/fuzz/trace.go
@@ -0,0 +1,35 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !libfuzzer
+
+package fuzz
+
+import _ "unsafe" // for go:linkname
+
+//go:linkname libfuzzerTraceCmp1 runtime.libfuzzerTraceCmp1
+//go:linkname libfuzzerTraceCmp2 runtime.libfuzzerTraceCmp2
+//go:linkname libfuzzerTraceCmp4 runtime.libfuzzerTraceCmp4
+//go:linkname libfuzzerTraceCmp8 runtime.libfuzzerTraceCmp8
+
+//go:linkname libfuzzerTraceConstCmp1 runtime.libfuzzerTraceConstCmp1
+//go:linkname libfuzzerTraceConstCmp2 runtime.libfuzzerTraceConstCmp2
+//go:linkname libfuzzerTraceConstCmp4 runtime.libfuzzerTraceConstCmp4
+//go:linkname libfuzzerTraceConstCmp8 runtime.libfuzzerTraceConstCmp8
+
+//go:linkname libfuzzerHookStrCmp runtime.libfuzzerHookStrCmp
+//go:linkname libfuzzerHookEqualFold runtime.libfuzzerHookEqualFold
+
+func libfuzzerTraceCmp1(arg0, arg1 uint8, fakePC uint) {}
+func libfuzzerTraceCmp2(arg0, arg1 uint16, fakePC uint) {}
+func libfuzzerTraceCmp4(arg0, arg1 uint32, fakePC uint) {}
+func libfuzzerTraceCmp8(arg0, arg1 uint64, fakePC uint) {}
+
+func libfuzzerTraceConstCmp1(arg0, arg1 uint8, fakePC uint) {}
+func libfuzzerTraceConstCmp2(arg0, arg1 uint16, fakePC uint) {}
+func libfuzzerTraceConstCmp4(arg0, arg1 uint32, fakePC uint) {}
+func libfuzzerTraceConstCmp8(arg0, arg1 uint64, fakePC uint) {}
+
+func libfuzzerHookStrCmp(arg0, arg1 string, fakePC uint) {}
+func libfuzzerHookEqualFold(arg0, arg1 string, fakePC uint) {}
diff --git a/src/internal/fuzz/worker.go b/src/internal/fuzz/worker.go
new file mode 100644
index 0000000..c952670
--- /dev/null
+++ b/src/internal/fuzz/worker.go
@@ -0,0 +1,1195 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "reflect"
+ "runtime"
+ "sync"
+ "time"
+)
+
+const (
+ // workerFuzzDuration is the amount of time a worker can spend testing random
+ // variations of an input given by the coordinator.
+ workerFuzzDuration = 100 * time.Millisecond
+
+ // workerTimeoutDuration is the amount of time a worker can go without
+ // responding to the coordinator before being stopped.
+ workerTimeoutDuration = 1 * time.Second
+
+ // workerExitCode is used as an exit code by fuzz worker processes after an internal error.
+ // This distinguishes internal errors from uncontrolled panics and other crashes.
+ // Keep in sync with internal/fuzz.workerExitCode.
+ workerExitCode = 70
+
+ // workerSharedMemSize is the maximum size of the shared memory file used to
+ // communicate with workers. This limits the size of fuzz inputs.
+ workerSharedMemSize = 100 << 20 // 100 MB
+)
+
+// worker manages a worker process running a test binary. The worker object
+// exists only in the coordinator (the process started by 'go test -fuzz').
+// workerClient is used by the coordinator to send RPCs to the worker process,
+// which handles them with workerServer.
+type worker struct {
+ dir string // working directory, same as package directory
+ binPath string // path to test executable
+ args []string // arguments for test executable
+ env []string // environment for test executable
+
+ coordinator *coordinator
+
+ memMu chan *sharedMem // mutex guarding shared memory with worker; persists across processes.
+
+ cmd *exec.Cmd // current worker process
+ client *workerClient // used to communicate with worker process
+ waitErr error // last error returned by wait, set before termC is closed.
+ interrupted bool // true after stop interrupts a running worker.
+ termC chan struct{} // closed by wait when worker process terminates
+}
+
+func newWorker(c *coordinator, dir, binPath string, args, env []string) (*worker, error) {
+ mem, err := sharedMemTempFile(workerSharedMemSize)
+ if err != nil {
+ return nil, err
+ }
+ memMu := make(chan *sharedMem, 1)
+ memMu <- mem
+ return &worker{
+ dir: dir,
+ binPath: binPath,
+ args: args,
+ env: env[:len(env):len(env)], // copy on append to ensure workers don't overwrite each other.
+ coordinator: c,
+ memMu: memMu,
+ }, nil
+}
+
+// cleanup releases persistent resources associated with the worker.
+func (w *worker) cleanup() error {
+ mem := <-w.memMu
+ if mem == nil {
+ return nil
+ }
+ close(w.memMu)
+ return mem.Close()
+}
+
+// coordinate runs the test binary to perform fuzzing.
+//
+// coordinate loops until ctx is cancelled or a fatal error is encountered.
+// If a test process terminates unexpectedly while fuzzing, coordinate will
+// attempt to restart and continue unless the termination can be attributed
+// to an interruption (from a timer or the user).
+//
+// While looping, coordinate receives inputs from the coordinator, passes
+// those inputs to the worker process, then passes the results back to
+// the coordinator.
+func (w *worker) coordinate(ctx context.Context) error {
+ // Main event loop.
+ for {
+ // Start or restart the worker if it's not running.
+ if !w.isRunning() {
+ if err := w.startAndPing(ctx); err != nil {
+ return err
+ }
+ }
+
+ select {
+ case <-ctx.Done():
+ // Worker was told to stop.
+ err := w.stop()
+ if err != nil && !w.interrupted && !isInterruptError(err) {
+ return err
+ }
+ return ctx.Err()
+
+ case <-w.termC:
+ // Worker process terminated unexpectedly while waiting for input.
+ err := w.stop()
+ if w.interrupted {
+ panic("worker interrupted after unexpected termination")
+ }
+ if err == nil || isInterruptError(err) {
+ // Worker stopped, either by exiting with status 0 or after being
+ // interrupted with a signal that was not sent by the coordinator.
+ //
+ // When the user presses ^C, on POSIX platforms, SIGINT is delivered to
+ // all processes in the group concurrently, and the worker may see it
+ // before the coordinator. The worker should exit 0 gracefully (in
+ // theory).
+ //
+ // This condition is probably intended by the user, so suppress
+ // the error.
+ return nil
+ }
+ if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == workerExitCode {
+ // Worker exited with a code indicating F.Fuzz was not called correctly,
+ // for example, F.Fail was called first.
+ return fmt.Errorf("fuzzing process exited unexpectedly due to an internal failure: %w", err)
+ }
+ // Worker exited non-zero or was terminated by a non-interrupt
+ // signal (for example, SIGSEGV) while fuzzing.
+ return fmt.Errorf("fuzzing process hung or terminated unexpectedly: %w", err)
+ // TODO(jayconrod,katiehockman): if -keepfuzzing, restart worker.
+
+ case input := <-w.coordinator.inputC:
+ // Received input from coordinator.
+ args := fuzzArgs{
+ Limit: input.limit,
+ Timeout: input.timeout,
+ Warmup: input.warmup,
+ CoverageData: input.coverageData,
+ }
+ entry, resp, isInternalError, err := w.client.fuzz(ctx, input.entry, args)
+ canMinimize := true
+ if err != nil {
+ // Error communicating with worker.
+ w.stop()
+ if ctx.Err() != nil {
+ // Timeout or interruption.
+ return ctx.Err()
+ }
+ if w.interrupted {
+ // Communication error before we stopped the worker.
+ // Report an error, but don't record a crasher.
+ return fmt.Errorf("communicating with fuzzing process: %v", err)
+ }
+ if sig, ok := terminationSignal(w.waitErr); ok && !isCrashSignal(sig) {
+ // Worker terminated by a signal that probably wasn't caused by a
+ // specific input to the fuzz function. For example, on Linux,
+ // the kernel (OOM killer) may send SIGKILL to a process using a lot
+ // of memory. Or the shell might send SIGHUP when the terminal
+ // is closed. Don't record a crasher.
+ return fmt.Errorf("fuzzing process terminated by unexpected signal; no crash will be recorded: %v", w.waitErr)
+ }
+ if isInternalError {
+ // An internal error occurred which shouldn't be considered
+ // a crash.
+ return err
+ }
+ // Unexpected termination. Set error message and fall through.
+ // We'll restart the worker on the next iteration.
+ // Don't attempt to minimize this since it crashed the worker.
+ resp.Err = fmt.Sprintf("fuzzing process hung or terminated unexpectedly: %v", w.waitErr)
+ canMinimize = false
+ }
+ result := fuzzResult{
+ limit: input.limit,
+ count: resp.Count,
+ totalDuration: resp.TotalDuration,
+ entryDuration: resp.InterestingDuration,
+ entry: entry,
+ crasherMsg: resp.Err,
+ coverageData: resp.CoverageData,
+ canMinimize: canMinimize,
+ }
+ w.coordinator.resultC <- result
+
+ case input := <-w.coordinator.minimizeC:
+ // Received input to minimize from coordinator.
+ result, err := w.minimize(ctx, input)
+ if err != nil {
+ // Error minimizing. Send back the original input. If it didn't cause
+ // an error before, report it as causing an error now.
+ // TODO: double-check this is handled correctly when
+ // implementing -keepfuzzing.
+ result = fuzzResult{
+ entry: input.entry,
+ crasherMsg: input.crasherMsg,
+ canMinimize: false,
+ limit: input.limit,
+ }
+ if result.crasherMsg == "" {
+ result.crasherMsg = err.Error()
+ }
+ }
+ if shouldPrintDebugInfo() {
+ w.coordinator.debugLogf(
+ "input minimized, id: %s, original id: %s, crasher: %t, originally crasher: %t, minimizing took: %s",
+ result.entry.Path,
+ input.entry.Path,
+ result.crasherMsg != "",
+ input.crasherMsg != "",
+ result.totalDuration,
+ )
+ }
+ w.coordinator.resultC <- result
+ }
+ }
+}
+
+// minimize tells a worker process to attempt to find a smaller value that
+// either causes an error (if we started minimizing because we found an input
+// that causes an error) or preserves new coverage (if we started minimizing
+// because we found an input that expands coverage).
+func (w *worker) minimize(ctx context.Context, input fuzzMinimizeInput) (min fuzzResult, err error) {
+ if w.coordinator.opts.MinimizeTimeout != 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, w.coordinator.opts.MinimizeTimeout)
+ defer cancel()
+ }
+
+ args := minimizeArgs{
+ Limit: input.limit,
+ Timeout: input.timeout,
+ KeepCoverage: input.keepCoverage,
+ }
+ entry, resp, err := w.client.minimize(ctx, input.entry, args)
+ if err != nil {
+ // Error communicating with worker.
+ w.stop()
+ if ctx.Err() != nil || w.interrupted || isInterruptError(w.waitErr) {
+ // Worker was interrupted, possibly by the user pressing ^C.
+ // Normally, workers can handle interrupts and timeouts gracefully and
+ // will return without error. An error here indicates the worker
+ // may not have been in a good state, but the error won't be meaningful
+ // to the user. Just return the original crasher without logging anything.
+ return fuzzResult{
+ entry: input.entry,
+ crasherMsg: input.crasherMsg,
+ coverageData: input.keepCoverage,
+ canMinimize: false,
+ limit: input.limit,
+ }, nil
+ }
+ return fuzzResult{
+ entry: entry,
+ crasherMsg: fmt.Sprintf("fuzzing process hung or terminated unexpectedly while minimizing: %v", err),
+ canMinimize: false,
+ limit: input.limit,
+ count: resp.Count,
+ totalDuration: resp.Duration,
+ }, nil
+ }
+
+ if input.crasherMsg != "" && resp.Err == "" {
+ return fuzzResult{}, fmt.Errorf("attempted to minimize a crash but could not reproduce")
+ }
+
+ return fuzzResult{
+ entry: entry,
+ crasherMsg: resp.Err,
+ coverageData: resp.CoverageData,
+ canMinimize: false,
+ limit: input.limit,
+ count: resp.Count,
+ totalDuration: resp.Duration,
+ }, nil
+}
+
+func (w *worker) isRunning() bool {
+ return w.cmd != nil
+}
+
+// startAndPing starts the worker process and sends it a message to make sure it
+// can communicate.
+//
+// startAndPing returns an error if any part of this didn't work, including if
+// the context is expired or the worker process was interrupted before it
+// responded. Errors that happen after start but before the ping response
+// likely indicate that the worker did not call F.Fuzz or called F.Fail first.
+// We don't record crashers for these errors.
+func (w *worker) startAndPing(ctx context.Context) error {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ if err := w.start(); err != nil {
+ return err
+ }
+ if err := w.client.ping(ctx); err != nil {
+ w.stop()
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ if isInterruptError(err) {
+ // User may have pressed ^C before worker responded.
+ return err
+ }
+ // TODO: record and return stderr.
+ return fmt.Errorf("fuzzing process terminated without fuzzing: %w", err)
+ }
+ return nil
+}
+
+// start runs a new worker process.
+//
+// If the process couldn't be started, start returns an error. Start won't
+// return later termination errors from the process if they occur.
+//
+// If the process starts successfully, start returns nil. stop must be called
+// once later to clean up, even if the process terminates on its own.
+//
+// When the process terminates, w.waitErr is set to the error (if any), and
+// w.termC is closed.
+func (w *worker) start() (err error) {
+ if w.isRunning() {
+ panic("worker already started")
+ }
+ w.waitErr = nil
+ w.interrupted = false
+ w.termC = nil
+
+ cmd := exec.Command(w.binPath, w.args...)
+ cmd.Dir = w.dir
+ cmd.Env = w.env[:len(w.env):len(w.env)] // copy on append to ensure workers don't overwrite each other.
+
+ // Create the "fuzz_in" and "fuzz_out" pipes so we can communicate with
+ // the worker. We don't use stdin and stdout, since the test binary may
+ // do something else with those.
+ //
+ // Each pipe has a reader and a writer. The coordinator writes to fuzzInW
+ // and reads from fuzzOutR. The worker inherits fuzzInR and fuzzOutW.
+ // The coordinator closes fuzzInR and fuzzOutW after starting the worker,
+ // since we have no further need of them.
+ fuzzInR, fuzzInW, err := os.Pipe()
+ if err != nil {
+ return err
+ }
+ defer fuzzInR.Close()
+ fuzzOutR, fuzzOutW, err := os.Pipe()
+ if err != nil {
+ fuzzInW.Close()
+ return err
+ }
+ defer fuzzOutW.Close()
+ setWorkerComm(cmd, workerComm{fuzzIn: fuzzInR, fuzzOut: fuzzOutW, memMu: w.memMu})
+
+ // Start the worker process.
+ if err := cmd.Start(); err != nil {
+ fuzzInW.Close()
+ fuzzOutR.Close()
+ return err
+ }
+
+ // Worker started successfully.
+ // After this, w.client owns fuzzInW and fuzzOutR, so w.client.Close must be
+ // called later by stop.
+ w.cmd = cmd
+ w.termC = make(chan struct{})
+ comm := workerComm{fuzzIn: fuzzInW, fuzzOut: fuzzOutR, memMu: w.memMu}
+ m := newMutator()
+ w.client = newWorkerClient(comm, m)
+
+ go func() {
+ w.waitErr = w.cmd.Wait()
+ close(w.termC)
+ }()
+
+ return nil
+}
+
+// stop tells the worker process to exit by closing w.client, then blocks until
+// it terminates. If the worker doesn't terminate after a short time, stop
+// signals it with os.Interrupt (where supported), then os.Kill.
+//
+// stop returns the error the process terminated with, if any (same as
+// w.waitErr).
+//
+// stop must be called at least once after start returns successfully, even if
+// the worker process terminates unexpectedly.
+func (w *worker) stop() error {
+ if w.termC == nil {
+ panic("worker was not started successfully")
+ }
+ select {
+ case <-w.termC:
+ // Worker already terminated.
+ if w.client == nil {
+ // stop already called.
+ return w.waitErr
+ }
+ // Possible unexpected termination.
+ w.client.Close()
+ w.cmd = nil
+ w.client = nil
+ return w.waitErr
+ default:
+ // Worker still running.
+ }
+
+ // Tell the worker to stop by closing fuzz_in. It won't actually stop until it
+ // finishes with earlier calls.
+ closeC := make(chan struct{})
+ go func() {
+ w.client.Close()
+ close(closeC)
+ }()
+
+ sig := os.Interrupt
+ if runtime.GOOS == "windows" {
+ // Per https://golang.org/pkg/os/#Signal, “Interrupt is not implemented on
+ // Windows; using it with os.Process.Signal will return an error.”
+ // Fall back to Kill instead.
+ sig = os.Kill
+ }
+
+ t := time.NewTimer(workerTimeoutDuration)
+ for {
+ select {
+ case <-w.termC:
+ // Worker terminated.
+ t.Stop()
+ <-closeC
+ w.cmd = nil
+ w.client = nil
+ return w.waitErr
+
+ case <-t.C:
+ // Timer fired before worker terminated.
+ w.interrupted = true
+ switch sig {
+ case os.Interrupt:
+ // Try to stop the worker with SIGINT and wait a little longer.
+ w.cmd.Process.Signal(sig)
+ sig = os.Kill
+ t.Reset(workerTimeoutDuration)
+
+ case os.Kill:
+ // Try to stop the worker with SIGKILL and keep waiting.
+ w.cmd.Process.Signal(sig)
+ sig = nil
+ t.Reset(workerTimeoutDuration)
+
+ case nil:
+ // Still waiting. Print a message to let the user know why.
+ fmt.Fprintf(w.coordinator.opts.Log, "waiting for fuzzing process to terminate...\n")
+ }
+ }
+ }
+}
+
+// RunFuzzWorker is called in a worker process to communicate with the
+// coordinator process in order to fuzz random inputs. RunFuzzWorker loops
+// until the coordinator tells it to stop.
+//
+// fn is a wrapper on the fuzz function. It may return an error to indicate
+// a given input "crashed". The coordinator will also record a crasher if
+// the function times out or terminates the process.
+//
+// RunFuzzWorker returns an error if it could not communicate with the
+// coordinator process.
+func RunFuzzWorker(ctx context.Context, fn func(CorpusEntry) error) error {
+ comm, err := getWorkerComm()
+ if err != nil {
+ return err
+ }
+ srv := &workerServer{
+ workerComm: comm,
+ fuzzFn: func(e CorpusEntry) (time.Duration, error) {
+ timer := time.AfterFunc(10*time.Second, func() {
+ panic("deadlocked!") // this error message won't be printed
+ })
+ defer timer.Stop()
+ start := time.Now()
+ err := fn(e)
+ return time.Since(start), err
+ },
+ m: newMutator(),
+ }
+ return srv.serve(ctx)
+}
+
+// call is serialized and sent from the coordinator on fuzz_in. It acts as
+// a minimalist RPC mechanism. Exactly one of its fields must be set to indicate
+// which method to call.
+type call struct {
+ Ping *pingArgs
+ Fuzz *fuzzArgs
+ Minimize *minimizeArgs
+}
+
+// minimizeArgs contains arguments to workerServer.minimize. The value to
+// minimize is already in shared memory.
+type minimizeArgs struct {
+ // Timeout is the time to spend minimizing. This may include time to start up,
+ // especially if the input causes the worker process to terminate, requiring
+ // repeated restarts.
+ Timeout time.Duration
+
+ // Limit is the maximum number of values to test, without spending more time
+ // than Timeout. 0 indicates no limit.
+ Limit int64
+
+ // KeepCoverage is a set of coverage counters the worker should attempt to
+ // keep in minimized values. When provided, the worker will reject inputs that
+ // don't cause at least one of these bits to be set.
+ KeepCoverage []byte
+
+ // Index is the index of the fuzz target parameter to be minimized.
+ Index int
+}
+
+// minimizeResponse contains results from workerServer.minimize.
+type minimizeResponse struct {
+ // WroteToMem is true if the worker found a smaller input and wrote it to
+ // shared memory. If minimizeArgs.KeepCoverage was set, the minimized input
+ // preserved at least one coverage bit and did not cause an error.
+ // Otherwise, the minimized input caused some error, recorded in Err.
+ WroteToMem bool
+
+ // Err is the error string caused by the value in shared memory, if any.
+ Err string
+
+ // CoverageData is the set of coverage bits activated by the minimized value
+ // in shared memory. When set, it contains at least one bit from KeepCoverage.
+ // CoverageData will be nil if Err is set or if minimization failed.
+ CoverageData []byte
+
+ // Duration is the time spent minimizing, not including starting or cleaning up.
+ Duration time.Duration
+
+ // Count is the number of values tested.
+ Count int64
+}
+
+// fuzzArgs contains arguments to workerServer.fuzz. The value to fuzz is
+// passed in shared memory.
+type fuzzArgs struct {
+ // Timeout is the time to spend fuzzing, not including starting or
+ // cleaning up.
+ Timeout time.Duration
+
+ // Limit is the maximum number of values to test, without spending more time
+ // than Timeout. 0 indicates no limit.
+ Limit int64
+
+ // Warmup indicates whether this is part of a warmup run, meaning that
+ // fuzzing should not occur. If coverageEnabled is true, then coverage data
+ // should be reported.
+ Warmup bool
+
+ // CoverageData is the coverage data. If set, the worker should update its
+ // local coverage data prior to fuzzing.
+ CoverageData []byte
+}
+
+// fuzzResponse contains results from workerServer.fuzz.
+type fuzzResponse struct {
+ // TotalDuration is the total time spent fuzzing, not including starting or
+ // cleaning up. InterestingDuration is the time spent running the input being
+ // reported as interesting (for example, one that expanded coverage), if any.
+ TotalDuration time.Duration
+ InterestingDuration time.Duration
+
+ // Count is the number of values tested.
+ Count int64
+
+ // CoverageData is set if the value in shared memory expands coverage
+ // and therefore may be interesting to the coordinator.
+ CoverageData []byte
+
+ // Err is the error string caused by the value in shared memory, which is
+ // non-empty if the value in shared memory caused a crash.
+ Err string
+
+ // InternalErr is the error string caused by an internal error in the
+ // worker. This shouldn't be considered a crasher.
+ InternalErr string
+}
+
+// pingArgs contains arguments to workerServer.ping.
+type pingArgs struct{}
+
+// pingResponse contains results from workerServer.ping.
+type pingResponse struct{}
+
+// workerComm holds pipes and shared memory used for communication
+// between the coordinator process (client) and a worker process (server).
+// These values are unique to each worker; they are shared only with the
+// coordinator, not with other workers.
+//
+// Access to shared memory is synchronized implicitly over the RPC protocol
+// implemented in workerServer and workerClient. During a call, the client
+// (worker) has exclusive access to shared memory; at other times, the server
+// (coordinator) has exclusive access.
+type workerComm struct {
+ fuzzIn, fuzzOut *os.File
+ memMu chan *sharedMem // mutex guarding shared memory
+}
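memMu is a channel used as a hand-off mutex: receiving from it takes exclusive ownership of the mapped region, sending it back releases ownership, and closing the channel (as worker.cleanup does) makes later receives report that the memory is gone instead of blocking forever. A minimal, self-contained sketch of the same pattern with a hypothetical resource type:

    package main

    import "fmt"

    type resource struct{ data []byte }

    func main() {
        mu := make(chan *resource, 1)
        mu <- &resource{data: make([]byte, 8)} // resource starts out available

        // Acquire exclusive access; ok reports whether the resource still exists.
        r, ok := <-mu
        if !ok {
            fmt.Println("resource already released")
            return
        }
        r.data[0] = 42 // safe: this goroutine is the only holder
        mu <- r        // release

        // Teardown: take the resource one last time, then close the channel so
        // any later acquirer sees ok == false rather than blocking.
        r = <-mu
        close(mu)
        fmt.Println("released", len(r.data), "bytes")
    }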
+
+// workerServer is a minimalist RPC server, run by fuzz worker processes.
+// It allows the coordinator process (using workerClient) to call methods in a
+// worker process. This system allows the coordinator to run multiple worker
+// processes in parallel and to collect inputs that caused crashes from shared
+// memory after a worker process terminates unexpectedly.
+type workerServer struct {
+ workerComm
+ m *mutator
+
+ // coverageMask is the local coverage data for the worker. It is
+ // periodically updated to reflect the data in the coordinator when new
+ // coverage is found.
+ coverageMask []byte
+
+ // fuzzFn runs the worker's fuzz target on the given input and returns an
+ // error if it finds a crasher (the process may also exit or crash), and the
+ // time it took to run the input. It sets a deadline of 10 seconds, at which
+ // point it will panic with the assumption that the process is hanging or
+ // deadlocked.
+ fuzzFn func(CorpusEntry) (time.Duration, error)
+}
+
+// serve reads serialized RPC messages on fuzzIn. When serve receives a message,
+// it calls the corresponding method, then sends the serialized result back
+// on fuzzOut.
+//
+// serve handles RPC calls synchronously; it will not attempt to read a message
+// until the previous call has finished.
+//
+// serve returns errors that occurred when communicating over pipes. serve
+// does not return errors from method calls; those are passed through serialized
+// responses.
+func (ws *workerServer) serve(ctx context.Context) error {
+ enc := json.NewEncoder(ws.fuzzOut)
+ dec := json.NewDecoder(&contextReader{ctx: ctx, r: ws.fuzzIn})
+ for {
+ var c call
+ if err := dec.Decode(&c); err != nil {
+ if err == io.EOF || err == ctx.Err() {
+ return nil
+ } else {
+ return err
+ }
+ }
+
+ var resp any
+ switch {
+ case c.Fuzz != nil:
+ resp = ws.fuzz(ctx, *c.Fuzz)
+ case c.Minimize != nil:
+ resp = ws.minimize(ctx, *c.Minimize)
+ case c.Ping != nil:
+ resp = ws.ping(ctx, *c.Ping)
+ default:
+ return errors.New("no arguments provided for any call")
+ }
+
+ if err := enc.Encode(resp); err != nil {
+ return err
+ }
+ }
+}
+
+// chainedMutations is how many mutations are applied before the worker
+// resets the input to its original state.
+// NOTE: this number was picked without much thought. It is low enough that
+// it seems to create a significant diversity in mutated inputs. We may want
+// to consider looking into this more closely once we have a proper performance
+// testing framework. Another option is to randomly pick the number of chained
+// mutations on each invocation of the workerServer.fuzz method (this appears to
+// be what libFuzzer does, although there seems to be no documentation which
+// explains why this choice was made.)
+const chainedMutations = 5
+
+// fuzz runs the test function on random variations of the input value in shared
+// memory for a limited duration or number of iterations.
+//
+// fuzz returns early if it finds an input that crashes the fuzz function (with
+// fuzzResponse.Err set) or an input that expands coverage (with
+// fuzzResponse.InterestingDuration set).
+//
+// fuzz does not modify the input in shared memory. Instead, it saves the
+// initial PRNG state in shared memory and increments a counter in shared
+// memory before each call to the test function. The caller may reconstruct
+// the crashing input with this information, since the PRNG is deterministic.
+func (ws *workerServer) fuzz(ctx context.Context, args fuzzArgs) (resp fuzzResponse) {
+ if args.CoverageData != nil {
+ if ws.coverageMask != nil && len(args.CoverageData) != len(ws.coverageMask) {
+ resp.InternalErr = fmt.Sprintf("unexpected size for CoverageData: got %d, expected %d", len(args.CoverageData), len(ws.coverageMask))
+ return resp
+ }
+ ws.coverageMask = args.CoverageData
+ }
+ start := time.Now()
+ defer func() { resp.TotalDuration = time.Since(start) }()
+
+ if args.Timeout != 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, args.Timeout)
+ defer cancel()
+ }
+ mem := <-ws.memMu
+ ws.m.r.save(&mem.header().randState, &mem.header().randInc)
+ defer func() {
+ resp.Count = mem.header().count
+ ws.memMu <- mem
+ }()
+ if args.Limit > 0 && mem.header().count >= args.Limit {
+ resp.InternalErr = fmt.Sprintf("mem.header().count %d already exceeds args.Limit %d", mem.header().count, args.Limit)
+ return resp
+ }
+
+ originalVals, err := unmarshalCorpusFile(mem.valueCopy())
+ if err != nil {
+ resp.InternalErr = err.Error()
+ return resp
+ }
+ vals := make([]any, len(originalVals))
+ copy(vals, originalVals)
+
+ shouldStop := func() bool {
+ return args.Limit > 0 && mem.header().count >= args.Limit
+ }
+ fuzzOnce := func(entry CorpusEntry) (dur time.Duration, cov []byte, errMsg string) {
+ mem.header().count++
+ var err error
+ dur, err = ws.fuzzFn(entry)
+ if err != nil {
+ errMsg = err.Error()
+ if errMsg == "" {
+ errMsg = "fuzz function failed with no input"
+ }
+ return dur, nil, errMsg
+ }
+ if ws.coverageMask != nil && countNewCoverageBits(ws.coverageMask, coverageSnapshot) > 0 {
+ return dur, coverageSnapshot, ""
+ }
+ return dur, nil, ""
+ }
+
+ if args.Warmup {
+ dur, _, errMsg := fuzzOnce(CorpusEntry{Values: vals})
+ if errMsg != "" {
+ resp.Err = errMsg
+ return resp
+ }
+ resp.InterestingDuration = dur
+ if coverageEnabled {
+ resp.CoverageData = coverageSnapshot
+ }
+ return resp
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ return resp
+ default:
+ if mem.header().count%chainedMutations == 0 {
+ copy(vals, originalVals)
+ ws.m.r.save(&mem.header().randState, &mem.header().randInc)
+ }
+ ws.m.mutate(vals, cap(mem.valueRef()))
+
+ entry := CorpusEntry{Values: vals}
+ dur, cov, errMsg := fuzzOnce(entry)
+ if errMsg != "" {
+ resp.Err = errMsg
+ return resp
+ }
+ if cov != nil {
+ resp.CoverageData = cov
+ resp.InterestingDuration = dur
+ return resp
+ }
+ if shouldStop() {
+ return resp
+ }
+ }
+ }
+}
+
+func (ws *workerServer) minimize(ctx context.Context, args minimizeArgs) (resp minimizeResponse) {
+ start := time.Now()
+ defer func() { resp.Duration = time.Since(start) }()
+ mem := <-ws.memMu
+ defer func() { ws.memMu <- mem }()
+ vals, err := unmarshalCorpusFile(mem.valueCopy())
+ if err != nil {
+ panic(err)
+ }
+ inpHash := sha256.Sum256(mem.valueCopy())
+ if args.Timeout != 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, args.Timeout)
+ defer cancel()
+ }
+
+ // Minimize the values in vals, then write to shared memory. We only write
+ // to shared memory after completing minimization.
+ success, err := ws.minimizeInput(ctx, vals, mem, args)
+ if success {
+ writeToMem(vals, mem)
+ outHash := sha256.Sum256(mem.valueCopy())
+ mem.header().rawInMem = false
+ resp.WroteToMem = true
+ if err != nil {
+ resp.Err = err.Error()
+ } else {
+ // If the values didn't change during minimization then coverageSnapshot is likely
+ // a dirty snapshot which represents the very last step of minimization, not the
+ // coverage for the initial input. In that case just return the coverage we were
+ // given initially, since it more accurately represents the coverage map for the
+ // input we are returning.
+ if outHash != inpHash {
+ resp.CoverageData = coverageSnapshot
+ } else {
+ resp.CoverageData = args.KeepCoverage
+ }
+ }
+ }
+ return resp
+}
+
+// minimizeInput applies a series of minimizing transformations on the provided
+// vals, ensuring that each minimization still causes an error, or keeps
+// coverage, in fuzzFn. It uses the context to determine how long to run,
+// stopping once closed. It returns a bool indicating whether minimization was
+// successful and an error if one was found.
+func (ws *workerServer) minimizeInput(ctx context.Context, vals []any, mem *sharedMem, args minimizeArgs) (success bool, retErr error) {
+ keepCoverage := args.KeepCoverage
+ memBytes := mem.valueRef()
+ bPtr := &memBytes
+ count := &mem.header().count
+ shouldStop := func() bool {
+ return ctx.Err() != nil ||
+ (args.Limit > 0 && *count >= args.Limit)
+ }
+ if shouldStop() {
+ return false, nil
+ }
+
+ // Check that the original value preserves coverage or causes an error.
+ // If not, then whatever caused us to think the value was interesting may
+ // have been a flake, and we can't minimize it.
+ *count++
+ _, retErr = ws.fuzzFn(CorpusEntry{Values: vals})
+ if keepCoverage != nil {
+ if !hasCoverageBit(keepCoverage, coverageSnapshot) || retErr != nil {
+ return false, nil
+ }
+ } else if retErr == nil {
+ return false, nil
+ }
+ mem.header().rawInMem = true
+
+ // tryMinimized runs the fuzz function with candidate replacing the value
+ // at index valI. tryMinimized returns whether the input with candidate is
+ // interesting for the same reason as the original input: it returns
+ // an error if one was expected, or it preserves coverage.
+ tryMinimized := func(candidate []byte) bool {
+ prev := vals[args.Index]
+ switch prev.(type) {
+ case []byte:
+ vals[args.Index] = candidate
+ case string:
+ vals[args.Index] = string(candidate)
+ default:
+ panic("impossible")
+ }
+ copy(*bPtr, candidate)
+ *bPtr = (*bPtr)[:len(candidate)]
+ mem.setValueLen(len(candidate))
+ *count++
+ _, err := ws.fuzzFn(CorpusEntry{Values: vals})
+ if err != nil {
+ retErr = err
+ if keepCoverage != nil {
+ // Now that we've found a crash, that's more important than any
+ // minimization of interesting inputs that was being done. Clear out
+ // keepCoverage to only minimize the crash going forward.
+ keepCoverage = nil
+ }
+ return true
+ }
+ // Minimization should preserve coverage bits.
+ if keepCoverage != nil && isCoverageSubset(keepCoverage, coverageSnapshot) {
+ return true
+ }
+ vals[args.Index] = prev
+ return false
+ }
+ switch v := vals[args.Index].(type) {
+ case string:
+ minimizeBytes([]byte(v), tryMinimized, shouldStop)
+ case []byte:
+ minimizeBytes(v, tryMinimized, shouldStop)
+ default:
+ panic("impossible")
+ }
+ return true, retErr
+}
+
+func writeToMem(vals []any, mem *sharedMem) {
+ b := marshalCorpusFile(vals...)
+ mem.setValue(b)
+}
+
+// ping does nothing. The coordinator calls this method to ensure the worker
+// has called F.Fuzz and can communicate.
+func (ws *workerServer) ping(ctx context.Context, args pingArgs) pingResponse {
+ return pingResponse{}
+}
+
+// workerClient is a minimalist RPC client. The coordinator process uses a
+// workerClient to call methods in each worker process (handled by
+// workerServer).
+type workerClient struct {
+ workerComm
+ m *mutator
+
+ // mu is the mutex protecting the workerComm.fuzzIn pipe. This must be
+ // locked before making calls to the workerServer. It prevents
+ // workerClient.Close from closing fuzzIn while workerClient methods are
+ // writing to it concurrently, and prevents multiple callers from writing to
+ // fuzzIn concurrently.
+ mu sync.Mutex
+}
+
+func newWorkerClient(comm workerComm, m *mutator) *workerClient {
+ return &workerClient{workerComm: comm, m: m}
+}
+
+// Close shuts down the connection to the RPC server (the worker process) by
+// closing fuzz_in. Close drains fuzz_out (avoiding a SIGPIPE in the worker),
+// and closes it after the worker process closes the other end.
+func (wc *workerClient) Close() error {
+ wc.mu.Lock()
+ defer wc.mu.Unlock()
+
+ // Close fuzzIn. This signals to the server that there are no more calls,
+ // and it should exit.
+ if err := wc.fuzzIn.Close(); err != nil {
+ wc.fuzzOut.Close()
+ return err
+ }
+
+ // Drain fuzzOut and close it. When the server exits, the kernel will close
+ // its end of fuzzOut, and we'll get EOF.
+ if _, err := io.Copy(io.Discard, wc.fuzzOut); err != nil {
+ wc.fuzzOut.Close()
+ return err
+ }
+ return wc.fuzzOut.Close()
+}
+
+// errSharedMemClosed is returned by workerClient methods that cannot access
+// shared memory because it was closed and unmapped by another goroutine. That
+// can happen when worker.cleanup is called in the worker goroutine while a
+// workerClient.fuzz call runs concurrently.
+//
+// This error should not be reported. It indicates the operation was
+// interrupted.
+var errSharedMemClosed = errors.New("internal error: shared memory was closed and unmapped")
+
+// minimize tells the worker to call the minimize method. See
+// workerServer.minimize.
+func (wc *workerClient) minimize(ctx context.Context, entryIn CorpusEntry, args minimizeArgs) (entryOut CorpusEntry, resp minimizeResponse, retErr error) {
+ wc.mu.Lock()
+ defer wc.mu.Unlock()
+
+ mem, ok := <-wc.memMu
+ if !ok {
+ return CorpusEntry{}, minimizeResponse{}, errSharedMemClosed
+ }
+ defer func() { wc.memMu <- mem }()
+ mem.header().count = 0
+ inp, err := corpusEntryData(entryIn)
+ if err != nil {
+ return CorpusEntry{}, minimizeResponse{}, err
+ }
+ mem.setValue(inp)
+ entryOut = entryIn
+ entryOut.Values, err = unmarshalCorpusFile(inp)
+ if err != nil {
+ return CorpusEntry{}, minimizeResponse{}, fmt.Errorf("workerClient.minimize unmarshaling provided value: %v", err)
+ }
+ for i, v := range entryOut.Values {
+ if !isMinimizable(reflect.TypeOf(v)) {
+ continue
+ }
+
+ wc.memMu <- mem
+ args.Index = i
+ c := call{Minimize: &args}
+ callErr := wc.callLocked(ctx, c, &resp)
+ mem, ok = <-wc.memMu
+ if !ok {
+ return CorpusEntry{}, minimizeResponse{}, errSharedMemClosed
+ }
+
+ if callErr != nil {
+ retErr = callErr
+ if !mem.header().rawInMem {
+ // An unrecoverable error occurred before minimization began.
+ return entryIn, minimizeResponse{}, retErr
+ }
+ // An unrecoverable error occurred during minimization. mem now
+ // holds the raw, unmarshaled bytes of entryIn.Values[i] that
+ // caused the error.
+ switch entryOut.Values[i].(type) {
+ case string:
+ entryOut.Values[i] = string(mem.valueCopy())
+ case []byte:
+ entryOut.Values[i] = mem.valueCopy()
+ default:
+ panic("impossible")
+ }
+ entryOut.Data = marshalCorpusFile(entryOut.Values...)
+ // Stop minimizing; another unrecoverable error is likely to occur.
+ break
+ }
+
+ if resp.WroteToMem {
+ // Minimization succeeded, and mem holds the marshaled data.
+ entryOut.Data = mem.valueCopy()
+ entryOut.Values, err = unmarshalCorpusFile(entryOut.Data)
+ if err != nil {
+ return CorpusEntry{}, minimizeResponse{}, fmt.Errorf("workerClient.minimize unmarshaling minimized value: %v", err)
+ }
+ }
+
+ // Prepare for next iteration of the loop.
+ if args.Timeout != 0 {
+ args.Timeout -= resp.Duration
+ if args.Timeout <= 0 {
+ break
+ }
+ }
+ if args.Limit != 0 {
+ args.Limit -= mem.header().count
+ if args.Limit <= 0 {
+ break
+ }
+ }
+ }
+ resp.Count = mem.header().count
+ h := sha256.Sum256(entryOut.Data)
+ entryOut.Path = fmt.Sprintf("%x", h[:4])
+ return entryOut, resp, retErr
+}
+
+// fuzz tells the worker to call the fuzz method. See workerServer.fuzz.
+func (wc *workerClient) fuzz(ctx context.Context, entryIn CorpusEntry, args fuzzArgs) (entryOut CorpusEntry, resp fuzzResponse, isInternalError bool, err error) {
+ wc.mu.Lock()
+ defer wc.mu.Unlock()
+
+ mem, ok := <-wc.memMu
+ if !ok {
+ return CorpusEntry{}, fuzzResponse{}, true, errSharedMemClosed
+ }
+ mem.header().count = 0
+ inp, err := corpusEntryData(entryIn)
+ if err != nil {
+ wc.memMu <- mem
+ return CorpusEntry{}, fuzzResponse{}, true, err
+ }
+ mem.setValue(inp)
+ wc.memMu <- mem
+
+ c := call{Fuzz: &args}
+ callErr := wc.callLocked(ctx, c, &resp)
+ if resp.InternalErr != "" {
+ return CorpusEntry{}, fuzzResponse{}, true, errors.New(resp.InternalErr)
+ }
+ mem, ok = <-wc.memMu
+ if !ok {
+ return CorpusEntry{}, fuzzResponse{}, true, errSharedMemClosed
+ }
+ defer func() { wc.memMu <- mem }()
+ resp.Count = mem.header().count
+
+ if !bytes.Equal(inp, mem.valueRef()) {
+ return CorpusEntry{}, fuzzResponse{}, true, errors.New("workerServer.fuzz modified input")
+ }
+ needEntryOut := callErr != nil || resp.Err != "" ||
+ (!args.Warmup && resp.CoverageData != nil)
+ if needEntryOut {
+ valuesOut, err := unmarshalCorpusFile(inp)
+ if err != nil {
+ return CorpusEntry{}, fuzzResponse{}, true, fmt.Errorf("unmarshaling fuzz input value after call: %v", err)
+ }
+ wc.m.r.restore(mem.header().randState, mem.header().randInc)
+ if !args.Warmup {
+ // Only mutate the valuesOut if fuzzing actually occurred.
+ numMutations := ((resp.Count - 1) % chainedMutations) + 1
+ for i := int64(0); i < numMutations; i++ {
+ wc.m.mutate(valuesOut, cap(mem.valueRef()))
+ }
+ }
+ dataOut := marshalCorpusFile(valuesOut...)
+
+ h := sha256.Sum256(dataOut)
+ name := fmt.Sprintf("%x", h[:4])
+ entryOut = CorpusEntry{
+ Parent: entryIn.Path,
+ Path: name,
+ Data: dataOut,
+ Generation: entryIn.Generation + 1,
+ }
+ if args.Warmup {
+ // The bytes weren't mutated, so if entryIn was a seed corpus value,
+ // then entryOut is too.
+ entryOut.IsSeed = entryIn.IsSeed
+ }
+ }
+
+ return entryOut, resp, false, callErr
+}
+
+// ping tells the worker to call the ping method. See workerServer.ping.
+func (wc *workerClient) ping(ctx context.Context) error {
+ wc.mu.Lock()
+ defer wc.mu.Unlock()
+ c := call{Ping: &pingArgs{}}
+ var resp pingResponse
+ return wc.callLocked(ctx, c, &resp)
+}
+
+// callLocked sends an RPC from the coordinator to the worker process and waits
+// for the response. The call may be cancelled with ctx.
+func (wc *workerClient) callLocked(ctx context.Context, c call, resp any) (err error) {
+ enc := json.NewEncoder(wc.fuzzIn)
+ dec := json.NewDecoder(&contextReader{ctx: ctx, r: wc.fuzzOut})
+ if err := enc.Encode(c); err != nil {
+ return err
+ }
+ return dec.Decode(resp)
+}
+
+// contextReader wraps a Reader with a Context. If the context is cancelled
+// while the underlying reader is blocked, Read returns immediately.
+//
+// This is useful for reading from a pipe. Closing a pipe file descriptor does
+// not unblock pending Reads on that file descriptor. All copies of the pipe's
+// other file descriptor (the write end) must be closed in all processes that
+// inherit it. This is difficult to do correctly in the situation we care about
+// (process group termination).
+type contextReader struct {
+ ctx context.Context
+ r io.Reader
+}
+
+func (cr *contextReader) Read(b []byte) (int, error) {
+ if ctxErr := cr.ctx.Err(); ctxErr != nil {
+ return 0, ctxErr
+ }
+ done := make(chan struct{})
+
+ // This goroutine may stay blocked after Read returns because the underlying
+ // read is blocked.
+ var n int
+ var err error
+ go func() {
+ n, err = cr.r.Read(b)
+ close(done)
+ }()
+
+ select {
+ case <-cr.ctx.Done():
+ return 0, cr.ctx.Err()
+ case <-done:
+ return n, err
+ }
+}
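Everything exchanged on fuzz_in and fuzz_out is stream-encoded JSON: each request is a call value with exactly one field set, and the worker answers with the matching response type on the other pipe. A minimal sketch of the request framing over an in-memory pipe, using simplified stand-ins for the internal types rather than the real internal/fuzz definitions:

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
    )

    // Simplified stand-ins for the internal call/argument types.
    type pingArgs struct{}
    type fuzzArgs struct{ Limit int64 }
    type call struct {
        Ping *pingArgs
        Fuzz *fuzzArgs
    }

    func main() {
        r, w := io.Pipe() // stands in for the fuzz_in pipe

        done := make(chan struct{})
        go func() { // worker side: decode one call and dispatch on the field that is set
            defer close(done)
            var c call
            if err := json.NewDecoder(r).Decode(&c); err != nil {
                fmt.Println("decode:", err)
                return
            }
            switch {
            case c.Ping != nil:
                fmt.Println("worker: ping")
            case c.Fuzz != nil:
                fmt.Println("worker: fuzz, limit", c.Fuzz.Limit)
            }
        }()

        // Coordinator side: exactly one field of call is set per request.
        if err := json.NewEncoder(w).Encode(call{Fuzz: &fuzzArgs{Limit: 100}}); err != nil {
            fmt.Println("encode:", err)
        }
        w.Close()
        <-done
    }

The JSON on the wire for that request is simply {"Ping":null,"Fuzz":{"Limit":100}}; the decoder picks the non-nil field, much as workerServer.serve does.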
diff --git a/src/internal/fuzz/worker_test.go b/src/internal/fuzz/worker_test.go
new file mode 100644
index 0000000..d0b21da
--- /dev/null
+++ b/src/internal/fuzz/worker_test.go
@@ -0,0 +1,206 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "internal/race"
+ "io"
+ "os"
+ "os/signal"
+ "reflect"
+ "strconv"
+ "testing"
+ "time"
+)
+
+var benchmarkWorkerFlag = flag.Bool("benchmarkworker", false, "")
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if *benchmarkWorkerFlag {
+ runBenchmarkWorker()
+ return
+ }
+ os.Exit(m.Run())
+}
+
+func BenchmarkWorkerFuzzOverhead(b *testing.B) {
+ if race.Enabled {
+ b.Skip("TODO(48504): fix and re-enable")
+ }
+ origEnv := os.Getenv("GODEBUG")
+ defer func() { os.Setenv("GODEBUG", origEnv) }()
+ os.Setenv("GODEBUG", fmt.Sprintf("%s,fuzzseed=123", origEnv))
+
+ ws := &workerServer{
+ fuzzFn: func(_ CorpusEntry) (time.Duration, error) { return time.Second, nil },
+ workerComm: workerComm{memMu: make(chan *sharedMem, 1)},
+ }
+
+ mem, err := sharedMemTempFile(workerSharedMemSize)
+ if err != nil {
+ b.Fatalf("failed to create temporary shared memory file: %s", err)
+ }
+ defer func() {
+ if err := mem.Close(); err != nil {
+ b.Error(err)
+ }
+ }()
+
+ initialVal := []any{make([]byte, 32)}
+ encodedVals := marshalCorpusFile(initialVal...)
+ mem.setValue(encodedVals)
+
+ ws.memMu <- mem
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ws.m = newMutator()
+ mem.setValue(encodedVals)
+ mem.header().count = 0
+
+ ws.fuzz(context.Background(), fuzzArgs{Limit: 1})
+ }
+}
+
+// BenchmarkWorkerPing acts as the coordinator and measures the time it takes
+// a worker to respond to N pings. This is a rough measure of our RPC latency.
+func BenchmarkWorkerPing(b *testing.B) {
+ if race.Enabled {
+ b.Skip("TODO(48504): fix and re-enable")
+ }
+ b.SetParallelism(1)
+ w := newWorkerForTest(b)
+ for i := 0; i < b.N; i++ {
+ if err := w.client.ping(context.Background()); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkWorkerFuzz acts as the coordinator and measures the time it takes
+// a worker to mutate a given input and call a trivial fuzz function N times.
+func BenchmarkWorkerFuzz(b *testing.B) {
+ if race.Enabled {
+ b.Skip("TODO(48504): fix and re-enable")
+ }
+ b.SetParallelism(1)
+ w := newWorkerForTest(b)
+ entry := CorpusEntry{Values: []any{[]byte(nil)}}
+ entry.Data = marshalCorpusFile(entry.Values...)
+ for i := int64(0); i < int64(b.N); {
+ args := fuzzArgs{
+ Limit: int64(b.N) - i,
+ Timeout: workerFuzzDuration,
+ }
+ _, resp, _, err := w.client.fuzz(context.Background(), entry, args)
+ if err != nil {
+ b.Fatal(err)
+ }
+ if resp.Err != "" {
+ b.Fatal(resp.Err)
+ }
+ if resp.Count == 0 {
+ b.Fatal("worker did not make progress")
+ }
+ i += resp.Count
+ }
+}
+
+// newWorkerForTest creates and starts a worker process for testing or
+// benchmarking. The worker process calls RunFuzzWorker, which responds to
+// RPC messages until it's stopped. The process is stopped and cleaned up
+// automatically when the test is done.
+func newWorkerForTest(tb testing.TB) *worker {
+ tb.Helper()
+ c, err := newCoordinator(CoordinateFuzzingOpts{
+ Types: []reflect.Type{reflect.TypeOf([]byte(nil))},
+ Log: io.Discard,
+ })
+ if err != nil {
+ tb.Fatal(err)
+ }
+ dir := "" // same as self
+ binPath := os.Args[0] // same as self
+ args := append(os.Args[1:], "-benchmarkworker")
+ env := os.Environ() // same as self
+ w, err := newWorker(c, dir, binPath, args, env)
+ if err != nil {
+ tb.Fatal(err)
+ }
+ tb.Cleanup(func() {
+ if err := w.cleanup(); err != nil {
+ tb.Error(err)
+ }
+ })
+ if err := w.startAndPing(context.Background()); err != nil {
+ tb.Fatal(err)
+ }
+ tb.Cleanup(func() {
+ if err := w.stop(); err != nil {
+ tb.Error(err)
+ }
+ })
+ return w
+}
+
+func runBenchmarkWorker() {
+ ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
+ defer cancel()
+ fn := func(CorpusEntry) error { return nil }
+ if err := RunFuzzWorker(ctx, fn); err != nil && err != ctx.Err() {
+ panic(err)
+ }
+}
+
+func BenchmarkWorkerMinimize(b *testing.B) {
+ if race.Enabled {
+ b.Skip("TODO(48504): fix and re-enable")
+ }
+
+ ws := &workerServer{
+ workerComm: workerComm{memMu: make(chan *sharedMem, 1)},
+ }
+
+ mem, err := sharedMemTempFile(workerSharedMemSize)
+ if err != nil {
+ b.Fatalf("failed to create temporary shared memory file: %s", err)
+ }
+ defer func() {
+ if err := mem.Close(); err != nil {
+ b.Error(err)
+ }
+ }()
+ ws.memMu <- mem
+
+ bytes := make([]byte, 1024)
+ ctx := context.Background()
+ for sz := 1; sz <= len(bytes); sz <<= 1 {
+ sz := sz
+ input := []any{bytes[:sz]}
+ encodedVals := marshalCorpusFile(input...)
+ mem = <-ws.memMu
+ mem.setValue(encodedVals)
+ ws.memMu <- mem
+ b.Run(strconv.Itoa(sz), func(b *testing.B) {
+ i := 0
+ ws.fuzzFn = func(_ CorpusEntry) (time.Duration, error) {
+ if i == 0 {
+ i++
+ return time.Second, errors.New("initial failure for deflake")
+ }
+ return time.Second, nil
+ }
+ for i := 0; i < b.N; i++ {
+ b.SetBytes(int64(sz))
+ ws.minimize(ctx, minimizeArgs{})
+ }
+ })
+ }
+}
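newWorkerForTest starts os.Args[0] as the worker binary, so the compiled test binary plays both roles: coordinator inside the test, and RPC-serving worker in the child process selected by the -benchmarkworker flag in TestMain. A minimal sketch of that self-re-exec pattern, using a hypothetical -childmode flag and a trivial child task:

    package selfexec // in a file named selfexec_test.go

    import (
        "flag"
        "os"
        "os/exec"
        "testing"
    )

    var childMode = flag.Bool("childmode", false, "run as the helper child instead of the tests")

    func TestMain(m *testing.M) {
        flag.Parse()
        if *childMode {
            // Child role: do the helper work, then exit without running any tests.
            os.Stdout.WriteString("hello from child\n")
            return
        }
        os.Exit(m.Run())
    }

    func TestSpawnSelf(t *testing.T) {
        // Parent role: re-run this same binary with the flag selecting the child role.
        out, err := exec.Command(os.Args[0], "-childmode").CombinedOutput()
        if err != nil {
            t.Fatalf("child failed: %v\n%s", err, out)
        }
        if string(out) != "hello from child\n" {
            t.Fatalf("unexpected child output: %q", out)
        }
    }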
diff --git a/src/internal/goarch/gengoarch.go b/src/internal/goarch/gengoarch.go
new file mode 100644
index 0000000..0b0be5c
--- /dev/null
+++ b/src/internal/goarch/gengoarch.go
@@ -0,0 +1,60 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "strings"
+)
+
+var goarches []string
+
+func main() {
+ data, err := os.ReadFile("../../go/build/syslist.go")
+ if err != nil {
+ log.Fatal(err)
+ }
+ const goarchPrefix = `var knownArch = map[string]bool{`
+ inGOARCH := false
+ for _, line := range strings.Split(string(data), "\n") {
+ if strings.HasPrefix(line, goarchPrefix) {
+ inGOARCH = true
+ } else if inGOARCH && strings.HasPrefix(line, "}") {
+ break
+ } else if inGOARCH {
+ goarch := strings.Fields(line)[0]
+ goarch = strings.TrimPrefix(goarch, `"`)
+ goarch = strings.TrimSuffix(goarch, `":`)
+ goarches = append(goarches, goarch)
+ }
+ }
+
+ for _, target := range goarches {
+ if target == "amd64p32" {
+ continue
+ }
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.\n\n")
+ fmt.Fprintf(&buf, "//go:build %s\n\n", target) // must explicitly include target for bootstrapping purposes
+ fmt.Fprintf(&buf, "package goarch\n\n")
+ fmt.Fprintf(&buf, "const GOARCH = `%s`\n\n", target)
+ for _, goarch := range goarches {
+ value := 0
+ if goarch == target {
+ value = 1
+ }
+ fmt.Fprintf(&buf, "const Is%s = %d\n", strings.Title(goarch), value)
+ }
+ err := os.WriteFile("zgoarch_"+target+".go", buf.Bytes(), 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+}
diff --git a/src/internal/goarch/goarch.go b/src/internal/goarch/goarch.go
new file mode 100644
index 0000000..3dda62f
--- /dev/null
+++ b/src/internal/goarch/goarch.go
@@ -0,0 +1,60 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package goarch contains GOARCH-specific constants.
+package goarch
+
+// The next line makes 'go generate' write the zgoarch*.go files with
+// per-arch information, including constants named Is$GOARCH for every
+// GOARCH. The constant is 1 on the current system, 0 otherwise; multiplying
+// by them is useful for defining GOARCH-specific constants.
+//
+//go:generate go run gengoarch.go
+
+type ArchFamilyType int
+
+const (
+ AMD64 ArchFamilyType = iota
+ ARM
+ ARM64
+ I386
+ LOONG64
+ MIPS
+ MIPS64
+ PPC64
+ RISCV64
+ S390X
+ WASM
+)
+
+// PtrSize is the size of a pointer in bytes - unsafe.Sizeof(uintptr(0)) but as an ideal constant.
+// It is also the machine's native word size (that is, 4 on 32-bit systems, 8 on 64-bit).
+const PtrSize = 4 << (^uintptr(0) >> 63)
+
+// ArchFamily is the architecture family (AMD64, ARM, ...)
+const ArchFamily ArchFamilyType = _ArchFamily
+
+// BigEndian reports whether the architecture is big-endian.
+const BigEndian = IsArmbe|IsArm64be|IsMips|IsMips64|IsPpc|IsPpc64|IsS390|IsS390x|IsSparc|IsSparc64 == 1
+
+// DefaultPhysPageSize is the default physical page size.
+const DefaultPhysPageSize = _DefaultPhysPageSize
+
+// PCQuantum is the minimal unit for a program counter (1 on x86, 4 on most other systems).
+// The various PC tables record PC deltas pre-divided by PCQuantum.
+const PCQuantum = _PCQuantum
+
+// Int64Align is the required alignment for a 64-bit integer (4 on 32-bit systems, 8 on 64-bit).
+const Int64Align = PtrSize
+
+// MinFrameSize is the size of the system-reserved words at the bottom
+// of a frame (just above the architectural stack pointer).
+// It is zero on x86 and PtrSize on most non-x86 (LR-based) systems.
+// On PowerPC it is larger, to cover three more reserved words:
+// the compiler word, the link editor word, and the TOC save word.
+const MinFrameSize = _MinFrameSize
+
+// StackAlign is the required alignment of the SP register.
+// The stack must be at least word aligned, but some architectures require more.
+const StackAlign = _StackAlign
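PtrSize leans on the width of uintptr: on a 64-bit target ^uintptr(0) is 64 one-bits, the shift by 63 leaves 1, and 4 << 1 == 8; on a 32-bit target the shift produces 0 and 4 << 0 == 4. The BigEndian expression works the same way, OR-ing the generated 0/1 Is* constants so the result is 1 exactly when the target is one of the big-endian architectures. A small sketch that evaluates the PtrSize expression at run time (the real constant is resolved at compile time):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        // Same expression as goarch.PtrSize, evaluated on the running machine.
        ptrSize := 4 << (^uintptr(0) >> 63)
        fmt.Println("computed pointer size:", ptrSize)                    // 8 on 64-bit, 4 on 32-bit
        fmt.Println("unsafe.Sizeof(uintptr):", unsafe.Sizeof(uintptr(0))) // should agree
    }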
diff --git a/src/internal/goarch/goarch_386.go b/src/internal/goarch/goarch_386.go
new file mode 100644
index 0000000..c621421
--- /dev/null
+++ b/src/internal/goarch/goarch_386.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = I386
+ _DefaultPhysPageSize = 4096
+ _PCQuantum = 1
+ _MinFrameSize = 0
+ _StackAlign = PtrSize
+)
diff --git a/src/internal/goarch/goarch_amd64.go b/src/internal/goarch/goarch_amd64.go
new file mode 100644
index 0000000..911e3e7
--- /dev/null
+++ b/src/internal/goarch/goarch_amd64.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = AMD64
+ _DefaultPhysPageSize = 4096
+ _PCQuantum = 1
+ _MinFrameSize = 0
+ _StackAlign = PtrSize
+)
diff --git a/src/internal/goarch/goarch_arm.go b/src/internal/goarch/goarch_arm.go
new file mode 100644
index 0000000..a659171
--- /dev/null
+++ b/src/internal/goarch/goarch_arm.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = ARM
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 4
+ _StackAlign = PtrSize
+)
diff --git a/src/internal/goarch/goarch_arm64.go b/src/internal/goarch/goarch_arm64.go
new file mode 100644
index 0000000..85d0b47
--- /dev/null
+++ b/src/internal/goarch/goarch_arm64.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = ARM64
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 8
+ _StackAlign = 16
+)
diff --git a/src/internal/goarch/goarch_loong64.go b/src/internal/goarch/goarch_loong64.go
new file mode 100644
index 0000000..dae1f4d
--- /dev/null
+++ b/src/internal/goarch/goarch_loong64.go
@@ -0,0 +1,15 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build loong64
+
+package goarch
+
+const (
+ _ArchFamily = LOONG64
+ _DefaultPhysPageSize = 16384
+ _PCQuantum = 4
+ _MinFrameSize = 8
+ _StackAlign = PtrSize
+)
diff --git a/src/internal/goarch/goarch_mips.go b/src/internal/goarch/goarch_mips.go
new file mode 100644
index 0000000..59f3995
--- /dev/null
+++ b/src/internal/goarch/goarch_mips.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = MIPS
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 4
+ _StackAlign = PtrSize
+)
diff --git a/src/internal/goarch/goarch_mips64.go b/src/internal/goarch/goarch_mips64.go
new file mode 100644
index 0000000..9e4f827
--- /dev/null
+++ b/src/internal/goarch/goarch_mips64.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = MIPS64
+ _DefaultPhysPageSize = 16384
+ _PCQuantum = 4
+ _MinFrameSize = 8
+ _StackAlign = PtrSize
+)
diff --git a/src/internal/goarch/goarch_mips64le.go b/src/internal/goarch/goarch_mips64le.go
new file mode 100644
index 0000000..9e4f827
--- /dev/null
+++ b/src/internal/goarch/goarch_mips64le.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = MIPS64
+ _DefaultPhysPageSize = 16384
+ _PCQuantum = 4
+ _MinFrameSize = 8
+ _StackAlign = PtrSize
+)
diff --git a/src/internal/goarch/goarch_mipsle.go b/src/internal/goarch/goarch_mipsle.go
new file mode 100644
index 0000000..3e6642b
--- /dev/null
+++ b/src/internal/goarch/goarch_mipsle.go
@@ -0,0 +1,13 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = MIPS
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 4
+ _StackAlign = PtrSize
+)
diff --git a/src/internal/goarch/goarch_ppc64.go b/src/internal/goarch/goarch_ppc64.go
new file mode 100644
index 0000000..60cc846
--- /dev/null
+++ b/src/internal/goarch/goarch_ppc64.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = PPC64
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 32
+ _StackAlign = 16
+)
diff --git a/src/internal/goarch/goarch_ppc64le.go b/src/internal/goarch/goarch_ppc64le.go
new file mode 100644
index 0000000..60cc846
--- /dev/null
+++ b/src/internal/goarch/goarch_ppc64le.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = PPC64
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 32
+ _StackAlign = 16
+)
diff --git a/src/internal/goarch/goarch_riscv64.go b/src/internal/goarch/goarch_riscv64.go
new file mode 100644
index 0000000..3b6da1e
--- /dev/null
+++ b/src/internal/goarch/goarch_riscv64.go
@@ -0,0 +1,13 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = RISCV64
+ _DefaultPhysPageSize = 4096
+ _PCQuantum = 4
+ _MinFrameSize = 8
+ _StackAlign = PtrSize
+)
diff --git a/src/internal/goarch/goarch_s390x.go b/src/internal/goarch/goarch_s390x.go
new file mode 100644
index 0000000..20c5705
--- /dev/null
+++ b/src/internal/goarch/goarch_s390x.go
@@ -0,0 +1,13 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = S390X
+ _DefaultPhysPageSize = 4096
+ _PCQuantum = 2
+ _MinFrameSize = 8
+ _StackAlign = PtrSize
+)
diff --git a/src/internal/goarch/goarch_wasm.go b/src/internal/goarch/goarch_wasm.go
new file mode 100644
index 0000000..98618d6
--- /dev/null
+++ b/src/internal/goarch/goarch_wasm.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goarch
+
+const (
+ _ArchFamily = WASM
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 1
+ _MinFrameSize = 0
+ _StackAlign = PtrSize
+)
diff --git a/src/internal/goarch/zgoarch_386.go b/src/internal/goarch/zgoarch_386.go
new file mode 100644
index 0000000..4a9b0e6
--- /dev/null
+++ b/src/internal/goarch/zgoarch_386.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build 386
+
+package goarch
+
+const GOARCH = `386`
+
+const Is386 = 1
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_amd64.go b/src/internal/goarch/zgoarch_amd64.go
new file mode 100644
index 0000000..7926392
--- /dev/null
+++ b/src/internal/goarch/zgoarch_amd64.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build amd64
+
+package goarch
+
+const GOARCH = `amd64`
+
+const Is386 = 0
+const IsAmd64 = 1
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_arm.go b/src/internal/goarch/zgoarch_arm.go
new file mode 100644
index 0000000..6c03b8b
--- /dev/null
+++ b/src/internal/goarch/zgoarch_arm.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build arm
+
+package goarch
+
+const GOARCH = `arm`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 1
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_arm64.go b/src/internal/goarch/zgoarch_arm64.go
new file mode 100644
index 0000000..ad342d7
--- /dev/null
+++ b/src/internal/goarch/zgoarch_arm64.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build arm64
+
+package goarch
+
+const GOARCH = `arm64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 1
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_arm64be.go b/src/internal/goarch/zgoarch_arm64be.go
new file mode 100644
index 0000000..0f26003
--- /dev/null
+++ b/src/internal/goarch/zgoarch_arm64be.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build arm64be
+
+package goarch
+
+const GOARCH = `arm64be`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 1
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_armbe.go b/src/internal/goarch/zgoarch_armbe.go
new file mode 100644
index 0000000..6092fee
--- /dev/null
+++ b/src/internal/goarch/zgoarch_armbe.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build armbe
+
+package goarch
+
+const GOARCH = `armbe`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 1
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_loong64.go b/src/internal/goarch/zgoarch_loong64.go
new file mode 100644
index 0000000..21c67e1
--- /dev/null
+++ b/src/internal/goarch/zgoarch_loong64.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build loong64
+
+package goarch
+
+const GOARCH = `loong64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 1
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mips.go b/src/internal/goarch/zgoarch_mips.go
new file mode 100644
index 0000000..0db1974
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mips.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips
+
+package goarch
+
+const GOARCH = `mips`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 1
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mips64.go b/src/internal/goarch/zgoarch_mips64.go
new file mode 100644
index 0000000..738806f
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mips64.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64
+
+package goarch
+
+const GOARCH = `mips64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 1
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mips64le.go b/src/internal/goarch/zgoarch_mips64le.go
new file mode 100644
index 0000000..8de5beb
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mips64le.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64le
+
+package goarch
+
+const GOARCH = `mips64le`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 1
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mips64p32.go b/src/internal/goarch/zgoarch_mips64p32.go
new file mode 100644
index 0000000..ea461be
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mips64p32.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64p32
+
+package goarch
+
+const GOARCH = `mips64p32`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 1
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mips64p32le.go b/src/internal/goarch/zgoarch_mips64p32le.go
new file mode 100644
index 0000000..15473ce
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mips64p32le.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64p32le
+
+package goarch
+
+const GOARCH = `mips64p32le`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 1
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mipsle.go b/src/internal/goarch/zgoarch_mipsle.go
new file mode 100644
index 0000000..4955142
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mipsle.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mipsle
+
+package goarch
+
+const GOARCH = `mipsle`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 1
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_ppc.go b/src/internal/goarch/zgoarch_ppc.go
new file mode 100644
index 0000000..ec01763
--- /dev/null
+++ b/src/internal/goarch/zgoarch_ppc.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build ppc
+
+package goarch
+
+const GOARCH = `ppc`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 1
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_ppc64.go b/src/internal/goarch/zgoarch_ppc64.go
new file mode 100644
index 0000000..39be392
--- /dev/null
+++ b/src/internal/goarch/zgoarch_ppc64.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build ppc64
+
+package goarch
+
+const GOARCH = `ppc64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 1
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_ppc64le.go b/src/internal/goarch/zgoarch_ppc64le.go
new file mode 100644
index 0000000..5f959e0
--- /dev/null
+++ b/src/internal/goarch/zgoarch_ppc64le.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build ppc64le
+
+package goarch
+
+const GOARCH = `ppc64le`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 1
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_riscv.go b/src/internal/goarch/zgoarch_riscv.go
new file mode 100644
index 0000000..8d81a14
--- /dev/null
+++ b/src/internal/goarch/zgoarch_riscv.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build riscv
+
+package goarch
+
+const GOARCH = `riscv`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 1
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_riscv64.go b/src/internal/goarch/zgoarch_riscv64.go
new file mode 100644
index 0000000..1df989c
--- /dev/null
+++ b/src/internal/goarch/zgoarch_riscv64.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build riscv64
+
+package goarch
+
+const GOARCH = `riscv64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 1
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_s390.go b/src/internal/goarch/zgoarch_s390.go
new file mode 100644
index 0000000..56815b9
--- /dev/null
+++ b/src/internal/goarch/zgoarch_s390.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build s390
+
+package goarch
+
+const GOARCH = `s390`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 1
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_s390x.go b/src/internal/goarch/zgoarch_s390x.go
new file mode 100644
index 0000000..e61e9bd
--- /dev/null
+++ b/src/internal/goarch/zgoarch_s390x.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build s390x
+
+package goarch
+
+const GOARCH = `s390x`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 1
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_sparc.go b/src/internal/goarch/zgoarch_sparc.go
new file mode 100644
index 0000000..ee5b746
--- /dev/null
+++ b/src/internal/goarch/zgoarch_sparc.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build sparc
+
+package goarch
+
+const GOARCH = `sparc`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 1
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_sparc64.go b/src/internal/goarch/zgoarch_sparc64.go
new file mode 100644
index 0000000..519aaa1
--- /dev/null
+++ b/src/internal/goarch/zgoarch_sparc64.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build sparc64
+
+package goarch
+
+const GOARCH = `sparc64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 1
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_wasm.go b/src/internal/goarch/zgoarch_wasm.go
new file mode 100644
index 0000000..25567a1
--- /dev/null
+++ b/src/internal/goarch/zgoarch_wasm.go
@@ -0,0 +1,32 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build wasm
+
+package goarch
+
+const GOARCH = `wasm`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 1
diff --git a/src/internal/godebug/godebug.go b/src/internal/godebug/godebug.go
new file mode 100644
index 0000000..36bfeac
--- /dev/null
+++ b/src/internal/godebug/godebug.go
@@ -0,0 +1,290 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package godebug makes the settings in the $GODEBUG environment variable
+// available to other packages. These settings are often used for compatibility
+// tweaks, when we need to change a default behavior but want to let users
+// opt back in to the original. For example, GODEBUG=http2server=0 disables
+// HTTP/2 support in the net/http server.
+//
+// In typical usage, code should declare a Setting as a global
+// and then call Value each time the current setting value is needed:
+//
+// var http2server = godebug.New("http2server")
+//
+// func ServeConn(c net.Conn) {
+// if http2server.Value() == "0" {
+// disallow HTTP/2
+// ...
+// }
+// ...
+// }
+//
+// Each time a non-default setting causes a change in program behavior,
+// code should call [Setting.IncNonDefault] to increment a counter that can
+// be reported by [runtime/metrics.Read].
+// Note that counters used with IncNonDefault must be added to
+// various tables in other packages. See the [Setting.IncNonDefault]
+// documentation for details.
+package godebug
+
+// Note: Be careful about new imports here. Any package
+// that internal/godebug imports cannot itself import internal/godebug,
+// meaning it cannot introduce a GODEBUG setting of its own.
+// We keep imports to the absolute bare minimum.
+import (
+ "internal/bisect"
+ "internal/godebugs"
+ "sync"
+ "sync/atomic"
+ "unsafe"
+ _ "unsafe" // go:linkname
+)
+
+// A Setting is a single setting in the $GODEBUG environment variable.
+type Setting struct {
+ name string
+ once sync.Once
+ *setting
+}
+
+type setting struct {
+ value atomic.Pointer[value]
+ nonDefaultOnce sync.Once
+ nonDefault atomic.Uint64
+ info *godebugs.Info
+}
+
+type value struct {
+ text string
+ bisect *bisect.Matcher
+}
+
+// New returns a new Setting for the $GODEBUG setting with the given name.
+//
+// GODEBUGs meant for use by end users must be listed in ../godebugs/table.go,
+// which is used for generating and checking various documentation.
+// If the name is not listed in that table, New will succeed but calling Value
+// on the returned Setting will panic.
+// To disable that panic for access to an undocumented setting,
+// prefix the name with a #, as in godebug.New("#gofsystrace").
+// The # is a signal to New but not part of the key used in $GODEBUG.
+func New(name string) *Setting {
+ return &Setting{name: name}
+}
+
+// Name returns the name of the setting.
+func (s *Setting) Name() string {
+ if s.name != "" && s.name[0] == '#' {
+ return s.name[1:]
+ }
+ return s.name
+}
+
+// Undocumented reports whether this is an undocumented setting.
+func (s *Setting) Undocumented() bool {
+ return s.name != "" && s.name[0] == '#'
+}
+
+// String returns a printable form for the setting: name=value.
+func (s *Setting) String() string {
+ return s.Name() + "=" + s.Value()
+}
+
+// IncNonDefault increments the non-default behavior counter
+// associated with the given setting.
+// This counter is exposed in the runtime/metrics value
+// /godebug/non-default-behavior/<name>:events.
+//
+// Note that Value must be called at least once before IncNonDefault.
+func (s *Setting) IncNonDefault() {
+ s.nonDefaultOnce.Do(s.register)
+ s.nonDefault.Add(1)
+}
+
+func (s *Setting) register() {
+ if s.info == nil || s.info.Opaque {
+ panic("godebug: unexpected IncNonDefault of " + s.name)
+ }
+ registerMetric("/godebug/non-default-behavior/"+s.Name()+":events", s.nonDefault.Load)
+}
+
+// cache is a cache of all the GODEBUG settings,
+// a locked map[string]*setting.
+//
+// All Settings with the same name share a single
+// *setting, so that when GODEBUG changes only that
+// setting's single atomic value pointer needs to
+// be updated.
+//
+// A name appears in the values map either if it is the
+// name of a Setting for which Value has been called
+// at least once, or if the name has ever appeared in
+// a name=value pair in the $GODEBUG environment variable.
+// Once entered into the map, the name is never removed.
+var cache sync.Map // name string -> value *setting
+
+var empty value
+
+// Value returns the current value for the GODEBUG setting s.
+//
+// Value maintains an internal cache that is synchronized
+// with changes to the $GODEBUG environment variable,
+// making Value efficient to call as frequently as needed.
+// Clients should therefore typically not attempt their own
+// caching of Value's result.
+func (s *Setting) Value() string {
+ s.once.Do(func() {
+ s.setting = lookup(s.Name())
+ if s.info == nil && !s.Undocumented() {
+ panic("godebug: Value of name not listed in godebugs.All: " + s.name)
+ }
+ })
+ v := *s.value.Load()
+ if v.bisect != nil && !v.bisect.Stack(&stderr) {
+ return ""
+ }
+ return v.text
+}
+
+// lookup returns the unique *setting value for the given name.
+func lookup(name string) *setting {
+ if v, ok := cache.Load(name); ok {
+ return v.(*setting)
+ }
+ s := new(setting)
+ s.info = godebugs.Lookup(name)
+ s.value.Store(&empty)
+ if v, loaded := cache.LoadOrStore(name, s); loaded {
+ // Lost race: someone else created it. Use theirs.
+ return v.(*setting)
+ }
+
+ return s
+}
+
+// setUpdate is provided by package runtime.
+// It calls update(def, env), where def is the default GODEBUG setting
+// and env is the current value of the $GODEBUG environment variable.
+// After that first call, the runtime calls update(def, env)
+// again each time the environment variable changes
+// (due to use of os.Setenv, for example).
+//
+//go:linkname setUpdate
+func setUpdate(update func(string, string))
+
+// registerMetric is provided by package runtime.
+// It forwards registrations to runtime/metrics.
+//
+//go:linkname registerMetric
+func registerMetric(name string, read func() uint64)
+
+// setNewIncNonDefault is provided by package runtime.
+// The runtime can do
+//
+// inc := newNonDefaultInc(name)
+//
+// instead of
+//
+// inc := godebug.New(name).IncNonDefault
+//
+// since it cannot import godebug.
+//
+//go:linkname setNewIncNonDefault
+func setNewIncNonDefault(newIncNonDefault func(string) func())
+
+func init() {
+ setUpdate(update)
+ setNewIncNonDefault(newIncNonDefault)
+}
+
+func newIncNonDefault(name string) func() {
+ s := New(name)
+ s.Value()
+ return s.IncNonDefault
+}
+
+var updateMu sync.Mutex
+
+// update records an updated GODEBUG setting.
+// def is the default GODEBUG setting for the running binary,
+// and env is the current value of the $GODEBUG environment variable.
+func update(def, env string) {
+ updateMu.Lock()
+ defer updateMu.Unlock()
+
+ // Update all the cached values, creating new ones as needed.
+ // We parse the environment variable first, so that any settings it has
+ // are already locked in place (did[name] = true) before we consider
+ // the defaults.
+ did := make(map[string]bool)
+ parse(did, env)
+ parse(did, def)
+
+ // Clear any cached values that are no longer present.
+ cache.Range(func(name, s any) bool {
+ if !did[name.(string)] {
+ s.(*setting).value.Store(&empty)
+ }
+ return true
+ })
+}
+
+// parse parses the GODEBUG setting string s,
+// which has the form k=v,k2=v2,k3=v3.
+// Later settings override earlier ones.
+// Parse only updates settings k=v for which did[k] = false.
+// It also sets did[k] = true for settings that it updates.
+// Each value v can also have the form v#pattern,
+// in which case the GODEBUG is only enabled for call stacks
+// matching pattern, for use with golang.org/x/tools/cmd/bisect.
+func parse(did map[string]bool, s string) {
+ // Scan the string backward so that later settings are used
+ // and earlier settings are ignored.
+ // Note that a forward scan would cause cached values
+ // to temporarily use the ignored value before being
+ // updated to the "correct" one.
+ end := len(s)
+ eq := -1
+ for i := end - 1; i >= -1; i-- {
+ if i == -1 || s[i] == ',' {
+ if eq >= 0 {
+ name, arg := s[i+1:eq], s[eq+1:end]
+ if !did[name] {
+ did[name] = true
+ v := &value{text: arg}
+ for j := 0; j < len(arg); j++ {
+ if arg[j] == '#' {
+ v.text = arg[:j]
+ v.bisect, _ = bisect.New(arg[j+1:])
+ break
+ }
+ }
+ lookup(name).value.Store(v)
+ }
+ }
+ eq = -1
+ end = i
+ } else if s[i] == '=' {
+ eq = i
+ }
+ }
+}
+
+type runtimeStderr struct{}
+
+var stderr runtimeStderr
+
+func (*runtimeStderr) Write(b []byte) (int, error) {
+ if len(b) > 0 {
+ write(2, unsafe.Pointer(&b[0]), int32(len(b)))
+ }
+ return len(b), nil
+}
+
+// Since we cannot import os or syscall, use the runtime's write function
+// to print to standard error.
+//
+//go:linkname write runtime.write
+func write(fd uintptr, p unsafe.Pointer, n int32) int32
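For reference, a minimal sketch of the usage pattern the package comment above describes, combining Value with IncNonDefault. It is modeled loosely on the http2server setting from the comment and is not the actual net/http code:

    package example

    import (
    	"internal/godebug"
    	"net"
    )

    // http2server is declared once as a package-level Setting,
    // as the package comment recommends.
    var http2server = godebug.New("http2server")

    func serveConn(c net.Conn) {
    	// Value is cheap to call on every connection; it reads a
    	// cached, atomically updated value.
    	if http2server.Value() == "0" {
    		// The user opted out of the default: record the
    		// non-default behavior so it shows up in runtime/metrics
    		// under /godebug/non-default-behavior/http2server:events.
    		http2server.IncNonDefault()
    		return // fall back to HTTP/1.x handling
    	}
    	// ... serve HTTP/2 on c ...
    	_ = c
    }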
diff --git a/src/internal/godebug/godebug_test.go b/src/internal/godebug/godebug_test.go
new file mode 100644
index 0000000..65dd256
--- /dev/null
+++ b/src/internal/godebug/godebug_test.go
@@ -0,0 +1,162 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package godebug_test
+
+import (
+ "fmt"
+ . "internal/godebug"
+ "internal/race"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "reflect"
+ "runtime/metrics"
+ "sort"
+ "strings"
+ "testing"
+)
+
+func TestGet(t *testing.T) {
+ foo := New("#foo")
+ tests := []struct {
+ godebug string
+ setting *Setting
+ want string
+ }{
+ {"", New("#"), ""},
+ {"", foo, ""},
+ {"foo=bar", foo, "bar"},
+ {"foo=bar,after=x", foo, "bar"},
+ {"before=x,foo=bar,after=x", foo, "bar"},
+ {"before=x,foo=bar", foo, "bar"},
+ {",,,foo=bar,,,", foo, "bar"},
+ {"foodecoy=wrong,foo=bar", foo, "bar"},
+ {"foo=", foo, ""},
+ {"foo", foo, ""},
+ {",foo", foo, ""},
+ {"foo=bar,baz", New("#loooooooong"), ""},
+ }
+ for _, tt := range tests {
+ t.Setenv("GODEBUG", tt.godebug)
+ got := tt.setting.Value()
+ if got != tt.want {
+ t.Errorf("get(%q, %q) = %q; want %q", tt.godebug, tt.setting.Name(), got, tt.want)
+ }
+ }
+}
+
+func TestMetrics(t *testing.T) {
+ const name = "http2client" // must be a real name so runtime will accept it
+
+ var m [1]metrics.Sample
+ m[0].Name = "/godebug/non-default-behavior/" + name + ":events"
+ metrics.Read(m[:])
+ if kind := m[0].Value.Kind(); kind != metrics.KindUint64 {
+ t.Fatalf("NonDefault kind = %v, want uint64", kind)
+ }
+
+ s := New(name)
+ s.Value()
+ s.IncNonDefault()
+ s.IncNonDefault()
+ s.IncNonDefault()
+ metrics.Read(m[:])
+ if kind := m[0].Value.Kind(); kind != metrics.KindUint64 {
+ t.Fatalf("NonDefault kind = %v, want uint64", kind)
+ }
+ if count := m[0].Value.Uint64(); count != 3 {
+ t.Fatalf("NonDefault value = %d, want 3", count)
+ }
+}
+
+// TestPanicNilRace checks for a race in the runtime caused by use of runtime
+// atomics (not visible to usual race detection) to install the counter for
+// non-default panic(nil) semantics. For #64649.
+func TestPanicNilRace(t *testing.T) {
+ if !race.Enabled {
+ t.Skip("Skipping test intended for use with -race.")
+ }
+ if os.Getenv("GODEBUG") != "panicnil=1" {
+ cmd := testenv.CleanCmdEnv(testenv.Command(t, os.Args[0], "-test.run=^TestPanicNilRace$", "-test.v", "-test.parallel=2", "-test.count=1"))
+ cmd.Env = append(cmd.Env, "GODEBUG=panicnil=1")
+ out, err := cmd.CombinedOutput()
+ t.Logf("output:\n%s", out)
+
+ if err != nil {
+ t.Errorf("Was not expecting a crash")
+ }
+ return
+ }
+
+ test := func(t *testing.T) {
+ t.Parallel()
+ defer func() {
+ recover()
+ }()
+ panic(nil)
+ }
+ t.Run("One", test)
+ t.Run("Two", test)
+}
+
+func TestCmdBisect(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ out, err := exec.Command("go", "run", "cmd/vendor/golang.org/x/tools/cmd/bisect", "GODEBUG=buggy=1#PATTERN", os.Args[0], "-test.run=BisectTestCase").CombinedOutput()
+ if err != nil {
+ t.Fatalf("exec bisect: %v\n%s", err, out)
+ }
+
+ var want []string
+ src, err := os.ReadFile("godebug_test.go")
+ for i, line := range strings.Split(string(src), "\n") {
+ if strings.Contains(line, "BISECT"+" "+"BUG") {
+ want = append(want, fmt.Sprintf("godebug_test.go:%d", i+1))
+ }
+ }
+ sort.Strings(want)
+
+ var have []string
+ for _, line := range strings.Split(string(out), "\n") {
+ if strings.Contains(line, "godebug_test.go:") {
+ have = append(have, line[strings.LastIndex(line, "godebug_test.go:"):])
+ }
+ }
+ sort.Strings(have)
+
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("bad bisect output:\nhave %v\nwant %v\ncomplete output:\n%s", have, want, string(out))
+ }
+}
+
+// This test does nothing by itself, but you can run
+//
+// bisect 'GODEBUG=buggy=1#PATTERN' go test -run=BisectTestCase
+//
+// to see that the GODEBUG bisect support is working.
+// TestCmdBisect above does exactly that.
+func TestBisectTestCase(t *testing.T) {
+ s := New("#buggy")
+ for i := 0; i < 10; i++ {
+ a := s.Value() == "1"
+ b := s.Value() == "1"
+ c := s.Value() == "1" // BISECT BUG
+ d := s.Value() == "1" // BISECT BUG
+ e := s.Value() == "1" // BISECT BUG
+
+ if a {
+ t.Log("ok")
+ }
+ if b {
+ t.Log("ok")
+ }
+ if c {
+ t.Error("bug")
+ }
+ if d &&
+ e {
+ t.Error("bug")
+ }
+ }
+}
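TestBisectTestCase above leans on the value#pattern form that parse (in godebug.go) recognizes: the text before '#' is the setting's value, the text after it is compiled with bisect.New into a matcher, and Value only reports the value when the current call stack matches. A rough sketch of that gating, using only the internal/bisect calls already seen above; treating os.Stderr as an acceptable writer here is an assumption, since godebug itself passes its own runtime-backed writer:

    package example

    import (
    	"internal/bisect"
    	"os"
    )

    // enabled reports whether a hypothetical "buggy=1#PATTERN" setting
    // should take effect for the current call stack.
    func enabled(pattern string) bool {
    	m, err := bisect.New(pattern)
    	if err != nil || m == nil {
    		return false
    	}
    	// Stack reports whether the current call stack is selected by
    	// the matcher; any bisect reporting output goes to the writer.
    	return m.Stack(os.Stderr)
    }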
diff --git a/src/internal/godebugs/godebugs_test.go b/src/internal/godebugs/godebugs_test.go
new file mode 100644
index 0000000..a1cb8d4
--- /dev/null
+++ b/src/internal/godebugs/godebugs_test.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package godebugs_test
+
+import (
+ "internal/godebugs"
+ "internal/testenv"
+ "os"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestAll(t *testing.T) {
+ data, err := os.ReadFile("../../../doc/godebug.md")
+ if err != nil {
+ if os.IsNotExist(err) && (testenv.Builder() == "" || runtime.GOOS != "linux") {
+ t.Skip(err)
+ }
+ t.Fatal(err)
+ }
+ doc := string(data)
+
+ last := ""
+ for _, info := range godebugs.All {
+ if info.Name <= last {
+ t.Errorf("All not sorted: %s then %s", last, info.Name)
+ }
+ last = info.Name
+
+ if info.Package == "" {
+ t.Errorf("Name=%s missing Package", info.Name)
+ }
+ if info.Changed != 0 && info.Old == "" {
+ t.Errorf("Name=%s has Changed, missing Old", info.Name)
+ }
+ if info.Old != "" && info.Changed == 0 {
+ t.Errorf("Name=%s has Old, missing Changed", info.Name)
+ }
+ if !strings.Contains(doc, "`"+info.Name+"`") {
+ t.Errorf("Name=%s not documented in doc/godebug.md", info.Name)
+ }
+ }
+}
diff --git a/src/internal/godebugs/table.go b/src/internal/godebugs/table.go
new file mode 100644
index 0000000..b1711d9
--- /dev/null
+++ b/src/internal/godebugs/table.go
@@ -0,0 +1,69 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package godebugs provides a table of known GODEBUG settings,
+// for use by a variety of other packages, including internal/godebug,
+// runtime, runtime/metrics, and cmd/go/internal/load.
+package godebugs
+
+// An Info describes a single known GODEBUG setting.
+type Info struct {
+ Name string // name of the setting ("panicnil")
+ Package string // package that uses the setting ("runtime")
+ Changed int // minor version when default changed, if any; 21 means Go 1.21
+ Old string // value that restores behavior prior to Changed
+ Opaque bool // setting does not export information to runtime/metrics using [internal/godebug.Setting.IncNonDefault]
+}
+
+// All is the table of known settings, sorted by Name.
+//
+// Note: After adding entries to this table, run 'go generate runtime/metrics'
+// to update the runtime/metrics doc comment.
+// (Otherwise the runtime/metrics test will fail.)
+//
+// Note: After adding entries to this table, update the list in doc/godebug.md as well.
+// (Otherwise the test in this package will fail.)
+var All = []Info{
+ {Name: "execerrdot", Package: "os/exec"},
+ {Name: "gocachehash", Package: "cmd/go"},
+ {Name: "gocachetest", Package: "cmd/go"},
+ {Name: "gocacheverify", Package: "cmd/go"},
+ {Name: "http2client", Package: "net/http"},
+ {Name: "http2debug", Package: "net/http", Opaque: true},
+ {Name: "http2server", Package: "net/http"},
+ {Name: "installgoroot", Package: "go/build"},
+ {Name: "jstmpllitinterp", Package: "html/template"},
+ //{Name: "multipartfiles", Package: "mime/multipart"},
+ {Name: "multipartmaxheaders", Package: "mime/multipart"},
+ {Name: "multipartmaxparts", Package: "mime/multipart"},
+ {Name: "multipathtcp", Package: "net"},
+ {Name: "netdns", Package: "net", Opaque: true},
+ {Name: "panicnil", Package: "runtime", Changed: 21, Old: "1"},
+ {Name: "randautoseed", Package: "math/rand"},
+ {Name: "tarinsecurepath", Package: "archive/tar"},
+ {Name: "tlsmaxrsasize", Package: "crypto/tls"},
+ {Name: "x509sha1", Package: "crypto/x509"},
+ {Name: "x509usefallbackroots", Package: "crypto/x509"},
+ {Name: "zipinsecurepath", Package: "archive/zip"},
+}
+
+// Lookup returns the Info with the given name.
+func Lookup(name string) *Info {
+ // binary search, avoiding import of sort.
+ lo := 0
+ hi := len(All)
+ for lo < hi {
+ m := lo + (hi-lo)>>1
+ mid := All[m].Name
+ if name == mid {
+ return &All[m]
+ }
+ if name < mid {
+ hi = m
+ } else {
+ lo = m + 1
+ }
+ }
+ return nil
+}
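Lookup is a hand-rolled binary search over the sorted All slice (the package deliberately avoids importing sort). A short sketch of how a caller might consult the table, using the panicnil entry defined above:

    package example

    import (
    	"fmt"
    	"internal/godebugs"
    )

    func describe() {
    	if info := godebugs.Lookup("panicnil"); info != nil {
    		// From the table above: Package "runtime", Changed 21, Old "1".
    		fmt.Printf("%s owned by %s, default changed in Go 1.%d (old value %q)\n",
    			info.Name, info.Package, info.Changed, info.Old)
    	}
    }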
diff --git a/src/internal/goexperiment/exp_arenas_off.go b/src/internal/goexperiment/exp_arenas_off.go
new file mode 100644
index 0000000..9e40ebc
--- /dev/null
+++ b/src/internal/goexperiment/exp_arenas_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.arenas
+// +build !goexperiment.arenas
+
+package goexperiment
+
+const Arenas = false
+const ArenasInt = 0
diff --git a/src/internal/goexperiment/exp_arenas_on.go b/src/internal/goexperiment/exp_arenas_on.go
new file mode 100644
index 0000000..92ef748
--- /dev/null
+++ b/src/internal/goexperiment/exp_arenas_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.arenas
+// +build goexperiment.arenas
+
+package goexperiment
+
+const Arenas = true
+const ArenasInt = 1
diff --git a/src/internal/goexperiment/exp_boringcrypto_off.go b/src/internal/goexperiment/exp_boringcrypto_off.go
new file mode 100644
index 0000000..020c75b
--- /dev/null
+++ b/src/internal/goexperiment/exp_boringcrypto_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.boringcrypto
+// +build !goexperiment.boringcrypto
+
+package goexperiment
+
+const BoringCrypto = false
+const BoringCryptoInt = 0
diff --git a/src/internal/goexperiment/exp_boringcrypto_on.go b/src/internal/goexperiment/exp_boringcrypto_on.go
new file mode 100644
index 0000000..1454329
--- /dev/null
+++ b/src/internal/goexperiment/exp_boringcrypto_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.boringcrypto
+// +build goexperiment.boringcrypto
+
+package goexperiment
+
+const BoringCrypto = true
+const BoringCryptoInt = 1
diff --git a/src/internal/goexperiment/exp_cacheprog_off.go b/src/internal/goexperiment/exp_cacheprog_off.go
new file mode 100644
index 0000000..29aa869
--- /dev/null
+++ b/src/internal/goexperiment/exp_cacheprog_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.cacheprog
+// +build !goexperiment.cacheprog
+
+package goexperiment
+
+const CacheProg = false
+const CacheProgInt = 0
diff --git a/src/internal/goexperiment/exp_cacheprog_on.go b/src/internal/goexperiment/exp_cacheprog_on.go
new file mode 100644
index 0000000..121b299
--- /dev/null
+++ b/src/internal/goexperiment/exp_cacheprog_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.cacheprog
+// +build goexperiment.cacheprog
+
+package goexperiment
+
+const CacheProg = true
+const CacheProgInt = 1
diff --git a/src/internal/goexperiment/exp_cgocheck2_off.go b/src/internal/goexperiment/exp_cgocheck2_off.go
new file mode 100644
index 0000000..77aa538
--- /dev/null
+++ b/src/internal/goexperiment/exp_cgocheck2_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.cgocheck2
+// +build !goexperiment.cgocheck2
+
+package goexperiment
+
+const CgoCheck2 = false
+const CgoCheck2Int = 0
diff --git a/src/internal/goexperiment/exp_cgocheck2_on.go b/src/internal/goexperiment/exp_cgocheck2_on.go
new file mode 100644
index 0000000..6201249
--- /dev/null
+++ b/src/internal/goexperiment/exp_cgocheck2_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.cgocheck2
+// +build goexperiment.cgocheck2
+
+package goexperiment
+
+const CgoCheck2 = true
+const CgoCheck2Int = 1
diff --git a/src/internal/goexperiment/exp_coverageredesign_off.go b/src/internal/goexperiment/exp_coverageredesign_off.go
new file mode 100644
index 0000000..95d3a6c
--- /dev/null
+++ b/src/internal/goexperiment/exp_coverageredesign_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.coverageredesign
+// +build !goexperiment.coverageredesign
+
+package goexperiment
+
+const CoverageRedesign = false
+const CoverageRedesignInt = 0
diff --git a/src/internal/goexperiment/exp_coverageredesign_on.go b/src/internal/goexperiment/exp_coverageredesign_on.go
new file mode 100644
index 0000000..330a234
--- /dev/null
+++ b/src/internal/goexperiment/exp_coverageredesign_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.coverageredesign
+// +build goexperiment.coverageredesign
+
+package goexperiment
+
+const CoverageRedesign = true
+const CoverageRedesignInt = 1
diff --git a/src/internal/goexperiment/exp_fieldtrack_off.go b/src/internal/goexperiment/exp_fieldtrack_off.go
new file mode 100644
index 0000000..e5e1326
--- /dev/null
+++ b/src/internal/goexperiment/exp_fieldtrack_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.fieldtrack
+// +build !goexperiment.fieldtrack
+
+package goexperiment
+
+const FieldTrack = false
+const FieldTrackInt = 0
diff --git a/src/internal/goexperiment/exp_fieldtrack_on.go b/src/internal/goexperiment/exp_fieldtrack_on.go
new file mode 100644
index 0000000..0d8c447
--- /dev/null
+++ b/src/internal/goexperiment/exp_fieldtrack_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.fieldtrack
+// +build goexperiment.fieldtrack
+
+package goexperiment
+
+const FieldTrack = true
+const FieldTrackInt = 1
diff --git a/src/internal/goexperiment/exp_heapminimum512kib_off.go b/src/internal/goexperiment/exp_heapminimum512kib_off.go
new file mode 100644
index 0000000..09da431
--- /dev/null
+++ b/src/internal/goexperiment/exp_heapminimum512kib_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.heapminimum512kib
+// +build !goexperiment.heapminimum512kib
+
+package goexperiment
+
+const HeapMinimum512KiB = false
+const HeapMinimum512KiBInt = 0
diff --git a/src/internal/goexperiment/exp_heapminimum512kib_on.go b/src/internal/goexperiment/exp_heapminimum512kib_on.go
new file mode 100644
index 0000000..bab684b
--- /dev/null
+++ b/src/internal/goexperiment/exp_heapminimum512kib_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.heapminimum512kib
+// +build goexperiment.heapminimum512kib
+
+package goexperiment
+
+const HeapMinimum512KiB = true
+const HeapMinimum512KiBInt = 1
diff --git a/src/internal/goexperiment/exp_loopvar_off.go b/src/internal/goexperiment/exp_loopvar_off.go
new file mode 100644
index 0000000..1cd7ee1
--- /dev/null
+++ b/src/internal/goexperiment/exp_loopvar_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.loopvar
+// +build !goexperiment.loopvar
+
+package goexperiment
+
+const LoopVar = false
+const LoopVarInt = 0
diff --git a/src/internal/goexperiment/exp_loopvar_on.go b/src/internal/goexperiment/exp_loopvar_on.go
new file mode 100644
index 0000000..e3c8980
--- /dev/null
+++ b/src/internal/goexperiment/exp_loopvar_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.loopvar
+// +build goexperiment.loopvar
+
+package goexperiment
+
+const LoopVar = true
+const LoopVarInt = 1
diff --git a/src/internal/goexperiment/exp_pagetrace_off.go b/src/internal/goexperiment/exp_pagetrace_off.go
new file mode 100644
index 0000000..789e883
--- /dev/null
+++ b/src/internal/goexperiment/exp_pagetrace_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.pagetrace
+// +build !goexperiment.pagetrace
+
+package goexperiment
+
+const PageTrace = false
+const PageTraceInt = 0
diff --git a/src/internal/goexperiment/exp_pagetrace_on.go b/src/internal/goexperiment/exp_pagetrace_on.go
new file mode 100644
index 0000000..ea72b54
--- /dev/null
+++ b/src/internal/goexperiment/exp_pagetrace_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.pagetrace
+// +build goexperiment.pagetrace
+
+package goexperiment
+
+const PageTrace = true
+const PageTraceInt = 1
diff --git a/src/internal/goexperiment/exp_preemptibleloops_off.go b/src/internal/goexperiment/exp_preemptibleloops_off.go
new file mode 100644
index 0000000..7a26088
--- /dev/null
+++ b/src/internal/goexperiment/exp_preemptibleloops_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.preemptibleloops
+// +build !goexperiment.preemptibleloops
+
+package goexperiment
+
+const PreemptibleLoops = false
+const PreemptibleLoopsInt = 0
diff --git a/src/internal/goexperiment/exp_preemptibleloops_on.go b/src/internal/goexperiment/exp_preemptibleloops_on.go
new file mode 100644
index 0000000..a9ca28c
--- /dev/null
+++ b/src/internal/goexperiment/exp_preemptibleloops_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.preemptibleloops
+// +build goexperiment.preemptibleloops
+
+package goexperiment
+
+const PreemptibleLoops = true
+const PreemptibleLoopsInt = 1
diff --git a/src/internal/goexperiment/exp_regabiargs_off.go b/src/internal/goexperiment/exp_regabiargs_off.go
new file mode 100644
index 0000000..31a139b
--- /dev/null
+++ b/src/internal/goexperiment/exp_regabiargs_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.regabiargs
+// +build !goexperiment.regabiargs
+
+package goexperiment
+
+const RegabiArgs = false
+const RegabiArgsInt = 0
diff --git a/src/internal/goexperiment/exp_regabiargs_on.go b/src/internal/goexperiment/exp_regabiargs_on.go
new file mode 100644
index 0000000..9b26f3c
--- /dev/null
+++ b/src/internal/goexperiment/exp_regabiargs_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.regabiargs
+// +build goexperiment.regabiargs
+
+package goexperiment
+
+const RegabiArgs = true
+const RegabiArgsInt = 1
diff --git a/src/internal/goexperiment/exp_regabiwrappers_off.go b/src/internal/goexperiment/exp_regabiwrappers_off.go
new file mode 100644
index 0000000..bfa0fa3
--- /dev/null
+++ b/src/internal/goexperiment/exp_regabiwrappers_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.regabiwrappers
+// +build !goexperiment.regabiwrappers
+
+package goexperiment
+
+const RegabiWrappers = false
+const RegabiWrappersInt = 0
diff --git a/src/internal/goexperiment/exp_regabiwrappers_on.go b/src/internal/goexperiment/exp_regabiwrappers_on.go
new file mode 100644
index 0000000..11ffffb
--- /dev/null
+++ b/src/internal/goexperiment/exp_regabiwrappers_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.regabiwrappers
+// +build goexperiment.regabiwrappers
+
+package goexperiment
+
+const RegabiWrappers = true
+const RegabiWrappersInt = 1
diff --git a/src/internal/goexperiment/exp_staticlockranking_off.go b/src/internal/goexperiment/exp_staticlockranking_off.go
new file mode 100644
index 0000000..3d546c0
--- /dev/null
+++ b/src/internal/goexperiment/exp_staticlockranking_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.staticlockranking
+// +build !goexperiment.staticlockranking
+
+package goexperiment
+
+const StaticLockRanking = false
+const StaticLockRankingInt = 0
diff --git a/src/internal/goexperiment/exp_staticlockranking_on.go b/src/internal/goexperiment/exp_staticlockranking_on.go
new file mode 100644
index 0000000..78188fb
--- /dev/null
+++ b/src/internal/goexperiment/exp_staticlockranking_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.staticlockranking
+// +build goexperiment.staticlockranking
+
+package goexperiment
+
+const StaticLockRanking = true
+const StaticLockRankingInt = 1
diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go
new file mode 100644
index 0000000..ae3cbaf
--- /dev/null
+++ b/src/internal/goexperiment/flags.go
@@ -0,0 +1,112 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package goexperiment implements support for toolchain experiments.
+//
+// Toolchain experiments are controlled by the GOEXPERIMENT
+// environment variable. GOEXPERIMENT is a comma-separated list of
+// experiment names. GOEXPERIMENT can be set at make.bash time, which
+// sets the default experiments for binaries built with the tool
+// chain; or it can be set at build time. GOEXPERIMENT can also be set
+// to "none", which disables any experiments that were enabled at
+// make.bash time.
+//
+// Experiments are exposed to the build in the following ways:
+//
+// - Build tag goexperiment.x is set if experiment x (lower case) is
+// enabled.
+//
+// - For each experiment x (in camel case), this package contains a
+// boolean constant x and an integer constant xInt.
+//
+// - In runtime assembly, the macro GOEXPERIMENT_x is defined if
+// experiment x (lower case) is enabled.
+//
+// In the toolchain, the set of experiments enabled for the current
+// build should be accessed via objabi.Experiment.
+//
+// The set of experiments is included in the output of runtime.Version()
+// and "go version <binary>" if it differs from the default experiments.
+//
+// For the set of experiments supported by the current toolchain, see
+// "go doc goexperiment.Flags".
+//
+// Note that this package defines the set of experiments (in Flags)
+// and records the experiments that were enabled when the package
+// was compiled (as boolean and integer constants).
+//
+// Note especially that this package does not itself change behavior
+// at run time based on the GOEXPERIMENT variable.
+// The code used in builds to interpret the GOEXPERIMENT variable
+// is in the separate package internal/buildcfg.
+package goexperiment
+
+//go:generate go run mkconsts.go
+
+// Flags is the set of experiments that can be enabled or disabled in
+// the current toolchain.
+//
+// When specified in the GOEXPERIMENT environment variable or as build
+// tags, experiments use the strings.ToLower of their field name.
+//
+// For the baseline experimental configuration, see
+// objabi.experimentBaseline.
+//
+// If you change this struct definition, run "go generate".
+type Flags struct {
+ FieldTrack bool
+ PreemptibleLoops bool
+ StaticLockRanking bool
+ BoringCrypto bool
+
+ // Regabi is split into several sub-experiments that can be
+ // enabled individually. Not all combinations work.
+ // The "regabi" GOEXPERIMENT is an alias for all "working"
+ // subexperiments.
+
+ // RegabiWrappers enables ABI wrappers for calling between
+ // ABI0 and ABIInternal functions. Without this, the ABIs are
+ // assumed to be identical so cross-ABI calls are direct.
+ RegabiWrappers bool
+ // RegabiArgs enables register arguments/results in all
+ // compiled Go functions.
+ //
+ // Requires wrappers (to do ABI translation), and reflect (so
+ // reflection calls use registers).
+ RegabiArgs bool
+
+ // HeapMinimum512KiB reduces the minimum heap size to 512 KiB.
+ //
+ // This was originally reduced as part of PacerRedesign, but
+ // has been broken out to its own experiment that is disabled
+ // by default.
+ HeapMinimum512KiB bool
+
+ // CoverageRedesign enables the new compiler-based code coverage
+ // tooling.
+ CoverageRedesign bool
+
+ // Arenas causes the "arena" standard library package to be visible
+ // to the outside world.
+ Arenas bool
+
+ // PageTrace enables GODEBUG=pagetrace=/path/to/result. This feature
+ // is a GOEXPERIMENT due to a security risk with setuid binaries:
+ // this compels the Go runtime to write to some arbitrary file, which
+ // may be exploited.
+ PageTrace bool
+
+ // CgoCheck2 enables an expensive cgo rule checker.
+ // When this experiment is enabled, cgo rule checks occur regardless
+ // of the GODEBUG=cgocheck setting provided at runtime.
+ CgoCheck2 bool
+
+ // LoopVar changes loop semantics so that each iteration gets its own
+ // copy of the iteration variable.
+ LoopVar bool
+
+ // CacheProg adds support to cmd/go to use a child process to implement
+ // the build cache; see https://github.com/golang/go/issues/59719.
+ CacheProg bool
+}
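Each field in Flags corresponds to one generated exp_*_off.go/exp_*_on.go pair earlier in this diff, so compiled code can branch on the boolean constant and let the compiler discard the dead arm rather than using build tags. A minimal sketch of that pattern; the sizes are illustrative, not the runtime's real values:

    package example

    import "internal/goexperiment"

    // heapMinimum picks a compile-time value based on the experiment.
    // Only one branch survives compilation because the condition is a
    // constant.
    func heapMinimum() uintptr {
    	if goexperiment.HeapMinimum512KiB {
    		return 512 << 10
    	}
    	return 4 << 20 // illustrative default, not necessarily the runtime's
    }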
diff --git a/src/internal/goexperiment/mkconsts.go b/src/internal/goexperiment/mkconsts.go
new file mode 100644
index 0000000..204ca9d
--- /dev/null
+++ b/src/internal/goexperiment/mkconsts.go
@@ -0,0 +1,74 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+// mkconsts generates const definition files for each GOEXPERIMENT.
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "internal/goexperiment"
+ "log"
+ "os"
+ "reflect"
+ "strings"
+)
+
+func main() {
+ // Delete existing experiment constant files.
+ ents, err := os.ReadDir(".")
+ if err != nil {
+ log.Fatal(err)
+ }
+ for _, ent := range ents {
+ name := ent.Name()
+ if !strings.HasPrefix(name, "exp_") {
+ continue
+ }
+ // Check that this is definitely a generated file.
+ data, err := os.ReadFile(name)
+ if err != nil {
+ log.Fatalf("reading %s: %v", name, err)
+ }
+ if !bytes.Contains(data, []byte("Code generated by mkconsts")) {
+ log.Fatalf("%s: expected generated file", name)
+ }
+ if err := os.Remove(name); err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ // Generate new experiment constant files.
+ rt := reflect.TypeOf(&goexperiment.Flags{}).Elem()
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i).Name
+ buildTag := "goexperiment." + strings.ToLower(f)
+ for _, val := range []bool{false, true} {
+ name := fmt.Sprintf("exp_%s_%s.go", strings.ToLower(f), pick(val, "off", "on"))
+ data := fmt.Sprintf(`// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build %s%s
+// +build %s%s
+
+package goexperiment
+
+const %s = %v
+const %sInt = %s
+`, pick(val, "!", ""), buildTag, pick(val, "!", ""), buildTag, f, val, f, pick(val, "0", "1"))
+ if err := os.WriteFile(name, []byte(data), 0666); err != nil {
+ log.Fatalf("writing %s: %v", name, err)
+ }
+ }
+ }
+}
+
+func pick(v bool, f, t string) string {
+ if v {
+ return t
+ }
+ return f
+}
diff --git a/src/internal/goos/gengoos.go b/src/internal/goos/gengoos.go
new file mode 100644
index 0000000..37d9706
--- /dev/null
+++ b/src/internal/goos/gengoos.go
@@ -0,0 +1,71 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "strings"
+)
+
+var gooses []string
+
+func main() {
+ data, err := os.ReadFile("../../go/build/syslist.go")
+ if err != nil {
+ log.Fatal(err)
+ }
+ const goosPrefix = `var knownOS = map[string]bool{`
+ inGOOS := false
+ for _, line := range strings.Split(string(data), "\n") {
+ if strings.HasPrefix(line, goosPrefix) {
+ inGOOS = true
+ } else if inGOOS && strings.HasPrefix(line, "}") {
+ break
+ } else if inGOOS {
+ goos := strings.Fields(line)[0]
+ goos = strings.TrimPrefix(goos, `"`)
+ goos = strings.TrimSuffix(goos, `":`)
+ gooses = append(gooses, goos)
+ }
+ }
+
+ for _, target := range gooses {
+ if target == "nacl" {
+ continue
+ }
+ var tags []string
+ if target == "linux" {
+ tags = append(tags, "!android") // must explicitly exclude android for linux
+ }
+ if target == "solaris" {
+ tags = append(tags, "!illumos") // must explicitly exclude illumos for solaris
+ }
+ if target == "darwin" {
+ tags = append(tags, "!ios") // must explicitly exclude ios for darwin
+ }
+ tags = append(tags, target) // must explicitly include target for bootstrapping purposes
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.\n\n")
+ fmt.Fprintf(&buf, "//go:build %s\n\n", strings.Join(tags, " && "))
+ fmt.Fprintf(&buf, "package goos\n\n")
+ fmt.Fprintf(&buf, "const GOOS = `%s`\n\n", target)
+ for _, goos := range gooses {
+ value := 0
+ if goos == target {
+ value = 1
+ }
+ fmt.Fprintf(&buf, "const Is%s = %d\n", strings.Title(goos), value)
+ }
+ err := os.WriteFile("zgoos_"+target+".go", buf.Bytes(), 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+}
diff --git a/src/internal/goos/goos.go b/src/internal/goos/goos.go
new file mode 100644
index 0000000..02dc968
--- /dev/null
+++ b/src/internal/goos/goos.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package goos contains GOOS-specific constants.
+package goos
+
+// The next line makes 'go generate' write the zgoos*.go files with
+// per-OS information, including constants named Is$GOOS for every
+// known GOOS. Each constant is 1 when that GOOS is the current system,
+// 0 otherwise; multiplying by these constants is useful for defining
+// GOOS-specific constants.
+//
+//go:generate go run gengoos.go
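A short sketch of the multiplication idiom mentioned in the comment above, with a hypothetical constant (as with internal/goarch, this package is importable only from within the standard library):

    package example

    import "internal/goos"

    // guardMultiplier is 2 on AIX and 1 everywhere else, folded into a
    // single constant expression: the Is$GOOS values act as 0/1 selectors.
    // (Hypothetical constant, for illustration only.)
    const guardMultiplier = 1 + goos.IsAix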
diff --git a/src/internal/goos/nonunix.go b/src/internal/goos/nonunix.go
new file mode 100644
index 0000000..2ba5c85
--- /dev/null
+++ b/src/internal/goos/nonunix.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix
+
+package goos
+
+const IsUnix = false
diff --git a/src/internal/goos/unix.go b/src/internal/goos/unix.go
new file mode 100644
index 0000000..6cfd5ef
--- /dev/null
+++ b/src/internal/goos/unix.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package goos
+
+const IsUnix = true
diff --git a/src/internal/goos/zgoos_aix.go b/src/internal/goos/zgoos_aix.go
new file mode 100644
index 0000000..24e05c9
--- /dev/null
+++ b/src/internal/goos/zgoos_aix.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build aix
+
+package goos
+
+const GOOS = `aix`
+
+const IsAix = 1
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_android.go b/src/internal/goos/zgoos_android.go
new file mode 100644
index 0000000..3c4a318
--- /dev/null
+++ b/src/internal/goos/zgoos_android.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build android
+
+package goos
+
+const GOOS = `android`
+
+const IsAix = 0
+const IsAndroid = 1
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_darwin.go b/src/internal/goos/zgoos_darwin.go
new file mode 100644
index 0000000..10b1499
--- /dev/null
+++ b/src/internal/goos/zgoos_darwin.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build !ios && darwin
+
+package goos
+
+const GOOS = `darwin`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 1
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_dragonfly.go b/src/internal/goos/zgoos_dragonfly.go
new file mode 100644
index 0000000..b92d126
--- /dev/null
+++ b/src/internal/goos/zgoos_dragonfly.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build dragonfly
+
+package goos
+
+const GOOS = `dragonfly`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 1
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_freebsd.go b/src/internal/goos/zgoos_freebsd.go
new file mode 100644
index 0000000..f547591
--- /dev/null
+++ b/src/internal/goos/zgoos_freebsd.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build freebsd
+
+package goos
+
+const GOOS = `freebsd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 1
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_hurd.go b/src/internal/goos/zgoos_hurd.go
new file mode 100644
index 0000000..1189d65
--- /dev/null
+++ b/src/internal/goos/zgoos_hurd.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build hurd
+
+package goos
+
+const GOOS = `hurd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 1
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_illumos.go b/src/internal/goos/zgoos_illumos.go
new file mode 100644
index 0000000..4f02540
--- /dev/null
+++ b/src/internal/goos/zgoos_illumos.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build illumos
+
+package goos
+
+const GOOS = `illumos`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 1
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_ios.go b/src/internal/goos/zgoos_ios.go
new file mode 100644
index 0000000..02f3586
--- /dev/null
+++ b/src/internal/goos/zgoos_ios.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build ios
+
+package goos
+
+const GOOS = `ios`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 1
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_js.go b/src/internal/goos/zgoos_js.go
new file mode 100644
index 0000000..4818741
--- /dev/null
+++ b/src/internal/goos/zgoos_js.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build js
+
+package goos
+
+const GOOS = `js`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 1
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_linux.go b/src/internal/goos/zgoos_linux.go
new file mode 100644
index 0000000..6f4d4e0
--- /dev/null
+++ b/src/internal/goos/zgoos_linux.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build !android && linux
+
+package goos
+
+const GOOS = `linux`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 1
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_netbsd.go b/src/internal/goos/zgoos_netbsd.go
new file mode 100644
index 0000000..948603d
--- /dev/null
+++ b/src/internal/goos/zgoos_netbsd.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build netbsd
+
+package goos
+
+const GOOS = `netbsd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 1
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_openbsd.go b/src/internal/goos/zgoos_openbsd.go
new file mode 100644
index 0000000..f4b2014
--- /dev/null
+++ b/src/internal/goos/zgoos_openbsd.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build openbsd
+
+package goos
+
+const GOOS = `openbsd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 1
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_plan9.go b/src/internal/goos/zgoos_plan9.go
new file mode 100644
index 0000000..95572df
--- /dev/null
+++ b/src/internal/goos/zgoos_plan9.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build plan9
+
+package goos
+
+const GOOS = `plan9`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 1
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_solaris.go b/src/internal/goos/zgoos_solaris.go
new file mode 100644
index 0000000..c705826
--- /dev/null
+++ b/src/internal/goos/zgoos_solaris.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build !illumos && solaris
+
+package goos
+
+const GOOS = `solaris`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 1
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_wasip1.go b/src/internal/goos/zgoos_wasip1.go
new file mode 100644
index 0000000..ae35eeb
--- /dev/null
+++ b/src/internal/goos/zgoos_wasip1.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build wasip1
+
+package goos
+
+const GOOS = `wasip1`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 1
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_windows.go b/src/internal/goos/zgoos_windows.go
new file mode 100644
index 0000000..f89f4cf
--- /dev/null
+++ b/src/internal/goos/zgoos_windows.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build windows
+
+package goos
+
+const GOOS = `windows`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 1
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_zos.go b/src/internal/goos/zgoos_zos.go
new file mode 100644
index 0000000..29fb0f8
--- /dev/null
+++ b/src/internal/goos/zgoos_zos.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build zos
+
+package goos
+
+const GOOS = `zos`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWasip1 = 0
+const IsWindows = 0
+const IsZos = 1
diff --git a/src/internal/goroot/gc.go b/src/internal/goroot/gc.go
new file mode 100644
index 0000000..c0216f4
--- /dev/null
+++ b/src/internal/goroot/gc.go
@@ -0,0 +1,131 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+package goroot
+
+import (
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+// IsStandardPackage reports whether path is a standard package,
+// given goroot and compiler.
+func IsStandardPackage(goroot, compiler, path string) bool {
+ switch compiler {
+ case "gc":
+ dir := filepath.Join(goroot, "src", path)
+ info, err := os.Stat(dir)
+ return err == nil && info.IsDir()
+ case "gccgo":
+ return gccgoSearch.isStandard(path)
+ default:
+ panic("unknown compiler " + compiler)
+ }
+}
+
+// gccgoDirs holds the gccgo search directories.
+type gccgoDirs struct {
+ once sync.Once
+ dirs []string
+}
+
+// gccgoSearch is used to check whether a gccgo package exists in the
+// standard library.
+var gccgoSearch gccgoDirs
+
+// init finds the gccgo search directories. If this fails it leaves dirs == nil.
+func (gd *gccgoDirs) init() {
+ gccgo := os.Getenv("GCCGO")
+ if gccgo == "" {
+ gccgo = "gccgo"
+ }
+ bin, err := exec.LookPath(gccgo)
+ if err != nil {
+ return
+ }
+
+ allDirs, err := exec.Command(bin, "-print-search-dirs").Output()
+ if err != nil {
+ return
+ }
+ versionB, err := exec.Command(bin, "-dumpversion").Output()
+ if err != nil {
+ return
+ }
+ version := strings.TrimSpace(string(versionB))
+ machineB, err := exec.Command(bin, "-dumpmachine").Output()
+ if err != nil {
+ return
+ }
+ machine := strings.TrimSpace(string(machineB))
+
+ dirsEntries := strings.Split(string(allDirs), "\n")
+ const prefix = "libraries: ="
+ var dirs []string
+ for _, dirEntry := range dirsEntries {
+ if strings.HasPrefix(dirEntry, prefix) {
+ dirs = filepath.SplitList(strings.TrimPrefix(dirEntry, prefix))
+ break
+ }
+ }
+ if len(dirs) == 0 {
+ return
+ }
+
+ var lastDirs []string
+ for _, dir := range dirs {
+ goDir := filepath.Join(dir, "go", version)
+ if fi, err := os.Stat(goDir); err == nil && fi.IsDir() {
+ gd.dirs = append(gd.dirs, goDir)
+ goDir = filepath.Join(goDir, machine)
+ if fi, err = os.Stat(goDir); err == nil && fi.IsDir() {
+ gd.dirs = append(gd.dirs, goDir)
+ }
+ }
+ if fi, err := os.Stat(dir); err == nil && fi.IsDir() {
+ lastDirs = append(lastDirs, dir)
+ }
+ }
+ gd.dirs = append(gd.dirs, lastDirs...)
+}
+
+// isStandard reports whether path is a standard library for gccgo.
+func (gd *gccgoDirs) isStandard(path string) bool {
+ // Quick check: if the first path component has a '.', it's not
+ // in the standard library. This skips most GOPATH directories.
+ i := strings.Index(path, "/")
+ if i < 0 {
+ i = len(path)
+ }
+ if strings.Contains(path[:i], ".") {
+ return false
+ }
+
+ if path == "unsafe" {
+ // Special case.
+ return true
+ }
+
+ gd.once.Do(gd.init)
+ if gd.dirs == nil {
+ // We couldn't find the gccgo search directories.
+ // Best guess, since the first component did not contain
+ // '.', is that this is a standard library package.
+ return true
+ }
+
+ for _, dir := range gd.dirs {
+ full := filepath.Join(dir, path) + ".gox"
+ if fi, err := os.Stat(full); err == nil && !fi.IsDir() {
+ return true
+ }
+ }
+
+ return false
+}
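
A hedged usage sketch of IsStandardPackage as defined above; the arguments are illustrative, and because the package is internal it is only importable from inside the Go tree:

	package example

	import (
		"fmt"
		"internal/goroot"
		"runtime"
	)

	func example() {
		// With the gc compiler a package is standard iff GOROOT/src/<path> is a directory.
		fmt.Println(goroot.IsStandardPackage(runtime.GOROOT(), "gc", "net/http"))        // true on a normal install
		fmt.Println(goroot.IsStandardPackage(runtime.GOROOT(), "gc", "example.com/mod")) // false
	}
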
diff --git a/src/internal/goroot/gccgo.go b/src/internal/goroot/gccgo.go
new file mode 100644
index 0000000..6284122
--- /dev/null
+++ b/src/internal/goroot/gccgo.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo
+
+package goroot
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// IsStandardPackage reports whether path is a standard package,
+// given goroot and compiler.
+func IsStandardPackage(goroot, compiler, path string) bool {
+ switch compiler {
+ case "gc":
+ dir := filepath.Join(goroot, "src", path)
+ _, err := os.Stat(dir)
+ return err == nil
+ case "gccgo":
+ return stdpkg[path]
+ default:
+ panic("unknown compiler " + compiler)
+ }
+}
diff --git a/src/internal/goversion/goversion.go b/src/internal/goversion/goversion.go
new file mode 100644
index 0000000..5a52f9e
--- /dev/null
+++ b/src/internal/goversion/goversion.go
@@ -0,0 +1,12 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package goversion
+
+// Version is the Go 1.x version which is currently
+// in development and will eventually get released.
+//
+// It should be updated at the start of each development cycle to be
+// the version of the next Go 1.x release. See golang.org/issue/40705.
+const Version = 21
diff --git a/src/internal/intern/intern.go b/src/internal/intern/intern.go
new file mode 100644
index 0000000..2f97c2e
--- /dev/null
+++ b/src/internal/intern/intern.go
@@ -0,0 +1,181 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package intern lets you make smaller comparable values by boxing
+// a larger comparable value (such as a 16 byte string header) down
+// into a globally unique 8 byte pointer.
+//
+// The globally unique pointers are garbage collected with weak
+// references and finalizers. This package hides that.
+package intern
+
+import (
+ "internal/godebug"
+ "runtime"
+ "sync"
+ "unsafe"
+)
+
+// A Value pointer is the handle to an underlying comparable value.
+// See func Get for how Value pointers may be used.
+type Value struct {
+ _ [0]func() // prevent people from accidentally using value type as comparable
+ cmpVal any
+ // resurrected is guarded by mu (for all instances of Value).
+ // It is set true whenever v is synthesized from a uintptr.
+ resurrected bool
+}
+
+// Get returns the comparable value passed to the Get func
+// that returned v.
+func (v *Value) Get() any { return v.cmpVal }
+
+// key is a key in our global value map.
+// It contains type-specialized fields to avoid allocations
+// when converting common types to empty interfaces.
+type key struct {
+ s string
+ cmpVal any
+ // isString reports whether key contains a string.
+ // Without it, the zero value of key is ambiguous.
+ isString bool
+}
+
+// keyFor returns a key to use with cmpVal.
+func keyFor(cmpVal any) key {
+ if s, ok := cmpVal.(string); ok {
+ return key{s: s, isString: true}
+ }
+ return key{cmpVal: cmpVal}
+}
+
+// Value returns a *Value built from k.
+func (k key) Value() *Value {
+ if k.isString {
+ return &Value{cmpVal: k.s}
+ }
+ return &Value{cmpVal: k.cmpVal}
+}
+
+var (
+ // mu guards valMap, a weakref map of *Value by underlying value.
+ // It also guards the resurrected field of all *Values.
+ mu sync.Mutex
+ valMap = map[key]uintptr{} // to uintptr(*Value)
+ valSafe = safeMap() // non-nil in safe+leaky mode
+)
+
+var intern = godebug.New("#intern")
+
+// safeMap returns a non-nil map if we're in safe-but-leaky mode,
+// as controlled by GODEBUG=intern=leaky
+func safeMap() map[key]*Value {
+ if intern.Value() == "leaky" {
+ return map[key]*Value{}
+ }
+ return nil
+}
+
+// Get returns a pointer representing the comparable value cmpVal.
+//
+// The returned pointer will be the same for Get(v) and Get(v2)
+// if and only if v == v2, and can be used as a map key.
+func Get(cmpVal any) *Value {
+ return get(keyFor(cmpVal))
+}
+
+// GetByString is identical to Get, except that it is specialized for strings.
+// This avoids an allocation from putting a string into an interface{}
+// to pass as an argument to Get.
+func GetByString(s string) *Value {
+ return get(key{s: s, isString: true})
+}
+
+// We play unsafe games that violate Go's rules (and assume a non-moving
+// collector). So we quiet Go here.
+// See the comment below Get for more implementation details.
+//
+//go:nocheckptr
+func get(k key) *Value {
+ mu.Lock()
+ defer mu.Unlock()
+
+ var v *Value
+ if valSafe != nil {
+ v = valSafe[k]
+ } else if addr, ok := valMap[k]; ok {
+ v = (*Value)(unsafe.Pointer(addr))
+ v.resurrected = true
+ }
+ if v != nil {
+ return v
+ }
+ v = k.Value()
+ if valSafe != nil {
+ valSafe[k] = v
+ } else {
+ // SetFinalizer before uintptr conversion (theoretical concern;
+ // see https://github.com/go4org/intern/issues/13)
+ runtime.SetFinalizer(v, finalize)
+ valMap[k] = uintptr(unsafe.Pointer(v))
+ }
+ return v
+}
+
+func finalize(v *Value) {
+ mu.Lock()
+ defer mu.Unlock()
+ if v.resurrected {
+ // We lost the race. Somebody resurrected it while we
+ // were about to finalize it. Try again next round.
+ v.resurrected = false
+ runtime.SetFinalizer(v, finalize)
+ return
+ }
+ delete(valMap, keyFor(v.cmpVal))
+}
+
+// Interning is simple if you don't require that unused values be
+// garbage collectable. But we do require that; we don't want to be
+// a DoS vector. We do this by using a uintptr to hide the pointer from
+// the garbage collector, and using a finalizer to eliminate the
+// pointer when no other code is using it.
+//
+// The obvious implementation of this is to use a
+// map[interface{}]uintptr-of-*interface{}, and set up a finalizer to
+// delete from the map. Unfortunately, this is racy. Because pointers
+// are being created in violation of Go's unsafety rules, it's
+// possible to create a pointer to a value concurrently with the GC
+// concluding that the value can be collected. There are other races
+// that break the equality invariant as well, but the use-after-free
+// will cause a runtime crash.
+//
+// To make this work, the finalizer needs to know that no references
+// have been unsafely created since the finalizer was set up. To do
+// this, values carry a "resurrected" sentinel, which gets set
+// whenever a pointer is unsafely created. If the finalizer encounters
+// the sentinel, it clears the sentinel and delays collection for one
+// additional GC cycle, by re-installing itself as finalizer. This
+// ensures that the unsafely created pointer is visible to the GC, and
+// will correctly prevent collection.
+//
+// This technique does mean that interned values that get reused take
+// at least 3 GC cycles to fully collect (1 to clear the sentinel, 1
+// to clean up the unsafe map, 1 to be actually deleted).
+//
+// @ianlancetaylor commented in
+// https://github.com/golang/go/issues/41303#issuecomment-717401656
+// that it is possible to implement weak references in terms of
+// finalizers without unsafe. Unfortunately, the approach he outlined
+// does not work here, for two reasons. First, there is no way to
+// construct a strong pointer out of a weak pointer; our map stores
+// weak pointers, but we must return strong pointers to callers.
+// Second, and more fundamentally, we must return not just _a_ strong
+// pointer to callers, but _the same_ strong pointer to callers. In
+// order to return _the same_ strong pointer to callers, we must track
+// it, which is exactly what we cannot do with strong pointers.
+//
+// See https://github.com/inetaf/netaddr/issues/53 for more
+// discussion, and https://github.com/go4org/intern/issues/2 for an
+// illustration of the subtleties at play.
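
A short usage sketch of the API above: pointer identity of the returned *Value stands in for equality of the boxed values (internal package; the function name is illustrative):

	func describe() string {
		a := intern.Get("example.com")         // boxes the string into a canonical *Value
		b := intern.GetByString("example.com") // same value, no interface allocation on the string path
		if a == b {
			// Pointer equality holds exactly when the boxed values are equal,
			// so *Value works as a compact, comparable map key.
		}
		return a.Get().(string) // recover the original value
	}
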
diff --git a/src/internal/intern/intern_test.go b/src/internal/intern/intern_test.go
new file mode 100644
index 0000000..d1e409e
--- /dev/null
+++ b/src/internal/intern/intern_test.go
@@ -0,0 +1,199 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package intern
+
+import (
+ "fmt"
+ "runtime"
+ "testing"
+)
+
+func TestBasics(t *testing.T) {
+ clearMap()
+ foo := Get("foo")
+ bar := Get("bar")
+ empty := Get("")
+ nilEface := Get(nil)
+ i := Get(0x7777777)
+ foo2 := Get("foo")
+ bar2 := Get("bar")
+ empty2 := Get("")
+ nilEface2 := Get(nil)
+ i2 := Get(0x7777777)
+ foo3 := GetByString("foo")
+ empty3 := GetByString("")
+
+ if foo.Get() != foo2.Get() {
+ t.Error("foo/foo2 values differ")
+ }
+ if foo.Get() != foo3.Get() {
+ t.Error("foo/foo3 values differ")
+ }
+ if foo.Get() != "foo" {
+ t.Error("foo.Get not foo")
+ }
+ if foo != foo2 {
+ t.Error("foo/foo2 pointers differ")
+ }
+ if foo != foo3 {
+ t.Error("foo/foo3 pointers differ")
+ }
+
+ if bar.Get() != bar2.Get() {
+ t.Error("bar values differ")
+ }
+ if bar.Get() != "bar" {
+ t.Error("bar.Get not bar")
+ }
+ if bar != bar2 {
+ t.Error("bar pointers differ")
+ }
+
+	if i.Get() != i2.Get() {
+ t.Error("i values differ")
+ }
+ if i.Get() != 0x7777777 {
+ t.Error("i.Get not 0x7777777")
+ }
+ if i != i2 {
+ t.Error("i pointers differ")
+ }
+
+ if empty.Get() != empty2.Get() {
+ t.Error("empty/empty2 values differ")
+ }
+	if empty.Get() != empty3.Get() {
+ t.Error("empty/empty3 values differ")
+ }
+ if empty.Get() != "" {
+ t.Error("empty.Get not empty string")
+ }
+ if empty != empty2 {
+ t.Error("empty/empty2 pointers differ")
+ }
+ if empty != empty3 {
+ t.Error("empty/empty3 pointers differ")
+ }
+
+ if nilEface.Get() != nilEface2.Get() {
+ t.Error("nilEface values differ")
+ }
+ if nilEface.Get() != nil {
+ t.Error("nilEface.Get not nil")
+ }
+ if nilEface != nilEface2 {
+ t.Error("nilEface pointers differ")
+ }
+
+ if n := mapLen(); n != 5 {
+		t.Errorf("map len = %d; want 5", n)
+ }
+
+ wantEmpty(t)
+}
+
+func wantEmpty(t testing.TB) {
+ t.Helper()
+ const gcTries = 5000
+ for try := 0; try < gcTries; try++ {
+ runtime.GC()
+ n := mapLen()
+ if n == 0 {
+ break
+ }
+ if try == gcTries-1 {
+ t.Errorf("map len = %d after (%d GC tries); want 0, contents: %v", n, gcTries, mapKeys())
+ }
+ }
+}
+
+func TestStress(t *testing.T) {
+ iters := 10000
+ if testing.Short() {
+ iters = 1000
+ }
+ var sink []byte
+ for i := 0; i < iters; i++ {
+ _ = Get("foo")
+ sink = make([]byte, 1<<20)
+ }
+ _ = sink
+}
+
+func BenchmarkStress(b *testing.B) {
+ done := make(chan struct{})
+ defer close(done)
+ go func() {
+ for {
+ select {
+ case <-done:
+ return
+ default:
+ }
+ runtime.GC()
+ }
+ }()
+
+ clearMap()
+ v1 := Get("foo")
+ b.ReportAllocs()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ v2 := Get("foo")
+ if v1 != v2 {
+ b.Fatal("wrong value")
+ }
+ // And also a key we don't retain:
+ _ = Get("bar")
+ }
+ })
+ runtime.GC()
+ wantEmpty(b)
+}
+
+func mapLen() int {
+ mu.Lock()
+ defer mu.Unlock()
+ return len(valMap)
+}
+
+func mapKeys() (keys []string) {
+ mu.Lock()
+ defer mu.Unlock()
+ for k := range valMap {
+ keys = append(keys, fmt.Sprint(k))
+ }
+ return keys
+}
+
+func clearMap() {
+ mu.Lock()
+ defer mu.Unlock()
+ for k := range valMap {
+ delete(valMap, k)
+ }
+}
+
+var (
+ globalString = "not a constant"
+ sink string
+)
+
+func TestGetByStringAllocs(t *testing.T) {
+ allocs := int(testing.AllocsPerRun(100, func() {
+ GetByString(globalString)
+ }))
+ if allocs != 0 {
+ t.Errorf("GetString allocated %d objects, want 0", allocs)
+ }
+}
+
+func BenchmarkGetByString(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ v := GetByString(globalString)
+ sink = v.Get().(string)
+ }
+}
diff --git a/src/internal/itoa/itoa.go b/src/internal/itoa/itoa.go
new file mode 100644
index 0000000..c6062d9
--- /dev/null
+++ b/src/internal/itoa/itoa.go
@@ -0,0 +1,33 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Simple conversions to avoid depending on strconv.
+
+package itoa
+
+// Itoa converts val to a decimal string.
+func Itoa(val int) string {
+ if val < 0 {
+ return "-" + Uitoa(uint(-val))
+ }
+ return Uitoa(uint(val))
+}
+
+// Uitoa converts val to a decimal string.
+func Uitoa(val uint) string {
+ if val == 0 { // avoid string allocation
+ return "0"
+ }
+	var buf [20]byte // big enough for a 64-bit value in base 10
+ i := len(buf) - 1
+ for val >= 10 {
+ q := val / 10
+ buf[i] = byte('0' + val - q*10)
+ i--
+ val = q
+ }
+ // val < 10
+ buf[i] = byte('0' + val)
+ return string(buf[i:])
+}
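
For illustration, these behave like the corresponding strconv conversions for the values they accept (sketch only; the package is internal, so this is written as if from inside the standard library):

	package main

	import (
		"fmt"
		"internal/itoa"
	)

	func main() {
		fmt.Println(itoa.Itoa(-42))      // "-42"
		fmt.Println(itoa.Uitoa(1 << 20)) // "1048576"
	}
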
diff --git a/src/internal/itoa/itoa_test.go b/src/internal/itoa/itoa_test.go
new file mode 100644
index 0000000..71931c1
--- /dev/null
+++ b/src/internal/itoa/itoa_test.go
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package itoa_test
+
+import (
+ "fmt"
+ "internal/itoa"
+ "math"
+ "testing"
+)
+
+var (
+ minInt64 int64 = math.MinInt64
+ maxInt64 int64 = math.MaxInt64
+ maxUint64 uint64 = math.MaxUint64
+)
+
+func TestItoa(t *testing.T) {
+ tests := []int{int(minInt64), math.MinInt32, -999, -100, -1, 0, 1, 100, 999, math.MaxInt32, int(maxInt64)}
+ for _, tt := range tests {
+ got := itoa.Itoa(tt)
+ want := fmt.Sprint(tt)
+ if want != got {
+ t.Fatalf("Itoa(%d) = %s, want %s", tt, got, want)
+ }
+ }
+}
+
+func TestUitoa(t *testing.T) {
+ tests := []uint{0, 1, 100, 999, math.MaxUint32, uint(maxUint64)}
+ for _, tt := range tests {
+ got := itoa.Uitoa(tt)
+ want := fmt.Sprint(tt)
+ if want != got {
+ t.Fatalf("Uitoa(%d) = %s, want %s", tt, got, want)
+ }
+ }
+}
diff --git a/src/internal/lazyregexp/lazyre.go b/src/internal/lazyregexp/lazyre.go
new file mode 100644
index 0000000..2681af3
--- /dev/null
+++ b/src/internal/lazyregexp/lazyre.go
@@ -0,0 +1,78 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lazyregexp is a thin wrapper over regexp, allowing the use of global
+// regexp variables without forcing them to be compiled at init.
+package lazyregexp
+
+import (
+ "os"
+ "regexp"
+ "strings"
+ "sync"
+)
+
+// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be
+// compiled the first time it is needed.
+type Regexp struct {
+ str string
+ once sync.Once
+ rx *regexp.Regexp
+}
+
+func (r *Regexp) re() *regexp.Regexp {
+ r.once.Do(r.build)
+ return r.rx
+}
+
+func (r *Regexp) build() {
+ r.rx = regexp.MustCompile(r.str)
+ r.str = ""
+}
+
+func (r *Regexp) FindSubmatch(s []byte) [][]byte {
+ return r.re().FindSubmatch(s)
+}
+
+func (r *Regexp) FindStringSubmatch(s string) []string {
+ return r.re().FindStringSubmatch(s)
+}
+
+func (r *Regexp) FindStringSubmatchIndex(s string) []int {
+ return r.re().FindStringSubmatchIndex(s)
+}
+
+func (r *Regexp) ReplaceAllString(src, repl string) string {
+ return r.re().ReplaceAllString(src, repl)
+}
+
+func (r *Regexp) FindString(s string) string {
+ return r.re().FindString(s)
+}
+
+func (r *Regexp) FindAllString(s string, n int) []string {
+ return r.re().FindAllString(s, n)
+}
+
+func (r *Regexp) MatchString(s string) bool {
+ return r.re().MatchString(s)
+}
+
+func (r *Regexp) SubexpNames() []string {
+ return r.re().SubexpNames()
+}
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+// New creates a new lazy regexp, delaying the compiling work until it is first
+// needed. If the code is being run as part of tests, the regexp compiling will
+// happen immediately.
+func New(str string) *Regexp {
+ lr := &Regexp{str: str}
+ if inTest {
+ // In tests, always compile the regexps early.
+ lr.re()
+ }
+ return lr
+}
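
A brief usage sketch: declaring the pattern at package scope defers compilation until first use (the pattern and names here are illustrative):

	var semverRE = lazyregexp.New(`^v[0-9]+\.[0-9]+\.[0-9]+$`)

	func isSemver(s string) bool {
		// Compiled once, on the first call, via sync.Once; reused afterwards.
		return semverRE.MatchString(s)
	}
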
diff --git a/src/internal/lazytemplate/lazytemplate.go b/src/internal/lazytemplate/lazytemplate.go
new file mode 100644
index 0000000..8eeed5a
--- /dev/null
+++ b/src/internal/lazytemplate/lazytemplate.go
@@ -0,0 +1,52 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lazytemplate is a thin wrapper over text/template, allowing the use
+// of global template variables without forcing them to be parsed at init.
+package lazytemplate
+
+import (
+ "io"
+ "os"
+ "strings"
+ "sync"
+ "text/template"
+)
+
+// Template is a wrapper around text/template.Template, where the underlying
+// template will be parsed the first time it is needed.
+type Template struct {
+ name, text string
+
+ once sync.Once
+ tmpl *template.Template
+}
+
+func (r *Template) tp() *template.Template {
+ r.once.Do(r.build)
+ return r.tmpl
+}
+
+func (r *Template) build() {
+ r.tmpl = template.Must(template.New(r.name).Parse(r.text))
+ r.name, r.text = "", ""
+}
+
+func (r *Template) Execute(w io.Writer, data any) error {
+ return r.tp().Execute(w, data)
+}
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+// New creates a new lazy template, delaying the parsing work until it is first
+// needed. If the code is being run as part of tests, the template parsing will
+// happen immediately.
+func New(name, text string) *Template {
+ lt := &Template{name: name, text: text}
+ if inTest {
+ // In tests, always parse the templates early.
+ lt.tp()
+ }
+ return lt
+}
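
A brief usage sketch mirroring lazyregexp: the template text is parsed on the first Execute (names are illustrative):

	var greeting = lazytemplate.New("greeting", "Hello, {{.}}!\n")

	func greet(w io.Writer, name string) error {
		// Parsing happens once, on the first Execute, guarded by sync.Once.
		return greeting.Execute(w, name)
	}
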
diff --git a/src/internal/nettrace/nettrace.go b/src/internal/nettrace/nettrace.go
new file mode 100644
index 0000000..0a2bf92
--- /dev/null
+++ b/src/internal/nettrace/nettrace.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package nettrace contains internal hooks for tracing activity in
+// the net package. This package is purely internal for use by the
+// net/http/httptrace package and has no stable API exposed to end
+// users.
+package nettrace
+
+// TraceKey is a context.Context Value key. Its associated value should
+// be a *Trace struct.
+type TraceKey struct{}
+
+// LookupIPAltResolverKey is a context.Context Value key used by tests to
+// specify an alternate resolver func.
+// It is not exposed to outside users. (But see issue 12503)
+// The value should be the same type as lookupIP:
+//
+// func lookupIP(ctx context.Context, host string) ([]IPAddr, error)
+type LookupIPAltResolverKey struct{}
+
+// Trace contains a set of hooks for tracing events within
+// the net package. Any specific hook may be nil.
+type Trace struct {
+ // DNSStart is called with the hostname of a DNS lookup
+ // before it begins.
+ DNSStart func(name string)
+
+ // DNSDone is called after a DNS lookup completes (or fails).
+ // The coalesced parameter is whether singleflight de-duped
+	// the call. The addrs are of type net.IPAddr but can't
+	// actually be declared as such for circular dependency reasons.
+ DNSDone func(netIPs []any, coalesced bool, err error)
+
+ // ConnectStart is called before a Dial, excluding Dials made
+ // during DNS lookups. In the case of DualStack (Happy Eyeballs)
+ // dialing, this may be called multiple times, from multiple
+ // goroutines.
+ ConnectStart func(network, addr string)
+
+	// ConnectDone is called after a Dial with the results, excluding
+ // Dials made during DNS lookups. It may also be called multiple
+ // times, like ConnectStart.
+ ConnectDone func(network, addr string, err error)
+}
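
A hedged sketch of how a Trace is attached: net's internals look up a *Trace stored under TraceKey in the context and call whichever hooks are non-nil (the wiring below is illustrative; end users normally go through net/http/httptrace instead):

	func withTrace(ctx context.Context) context.Context {
		trace := &nettrace.Trace{
			DNSStart: func(name string) { log.Printf("resolving %s", name) },
			ConnectDone: func(network, addr string, err error) {
				log.Printf("dial %s %s: %v", network, addr, err)
			},
		}
		// The dial and lookup paths inside package net consult this key.
		return context.WithValue(ctx, nettrace.TraceKey{}, trace)
	}
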
diff --git a/src/internal/obscuretestdata/obscuretestdata.go b/src/internal/obscuretestdata/obscuretestdata.go
new file mode 100644
index 0000000..d54d3f6
--- /dev/null
+++ b/src/internal/obscuretestdata/obscuretestdata.go
@@ -0,0 +1,65 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package obscuretestdata contains functionality used by tests to more easily
+// work with testdata that must be obscured primarily due to
+// golang.org/issue/34986.
+package obscuretestdata
+
+import (
+ "encoding/base64"
+ "io"
+ "os"
+)
+
+// Rot13 returns the rot13 encoding or decoding of its input.
+func Rot13(data []byte) []byte {
+ out := make([]byte, len(data))
+ copy(out, data)
+ for i, c := range out {
+ switch {
+ case 'A' <= c && c <= 'M' || 'a' <= c && c <= 'm':
+ out[i] = c + 13
+ case 'N' <= c && c <= 'Z' || 'n' <= c && c <= 'z':
+ out[i] = c - 13
+ }
+ }
+ return out
+}
+
+// DecodeToTempFile decodes the named file to a temporary location.
+// If successful, it returns the path of the decoded file.
+// The caller is responsible for ensuring that the temporary file is removed.
+func DecodeToTempFile(name string) (path string, err error) {
+ f, err := os.Open(name)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ tmp, err := os.CreateTemp("", "obscuretestdata-decoded-")
+ if err != nil {
+ return "", err
+ }
+ if _, err := io.Copy(tmp, base64.NewDecoder(base64.StdEncoding, f)); err != nil {
+ tmp.Close()
+ os.Remove(tmp.Name())
+ return "", err
+ }
+ if err := tmp.Close(); err != nil {
+ os.Remove(tmp.Name())
+ return "", err
+ }
+ return tmp.Name(), nil
+}
+
+// ReadFile reads the named file and returns its decoded contents.
+func ReadFile(name string) ([]byte, error) {
+ f, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return io.ReadAll(base64.NewDecoder(base64.StdEncoding, f))
+}
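
A usage sketch from a test's point of view (the test name and testdata file names are hypothetical):

	func TestUsingObscuredData(t *testing.T) {
		// Decode an obscured archive to a temp file for the duration of the test.
		path, err := obscuretestdata.DecodeToTempFile("testdata/example.zip.base64")
		if err != nil {
			t.Fatal(err)
		}
		defer os.Remove(path) // the caller owns cleanup

		// Or read a small obscured file directly into memory.
		data, err := obscuretestdata.ReadFile("testdata/example.bin.base64")
		if err != nil {
			t.Fatal(err)
		}
		_ = data
	}
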
diff --git a/src/internal/oserror/errors.go b/src/internal/oserror/errors.go
new file mode 100644
index 0000000..28a1ab3
--- /dev/null
+++ b/src/internal/oserror/errors.go
@@ -0,0 +1,18 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oserror defines error values used in the os package.
+//
+// These values are defined here to permit the syscall package to reference them.
+package oserror
+
+import "errors"
+
+var (
+ ErrInvalid = errors.New("invalid argument")
+ ErrPermission = errors.New("permission denied")
+ ErrExist = errors.New("file already exists")
+ ErrNotExist = errors.New("file does not exist")
+ ErrClosed = errors.New("file already closed")
+)
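
A hedged sketch of how these sentinels get used: a platform error type can report equivalence to them via an Is method, which is what lets errors.Is checks against the exported os errors work across packages (the type and errno values below are illustrative):

	type errno int // illustrative stand-in for a syscall error number

	func (e errno) Error() string { return "errno" }

	func (e errno) Is(target error) bool {
		switch target {
		case oserror.ErrPermission:
			return e == 13 // EACCES on many systems; illustrative
		case oserror.ErrNotExist:
			return e == 2 // ENOENT on many systems; illustrative
		}
		return false
	}
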
diff --git a/src/internal/pkgbits/codes.go b/src/internal/pkgbits/codes.go
new file mode 100644
index 0000000..f0cabde
--- /dev/null
+++ b/src/internal/pkgbits/codes.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A Code is an enum value that can be encoded into bitstreams.
+//
+// Code types are preferable for enum types, because they allow
+// Decoder to detect desyncs.
+type Code interface {
+ // Marker returns the SyncMarker for the Code's dynamic type.
+ Marker() SyncMarker
+
+ // Value returns the Code's ordinal value.
+ Value() int
+}
+
+// A CodeVal distinguishes among go/constant.Value encodings.
+type CodeVal int
+
+func (c CodeVal) Marker() SyncMarker { return SyncVal }
+func (c CodeVal) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ ValBool CodeVal = iota
+ ValString
+ ValInt64
+ ValBigInt
+ ValBigRat
+ ValBigFloat
+)
+
+// A CodeType distinguishes among go/types.Type encodings.
+type CodeType int
+
+func (c CodeType) Marker() SyncMarker { return SyncType }
+func (c CodeType) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ TypeBasic CodeType = iota
+ TypeNamed
+ TypePointer
+ TypeSlice
+ TypeArray
+ TypeChan
+ TypeMap
+ TypeSignature
+ TypeStruct
+ TypeInterface
+ TypeUnion
+ TypeTypeParam
+)
+
+// A CodeObj distinguishes among go/types.Object encodings.
+type CodeObj int
+
+func (c CodeObj) Marker() SyncMarker { return SyncCodeObj }
+func (c CodeObj) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ ObjAlias CodeObj = iota
+ ObjConst
+ ObjType
+ ObjFunc
+ ObjVar
+ ObjStub
+)
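
A hedged sketch of what another enum would look like: a new Code type pairs its ordinal with the SyncMarker written alongside it (codeShape and the reuse of SyncVal are purely illustrative; a real enum would define its own marker):

	type codeShape int

	func (c codeShape) Marker() pkgbits.SyncMarker { return pkgbits.SyncVal }
	func (c codeShape) Value() int                 { return int(c) }

	const (
		shapeCircle codeShape = iota
		shapeSquare
	)
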
diff --git a/src/internal/pkgbits/decoder.go b/src/internal/pkgbits/decoder.go
new file mode 100644
index 0000000..4fe024d
--- /dev/null
+++ b/src/internal/pkgbits/decoder.go
@@ -0,0 +1,515 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "io"
+ "math/big"
+ "os"
+ "runtime"
+ "strings"
+)
+
+// A PkgDecoder provides methods for decoding a package's Unified IR
+// export data.
+type PkgDecoder struct {
+ // version is the file format version.
+ version uint32
+
+ // sync indicates whether the file uses sync markers.
+ sync bool
+
+ // pkgPath is the package path for the package to be decoded.
+ //
+ // TODO(mdempsky): Remove; unneeded since CL 391014.
+ pkgPath string
+
+ // elemData is the full data payload of the encoded package.
+ // Elements are densely and contiguously packed together.
+ //
+ // The last 8 bytes of elemData are the package fingerprint.
+ elemData string
+
+ // elemEnds stores the byte-offset end positions of element
+ // bitstreams within elemData.
+ //
+ // For example, element I's bitstream data starts at elemEnds[I-1]
+ // (or 0, if I==0) and ends at elemEnds[I].
+ //
+ // Note: elemEnds is indexed by absolute indices, not
+ // section-relative indices.
+ elemEnds []uint32
+
+ // elemEndsEnds stores the index-offset end positions of relocation
+ // sections within elemEnds.
+ //
+ // For example, section K's end positions start at elemEndsEnds[K-1]
+ // (or 0, if K==0) and end at elemEndsEnds[K].
+ elemEndsEnds [numRelocs]uint32
+
+ scratchRelocEnt []RelocEnt
+}
+
+// PkgPath returns the package path for the package to be decoded.
+//
+// TODO(mdempsky): Remove; unneeded since CL 391014.
+func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }
+
+// SyncMarkers reports whether pr uses sync markers.
+func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
+
+// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
+// IR export data from input. pkgPath is the package path for the
+// compilation unit that produced the export data.
+//
+// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
+func NewPkgDecoder(pkgPath, input string) PkgDecoder {
+ pr := PkgDecoder{
+ pkgPath: pkgPath,
+ }
+
+ // TODO(mdempsky): Implement direct indexing of input string to
+ // avoid copying the position information.
+
+ r := strings.NewReader(input)
+
+ assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
+
+ switch pr.version {
+ default:
+ panic(fmt.Errorf("unsupported version: %v", pr.version))
+ case 0:
+ // no flags
+ case 1:
+ var flags uint32
+ assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
+ pr.sync = flags&flagSyncMarkers != 0
+ }
+
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
+
+ pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
+
+ pos, err := r.Seek(0, io.SeekCurrent)
+ assert(err == nil)
+
+ pr.elemData = input[pos:]
+ assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ return pr
+}
+
+// NumElems returns the number of elements in section k.
+func (pr *PkgDecoder) NumElems(k RelocKind) int {
+ count := int(pr.elemEndsEnds[k])
+ if k > 0 {
+ count -= int(pr.elemEndsEnds[k-1])
+ }
+ return count
+}
+
+// TotalElems returns the total number of elements across all sections.
+func (pr *PkgDecoder) TotalElems() int {
+ return len(pr.elemEnds)
+}
+
+// Fingerprint returns the package fingerprint.
+func (pr *PkgDecoder) Fingerprint() [8]byte {
+ var fp [8]byte
+ copy(fp[:], pr.elemData[len(pr.elemData)-8:])
+ return fp
+}
+
+// AbsIdx returns the absolute index for the given (section, index)
+// pair.
+func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
+ absIdx := int(idx)
+ if k > 0 {
+ absIdx += int(pr.elemEndsEnds[k-1])
+ }
+ if absIdx >= int(pr.elemEndsEnds[k]) {
+ errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ }
+ return absIdx
+}
+
+// DataIdx returns the raw element bitstream for the given (section,
+// index) pair.
+func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string {
+ absIdx := pr.AbsIdx(k, idx)
+
+ var start uint32
+ if absIdx > 0 {
+ start = pr.elemEnds[absIdx-1]
+ }
+ end := pr.elemEnds[absIdx]
+
+ return pr.elemData[start:end]
+}
+
+// StringIdx returns the string value for the given string index.
+func (pr *PkgDecoder) StringIdx(idx Index) string {
+ return pr.DataIdx(RelocString, idx)
+}
+
+// NewDecoder returns a Decoder for the given (section, index) pair,
+// and decodes the given SyncMarker from the element bitstream.
+func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+ r := pr.NewDecoderRaw(k, idx)
+ r.Sync(marker)
+ return r
+}
+
+// TempDecoder returns a Decoder for the given (section, index) pair,
+// and decodes the given SyncMarker from the element bitstream.
+// If possible, the Decoder should be RetireDecoder'd when it is no longer
+// needed; this will avoid heap allocations.
+func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+ r := pr.TempDecoderRaw(k, idx)
+ r.Sync(marker)
+ return r
+}
+
+func (pr *PkgDecoder) RetireDecoder(d *Decoder) {
+ pr.scratchRelocEnt = d.Relocs
+ d.Relocs = nil
+}
+
+// NewDecoderRaw returns a Decoder for the given (section, index) pair.
+//
+// Most callers should use NewDecoder instead.
+func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
+ r := Decoder{
+ common: pr,
+ k: k,
+ Idx: idx,
+ }
+
+ r.Data.Reset(pr.DataIdx(k, idx))
+ r.Sync(SyncRelocs)
+ r.Relocs = make([]RelocEnt, r.Len())
+ for i := range r.Relocs {
+ r.Sync(SyncReloc)
+ r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+ }
+
+ return r
+}
+
+func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
+ r := Decoder{
+ common: pr,
+ k: k,
+ Idx: idx,
+ }
+
+ r.Data.Reset(pr.DataIdx(k, idx))
+ r.Sync(SyncRelocs)
+ l := r.Len()
+ if cap(pr.scratchRelocEnt) >= l {
+ r.Relocs = pr.scratchRelocEnt[:l]
+ pr.scratchRelocEnt = nil
+ } else {
+ r.Relocs = make([]RelocEnt, l)
+ }
+ for i := range r.Relocs {
+ r.Sync(SyncReloc)
+ r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+ }
+
+ return r
+}
+
+// A Decoder provides methods for decoding an individual element's
+// bitstream data.
+type Decoder struct {
+ common *PkgDecoder
+
+ Relocs []RelocEnt
+ Data strings.Reader
+
+ k RelocKind
+ Idx Index
+}
+
+func (r *Decoder) checkErr(err error) {
+ if err != nil {
+ errorf("unexpected decoding error: %w", err)
+ }
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+ x, err := readUvarint(&r.Data)
+ r.checkErr(err)
+ return x
+}
+
+// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
+// This avoids the interface conversion and thus has better escape properties,
+// which flow up the stack.
+func readUvarint(r *strings.Reader) (uint64, error) {
+ var x uint64
+ var s uint
+ for i := 0; i < binary.MaxVarintLen64; i++ {
+ b, err := r.ReadByte()
+ if err != nil {
+ if i > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return x, err
+ }
+ if b < 0x80 {
+ if i == binary.MaxVarintLen64-1 && b > 1 {
+ return x, overflow
+ }
+ return x | uint64(b)<<s, nil
+ }
+ x |= uint64(b&0x7f) << s
+ s += 7
+ }
+ return x, overflow
+}
+
+var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
+
+func (r *Decoder) rawVarint() int64 {
+ ux := r.rawUvarint()
+
+ // Zig-zag decode.
+ x := int64(ux >> 1)
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+ e := r.Relocs[idx]
+ assert(e.Kind == k)
+ return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If sync markers are disabled, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+ if !r.common.sync {
+ return
+ }
+
+ pos, _ := r.Data.Seek(0, io.SeekCurrent)
+ mHave := SyncMarker(r.rawUvarint())
+ writerPCs := make([]int, r.rawUvarint())
+ for i := range writerPCs {
+ writerPCs[i] = int(r.rawUvarint())
+ }
+
+ if mHave == mWant {
+ return
+ }
+
+ // There's some tension here between printing:
+ //
+ // (1) full file paths that tools can recognize (e.g., so emacs
+ // hyperlinks the "file:line" text for easy navigation), or
+ //
+ // (2) short file paths that are easier for humans to read (e.g., by
+ // omitting redundant or irrelevant details, so it's easier to
+ // focus on the useful bits that remain).
+ //
+ // The current formatting favors the former, as it seems more
+ // helpful in practice. But perhaps the formatting could be improved
+ // to better address both concerns. For example, use relative file
+ // paths if they would be shorter, or rewrite file paths to contain
+ // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+ // to reliably expand that again.
+
+ fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos)
+
+ fmt.Printf("\nfound %v, written at:\n", mHave)
+ if len(writerPCs) == 0 {
+ fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+ }
+ for _, pc := range writerPCs {
+ fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc)))
+ }
+
+ fmt.Printf("\nexpected %v, reading at:\n", mWant)
+ var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+ n := runtime.Callers(2, readerPCs[:])
+ for _, pc := range fmtFrames(readerPCs[:n]...) {
+ fmt.Printf("\t%s\n", pc)
+ }
+
+ // We already printed a stack trace for the reader, so now we can
+ // simply exit. Printing a second one with panic or base.Fatalf
+ // would just be noise.
+ os.Exit(1)
+}
+
+// Bool decodes and returns a bool value from the element bitstream.
+func (r *Decoder) Bool() bool {
+ r.Sync(SyncBool)
+ x, err := r.Data.ReadByte()
+ r.checkErr(err)
+ assert(x < 2)
+ return x != 0
+}
+
+// Int64 decodes and returns an int64 value from the element bitstream.
+func (r *Decoder) Int64() int64 {
+ r.Sync(SyncInt64)
+ return r.rawVarint()
+}
+
+// Uint64 decodes and returns a uint64 value from the element bitstream.
+func (r *Decoder) Uint64() uint64 {
+ r.Sync(SyncUint64)
+ return r.rawUvarint()
+}
+
+// Len decodes and returns a non-negative int value from the element bitstream.
+func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }
+
+// Int decodes and returns an int value from the element bitstream.
+func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }
+
+// Uint decodes and returns a uint value from the element bitstream.
+func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v }
+
+// Code decodes a Code value from the element bitstream and returns
+// its ordinal value. It's the caller's responsibility to convert the
+// result to an appropriate Code type.
+//
+// TODO(mdempsky): Ideally this method would have signature "Code[T
+// Code] T" instead, but we don't allow generic methods and the
+// compiler can't depend on generics yet anyway.
+func (r *Decoder) Code(mark SyncMarker) int {
+ r.Sync(mark)
+ return r.Len()
+}
+
+// Reloc decodes a relocation of expected section k from the element
+// bitstream and returns an index to the referenced element.
+func (r *Decoder) Reloc(k RelocKind) Index {
+ r.Sync(SyncUseReloc)
+ return r.rawReloc(k, r.Len())
+}
+
+// String decodes and returns a string value from the element
+// bitstream.
+func (r *Decoder) String() string {
+ r.Sync(SyncString)
+ return r.common.StringIdx(r.Reloc(RelocString))
+}
+
+// Strings decodes and returns a variable-length slice of strings from
+// the element bitstream.
+func (r *Decoder) Strings() []string {
+ res := make([]string, r.Len())
+ for i := range res {
+ res[i] = r.String()
+ }
+ return res
+}
+
+// Value decodes and returns a constant.Value from the element
+// bitstream.
+func (r *Decoder) Value() constant.Value {
+ r.Sync(SyncValue)
+ isComplex := r.Bool()
+ val := r.scalar()
+ if isComplex {
+ val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
+ }
+ return val
+}
+
+func (r *Decoder) scalar() constant.Value {
+ switch tag := CodeVal(r.Code(SyncVal)); tag {
+ default:
+ panic(fmt.Errorf("unexpected scalar tag: %v", tag))
+
+ case ValBool:
+ return constant.MakeBool(r.Bool())
+ case ValString:
+ return constant.MakeString(r.String())
+ case ValInt64:
+ return constant.MakeInt64(r.Int64())
+ case ValBigInt:
+ return constant.Make(r.bigInt())
+ case ValBigRat:
+ num := r.bigInt()
+ denom := r.bigInt()
+ return constant.Make(new(big.Rat).SetFrac(num, denom))
+ case ValBigFloat:
+ return constant.Make(r.bigFloat())
+ }
+}
+
+func (r *Decoder) bigInt() *big.Int {
+ v := new(big.Int).SetBytes([]byte(r.String()))
+ if r.Bool() {
+ v.Neg(v)
+ }
+ return v
+}
+
+func (r *Decoder) bigFloat() *big.Float {
+ v := new(big.Float).SetPrec(512)
+ assert(v.UnmarshalText([]byte(r.String())) == nil)
+ return v
+}
+
+// @@@ Helpers
+
+// TODO(mdempsky): These should probably be removed. I think they're a
+// smell that the export data format is not yet quite right.
+
+// PeekPkgPath returns the package path for the specified package
+// index.
+func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
+ var path string
+ {
+ r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef)
+ path = r.String()
+ pr.RetireDecoder(&r)
+ }
+ if path == "" {
+ path = pr.pkgPath
+ }
+ return path
+}
+
+// PeekObj returns the package path, object name, and CodeObj for the
+// specified object index.
+func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
+ var ridx Index
+ var name string
+ var rcode int
+ {
+ r := pr.TempDecoder(RelocName, idx, SyncObject1)
+ r.Sync(SyncSym)
+ r.Sync(SyncPkg)
+ ridx = r.Reloc(RelocPkg)
+ name = r.String()
+ rcode = r.Code(SyncCodeObj)
+ pr.RetireDecoder(&r)
+ }
+
+ path := pr.PeekPkgPath(ridx)
+ assert(name != "")
+
+ tag := CodeObj(rcode)
+
+ return path, name, tag
+}
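
A round-trip sketch connecting this decoder with the encoder that follows: a string interned via PkgEncoder.StringIdx can be recovered by index after DumpTo/NewPkgDecoder (sketch only; real callers also write and read element bitstreams):

	func roundTrip() string {
		var buf bytes.Buffer
		pw := pkgbits.NewPkgEncoder(-1)     // negative syncFrames: omit sync markers
		idx := pw.StringIdx("hello, world") // deduplicated into the RelocString section
		_ = pw.DumpTo(&buf)                 // also returns the 8-byte fingerprint
		pr := pkgbits.NewPkgDecoder("example", buf.String())
		return pr.StringIdx(idx) // "hello, world" again
	}
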
diff --git a/src/internal/pkgbits/doc.go b/src/internal/pkgbits/doc.go
new file mode 100644
index 0000000..4862e39
--- /dev/null
+++ b/src/internal/pkgbits/doc.go
@@ -0,0 +1,30 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkgbits implements low-level coding abstractions for
+// Unified IR's export data format.
+//
+// At a low-level, a package is a collection of bitstream elements.
+// Each element has a "kind" and a dense, non-negative index.
+// Elements can be randomly accessed given their kind and index.
+//
+// Individual elements are sequences of variable-length values (e.g.,
+// integers, booleans, strings, go/constant values, cross-references
+// to other elements). Package pkgbits provides APIs for encoding and
+// decoding these low-level values, but the details of mapping
+// higher-level Go constructs into elements is left to higher-level
+// abstractions.
+//
+// Elements may cross-reference each other with "relocations." For
+// example, an element representing a pointer type has a relocation
+// referring to the element type.
+//
+// Go constructs may be composed as a constellation of multiple
+// elements. For example, a declared function may have one element to
+// describe the object (e.g., its name, type, position), and a
+// separate element to describe its function body. This allows readers
+// some flexibility in efficiently seeking or re-reading data (e.g.,
+// inlining requires re-reading the function body for each inlined
+// call, without needing to re-read the object-level details).
+package pkgbits
diff --git a/src/internal/pkgbits/encoder.go b/src/internal/pkgbits/encoder.go
new file mode 100644
index 0000000..70a2cba
--- /dev/null
+++ b/src/internal/pkgbits/encoder.go
@@ -0,0 +1,394 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/binary"
+ "go/constant"
+ "io"
+ "math/big"
+ "runtime"
+ "strings"
+)
+
+// currentVersion is the current version number.
+//
+// - v0: initial prototype
+//
+// - v1: adds the flags uint32 word
+//
+// TODO(mdempsky): For the next version bump:
+// - remove the legacy "has init" bool from the public root
+// - remove obj's "derived func instance" bool
+const currentVersion uint32 = 1
+
+// A PkgEncoder provides methods for encoding a package's Unified IR
+// export data.
+type PkgEncoder struct {
+ // elems holds the bitstream for previously encoded elements.
+ elems [numRelocs][]string
+
+ // stringsIdx maps previously encoded strings to their index within
+ // the RelocString section, to allow deduplication. That is,
+ // elems[RelocString][stringsIdx[s]] == s (if present).
+ stringsIdx map[string]Index
+
+ // syncFrames is the number of frames to write at each sync
+ // marker. A negative value means sync markers are omitted.
+ syncFrames int
+}
+
+// SyncMarkers reports whether pw uses sync markers.
+func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
+
+// NewPkgEncoder returns an initialized PkgEncoder.
+//
+// syncFrames is the number of caller frames that should be serialized
+// at Sync points. Serializing additional frames results in larger
+// export data files, but can help diagnose desync errors in
+// higher-level Unified IR reader/writer code. If syncFrames is
+// negative, then sync markers are omitted entirely.
+func NewPkgEncoder(syncFrames int) PkgEncoder {
+ return PkgEncoder{
+ stringsIdx: make(map[string]Index),
+ syncFrames: syncFrames,
+ }
+}
+
+// DumpTo writes the package's encoded data to out0 and returns the
+// package fingerprint.
+func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
+ h := md5.New()
+ out := io.MultiWriter(out0, h)
+
+ writeUint32 := func(x uint32) {
+ assert(binary.Write(out, binary.LittleEndian, x) == nil)
+ }
+
+ writeUint32(currentVersion)
+
+ var flags uint32
+ if pw.SyncMarkers() {
+ flags |= flagSyncMarkers
+ }
+ writeUint32(flags)
+
+ // Write elemEndsEnds.
+ var sum uint32
+ for _, elems := range &pw.elems {
+ sum += uint32(len(elems))
+ writeUint32(sum)
+ }
+
+ // Write elemEnds.
+ sum = 0
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ sum += uint32(len(elem))
+ writeUint32(sum)
+ }
+ }
+
+ // Write elemData.
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ _, err := io.WriteString(out, elem)
+ assert(err == nil)
+ }
+ }
+
+ // Write fingerprint.
+ copy(fingerprint[:], h.Sum(nil))
+ _, err := out0.Write(fingerprint[:])
+ assert(err == nil)
+
+ return
+}
+
+// StringIdx adds a string value to the strings section, if not
+// already present, and returns its index.
+func (pw *PkgEncoder) StringIdx(s string) Index {
+ if idx, ok := pw.stringsIdx[s]; ok {
+ assert(pw.elems[RelocString][idx] == s)
+ return idx
+ }
+
+ idx := Index(len(pw.elems[RelocString]))
+ pw.elems[RelocString] = append(pw.elems[RelocString], s)
+ pw.stringsIdx[s] = idx
+ return idx
+}
+
+// NewEncoder returns an Encoder for a new element within the given
+// section, and encodes the given SyncMarker as the start of the
+// element bitstream.
+func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder {
+ e := pw.NewEncoderRaw(k)
+ e.Sync(marker)
+ return e
+}
+
+// NewEncoderRaw returns an Encoder for a new element within the given
+// section.
+//
+// Most callers should use NewEncoder instead.
+func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
+ idx := Index(len(pw.elems[k]))
+ pw.elems[k] = append(pw.elems[k], "") // placeholder
+
+ return Encoder{
+ p: pw,
+ k: k,
+ Idx: idx,
+ }
+}
+
+// An Encoder provides methods for encoding an individual element's
+// bitstream data.
+type Encoder struct {
+ p *PkgEncoder
+
+ Relocs []RelocEnt
+ RelocMap map[RelocEnt]uint32
+ Data bytes.Buffer // accumulated element bitstream data
+
+ encodingRelocHeader bool
+
+ k RelocKind
+ Idx Index // index within relocation section
+}
+
+// Flush finalizes the element's bitstream and returns its Index.
+func (w *Encoder) Flush() Index {
+ var sb strings.Builder
+
+ // Back up the data so we can write the relocations at the front.
+ var tmp bytes.Buffer
+ io.Copy(&tmp, &w.Data)
+
+ // TODO(mdempsky): Consider writing these out separately so they're
+ // easier to strip, along with function bodies, so that we can prune
+ // down to just the data that's relevant to go/types.
+ if w.encodingRelocHeader {
+ panic("encodingRelocHeader already true; recursive flush?")
+ }
+ w.encodingRelocHeader = true
+ w.Sync(SyncRelocs)
+ w.Len(len(w.Relocs))
+ for _, rEnt := range w.Relocs {
+ w.Sync(SyncReloc)
+ w.Len(int(rEnt.Kind))
+ w.Len(int(rEnt.Idx))
+ }
+
+ io.Copy(&sb, &w.Data)
+ io.Copy(&sb, &tmp)
+ w.p.elems[w.k][w.Idx] = sb.String()
+
+ return w.Idx
+}
+
+func (w *Encoder) checkErr(err error) {
+ if err != nil {
+ errorf("unexpected encoding error: %v", err)
+ }
+}
+
+func (w *Encoder) rawUvarint(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ _, err := w.Data.Write(buf[:n])
+ w.checkErr(err)
+}
+
+func (w *Encoder) rawVarint(x int64) {
+ // Zig-zag encode.
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+
+ w.rawUvarint(ux)
+}
+
+func (w *Encoder) rawReloc(r RelocKind, idx Index) int {
+ e := RelocEnt{r, idx}
+ if w.RelocMap != nil {
+ if i, ok := w.RelocMap[e]; ok {
+ return int(i)
+ }
+ } else {
+ w.RelocMap = make(map[RelocEnt]uint32)
+ }
+
+ i := len(w.Relocs)
+ w.RelocMap[e] = uint32(i)
+ w.Relocs = append(w.Relocs, e)
+ return i
+}
+
+func (w *Encoder) Sync(m SyncMarker) {
+ if !w.p.SyncMarkers() {
+ return
+ }
+
+ // Writing out stack frame string references requires working
+ // relocations, but writing out the relocations themselves involves
+ // sync markers. To prevent infinite recursion, we simply trim the
+ // stack frame for sync markers within the relocation header.
+ var frames []string
+ if !w.encodingRelocHeader && w.p.syncFrames > 0 {
+ pcs := make([]uintptr, w.p.syncFrames)
+ n := runtime.Callers(2, pcs)
+ frames = fmtFrames(pcs[:n]...)
+ }
+
+ // TODO(mdempsky): Save space by writing out stack frames as a
+ // linked list so we can share common stack frames.
+ w.rawUvarint(uint64(m))
+ w.rawUvarint(uint64(len(frames)))
+ for _, frame := range frames {
+ w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame))))
+ }
+}
+
+// Bool encodes and writes a bool value into the element bitstream,
+// and then returns the bool value.
+//
+// For simple, 2-alternative encodings, the idiomatic way to call Bool
+// is something like:
+//
+// if w.Bool(x != 0) {
+// // alternative #1
+// } else {
+// // alternative #2
+// }
+//
+// For multi-alternative encodings, use Code instead.
+func (w *Encoder) Bool(b bool) bool {
+ w.Sync(SyncBool)
+ var x byte
+ if b {
+ x = 1
+ }
+ err := w.Data.WriteByte(x)
+ w.checkErr(err)
+ return b
+}
+
+// Int64 encodes and writes an int64 value into the element bitstream.
+func (w *Encoder) Int64(x int64) {
+ w.Sync(SyncInt64)
+ w.rawVarint(x)
+}
+
+// Uint64 encodes and writes a uint64 value into the element bitstream.
+func (w *Encoder) Uint64(x uint64) {
+ w.Sync(SyncUint64)
+ w.rawUvarint(x)
+}
+
+// Len encodes and writes a non-negative int value into the element bitstream.
+func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
+
+// Int encodes and writes an int value into the element bitstream.
+func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
+
+// Uint encodes and writes a uint value into the element bitstream.
+func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
+
+// Reloc encodes and writes a relocation for the given (section,
+// index) pair into the element bitstream.
+//
+// Note: Only the index is formally written into the element
+// bitstream, so bitstream decoders must know from context which
+// section an encoded relocation refers to.
+func (w *Encoder) Reloc(r RelocKind, idx Index) {
+ w.Sync(SyncUseReloc)
+ w.Len(w.rawReloc(r, idx))
+}
+
+// Code encodes and writes a Code value into the element bitstream.
+func (w *Encoder) Code(c Code) {
+ w.Sync(c.Marker())
+ w.Len(c.Value())
+}
+
+// String encodes and writes a string value into the element
+// bitstream.
+//
+// Internally, strings are deduplicated by adding them to the strings
+// section (if not already present), and then writing a relocation
+// into the element bitstream.
+func (w *Encoder) String(s string) {
+ w.StringRef(w.p.StringIdx(s))
+}
+
+// StringRef writes a reference to the given index, which must be a
+// previously encoded string value.
+func (w *Encoder) StringRef(idx Index) {
+ w.Sync(SyncString)
+ w.Reloc(RelocString, idx)
+}
+
+// Strings encodes and writes a variable-length slice of strings into
+// the element bitstream.
+func (w *Encoder) Strings(ss []string) {
+ w.Len(len(ss))
+ for _, s := range ss {
+ w.String(s)
+ }
+}
+
+// Value encodes and writes a constant.Value into the element
+// bitstream.
+func (w *Encoder) Value(val constant.Value) {
+ w.Sync(SyncValue)
+ if w.Bool(val.Kind() == constant.Complex) {
+ w.scalar(constant.Real(val))
+ w.scalar(constant.Imag(val))
+ } else {
+ w.scalar(val)
+ }
+}
+
+func (w *Encoder) scalar(val constant.Value) {
+ switch v := constant.Val(val).(type) {
+ default:
+ errorf("unhandled %v (%v)", val, val.Kind())
+ case bool:
+ w.Code(ValBool)
+ w.Bool(v)
+ case string:
+ w.Code(ValString)
+ w.String(v)
+ case int64:
+ w.Code(ValInt64)
+ w.Int64(v)
+ case *big.Int:
+ w.Code(ValBigInt)
+ w.bigInt(v)
+ case *big.Rat:
+ w.Code(ValBigRat)
+ w.bigInt(v.Num())
+ w.bigInt(v.Denom())
+ case *big.Float:
+ w.Code(ValBigFloat)
+ w.bigFloat(v)
+ }
+}
+
+func (w *Encoder) bigInt(v *big.Int) {
+ b := v.Bytes()
+ w.String(string(b)) // TODO: More efficient encoding.
+ w.Bool(v.Sign() < 0)
+}
+
+func (w *Encoder) bigFloat(v *big.Float) {
+ b := v.Append(nil, 'p', -1)
+ w.String(string(b)) // TODO: More efficient encoding.
+}
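
The encoder API above fits together roughly as follows. A minimal sketch, assuming it runs inside the Go source tree (internal/pkgbits is not importable from ordinary programs); the flag and string values are invented for illustration:

package main

import (
	"bytes"
	"fmt"

	"internal/pkgbits" // only importable from within the standard library
)

func main() {
	pw := pkgbits.NewPkgEncoder(-1) // negative syncFrames: omit sync markers

	// Encode one element in the meta section.
	w := pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPublic)
	w.Bool(false)            // an invented flag value
	w.String("example/path") // routed through the deduplicated strings section
	idx := w.Flush()         // first meta element, so idx == pkgbits.PublicRootIdx

	// Serialize the whole package and compute its fingerprint.
	var buf bytes.Buffer
	fp := pw.DumpTo(&buf)
	fmt.Println(idx, buf.Len(), fp)
}
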
diff --git a/src/internal/pkgbits/flags.go b/src/internal/pkgbits/flags.go
new file mode 100644
index 0000000..6542227
--- /dev/null
+++ b/src/internal/pkgbits/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+const (
+ flagSyncMarkers = 1 << iota // file format contains sync markers
+)
diff --git a/src/internal/pkgbits/reloc.go b/src/internal/pkgbits/reloc.go
new file mode 100644
index 0000000..fcdfb97
--- /dev/null
+++ b/src/internal/pkgbits/reloc.go
@@ -0,0 +1,42 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A RelocKind indicates a particular section within a Unified IR export.
+type RelocKind int32
+
+// An Index represents a bitstream element index within a particular
+// section.
+type Index int32
+
+// A RelocEnt (relocation entry) is an entry in an element's local
+// reference table.
+//
+// TODO(mdempsky): Rename this too.
+type RelocEnt struct {
+ Kind RelocKind
+ Idx Index
+}
+
+// Reserved indices within the meta relocation section.
+const (
+ PublicRootIdx Index = 0
+ PrivateRootIdx Index = 1
+)
+
+const (
+ RelocString RelocKind = iota
+ RelocMeta
+ RelocPosBase
+ RelocPkg
+ RelocName
+ RelocType
+ RelocObj
+ RelocObjExt
+ RelocObjDict
+ RelocBody
+
+ numRelocs = iota
+)
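
To make the indirection concrete: a cross-element reference is just a (section, index) pair. A brief hedged sketch using the names above (the index value is invented, and this only compiles inside the Go tree):

// A reference into the strings section, as stored in an element's
// local reference table.
ref := pkgbits.RelocEnt{Kind: pkgbits.RelocString, Idx: pkgbits.Index(3)}

// The meta section reserves its first two elements:
//   pkgbits.PublicRootIdx  (0) - root element visible to go/types importers
//   pkgbits.PrivateRootIdx (1) - root element private to cmd/compile
_ = ref
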
diff --git a/src/internal/pkgbits/support.go b/src/internal/pkgbits/support.go
new file mode 100644
index 0000000..f7579df
--- /dev/null
+++ b/src/internal/pkgbits/support.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import "fmt"
+
+func assert(b bool) {
+ if !b {
+ panic("assertion failed")
+ }
+}
+
+func errorf(format string, args ...any) {
+ panic(fmt.Errorf(format, args...))
+}
diff --git a/src/internal/pkgbits/sync.go b/src/internal/pkgbits/sync.go
new file mode 100644
index 0000000..1520b73
--- /dev/null
+++ b/src/internal/pkgbits/sync.go
@@ -0,0 +1,136 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+)
+
+// fmtFrames formats a backtrace for reporting reader/writer desyncs.
+func fmtFrames(pcs ...uintptr) []string {
+ res := make([]string, 0, len(pcs))
+ walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
+ // Trim package from function name. It's just redundant noise.
+ name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
+
+ res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
+ })
+ return res
+}
+
+type frameVisitor func(file string, line int, name string, offset uintptr)
+
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ if len(pcs) == 0 {
+ return
+ }
+
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+ if !more {
+ return
+ }
+ }
+}
+
+// SyncMarker is an enum type that represents markers that may be
+// written to export data to ensure the reader and writer stay
+// synchronized.
+type SyncMarker int
+
+//go:generate stringer -type=SyncMarker -trimprefix=Sync
+
+const (
+ _ SyncMarker = iota
+
+ // Public markers (known to go/types importers).
+
+ // Low-level coding markers.
+ SyncEOF
+ SyncBool
+ SyncInt64
+ SyncUint64
+ SyncString
+ SyncValue
+ SyncVal
+ SyncRelocs
+ SyncReloc
+ SyncUseReloc
+
+ // Higher-level object and type markers.
+ SyncPublic
+ SyncPos
+ SyncPosBase
+ SyncObject
+ SyncObject1
+ SyncPkg
+ SyncPkgDef
+ SyncMethod
+ SyncType
+ SyncTypeIdx
+ SyncTypeParamNames
+ SyncSignature
+ SyncParams
+ SyncParam
+ SyncCodeObj
+ SyncSym
+ SyncLocalIdent
+ SyncSelector
+
+ // Private markers (only known to cmd/compile).
+ SyncPrivate
+
+ SyncFuncExt
+ SyncVarExt
+ SyncTypeExt
+ SyncPragma
+
+ SyncExprList
+ SyncExprs
+ SyncExpr
+ SyncExprType
+ SyncAssign
+ SyncOp
+ SyncFuncLit
+ SyncCompLit
+
+ SyncDecl
+ SyncFuncBody
+ SyncOpenScope
+ SyncCloseScope
+ SyncCloseAnotherScope
+ SyncDeclNames
+ SyncDeclName
+
+ SyncStmts
+ SyncBlockStmt
+ SyncIfStmt
+ SyncForStmt
+ SyncSwitchStmt
+ SyncRangeStmt
+ SyncCaseClause
+ SyncCommClause
+ SyncSelectStmt
+ SyncDecls
+ SyncLabeledStmt
+ SyncUseObjLocal
+ SyncAddLocal
+ SyncLinkname
+ SyncStmt1
+ SyncStmtsEnd
+ SyncLabel
+ SyncOptLabel
+
+ SyncMultiExpr
+ SyncRType
+ SyncConvRTTI
+)
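
fmtFrames and walkFrames are unexported, but the pattern they wrap is plain runtime API. A self-contained analogue of the same capture-then-walk loop:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Capture up to 8 program counters for the current call stack,
	// skipping runtime.Callers itself (skip=1); Sync above uses skip=2
	// to also skip its own frame.
	pcs := make([]uintptr, 8)
	n := runtime.Callers(1, pcs)

	// Walk the frames the same way walkFrames does.
	frames := runtime.CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s:%d: %s +0x%x\n",
			frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
		if !more {
			break
		}
	}
}
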
diff --git a/src/internal/pkgbits/syncmarker_string.go b/src/internal/pkgbits/syncmarker_string.go
new file mode 100644
index 0000000..582ad56
--- /dev/null
+++ b/src/internal/pkgbits/syncmarker_string.go
@@ -0,0 +1,92 @@
+// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT.
+
+package pkgbits
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[SyncEOF-1]
+ _ = x[SyncBool-2]
+ _ = x[SyncInt64-3]
+ _ = x[SyncUint64-4]
+ _ = x[SyncString-5]
+ _ = x[SyncValue-6]
+ _ = x[SyncVal-7]
+ _ = x[SyncRelocs-8]
+ _ = x[SyncReloc-9]
+ _ = x[SyncUseReloc-10]
+ _ = x[SyncPublic-11]
+ _ = x[SyncPos-12]
+ _ = x[SyncPosBase-13]
+ _ = x[SyncObject-14]
+ _ = x[SyncObject1-15]
+ _ = x[SyncPkg-16]
+ _ = x[SyncPkgDef-17]
+ _ = x[SyncMethod-18]
+ _ = x[SyncType-19]
+ _ = x[SyncTypeIdx-20]
+ _ = x[SyncTypeParamNames-21]
+ _ = x[SyncSignature-22]
+ _ = x[SyncParams-23]
+ _ = x[SyncParam-24]
+ _ = x[SyncCodeObj-25]
+ _ = x[SyncSym-26]
+ _ = x[SyncLocalIdent-27]
+ _ = x[SyncSelector-28]
+ _ = x[SyncPrivate-29]
+ _ = x[SyncFuncExt-30]
+ _ = x[SyncVarExt-31]
+ _ = x[SyncTypeExt-32]
+ _ = x[SyncPragma-33]
+ _ = x[SyncExprList-34]
+ _ = x[SyncExprs-35]
+ _ = x[SyncExpr-36]
+ _ = x[SyncExprType-37]
+ _ = x[SyncAssign-38]
+ _ = x[SyncOp-39]
+ _ = x[SyncFuncLit-40]
+ _ = x[SyncCompLit-41]
+ _ = x[SyncDecl-42]
+ _ = x[SyncFuncBody-43]
+ _ = x[SyncOpenScope-44]
+ _ = x[SyncCloseScope-45]
+ _ = x[SyncCloseAnotherScope-46]
+ _ = x[SyncDeclNames-47]
+ _ = x[SyncDeclName-48]
+ _ = x[SyncStmts-49]
+ _ = x[SyncBlockStmt-50]
+ _ = x[SyncIfStmt-51]
+ _ = x[SyncForStmt-52]
+ _ = x[SyncSwitchStmt-53]
+ _ = x[SyncRangeStmt-54]
+ _ = x[SyncCaseClause-55]
+ _ = x[SyncCommClause-56]
+ _ = x[SyncSelectStmt-57]
+ _ = x[SyncDecls-58]
+ _ = x[SyncLabeledStmt-59]
+ _ = x[SyncUseObjLocal-60]
+ _ = x[SyncAddLocal-61]
+ _ = x[SyncLinkname-62]
+ _ = x[SyncStmt1-63]
+ _ = x[SyncStmtsEnd-64]
+ _ = x[SyncLabel-65]
+ _ = x[SyncOptLabel-66]
+ _ = x[SyncMultiExpr-67]
+ _ = x[SyncRType-68]
+ _ = x[SyncConvRTTI-69]
+}
+
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
+
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
+
+func (i SyncMarker) String() string {
+ i -= 1
+ if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) {
+ return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]]
+}
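
The generated String method behaves like standard stringer output; for instance (illustrative only, since this runs only inside the Go tree):

fmt.Println(pkgbits.SyncBool)       // prints "Bool"
fmt.Println(pkgbits.SyncMarker(99)) // out of range, prints "SyncMarker(99)"
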
diff --git a/src/internal/platform/supported.go b/src/internal/platform/supported.go
new file mode 100644
index 0000000..230a952
--- /dev/null
+++ b/src/internal/platform/supported.go
@@ -0,0 +1,286 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go test . -run=TestGenerated -fix
+
+package platform
+
+// An OSArch is a pair of GOOS and GOARCH values indicating a platform.
+type OSArch struct {
+ GOOS, GOARCH string
+}
+
+func (p OSArch) String() string {
+ return p.GOOS + "/" + p.GOARCH
+}
+
+// RaceDetectorSupported reports whether goos/goarch supports the race
+// detector. There is a copy of this function in cmd/dist/test.go.
+// The race detector only supports 48-bit VMA on arm64, but this function
+// always returns true for arm64 because VMA size information is not
+// available at compile time.
+func RaceDetectorSupported(goos, goarch string) bool {
+ switch goos {
+ case "linux":
+ return goarch == "amd64" || goarch == "ppc64le" || goarch == "arm64" || goarch == "s390x"
+ case "darwin":
+ return goarch == "amd64" || goarch == "arm64"
+ case "freebsd", "netbsd", "openbsd", "windows":
+ return goarch == "amd64"
+ default:
+ return false
+ }
+}
+
+// MSanSupported reports whether goos/goarch supports the memory
+// sanitizer option.
+func MSanSupported(goos, goarch string) bool {
+ switch goos {
+ case "linux":
+ return goarch == "amd64" || goarch == "arm64"
+ case "freebsd":
+ return goarch == "amd64"
+ default:
+ return false
+ }
+}
+
+// ASanSupported reports whether goos/goarch supports the address
+// sanitizer option.
+func ASanSupported(goos, goarch string) bool {
+ switch goos {
+ case "linux":
+ return goarch == "arm64" || goarch == "amd64" || goarch == "riscv64" || goarch == "ppc64le"
+ default:
+ return false
+ }
+}
+
+// FuzzSupported reports whether goos/goarch supports fuzzing
+// ('go test -fuzz=.').
+func FuzzSupported(goos, goarch string) bool {
+ switch goos {
+ case "darwin", "freebsd", "linux", "windows":
+ return true
+ default:
+ return false
+ }
+}
+
+// FuzzInstrumented reports whether fuzzing on goos/goarch uses coverage
+// instrumentation. (FuzzInstrumented implies FuzzSupported.)
+func FuzzInstrumented(goos, goarch string) bool {
+ switch goarch {
+ case "amd64", "arm64":
+ // TODO(#14565): support more architectures.
+ return FuzzSupported(goos, goarch)
+ default:
+ return false
+ }
+}
+
+// MustLinkExternal reports whether goos/goarch requires external linking
+// with or without cgo dependencies.
+func MustLinkExternal(goos, goarch string, withCgo bool) bool {
+ if withCgo {
+ switch goarch {
+ case "loong64",
+ "mips", "mipsle", "mips64", "mips64le",
+ "riscv64":
+ // Internally linking cgo is incomplete on some architectures.
+ // https://go.dev/issue/14449
+ return true
+ case "arm64":
+ if goos == "windows" {
+ // windows/arm64 internal linking is not implemented.
+ return true
+ }
+ case "ppc64":
+ // Big Endian PPC64 cgo internal linking is not implemented for aix or linux.
+ // https://go.dev/issue/8912
+ return true
+ }
+
+ switch goos {
+ case "android":
+ return true
+ case "dragonfly":
+ // It seems that on Dragonfly thread local storage is
+ // set up by the dynamic linker, so internal cgo linking
+ // doesn't work. Test case is "go test runtime/cgo".
+ return true
+ }
+ }
+
+ switch goos {
+ case "android":
+ if goarch != "arm64" {
+ return true
+ }
+ case "ios":
+ if goarch == "arm64" {
+ return true
+ }
+ }
+ return false
+}
+
+// BuildModeSupported reports whether goos/goarch supports the given build mode
+// using the given compiler.
+// There is a copy of this function in cmd/dist/test.go.
+func BuildModeSupported(compiler, buildmode, goos, goarch string) bool {
+ if compiler == "gccgo" {
+ return true
+ }
+
+ if _, ok := distInfo[OSArch{goos, goarch}]; !ok {
+ return false // platform unrecognized
+ }
+
+ platform := goos + "/" + goarch
+ switch buildmode {
+ case "archive":
+ return true
+
+ case "c-archive":
+ switch goos {
+ case "aix", "darwin", "ios", "windows":
+ return true
+ case "linux":
+ switch goarch {
+ case "386", "amd64", "arm", "armbe", "arm64", "arm64be", "loong64", "ppc64le", "riscv64", "s390x":
+ // linux/ppc64 is not in the list because it does
+ // not yet support external linking mode.
+ return true
+ default:
+ // Other targets do not support -shared,
+ // per ParseFlags in
+ // cmd/compile/internal/base/flag.go.
+ // For c-archive the Go tool passes -shared,
+ // so that the result is suitable for inclusion
+ // in a PIE or shared library.
+ return false
+ }
+ case "freebsd":
+ return goarch == "amd64"
+ }
+ return false
+
+ case "c-shared":
+ switch platform {
+ case "linux/amd64", "linux/arm", "linux/arm64", "linux/loong64", "linux/386", "linux/ppc64le", "linux/riscv64", "linux/s390x",
+ "android/amd64", "android/arm", "android/arm64", "android/386",
+ "freebsd/amd64",
+ "darwin/amd64", "darwin/arm64",
+ "windows/amd64", "windows/386", "windows/arm64":
+ return true
+ }
+ return false
+
+ case "default":
+ return true
+
+ case "exe":
+ return true
+
+ case "pie":
+ switch platform {
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/loong64", "linux/ppc64le", "linux/riscv64", "linux/s390x",
+ "android/amd64", "android/arm", "android/arm64", "android/386",
+ "freebsd/amd64",
+ "darwin/amd64", "darwin/arm64",
+ "ios/amd64", "ios/arm64",
+ "aix/ppc64",
+ "windows/386", "windows/amd64", "windows/arm", "windows/arm64":
+ return true
+ }
+ return false
+
+ case "shared":
+ switch platform {
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x":
+ return true
+ }
+ return false
+
+ case "plugin":
+ switch platform {
+ case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/s390x", "linux/ppc64le",
+ "android/amd64", "android/386",
+ "darwin/amd64", "darwin/arm64",
+ "freebsd/amd64":
+ return true
+ }
+ return false
+
+ default:
+ return false
+ }
+}
+
+func InternalLinkPIESupported(goos, goarch string) bool {
+ switch goos + "/" + goarch {
+ case "android/arm64",
+ "darwin/amd64", "darwin/arm64",
+ "linux/amd64", "linux/arm64", "linux/ppc64le",
+ "windows/386", "windows/amd64", "windows/arm", "windows/arm64":
+ return true
+ }
+ return false
+}
+
+// DefaultPIE reports whether goos/goarch produces a PIE binary when using the
+// "default" buildmode. On Windows this is affected by -race,
+// so force the caller to pass that in to centralize that choice.
+func DefaultPIE(goos, goarch string, isRace bool) bool {
+ switch goos {
+ case "android", "ios":
+ return true
+ case "windows":
+ if isRace {
+ // PIE is not supported with -race on windows;
+ // see https://go.dev/cl/416174.
+ return false
+ }
+ return true
+ case "darwin":
+ return goarch == "arm64"
+ }
+ return false
+}
+
+// ExecutableHasDWARF reports whether the linked executable includes DWARF
+// symbols on goos/goarch.
+func ExecutableHasDWARF(goos, goarch string) bool {
+ switch goos {
+ case "plan9", "ios":
+ return false
+ }
+ return true
+}
+
+// osArchInfo describes information about an OSArch extracted from cmd/dist and
+// stored in the generated distInfo map.
+type osArchInfo struct {
+ CgoSupported bool
+ FirstClass bool
+ Broken bool
+}
+
+// CgoSupported reports whether goos/goarch supports cgo.
+func CgoSupported(goos, goarch string) bool {
+ return distInfo[OSArch{goos, goarch}].CgoSupported
+}
+
+// FirstClass reports whether goos/goarch is considered a “first class” port.
+// (See https://go.dev/wiki/PortingPolicy#first-class-ports.)
+func FirstClass(goos, goarch string) bool {
+ return distInfo[OSArch{goos, goarch}].FirstClass
+}
+
+// Broken reports whether goos/goarch is considered a broken port.
+// (See https://go.dev/wiki/PortingPolicy#broken-ports.)
+func Broken(goos, goarch string) bool {
+ return distInfo[OSArch{goos, goarch}].Broken
+}
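
A hedged sketch of how these predicates are consumed (internal/platform is importable only from within the standard library and toolchain, e.g. by cmd/go and cmd/dist; the target below is arbitrary):

goos, goarch := "linux", "arm64"

if platform.RaceDetectorSupported(goos, goarch) {
	// cmd/go would accept -race for this target.
}
if platform.BuildModeSupported("gc", "pie", goos, goarch) {
	// -buildmode=pie is available with the gc toolchain on this target.
}
if platform.MustLinkExternal(goos, goarch, true) {
	// cgo builds for this target are forced into external linking.
}
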
diff --git a/src/internal/platform/zosarch.go b/src/internal/platform/zosarch.go
new file mode 100644
index 0000000..7f5a290
--- /dev/null
+++ b/src/internal/platform/zosarch.go
@@ -0,0 +1,114 @@
+// Code generated by go test internal/platform -fix. DO NOT EDIT.
+
+// To change the information in this file, edit the cgoEnabled and/or firstClass
+// maps in cmd/dist/build.go, then run 'go generate internal/platform'.
+
+package platform
+
+// List is the list of all valid GOOS/GOARCH combinations,
+// including known-broken ports.
+var List = []OSArch{
+ {"aix", "ppc64"},
+ {"android", "386"},
+ {"android", "amd64"},
+ {"android", "arm"},
+ {"android", "arm64"},
+ {"darwin", "amd64"},
+ {"darwin", "arm64"},
+ {"dragonfly", "amd64"},
+ {"freebsd", "386"},
+ {"freebsd", "amd64"},
+ {"freebsd", "arm"},
+ {"freebsd", "arm64"},
+ {"freebsd", "riscv64"},
+ {"illumos", "amd64"},
+ {"ios", "amd64"},
+ {"ios", "arm64"},
+ {"js", "wasm"},
+ {"linux", "386"},
+ {"linux", "amd64"},
+ {"linux", "arm"},
+ {"linux", "arm64"},
+ {"linux", "loong64"},
+ {"linux", "mips"},
+ {"linux", "mips64"},
+ {"linux", "mips64le"},
+ {"linux", "mipsle"},
+ {"linux", "ppc64"},
+ {"linux", "ppc64le"},
+ {"linux", "riscv64"},
+ {"linux", "s390x"},
+ {"linux", "sparc64"},
+ {"netbsd", "386"},
+ {"netbsd", "amd64"},
+ {"netbsd", "arm"},
+ {"netbsd", "arm64"},
+ {"openbsd", "386"},
+ {"openbsd", "amd64"},
+ {"openbsd", "arm"},
+ {"openbsd", "arm64"},
+ {"openbsd", "mips64"},
+ {"openbsd", "ppc64"},
+ {"plan9", "386"},
+ {"plan9", "amd64"},
+ {"plan9", "arm"},
+ {"solaris", "amd64"},
+ {"wasip1", "wasm"},
+ {"windows", "386"},
+ {"windows", "amd64"},
+ {"windows", "arm"},
+ {"windows", "arm64"},
+}
+
+var distInfo = map[OSArch]osArchInfo{
+ {"aix", "ppc64"}: {CgoSupported: true},
+ {"android", "386"}: {CgoSupported: true},
+ {"android", "amd64"}: {CgoSupported: true},
+ {"android", "arm"}: {CgoSupported: true},
+ {"android", "arm64"}: {CgoSupported: true},
+ {"darwin", "amd64"}: {CgoSupported: true, FirstClass: true},
+ {"darwin", "arm64"}: {CgoSupported: true, FirstClass: true},
+ {"dragonfly", "amd64"}: {CgoSupported: true},
+ {"freebsd", "386"}: {CgoSupported: true},
+ {"freebsd", "amd64"}: {CgoSupported: true},
+ {"freebsd", "arm"}: {CgoSupported: true},
+ {"freebsd", "arm64"}: {CgoSupported: true},
+ {"freebsd", "riscv64"}: {CgoSupported: true},
+ {"illumos", "amd64"}: {CgoSupported: true},
+ {"ios", "amd64"}: {CgoSupported: true},
+ {"ios", "arm64"}: {CgoSupported: true},
+ {"js", "wasm"}: {},
+ {"linux", "386"}: {CgoSupported: true, FirstClass: true},
+ {"linux", "amd64"}: {CgoSupported: true, FirstClass: true},
+ {"linux", "arm"}: {CgoSupported: true, FirstClass: true},
+ {"linux", "arm64"}: {CgoSupported: true, FirstClass: true},
+ {"linux", "loong64"}: {CgoSupported: true},
+ {"linux", "mips"}: {CgoSupported: true},
+ {"linux", "mips64"}: {CgoSupported: true},
+ {"linux", "mips64le"}: {CgoSupported: true},
+ {"linux", "mipsle"}: {CgoSupported: true},
+ {"linux", "ppc64"}: {},
+ {"linux", "ppc64le"}: {CgoSupported: true},
+ {"linux", "riscv64"}: {CgoSupported: true},
+ {"linux", "s390x"}: {CgoSupported: true},
+ {"linux", "sparc64"}: {CgoSupported: true, Broken: true},
+ {"netbsd", "386"}: {CgoSupported: true},
+ {"netbsd", "amd64"}: {CgoSupported: true},
+ {"netbsd", "arm"}: {CgoSupported: true},
+ {"netbsd", "arm64"}: {CgoSupported: true},
+ {"openbsd", "386"}: {CgoSupported: true},
+ {"openbsd", "amd64"}: {CgoSupported: true},
+ {"openbsd", "arm"}: {CgoSupported: true},
+ {"openbsd", "arm64"}: {CgoSupported: true},
+ {"openbsd", "mips64"}: {CgoSupported: true, Broken: true},
+ {"openbsd", "ppc64"}: {Broken: true},
+ {"plan9", "386"}: {},
+ {"plan9", "amd64"}: {},
+ {"plan9", "arm"}: {},
+ {"solaris", "amd64"}: {CgoSupported: true},
+ {"wasip1", "wasm"}: {},
+ {"windows", "386"}: {CgoSupported: true, FirstClass: true},
+ {"windows", "amd64"}: {CgoSupported: true, FirstClass: true},
+ {"windows", "arm"}: {},
+ {"windows", "arm64"}: {CgoSupported: true},
+}
diff --git a/src/internal/platform/zosarch_test.go b/src/internal/platform/zosarch_test.go
new file mode 100644
index 0000000..e8ffe9e
--- /dev/null
+++ b/src/internal/platform/zosarch_test.go
@@ -0,0 +1,109 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package platform_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "internal/diff"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "testing"
+ "text/template"
+)
+
+var flagFix = flag.Bool("fix", false, "if true, fix out-of-date generated files")
+
+// TestGenerated verifies that zosarch.go is up to date,
+// or regenerates it if the -fix flag is set.
+func TestGenerated(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+
+ // Here we use 'go run cmd/dist' instead of 'go tool dist' in case the
+ // installed cmd/dist is stale or missing. We don't want to miss a
+ // skew in the data due to a stale binary.
+ cmd := testenv.Command(t, "go", "run", "cmd/dist", "list", "-json", "-broken")
+
+ // cmd/dist requires GOROOT to be set explicitly in the environment.
+ cmd.Env = append(cmd.Environ(), "GOROOT="+testenv.GOROOT(t))
+
+ out, err := cmd.Output()
+ if err != nil {
+ if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+ t.Logf("stderr:\n%s", ee.Stderr)
+ }
+ t.Fatalf("%v: %v", cmd, err)
+ }
+
+ type listEntry struct {
+ GOOS, GOARCH string
+ CgoSupported bool
+ FirstClass bool
+ Broken bool
+ }
+ var entries []listEntry
+ if err := json.Unmarshal(out, &entries); err != nil {
+ t.Fatal(err)
+ }
+
+ tmplOut := new(bytes.Buffer)
+ tmpl := template.Must(template.New("zosarch").Parse(zosarchTmpl))
+ err = tmpl.Execute(tmplOut, entries)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cmd = testenv.Command(t, "gofmt")
+ cmd.Stdin = bytes.NewReader(tmplOut.Bytes())
+ want, err := cmd.Output()
+ if err != nil {
+ t.Logf("stdin:\n%s", tmplOut.Bytes())
+ if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+ t.Logf("stderr:\n%s", ee.Stderr)
+ }
+ t.Fatalf("%v: %v", cmd, err)
+ }
+
+ got, err := os.ReadFile("zosarch.go")
+ if err == nil && bytes.Equal(got, want) {
+ return
+ }
+
+ if !*flagFix {
+ if err != nil {
+ t.Log(err)
+ } else {
+ t.Logf("diff:\n%s", diff.Diff("zosarch.go", got, "want", want))
+ }
+ t.Fatalf("zosarch.go is missing or out of date; to regenerate, run\ngo generate internal/platform")
+ }
+
+ if err := os.WriteFile("zosarch.go", want, 0666); err != nil {
+ t.Fatal(err)
+ }
+}
+
+const zosarchTmpl = `// Code generated by go test internal/platform -fix. DO NOT EDIT.
+
+// To change the information in this file, edit the cgoEnabled and/or firstClass
+// maps in cmd/dist/build.go, then run 'go generate internal/platform'.
+
+package platform
+
+// List is the list of all valid GOOS/GOARCH combinations,
+// including known-broken ports.
+var List = []OSArch{
+{{range .}} { {{ printf "%q" .GOOS }}, {{ printf "%q" .GOARCH }} },
+{{end}}
+}
+
+var distInfo = map[OSArch]osArchInfo {
+{{range .}} { {{ printf "%q" .GOOS }}, {{ printf "%q" .GOARCH }} }:
+{ {{if .CgoSupported}}CgoSupported: true, {{end}}{{if .FirstClass}}FirstClass: true, {{end}}{{if .Broken}} Broken: true, {{end}} },
+{{end}}
+}
+`
diff --git a/src/internal/poll/copy_file_range_linux.go b/src/internal/poll/copy_file_range_linux.go
new file mode 100644
index 0000000..ba33f51
--- /dev/null
+++ b/src/internal/poll/copy_file_range_linux.go
@@ -0,0 +1,128 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import (
+ "internal/syscall/unix"
+ "sync"
+ "syscall"
+)
+
+var (
+ kernelVersion53Once sync.Once
+ kernelVersion53 bool
+)
+
+const maxCopyFileRangeRound = 1 << 30
+
+// CopyFileRange copies at most remain bytes of data from src to dst, using
+// the copy_file_range system call. dst and src must refer to regular files.
+func CopyFileRange(dst, src *FD, remain int64) (written int64, handled bool, err error) {
+ kernelVersion53Once.Do(func() {
+ major, minor := unix.KernelVersion()
+ // copy_file_range(2) is broken in various ways on kernels older than 5.3,
+ // see issue #42400 and
+ // https://man7.org/linux/man-pages/man2/copy_file_range.2.html#VERSIONS
+ if major > 5 || (major == 5 && minor >= 3) {
+ kernelVersion53 = true
+ }
+ })
+
+ if !kernelVersion53 {
+ return 0, false, nil
+ }
+
+ for remain > 0 {
+ max := remain
+ if max > maxCopyFileRangeRound {
+ max = maxCopyFileRangeRound
+ }
+ n, err := copyFileRange(dst, src, int(max))
+ switch err {
+ case syscall.ENOSYS:
+ // copy_file_range(2) was introduced in Linux 4.5.
+ // Go supports Linux >= 2.6.33, so the system call
+ // may not be present.
+ //
+ // If we see ENOSYS, we have certainly not transferred
+ // any data, so we can tell the caller that we
+ // couldn't handle the transfer and let them fall
+ // back to more generic code.
+ return 0, false, nil
+ case syscall.EXDEV, syscall.EINVAL, syscall.EIO, syscall.EOPNOTSUPP, syscall.EPERM:
+ // Prior to Linux 5.3, it was not possible to
+ // copy_file_range across file systems. Similarly to
+ // the ENOSYS case above, if we see EXDEV, we have
+ // not transferred any data, and we can let the caller
+ // fall back to generic code.
+ //
+ // As for EINVAL, that is what we see if, for example,
+ // dst or src refer to a pipe rather than a regular
+ // file. This is another case where no data has been
+ // transferred, so we consider it unhandled.
+ //
+ // If src and dst are on CIFS, we can see EIO.
+ // See issue #42334.
+ //
+ // If the file is on NFS, we can see EOPNOTSUPP.
+ // See issue #40731.
+ //
+ // If the process is running inside a Docker container,
+ // we might see EPERM instead of ENOSYS. See issue
+ // #40893. Since EPERM might also be a legitimate error,
+ // don't mark copy_file_range(2) as unsupported.
+ return 0, false, nil
+ case nil:
+ if n == 0 {
+ // If we did not read any bytes at all,
+ // then this file may be in a file system
+ // where copy_file_range silently fails.
+ // https://lore.kernel.org/linux-fsdevel/20210126233840.GG4626@dread.disaster.area/T/#m05753578c7f7882f6e9ffe01f981bc223edef2b0
+ if written == 0 {
+ return 0, false, nil
+ }
+ // Otherwise src is at EOF, which means
+ // we are done.
+ return written, true, nil
+ }
+ remain -= n
+ written += n
+ default:
+ return written, true, err
+ }
+ }
+ return written, true, nil
+}
+
+// copyFileRange performs one round of copy_file_range(2).
+func copyFileRange(dst, src *FD, max int) (written int64, err error) {
+ // The signature of copy_file_range(2) is:
+ //
+ // ssize_t copy_file_range(int fd_in, loff_t *off_in,
+ // int fd_out, loff_t *off_out,
+ // size_t len, unsigned int flags);
+ //
+ // Note that in the call to unix.CopyFileRange below, we use nil
+ // values for off_in and off_out. For the system call, this means
+ // "use and update the file offsets". That is why we must acquire
+ // locks for both file descriptors (and why this whole machinery is
+ // in the internal/poll package to begin with).
+ if err := dst.writeLock(); err != nil {
+ return 0, err
+ }
+ defer dst.writeUnlock()
+ if err := src.readLock(); err != nil {
+ return 0, err
+ }
+ defer src.readUnlock()
+ var n int
+ for {
+ n, err = unix.CopyFileRange(src.Sysfd, nil, dst.Sysfd, nil, max, 0)
+ if err != syscall.EINTR {
+ break
+ }
+ }
+ return int64(n), err
+}
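
The (written, handled, err) contract is what lets callers fall back cleanly. A hedged sketch of the calling side (in the real tree this logic lives in the os package's copy fast path, which is not part of this diff; genericCopy is a hypothetical fallback):

// dst and src are *poll.FD wrapping regular files; remain is the byte count.
written, handled, err := poll.CopyFileRange(dst, src, remain)
switch {
case err != nil:
	return written, err
case !handled:
	// Old kernel, cross-filesystem copy, pipe, NFS/CIFS quirk, ...:
	// nothing was transferred, so use a plain read/write loop instead.
	return genericCopy(dst, src, remain)
default:
	return written, nil
}
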
diff --git a/src/internal/poll/errno_unix.go b/src/internal/poll/errno_unix.go
new file mode 100644
index 0000000..d1a18ab
--- /dev/null
+++ b/src/internal/poll/errno_unix.go
@@ -0,0 +1,33 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || wasip1
+
+package poll
+
+import "syscall"
+
+// Do the interface allocations only once for common
+// Errno values.
+var (
+ errEAGAIN error = syscall.EAGAIN
+ errEINVAL error = syscall.EINVAL
+ errENOENT error = syscall.ENOENT
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case syscall.EAGAIN:
+ return errEAGAIN
+ case syscall.EINVAL:
+ return errEINVAL
+ case syscall.ENOENT:
+ return errENOENT
+ }
+ return e
+}
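
As the comments above say, the goal is to box the common Errno values once. A small hedged sketch of the effect (hypothetical use inside the package):

var e syscall.Errno = syscall.EAGAIN
err := errnoErr(e) // returns the shared errEAGAIN value, no fresh boxing

// Comparisons against the Errno constants still hold, because the shared
// value wraps the same Errno.
fmt.Println(err == syscall.EAGAIN) // true
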
diff --git a/src/internal/poll/errno_windows.go b/src/internal/poll/errno_windows.go
new file mode 100644
index 0000000..6381479
--- /dev/null
+++ b/src/internal/poll/errno_windows.go
@@ -0,0 +1,31 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package poll
+
+import "syscall"
+
+// Do the interface allocations only once for common
+// Errno values.
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(syscall.ERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case syscall.ERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values seen on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
diff --git a/src/internal/poll/error_linux_test.go b/src/internal/poll/error_linux_test.go
new file mode 100644
index 0000000..059fb8e
--- /dev/null
+++ b/src/internal/poll/error_linux_test.go
@@ -0,0 +1,31 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll_test
+
+import (
+ "errors"
+ "internal/poll"
+ "os"
+ "syscall"
+)
+
+func badStateFile() (*os.File, error) {
+ if os.Getuid() != 0 {
+ return nil, errors.New("must be root")
+ }
+ // Using OpenFile on a device file is an easy way to get a
+ // file that is attached to the runtime-integrated network
+ // poller but left only half configured.
+ return os.OpenFile("/dev/net/tun", os.O_RDWR, 0)
+}
+
+func isBadStateFileError(err error) (string, bool) {
+ switch err {
+ case poll.ErrNotPollable, syscall.EBADFD:
+ return "", true
+ default:
+ return "not pollable or file in bad state error", false
+ }
+}
diff --git a/src/internal/poll/error_stub_test.go b/src/internal/poll/error_stub_test.go
new file mode 100644
index 0000000..48e0952
--- /dev/null
+++ b/src/internal/poll/error_stub_test.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux
+
+package poll_test
+
+import (
+ "errors"
+ "os"
+ "runtime"
+)
+
+func badStateFile() (*os.File, error) {
+ return nil, errors.New("not supported on " + runtime.GOOS)
+}
+
+func isBadStateFileError(err error) (string, bool) {
+ return "", false
+}
diff --git a/src/internal/poll/error_test.go b/src/internal/poll/error_test.go
new file mode 100644
index 0000000..abc8b16
--- /dev/null
+++ b/src/internal/poll/error_test.go
@@ -0,0 +1,51 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll_test
+
+import (
+ "fmt"
+ "io/fs"
+ "net"
+ "os"
+ "testing"
+ "time"
+)
+
+func TestReadError(t *testing.T) {
+ t.Run("ErrNotPollable", func(t *testing.T) {
+ f, err := badStateFile()
+ if err != nil {
+ t.Skip(err)
+ }
+ defer f.Close()
+
+ // Give scheduler a chance to have two separated
+ // goroutines: an event poller and an event waiter.
+ time.Sleep(100 * time.Millisecond)
+
+ var b [1]byte
+ _, err = f.Read(b[:])
+ if perr := parseReadError(err, isBadStateFileError); perr != nil {
+ t.Fatal(perr)
+ }
+ })
+}
+
+func parseReadError(nestedErr error, verify func(error) (string, bool)) error {
+ err := nestedErr
+ if nerr, ok := err.(*net.OpError); ok {
+ err = nerr.Err
+ }
+ if nerr, ok := err.(*fs.PathError); ok {
+ err = nerr.Err
+ }
+ if nerr, ok := err.(*os.SyscallError); ok {
+ err = nerr.Err
+ }
+ if s, ok := verify(err); !ok {
+ return fmt.Errorf("got %v; want %s", nestedErr, s)
+ }
+ return nil
+}
diff --git a/src/internal/poll/export_linux_test.go b/src/internal/poll/export_linux_test.go
new file mode 100644
index 0000000..7fba793
--- /dev/null
+++ b/src/internal/poll/export_linux_test.go
@@ -0,0 +1,22 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Export guts for testing on linux.
+// Since testing imports os and os imports internal/poll,
+// the internal/poll tests can not be in package poll.
+
+package poll
+
+var (
+ GetPipe = getPipe
+ PutPipe = putPipe
+ NewPipe = newPipe
+ DestroyPipe = destroyPipe
+)
+
+func GetPipeFds(p *SplicePipe) (int, int) {
+ return p.rfd, p.wfd
+}
+
+type SplicePipe = splicePipe
diff --git a/src/internal/poll/export_posix_test.go b/src/internal/poll/export_posix_test.go
new file mode 100644
index 0000000..3415ab3
--- /dev/null
+++ b/src/internal/poll/export_posix_test.go
@@ -0,0 +1,15 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+// Export guts for testing on posix.
+// Since testing imports os and os imports internal/poll,
+// the internal/poll tests can not be in package poll.
+
+package poll
+
+func (fd *FD) EOFError(n int, err error) error {
+ return fd.eofError(n, err)
+}
diff --git a/src/internal/poll/export_test.go b/src/internal/poll/export_test.go
new file mode 100644
index 0000000..66d7c32
--- /dev/null
+++ b/src/internal/poll/export_test.go
@@ -0,0 +1,35 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Export guts for testing.
+// Since testing imports os and os imports internal/poll,
+// the internal/poll tests can not be in package poll.
+
+package poll
+
+var Consume = consume
+
+type XFDMutex struct {
+ fdMutex
+}
+
+func (mu *XFDMutex) Incref() bool {
+ return mu.incref()
+}
+
+func (mu *XFDMutex) IncrefAndClose() bool {
+ return mu.increfAndClose()
+}
+
+func (mu *XFDMutex) Decref() bool {
+ return mu.decref()
+}
+
+func (mu *XFDMutex) RWLock(read bool) bool {
+ return mu.rwlock(read)
+}
+
+func (mu *XFDMutex) RWUnlock(read bool) bool {
+ return mu.rwunlock(read)
+}
diff --git a/src/internal/poll/export_windows_test.go b/src/internal/poll/export_windows_test.go
new file mode 100644
index 0000000..88ed71a
--- /dev/null
+++ b/src/internal/poll/export_windows_test.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Export guts for testing on windows.
+// Since testing imports os and os imports internal/poll,
+// the internal/poll tests can not be in package poll.
+
+package poll
+
+var (
+ LogInitFD = &logInitFD
+)
+
+func (fd *FD) IsPartOfNetpoll() bool {
+ return fd.pd.runtimeCtx != 0
+}
diff --git a/src/internal/poll/fd.go b/src/internal/poll/fd.go
new file mode 100644
index 0000000..ef61d0c
--- /dev/null
+++ b/src/internal/poll/fd.go
@@ -0,0 +1,83 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package poll supports non-blocking I/O on file descriptors with polling.
+// This supports I/O operations that block only a goroutine, not a thread.
+// This is used by the net and os packages.
+// It uses a poller built into the runtime, with support from the
+// runtime scheduler.
+package poll
+
+import (
+ "errors"
+)
+
+// errNetClosing is the type of the variable ErrNetClosing.
+// This is used to implement the net.Error interface.
+type errNetClosing struct{}
+
+// Error returns the error message for ErrNetClosing.
+// Keep this string consistent because of issue #4373:
+// since historically programs have not been able to detect
+// this error, they look for the string.
+func (e errNetClosing) Error() string { return "use of closed network connection" }
+
+func (e errNetClosing) Timeout() bool { return false }
+func (e errNetClosing) Temporary() bool { return false }
+
+// ErrNetClosing is returned when a network descriptor is used after
+// it has been closed.
+var ErrNetClosing = errNetClosing{}
+
+// ErrFileClosing is returned when a file descriptor is used after it
+// has been closed.
+var ErrFileClosing = errors.New("use of closed file")
+
+// ErrNoDeadline is returned when a request is made to set a deadline
+// on a file type that does not use the poller.
+var ErrNoDeadline = errors.New("file type does not support deadline")
+
+// Return the appropriate closing error based on isFile.
+func errClosing(isFile bool) error {
+ if isFile {
+ return ErrFileClosing
+ }
+ return ErrNetClosing
+}
+
+// ErrDeadlineExceeded is returned for an expired deadline.
+// This is exported by the os package as os.ErrDeadlineExceeded.
+var ErrDeadlineExceeded error = &DeadlineExceededError{}
+
+// DeadlineExceededError is returned for an expired deadline.
+type DeadlineExceededError struct{}
+
+// Implement the net.Error interface.
+// The string is "i/o timeout" because that is what was returned
+// by earlier Go versions. Changing it may break programs that
+// match on error strings.
+func (e *DeadlineExceededError) Error() string { return "i/o timeout" }
+func (e *DeadlineExceededError) Timeout() bool { return true }
+func (e *DeadlineExceededError) Temporary() bool { return true }
+
+// ErrNotPollable is returned when the file or socket is not suitable
+// for event notification.
+var ErrNotPollable = errors.New("not pollable")
+
+// consume removes data from a slice of byte slices, for writev.
+func consume(v *[][]byte, n int64) {
+ for len(*v) > 0 {
+ ln0 := int64(len((*v)[0]))
+ if ln0 > n {
+ (*v)[0] = (*v)[0][n:]
+ return
+ }
+ n -= ln0
+ (*v)[0] = nil
+ *v = (*v)[1:]
+ }
+}
+
+// TestHookDidWritev is a hook for testing writev.
+var TestHookDidWritev = func(wrote int) {}
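
consume keeps a writev buffer list in step with how many bytes were actually written. Since it is unexported, here is a self-contained analogue with the same logic and a worked example:

package main

import "fmt"

// consume removes n bytes from the front of v, dropping fully
// written buffers and trimming a partially written one (same logic
// as poll.consume above, copied so the example runs standalone).
func consume(v *[][]byte, n int64) {
	for len(*v) > 0 {
		ln0 := int64(len((*v)[0]))
		if ln0 > n {
			(*v)[0] = (*v)[0][n:]
			return
		}
		n -= ln0
		(*v)[0] = nil
		*v = (*v)[1:]
	}
}

func main() {
	bufs := [][]byte{[]byte("hello"), []byte("world")}
	consume(&bufs, 7)        // "hello" fully written, plus 2 bytes of "world"
	fmt.Printf("%q\n", bufs) // ["rld"]
}
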
diff --git a/src/internal/poll/fd_fsync_darwin.go b/src/internal/poll/fd_fsync_darwin.go
new file mode 100644
index 0000000..731b7fd
--- /dev/null
+++ b/src/internal/poll/fd_fsync_darwin.go
@@ -0,0 +1,24 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import (
+ "internal/syscall/unix"
+ "syscall"
+)
+
+// Fsync invokes SYS_FCNTL with SYS_FULLFSYNC because
+// on OS X, SYS_FSYNC doesn't fully flush contents to disk.
+// See Issue #26650 as well as the man page for fsync on OS X.
+func (fd *FD) Fsync() error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return ignoringEINTR(func() error {
+ _, err := unix.Fcntl(fd.Sysfd, syscall.F_FULLFSYNC, 0)
+ return err
+ })
+}
diff --git a/src/internal/poll/fd_fsync_posix.go b/src/internal/poll/fd_fsync_posix.go
new file mode 100644
index 0000000..469ca75
--- /dev/null
+++ b/src/internal/poll/fd_fsync_posix.go
@@ -0,0 +1,20 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || wasip1
+
+package poll
+
+import "syscall"
+
+// Fsync wraps syscall.Fsync.
+func (fd *FD) Fsync() error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return ignoringEINTR(func() error {
+ return syscall.Fsync(fd.Sysfd)
+ })
+}
diff --git a/src/internal/poll/fd_fsync_windows.go b/src/internal/poll/fd_fsync_windows.go
new file mode 100644
index 0000000..fb12119
--- /dev/null
+++ b/src/internal/poll/fd_fsync_windows.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import "syscall"
+
+// Fsync wraps syscall.Fsync.
+func (fd *FD) Fsync() error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.Fsync(fd.Sysfd)
+}
diff --git a/src/internal/poll/fd_io_plan9.go b/src/internal/poll/fd_io_plan9.go
new file mode 100644
index 0000000..3205ac8
--- /dev/null
+++ b/src/internal/poll/fd_io_plan9.go
@@ -0,0 +1,92 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import (
+ "internal/itoa"
+ "runtime"
+ "sync"
+ "syscall"
+)
+
+// asyncIO implements asynchronous cancelable I/O.
+// An asyncIO represents a single asynchronous Read or Write
+// operation. The result is returned on the result channel.
+// The in-flight I/O system call can either complete or be
+// interrupted by a note.
+type asyncIO struct {
+ res chan result
+
+ // mu guards the pid field.
+ mu sync.Mutex
+
+ // pid holds the process id of
+ // the process running the IO operation.
+ pid int
+}
+
+// result is the return value of a Read or Write operation.
+type result struct {
+ n int
+ err error
+}
+
+// newAsyncIO returns a new asyncIO that performs an I/O
+// operation by calling fn, which must do one and only one
+// interruptible system call.
+func newAsyncIO(fn func([]byte) (int, error), b []byte) *asyncIO {
+ aio := &asyncIO{
+ res: make(chan result, 0),
+ }
+ aio.mu.Lock()
+ go func() {
+ // Lock the current goroutine to its process
+ // and store the pid in aio so that Cancel can
+ // interrupt it. We ignore the "hangup" signal,
+ // so the signal does not take down the entire
+ // Go runtime.
+ runtime.LockOSThread()
+ runtime_ignoreHangup()
+ aio.pid = syscall.Getpid()
+ aio.mu.Unlock()
+
+ n, err := fn(b)
+
+ aio.mu.Lock()
+ aio.pid = -1
+ runtime_unignoreHangup()
+ aio.mu.Unlock()
+
+ aio.res <- result{n, err}
+ }()
+ return aio
+}
+
+// Cancel interrupts the I/O operation, causing
+// the Wait function to return.
+func (aio *asyncIO) Cancel() {
+ aio.mu.Lock()
+ defer aio.mu.Unlock()
+ if aio.pid == -1 {
+ return
+ }
+ f, e := syscall.Open("/proc/"+itoa.Itoa(aio.pid)+"/note", syscall.O_WRONLY)
+ if e != nil {
+ return
+ }
+ syscall.Write(f, []byte("hangup"))
+ syscall.Close(f)
+}
+
+// Wait for the I/O operation to complete.
+func (aio *asyncIO) Wait() (int, error) {
+ res := <-aio.res
+ return res.n, res.err
+}
+
+// The following functions, provided by the runtime, are used to
+// ignore and unignore the "hangup" signal received by the process.
+func runtime_ignoreHangup()
+func runtime_unignoreHangup()
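
A hedged sketch of the intended call pattern on Plan 9 (these identifiers are unexported and readOnce is hypothetical, so treat this as illustrative pseudocode for the package's own deadline-aware Read/Write paths):

// readOnce is a hypothetical wrapper performing one interruptible read.
aio := newAsyncIO(readOnce, buf)

// From another goroutine, e.g. when a deadline fires or the FD is closed:
//	aio.Cancel() // posts a "hangup" note to interrupt the system call

n, err := aio.Wait() // blocks until fn returns or is interrupted
_, _ = n, err
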
diff --git a/src/internal/poll/fd_mutex.go b/src/internal/poll/fd_mutex.go
new file mode 100644
index 0000000..0a8ee6f
--- /dev/null
+++ b/src/internal/poll/fd_mutex.go
@@ -0,0 +1,252 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import "sync/atomic"
+
+// fdMutex is a specialized synchronization primitive that manages
+// the lifetime of an fd and serializes access to the Read, Write,
+// and Close methods on FD.
+type fdMutex struct {
+ state uint64
+ rsema uint32
+ wsema uint32
+}
+
+// fdMutex.state is organized as follows:
+// 1 bit - whether FD is closed, if set all subsequent lock operations will fail.
+// 1 bit - lock for read operations.
+// 1 bit - lock for write operations.
+// 20 bits - total number of references (read+write+misc).
+// 20 bits - number of outstanding read waiters.
+// 20 bits - number of outstanding write waiters.
+const (
+ mutexClosed = 1 << 0
+ mutexRLock = 1 << 1
+ mutexWLock = 1 << 2
+ mutexRef = 1 << 3
+ mutexRefMask = (1<<20 - 1) << 3
+ mutexRWait = 1 << 23
+ mutexRMask = (1<<20 - 1) << 23
+ mutexWWait = 1 << 43
+ mutexWMask = (1<<20 - 1) << 43
+)
+
+const overflowMsg = "too many concurrent operations on a single file or socket (max 1048575)"
+
+// Read operations must do rwlock(true)/rwunlock(true).
+//
+// Write operations must do rwlock(false)/rwunlock(false).
+//
+// Misc operations must do incref/decref.
+// Misc operations include functions like setsockopt and setDeadline.
+// They need to use incref/decref to ensure that they operate on the
+// correct fd in presence of a concurrent close call (otherwise fd can
+// be closed under their feet).
+//
+// Close operations must do increfAndClose/decref.
+
+// incref adds a reference to mu.
+// It reports whether mu is available for reading or writing.
+func (mu *fdMutex) incref() bool {
+ for {
+ old := atomic.LoadUint64(&mu.state)
+ if old&mutexClosed != 0 {
+ return false
+ }
+ new := old + mutexRef
+ if new&mutexRefMask == 0 {
+ panic(overflowMsg)
+ }
+ if atomic.CompareAndSwapUint64(&mu.state, old, new) {
+ return true
+ }
+ }
+}
+
+// increfAndClose sets the state of mu to closed.
+// It returns false if the file was already closed.
+func (mu *fdMutex) increfAndClose() bool {
+ for {
+ old := atomic.LoadUint64(&mu.state)
+ if old&mutexClosed != 0 {
+ return false
+ }
+ // Mark as closed and acquire a reference.
+ new := (old | mutexClosed) + mutexRef
+ if new&mutexRefMask == 0 {
+ panic(overflowMsg)
+ }
+ // Remove all read and write waiters.
+ new &^= mutexRMask | mutexWMask
+ if atomic.CompareAndSwapUint64(&mu.state, old, new) {
+ // Wake all read and write waiters,
+ // they will observe closed flag after wakeup.
+ for old&mutexRMask != 0 {
+ old -= mutexRWait
+ runtime_Semrelease(&mu.rsema)
+ }
+ for old&mutexWMask != 0 {
+ old -= mutexWWait
+ runtime_Semrelease(&mu.wsema)
+ }
+ return true
+ }
+ }
+}
+
+// decref removes a reference from mu.
+// It reports whether there is no remaining reference.
+func (mu *fdMutex) decref() bool {
+ for {
+ old := atomic.LoadUint64(&mu.state)
+ if old&mutexRefMask == 0 {
+ panic("inconsistent poll.fdMutex")
+ }
+ new := old - mutexRef
+ if atomic.CompareAndSwapUint64(&mu.state, old, new) {
+ return new&(mutexClosed|mutexRefMask) == mutexClosed
+ }
+ }
+}
+
+// lock adds a reference to mu and locks mu.
+// It reports whether mu is available for reading or writing.
+func (mu *fdMutex) rwlock(read bool) bool {
+ var mutexBit, mutexWait, mutexMask uint64
+ var mutexSema *uint32
+ if read {
+ mutexBit = mutexRLock
+ mutexWait = mutexRWait
+ mutexMask = mutexRMask
+ mutexSema = &mu.rsema
+ } else {
+ mutexBit = mutexWLock
+ mutexWait = mutexWWait
+ mutexMask = mutexWMask
+ mutexSema = &mu.wsema
+ }
+ for {
+ old := atomic.LoadUint64(&mu.state)
+ if old&mutexClosed != 0 {
+ return false
+ }
+ var new uint64
+ if old&mutexBit == 0 {
+ // Lock is free, acquire it.
+ new = (old | mutexBit) + mutexRef
+ if new&mutexRefMask == 0 {
+ panic(overflowMsg)
+ }
+ } else {
+ // Wait for lock.
+ new = old + mutexWait
+ if new&mutexMask == 0 {
+ panic(overflowMsg)
+ }
+ }
+ if atomic.CompareAndSwapUint64(&mu.state, old, new) {
+ if old&mutexBit == 0 {
+ return true
+ }
+ runtime_Semacquire(mutexSema)
+ // The signaller has subtracted mutexWait.
+ }
+ }
+}
+
+// unlock removes a reference from mu and unlocks mu.
+// It reports whether there is no remaining reference.
+func (mu *fdMutex) rwunlock(read bool) bool {
+ var mutexBit, mutexWait, mutexMask uint64
+ var mutexSema *uint32
+ if read {
+ mutexBit = mutexRLock
+ mutexWait = mutexRWait
+ mutexMask = mutexRMask
+ mutexSema = &mu.rsema
+ } else {
+ mutexBit = mutexWLock
+ mutexWait = mutexWWait
+ mutexMask = mutexWMask
+ mutexSema = &mu.wsema
+ }
+ for {
+ old := atomic.LoadUint64(&mu.state)
+ if old&mutexBit == 0 || old&mutexRefMask == 0 {
+ panic("inconsistent poll.fdMutex")
+ }
+ // Drop lock, drop reference and wake read waiter if present.
+ new := (old &^ mutexBit) - mutexRef
+ if old&mutexMask != 0 {
+ new -= mutexWait
+ }
+ if atomic.CompareAndSwapUint64(&mu.state, old, new) {
+ if old&mutexMask != 0 {
+ runtime_Semrelease(mutexSema)
+ }
+ return new&(mutexClosed|mutexRefMask) == mutexClosed
+ }
+ }
+}
+
+// Implemented in runtime package.
+func runtime_Semacquire(sema *uint32)
+func runtime_Semrelease(sema *uint32)
+
+// incref adds a reference to fd.
+// It returns an error when fd cannot be used.
+func (fd *FD) incref() error {
+ if !fd.fdmu.incref() {
+ return errClosing(fd.isFile)
+ }
+ return nil
+}
+
+// decref removes a reference from fd.
+// It also closes fd when the state of fd is set to closed and there
+// is no remaining reference.
+func (fd *FD) decref() error {
+ if fd.fdmu.decref() {
+ return fd.destroy()
+ }
+ return nil
+}
+
+// readLock adds a reference to fd and locks fd for reading.
+// It returns an error when fd cannot be used for reading.
+func (fd *FD) readLock() error {
+ if !fd.fdmu.rwlock(true) {
+ return errClosing(fd.isFile)
+ }
+ return nil
+}
+
+// readUnlock removes a reference from fd and unlocks fd for reading.
+// It also closes fd when the state of fd is set to closed and there
+// is no remaining reference.
+func (fd *FD) readUnlock() {
+ if fd.fdmu.rwunlock(true) {
+ fd.destroy()
+ }
+}
+
+// writeLock adds a reference to fd and locks fd for writing.
+// It returns an error when fd cannot be used for writing.
+func (fd *FD) writeLock() error {
+ if !fd.fdmu.rwlock(false) {
+ return errClosing(fd.isFile)
+ }
+ return nil
+}
+
+// writeUnlock removes a reference from fd and unlocks fd for writing.
+// It also closes fd when the state of fd is set to closed and there
+// is no remaining reference.
+func (fd *FD) writeUnlock() {
+ if fd.fdmu.rwunlock(false) {
+ fd.destroy()
+ }
+}
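
To make the 64-bit state layout concrete, a small self-contained sketch that decodes a state word using the same bit assignments (the constants are copied here only so the example runs standalone):

package main

import "fmt"

// Copies of the fdMutex bit layout above, for illustration only.
const (
	mutexClosed  = 1 << 0
	mutexRLock   = 1 << 1
	mutexWLock   = 1 << 2
	mutexRef     = 1 << 3
	mutexRefMask = (1<<20 - 1) << 3
	mutexRWait   = 1 << 23
	mutexRMask   = (1<<20 - 1) << 23
	mutexWWait   = 1 << 43
	mutexWMask   = (1<<20 - 1) << 43
)

func main() {
	// A state with: not closed, read-locked, 2 references, 1 read waiter.
	state := uint64(mutexRLock) + 2*mutexRef + 1*mutexRWait

	fmt.Println("closed:       ", state&mutexClosed != 0) // false
	fmt.Println("read-locked:  ", state&mutexRLock != 0)  // true
	fmt.Println("references:   ", (state&mutexRefMask)>>3)
	fmt.Println("read waiters: ", (state&mutexRMask)>>23)
	fmt.Println("write waiters:", (state&mutexWMask)>>43)
}
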
diff --git a/src/internal/poll/fd_mutex_test.go b/src/internal/poll/fd_mutex_test.go
new file mode 100644
index 0000000..62f9531
--- /dev/null
+++ b/src/internal/poll/fd_mutex_test.go
@@ -0,0 +1,222 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll_test
+
+import (
+ . "internal/poll"
+ "math/rand"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestMutexLock(t *testing.T) {
+ var mu XFDMutex
+
+ if !mu.Incref() {
+ t.Fatal("broken")
+ }
+ if mu.Decref() {
+ t.Fatal("broken")
+ }
+
+ if !mu.RWLock(true) {
+ t.Fatal("broken")
+ }
+ if mu.RWUnlock(true) {
+ t.Fatal("broken")
+ }
+
+ if !mu.RWLock(false) {
+ t.Fatal("broken")
+ }
+ if mu.RWUnlock(false) {
+ t.Fatal("broken")
+ }
+}
+
+func TestMutexClose(t *testing.T) {
+ var mu XFDMutex
+ if !mu.IncrefAndClose() {
+ t.Fatal("broken")
+ }
+
+ if mu.Incref() {
+ t.Fatal("broken")
+ }
+ if mu.RWLock(true) {
+ t.Fatal("broken")
+ }
+ if mu.RWLock(false) {
+ t.Fatal("broken")
+ }
+ if mu.IncrefAndClose() {
+ t.Fatal("broken")
+ }
+}
+
+func TestMutexCloseUnblock(t *testing.T) {
+ c := make(chan bool, 4)
+ var mu XFDMutex
+ mu.RWLock(true)
+ for i := 0; i < 4; i++ {
+ go func() {
+ if mu.RWLock(true) {
+ t.Error("broken")
+ return
+ }
+ c <- true
+ }()
+ }
+ // Concurrent goroutines must not be able to read lock the mutex.
+ time.Sleep(time.Millisecond)
+ select {
+ case <-c:
+ t.Fatal("broken")
+ default:
+ }
+ mu.IncrefAndClose() // Must unblock the readers.
+ for i := 0; i < 4; i++ {
+ select {
+ case <-c:
+ case <-time.After(10 * time.Second):
+ t.Fatal("broken")
+ }
+ }
+ if mu.Decref() {
+ t.Fatal("broken")
+ }
+ if !mu.RWUnlock(true) {
+ t.Fatal("broken")
+ }
+}
+
+func TestMutexPanic(t *testing.T) {
+ ensurePanics := func(f func()) {
+ defer func() {
+ if recover() == nil {
+ t.Fatal("does not panic")
+ }
+ }()
+ f()
+ }
+
+ var mu XFDMutex
+ ensurePanics(func() { mu.Decref() })
+ ensurePanics(func() { mu.RWUnlock(true) })
+ ensurePanics(func() { mu.RWUnlock(false) })
+
+ ensurePanics(func() { mu.Incref(); mu.Decref(); mu.Decref() })
+ ensurePanics(func() { mu.RWLock(true); mu.RWUnlock(true); mu.RWUnlock(true) })
+ ensurePanics(func() { mu.RWLock(false); mu.RWUnlock(false); mu.RWUnlock(false) })
+
+ // ensure that it's still not broken
+ mu.Incref()
+ mu.Decref()
+ mu.RWLock(true)
+ mu.RWUnlock(true)
+ mu.RWLock(false)
+ mu.RWUnlock(false)
+}
+
+func TestMutexOverflowPanic(t *testing.T) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ t.Fatal("did not panic")
+ }
+ msg, ok := r.(string)
+ if !ok {
+ t.Fatalf("unexpected panic type %T", r)
+ }
+ if !strings.Contains(msg, "too many") || strings.Contains(msg, "inconsistent") {
+ t.Fatalf("wrong panic message %q", msg)
+ }
+ }()
+
+ var mu1 XFDMutex
+ for i := 0; i < 1<<21; i++ {
+ mu1.Incref()
+ }
+}
+
+func TestMutexStress(t *testing.T) {
+ P := 8
+ N := int(1e6)
+ if testing.Short() {
+ P = 4
+ N = 1e4
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
+ done := make(chan bool, P)
+ var mu XFDMutex
+ var readState [2]uint64
+ var writeState [2]uint64
+ for p := 0; p < P; p++ {
+ go func() {
+ defer func() {
+ done <- !t.Failed()
+ }()
+ r := rand.New(rand.NewSource(rand.Int63()))
+ for i := 0; i < N; i++ {
+ switch r.Intn(3) {
+ case 0:
+ if !mu.Incref() {
+ t.Error("broken")
+ return
+ }
+ if mu.Decref() {
+ t.Error("broken")
+ return
+ }
+ case 1:
+ if !mu.RWLock(true) {
+ t.Error("broken")
+ return
+ }
+ // Ensure that it provides mutual exclusion for readers.
+ if readState[0] != readState[1] {
+ t.Error("broken")
+ return
+ }
+ readState[0]++
+ readState[1]++
+ if mu.RWUnlock(true) {
+ t.Error("broken")
+ return
+ }
+ case 2:
+ if !mu.RWLock(false) {
+ t.Error("broken")
+ return
+ }
+ // Ensure that it provides mutual exclusion for writers.
+ if writeState[0] != writeState[1] {
+ t.Error("broken")
+ return
+ }
+ writeState[0]++
+ writeState[1]++
+ if mu.RWUnlock(false) {
+ t.Error("broken")
+ return
+ }
+ }
+ }
+ }()
+ }
+ for p := 0; p < P; p++ {
+ if !<-done {
+ t.FailNow()
+ }
+ }
+ if !mu.IncrefAndClose() {
+ t.Fatal("broken")
+ }
+ if !mu.Decref() {
+ t.Fatal("broken")
+ }
+}
diff --git a/src/internal/poll/fd_opendir_darwin.go b/src/internal/poll/fd_opendir_darwin.go
new file mode 100644
index 0000000..3ae2dc8
--- /dev/null
+++ b/src/internal/poll/fd_opendir_darwin.go
@@ -0,0 +1,39 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import (
+ "syscall"
+ _ "unsafe" // for go:linkname
+)
+
+// OpenDir returns a pointer to a DIR structure suitable for
+// ReadDir. In case of an error, the name of the failed
+// syscall is returned along with a syscall.Errno.
+func (fd *FD) OpenDir() (uintptr, string, error) {
+ // fdopendir(3) takes control of the file descriptor,
+ // so use a dup.
+ fd2, call, err := fd.Dup()
+ if err != nil {
+ return 0, call, err
+ }
+ var dir uintptr
+ for {
+ dir, err = fdopendir(fd2)
+ if err != syscall.EINTR {
+ break
+ }
+ }
+ if err != nil {
+ syscall.Close(fd2)
+ return 0, "fdopendir", err
+ }
+ return dir, "", nil
+}
+
+// Implemented in syscall/syscall_darwin.go.
+//
+//go:linkname fdopendir syscall.fdopendir
+func fdopendir(fd int) (dir uintptr, err error)
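+
+// A caller (sketched here; the closedir helper is an assumption about the
+// os package, not something defined in this file) owns the returned DIR
+// stream and must release it when done:
+//
+//	dir, call, err := fd.OpenDir()
+//	if err != nil {
+//		return os.NewSyscallError(call, err)
+//	}
+//	defer closedir(dir) // hypothetical wrapper around closedir(3)
+//	// ... read entries from dir ...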
diff --git a/src/internal/poll/fd_plan9.go b/src/internal/poll/fd_plan9.go
new file mode 100644
index 0000000..7cc178a
--- /dev/null
+++ b/src/internal/poll/fd_plan9.go
@@ -0,0 +1,232 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import (
+ "errors"
+ "io"
+ "sync"
+ "time"
+)
+
+type FD struct {
+ // Lock sysfd and serialize access to Read and Write methods.
+ fdmu fdMutex
+
+ Destroy func()
+
+ // deadlines
+ rmu sync.Mutex
+ wmu sync.Mutex
+ raio *asyncIO
+ waio *asyncIO
+ rtimer *time.Timer
+ wtimer *time.Timer
+ rtimedout bool // set true when read deadline has been reached
+ wtimedout bool // set true when write deadline has been reached
+
+ // Whether this is a normal file.
+ // On Plan 9 we do not use this package for ordinary files,
+ // so this is always false, but the field is present because
+ // shared code in fd_mutex.go checks it.
+ isFile bool
+}
+
+// We need this to close out a file descriptor when it is unlocked,
+// but the real implementation has to live in the net package because
+// it uses os.File values.
+func (fd *FD) destroy() error {
+ if fd.Destroy != nil {
+ fd.Destroy()
+ }
+ return nil
+}
+
+// Close handles the locking for closing an FD. The real operation
+// is in the net package.
+func (fd *FD) Close() error {
+ if !fd.fdmu.increfAndClose() {
+ return errClosing(fd.isFile)
+ }
+ return nil
+}
+
+// Read implements io.Reader.
+func (fd *FD) Read(fn func([]byte) (int, error), b []byte) (int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, err
+ }
+ defer fd.readUnlock()
+ if len(b) == 0 {
+ return 0, nil
+ }
+ fd.rmu.Lock()
+ if fd.rtimedout {
+ fd.rmu.Unlock()
+ return 0, ErrDeadlineExceeded
+ }
+ fd.raio = newAsyncIO(fn, b)
+ fd.rmu.Unlock()
+ n, err := fd.raio.Wait()
+ fd.raio = nil
+ if isHangup(err) {
+ err = io.EOF
+ }
+ if isInterrupted(err) {
+ err = ErrDeadlineExceeded
+ }
+ return n, err
+}
+
+// Write implements io.Writer.
+func (fd *FD) Write(fn func([]byte) (int, error), b []byte) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ fd.wmu.Lock()
+ if fd.wtimedout {
+ fd.wmu.Unlock()
+ return 0, ErrDeadlineExceeded
+ }
+ fd.waio = newAsyncIO(fn, b)
+ fd.wmu.Unlock()
+ n, err := fd.waio.Wait()
+ fd.waio = nil
+ if isInterrupted(err) {
+ err = ErrDeadlineExceeded
+ }
+ return n, err
+}
+
+// SetDeadline sets the read and write deadlines associated with fd.
+func (fd *FD) SetDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'r'+'w')
+}
+
+// SetReadDeadline sets the read deadline associated with fd.
+func (fd *FD) SetReadDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'r')
+}
+
+// SetWriteDeadline sets the write deadline associated with fd.
+func (fd *FD) SetWriteDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'w')
+}
+
+func setDeadlineImpl(fd *FD, t time.Time, mode int) error {
+ d := t.Sub(time.Now())
+ if mode == 'r' || mode == 'r'+'w' {
+ fd.rmu.Lock()
+ defer fd.rmu.Unlock()
+ if fd.rtimer != nil {
+ fd.rtimer.Stop()
+ fd.rtimer = nil
+ }
+ fd.rtimedout = false
+ }
+ if mode == 'w' || mode == 'r'+'w' {
+ fd.wmu.Lock()
+ defer fd.wmu.Unlock()
+ if fd.wtimer != nil {
+ fd.wtimer.Stop()
+ fd.wtimer = nil
+ }
+ fd.wtimedout = false
+ }
+ if !t.IsZero() && d > 0 {
+ // Interrupt I/O operation once timer has expired
+ if mode == 'r' || mode == 'r'+'w' {
+ var timer *time.Timer
+ timer = time.AfterFunc(d, func() {
+ fd.rmu.Lock()
+ defer fd.rmu.Unlock()
+ if fd.rtimer != timer {
+ // deadline was changed
+ return
+ }
+ fd.rtimedout = true
+ if fd.raio != nil {
+ fd.raio.Cancel()
+ }
+ })
+ fd.rtimer = timer
+ }
+ if mode == 'w' || mode == 'r'+'w' {
+ var timer *time.Timer
+ timer = time.AfterFunc(d, func() {
+ fd.wmu.Lock()
+ defer fd.wmu.Unlock()
+ if fd.wtimer != timer {
+ // deadline was changed
+ return
+ }
+ fd.wtimedout = true
+ if fd.waio != nil {
+ fd.waio.Cancel()
+ }
+ })
+ fd.wtimer = timer
+ }
+ }
+ if !t.IsZero() && d <= 0 {
+ // Interrupt current I/O operation
+ if mode == 'r' || mode == 'r'+'w' {
+ fd.rtimedout = true
+ if fd.raio != nil {
+ fd.raio.Cancel()
+ }
+ }
+ if mode == 'w' || mode == 'r'+'w' {
+ fd.wtimedout = true
+ if fd.waio != nil {
+ fd.waio.Cancel()
+ }
+ }
+ }
+ return nil
+}
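+
+// The fd.rtimer != timer check above is what makes resetting a deadline
+// safe: a stale AfterFunc that fires after a newer deadline has been
+// installed sees a different timer and returns without cancelling
+// anything. The same pattern in isolation (a sketch; the names are
+// illustrative, not part of this file):
+//
+//	var mu sync.Mutex
+//	var current *time.Timer
+//	arm := func(d time.Duration, cancel func()) {
+//		mu.Lock()
+//		defer mu.Unlock()
+//		var t *time.Timer
+//		t = time.AfterFunc(d, func() {
+//			mu.Lock()
+//			defer mu.Unlock()
+//			if current != t { // a newer deadline replaced this timer
+//				return
+//			}
+//			cancel()
+//		})
+//		current = t
+//	}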
+
+// On Plan 9 only, expose the locking for the net code.
+
+// ReadLock wraps FD.readLock.
+func (fd *FD) ReadLock() error {
+ return fd.readLock()
+}
+
+// ReadUnlock wraps FD.readUnlock.
+func (fd *FD) ReadUnlock() {
+ fd.readUnlock()
+}
+
+func isHangup(err error) bool {
+ return err != nil && stringsHasSuffix(err.Error(), "Hangup")
+}
+
+func isInterrupted(err error) bool {
+ return err != nil && stringsHasSuffix(err.Error(), "interrupted")
+}
+
+// IsPollDescriptor reports whether fd is the descriptor being used by the poller.
+// This is only used for testing.
+func IsPollDescriptor(fd uintptr) bool {
+ return false
+}
+
+// RawControl invokes the user-defined function f for a non-IO
+// operation.
+func (fd *FD) RawControl(f func(uintptr)) error {
+ return errors.New("not implemented")
+}
+
+// RawRead invokes the user-defined function f for a read operation.
+func (fd *FD) RawRead(f func(uintptr) bool) error {
+ return errors.New("not implemented")
+}
+
+// RawWrite invokes the user-defined function f for a write operation.
+func (fd *FD) RawWrite(f func(uintptr) bool) error {
+ return errors.New("not implemented")
+}
diff --git a/src/internal/poll/fd_poll_js.go b/src/internal/poll/fd_poll_js.go
new file mode 100644
index 0000000..fe5e73a
--- /dev/null
+++ b/src/internal/poll/fd_poll_js.go
@@ -0,0 +1,99 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js && wasm
+
+package poll
+
+import (
+ "syscall"
+ "time"
+)
+
+type pollDesc struct {
+ fd *FD
+ closing bool
+}
+
+func (pd *pollDesc) init(fd *FD) error { pd.fd = fd; return nil }
+
+func (pd *pollDesc) close() {}
+
+func (pd *pollDesc) evict() {
+ pd.closing = true
+ if pd.fd != nil {
+ syscall.StopIO(pd.fd.Sysfd)
+ }
+}
+
+func (pd *pollDesc) prepare(mode int, isFile bool) error {
+ if pd.closing {
+ return errClosing(isFile)
+ }
+ return nil
+}
+
+func (pd *pollDesc) prepareRead(isFile bool) error { return pd.prepare('r', isFile) }
+
+func (pd *pollDesc) prepareWrite(isFile bool) error { return pd.prepare('w', isFile) }
+
+func (pd *pollDesc) wait(mode int, isFile bool) error {
+ if pd.closing {
+ return errClosing(isFile)
+ }
+ if isFile { // TODO(neelance): js/wasm: Use callbacks from JS to block until the read/write finished.
+ return nil
+ }
+ return ErrDeadlineExceeded
+}
+
+func (pd *pollDesc) waitRead(isFile bool) error { return pd.wait('r', isFile) }
+
+func (pd *pollDesc) waitWrite(isFile bool) error { return pd.wait('w', isFile) }
+
+func (pd *pollDesc) waitCanceled(mode int) {}
+
+func (pd *pollDesc) pollable() bool { return true }
+
+// SetDeadline sets the read and write deadlines associated with fd.
+func (fd *FD) SetDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'r'+'w')
+}
+
+// SetReadDeadline sets the read deadline associated with fd.
+func (fd *FD) SetReadDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'r')
+}
+
+// SetWriteDeadline sets the write deadline associated with fd.
+func (fd *FD) SetWriteDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'w')
+}
+
+func setDeadlineImpl(fd *FD, t time.Time, mode int) error {
+ d := t.UnixNano()
+ if t.IsZero() {
+ d = 0
+ }
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ switch mode {
+ case 'r':
+ syscall.SetReadDeadline(fd.Sysfd, d)
+ case 'w':
+ syscall.SetWriteDeadline(fd.Sysfd, d)
+ case 'r' + 'w':
+ syscall.SetReadDeadline(fd.Sysfd, d)
+ syscall.SetWriteDeadline(fd.Sysfd, d)
+ }
+ fd.decref()
+ return nil
+}
+
+// IsPollDescriptor reports whether fd is the descriptor being used by the poller.
+// This is only used for testing.
+func IsPollDescriptor(fd uintptr) bool {
+ return false
+}
diff --git a/src/internal/poll/fd_poll_runtime.go b/src/internal/poll/fd_poll_runtime.go
new file mode 100644
index 0000000..b51535e
--- /dev/null
+++ b/src/internal/poll/fd_poll_runtime.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows || wasip1
+
+package poll
+
+import (
+ "errors"
+ "sync"
+ "syscall"
+ "time"
+ _ "unsafe" // for go:linkname
+)
+
+// runtimeNano returns the current value of the runtime clock in nanoseconds.
+//
+//go:linkname runtimeNano runtime.nanotime
+func runtimeNano() int64
+
+func runtime_pollServerInit()
+func runtime_pollOpen(fd uintptr) (uintptr, int)
+func runtime_pollClose(ctx uintptr)
+func runtime_pollWait(ctx uintptr, mode int) int
+func runtime_pollWaitCanceled(ctx uintptr, mode int)
+func runtime_pollReset(ctx uintptr, mode int) int
+func runtime_pollSetDeadline(ctx uintptr, d int64, mode int)
+func runtime_pollUnblock(ctx uintptr)
+func runtime_isPollServerDescriptor(fd uintptr) bool
+
+type pollDesc struct {
+ runtimeCtx uintptr
+}
+
+var serverInit sync.Once
+
+func (pd *pollDesc) init(fd *FD) error {
+ serverInit.Do(runtime_pollServerInit)
+ ctx, errno := runtime_pollOpen(uintptr(fd.Sysfd))
+ if errno != 0 {
+ return errnoErr(syscall.Errno(errno))
+ }
+ pd.runtimeCtx = ctx
+ return nil
+}
+
+func (pd *pollDesc) close() {
+ if pd.runtimeCtx == 0 {
+ return
+ }
+ runtime_pollClose(pd.runtimeCtx)
+ pd.runtimeCtx = 0
+}
+
+// evict evicts fd from the pending list, unblocking any I/O running on fd.
+func (pd *pollDesc) evict() {
+ if pd.runtimeCtx == 0 {
+ return
+ }
+ runtime_pollUnblock(pd.runtimeCtx)
+}
+
+func (pd *pollDesc) prepare(mode int, isFile bool) error {
+ if pd.runtimeCtx == 0 {
+ return nil
+ }
+ res := runtime_pollReset(pd.runtimeCtx, mode)
+ return convertErr(res, isFile)
+}
+
+func (pd *pollDesc) prepareRead(isFile bool) error {
+ return pd.prepare('r', isFile)
+}
+
+func (pd *pollDesc) prepareWrite(isFile bool) error {
+ return pd.prepare('w', isFile)
+}
+
+func (pd *pollDesc) wait(mode int, isFile bool) error {
+ if pd.runtimeCtx == 0 {
+ return errors.New("waiting for unsupported file type")
+ }
+ res := runtime_pollWait(pd.runtimeCtx, mode)
+ return convertErr(res, isFile)
+}
+
+func (pd *pollDesc) waitRead(isFile bool) error {
+ return pd.wait('r', isFile)
+}
+
+func (pd *pollDesc) waitWrite(isFile bool) error {
+ return pd.wait('w', isFile)
+}
+
+func (pd *pollDesc) waitCanceled(mode int) {
+ if pd.runtimeCtx == 0 {
+ return
+ }
+ runtime_pollWaitCanceled(pd.runtimeCtx, mode)
+}
+
+func (pd *pollDesc) pollable() bool {
+ return pd.runtimeCtx != 0
+}
+
+// Error values returned by runtime_pollReset and runtime_pollWait.
+// These must match the values in runtime/netpoll.go.
+const (
+ pollNoError = 0
+ pollErrClosing = 1
+ pollErrTimeout = 2
+ pollErrNotPollable = 3
+)
+
+func convertErr(res int, isFile bool) error {
+ switch res {
+ case pollNoError:
+ return nil
+ case pollErrClosing:
+ return errClosing(isFile)
+ case pollErrTimeout:
+ return ErrDeadlineExceeded
+ case pollErrNotPollable:
+ return ErrNotPollable
+ }
+ println("unreachable: ", res)
+ panic("unreachable")
+}
+
+// SetDeadline sets the read and write deadlines associated with fd.
+func (fd *FD) SetDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'r'+'w')
+}
+
+// SetReadDeadline sets the read deadline associated with fd.
+func (fd *FD) SetReadDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'r')
+}
+
+// SetWriteDeadline sets the write deadline associated with fd.
+func (fd *FD) SetWriteDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'w')
+}
+
+func setDeadlineImpl(fd *FD, t time.Time, mode int) error {
+ var d int64
+ if !t.IsZero() {
+ d = int64(time.Until(t))
+ if d == 0 {
+ d = -1 // don't confuse deadline right now with no deadline
+ }
+ }
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ if fd.pd.runtimeCtx == 0 {
+ return ErrNoDeadline
+ }
+ runtime_pollSetDeadline(fd.pd.runtimeCtx, d, mode)
+ return nil
+}
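+
+// From a caller's point of view (a net.Conn or os.File), an expired
+// deadline surfaces as an error wrapping os.ErrDeadlineExceeded. A short
+// sketch of the user-visible behavior, assuming a connected net.Conn c:
+//
+//	c.SetReadDeadline(time.Now().Add(50 * time.Millisecond))
+//	if _, err := c.Read(buf); errors.Is(err, os.ErrDeadlineExceeded) {
+//		// The read timed out; a future deadline makes c usable again.
+//	}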
+
+// IsPollDescriptor reports whether fd is the descriptor being used by the poller.
+// This is only used for testing.
+func IsPollDescriptor(fd uintptr) bool {
+ return runtime_isPollServerDescriptor(fd)
+}
diff --git a/src/internal/poll/fd_posix.go b/src/internal/poll/fd_posix.go
new file mode 100644
index 0000000..5bd333b
--- /dev/null
+++ b/src/internal/poll/fd_posix.go
@@ -0,0 +1,79 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || wasip1 || windows
+
+package poll
+
+import (
+ "io"
+ "syscall"
+)
+
+// eofError returns io.EOF when a successful zero-byte read indicates
+// end of file for fd.
+func (fd *FD) eofError(n int, err error) error {
+ if n == 0 && err == nil && fd.ZeroReadIsEOF {
+ return io.EOF
+ }
+ return err
+}
+
+// Shutdown wraps syscall.Shutdown.
+func (fd *FD) Shutdown(how int) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.Shutdown(fd.Sysfd, how)
+}
+
+// Fchown wraps syscall.Fchown.
+func (fd *FD) Fchown(uid, gid int) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return ignoringEINTR(func() error {
+ return syscall.Fchown(fd.Sysfd, uid, gid)
+ })
+}
+
+// Ftruncate wraps syscall.Ftruncate.
+func (fd *FD) Ftruncate(size int64) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return ignoringEINTR(func() error {
+ return syscall.Ftruncate(fd.Sysfd, size)
+ })
+}
+
+// RawControl invokes the user-defined function f for a non-IO
+// operation.
+func (fd *FD) RawControl(f func(uintptr)) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ f(uintptr(fd.Sysfd))
+ return nil
+}
+
+// ignoringEINTR makes a function call and repeats it if it returns
+// an EINTR error. This appears to be required even though we install all
+// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
+// Also #20400 and #36644 are issues in which a signal handler is
+// installed without setting SA_RESTART. None of these are the common case,
+// but there are enough of them that it seems that we can't avoid
+// an EINTR loop.
+func ignoringEINTR(fn func() error) error {
+ for {
+ err := fn()
+ if err != syscall.EINTR {
+ return err
+ }
+ }
+}
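+
+// For example, a wrapper around a single retryable system call is written
+// in the same style as Fchown and Ftruncate above (a sketch; Fsync stands
+// in for any call that may fail with EINTR):
+//
+//	return ignoringEINTR(func() error {
+//		return syscall.Fsync(fd.Sysfd)
+//	})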
diff --git a/src/internal/poll/fd_posix_test.go b/src/internal/poll/fd_posix_test.go
new file mode 100644
index 0000000..b97e465
--- /dev/null
+++ b/src/internal/poll/fd_posix_test.go
@@ -0,0 +1,43 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package poll_test
+
+import (
+ . "internal/poll"
+ "io"
+ "testing"
+)
+
+var eofErrorTests = []struct {
+ n int
+ err error
+ fd *FD
+ expected error
+}{
+ {100, nil, &FD{ZeroReadIsEOF: true}, nil},
+ {100, io.EOF, &FD{ZeroReadIsEOF: true}, io.EOF},
+ {100, ErrNetClosing, &FD{ZeroReadIsEOF: true}, ErrNetClosing},
+ {0, nil, &FD{ZeroReadIsEOF: true}, io.EOF},
+ {0, io.EOF, &FD{ZeroReadIsEOF: true}, io.EOF},
+ {0, ErrNetClosing, &FD{ZeroReadIsEOF: true}, ErrNetClosing},
+
+ {100, nil, &FD{ZeroReadIsEOF: false}, nil},
+ {100, io.EOF, &FD{ZeroReadIsEOF: false}, io.EOF},
+ {100, ErrNetClosing, &FD{ZeroReadIsEOF: false}, ErrNetClosing},
+ {0, nil, &FD{ZeroReadIsEOF: false}, nil},
+ {0, io.EOF, &FD{ZeroReadIsEOF: false}, io.EOF},
+ {0, ErrNetClosing, &FD{ZeroReadIsEOF: false}, ErrNetClosing},
+}
+
+func TestEOFError(t *testing.T) {
+ for _, tt := range eofErrorTests {
+ actual := tt.fd.EOFError(tt.n, tt.err)
+ if actual != tt.expected {
+ t.Errorf("eofError(%v, %v, %v): expected %v, actual %v", tt.n, tt.err, tt.fd.ZeroReadIsEOF, tt.expected, actual)
+ }
+ }
+}
diff --git a/src/internal/poll/fd_unix.go b/src/internal/poll/fd_unix.go
new file mode 100644
index 0000000..61c2338
--- /dev/null
+++ b/src/internal/poll/fd_unix.go
@@ -0,0 +1,741 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || wasip1
+
+package poll
+
+import (
+ "internal/syscall/unix"
+ "io"
+ "sync/atomic"
+ "syscall"
+)
+
+// FD is a file descriptor. The net and os packages use this type as a
+// field of a larger type representing a network connection or OS file.
+type FD struct {
+ // Lock sysfd and serialize access to Read and Write methods.
+ fdmu fdMutex
+
+ // System file descriptor. Immutable until Close.
+ Sysfd int
+
+ // Platform dependent state of the file descriptor.
+ SysFile
+
+ // I/O poller.
+ pd pollDesc
+
+ // Semaphore signaled when file is closed.
+ csema uint32
+
+ // Non-zero if this file has been set to blocking mode.
+ isBlocking uint32
+
+ // Whether this is a streaming descriptor, as opposed to a
+ // packet-based descriptor like a UDP socket. Immutable.
+ IsStream bool
+
+ // Whether a zero byte read indicates EOF. This is false for a
+ // message based socket connection.
+ ZeroReadIsEOF bool
+
+ // Whether this is a file rather than a network socket.
+ isFile bool
+}
+
+// Init initializes the FD. The Sysfd field should already be set.
+// This can be called multiple times on a single FD.
+// The net argument is a network name from the net package (e.g., "tcp"),
+// or "file".
+// Set pollable to true if fd should be managed by runtime netpoll.
+func (fd *FD) Init(net string, pollable bool) error {
+ fd.SysFile.init()
+
+ // We don't actually care about the various network types.
+ if net == "file" {
+ fd.isFile = true
+ }
+ if !pollable {
+ fd.isBlocking = 1
+ return nil
+ }
+ err := fd.pd.init(fd)
+ if err != nil {
+ // If we could not initialize the runtime poller,
+ // assume we are using blocking mode.
+ fd.isBlocking = 1
+ }
+ return err
+}
+
+// Destroy closes the file descriptor. This is called when there are
+// no remaining references.
+func (fd *FD) destroy() error {
+ // Poller may want to unregister fd in readiness notification mechanism,
+ // so this must be executed before CloseFunc.
+ fd.pd.close()
+
+ err := fd.SysFile.destroy(fd.Sysfd)
+
+ fd.Sysfd = -1
+ runtime_Semrelease(&fd.csema)
+ return err
+}
+
+// Close closes the FD. The underlying file descriptor is closed by the
+// destroy method when there are no remaining references.
+func (fd *FD) Close() error {
+ if !fd.fdmu.increfAndClose() {
+ return errClosing(fd.isFile)
+ }
+
+ // Unblock any I/O. Once it all unblocks and returns,
+ // so that it cannot be referring to fd.sysfd anymore,
+ // the final decref will close fd.sysfd. This should happen
+ // fairly quickly, since all the I/O is non-blocking, and any
+ // attempts to block in the pollDesc will return errClosing(fd.isFile).
+ fd.pd.evict()
+
+ // The call to decref will call destroy if there are no other
+ // references.
+ err := fd.decref()
+
+ // Wait until the descriptor is closed. If this was the only
+ // reference, it is already closed. Only wait if the file has
+ // not been set to blocking mode, as otherwise any current I/O
+ // may be blocking, and that would block the Close.
+ // No need for an atomic read of isBlocking, increfAndClose means
+ // we have exclusive access to fd.
+ if fd.isBlocking == 0 {
+ runtime_Semacquire(&fd.csema)
+ }
+
+ return err
+}
+
+// SetBlocking puts the file into blocking mode.
+func (fd *FD) SetBlocking() error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ // Atomic store so that concurrent calls to SetBlocking
+ // do not cause a race condition. isBlocking only ever goes
+ // from 0 to 1 so there is no real race here.
+ atomic.StoreUint32(&fd.isBlocking, 1)
+ return syscall.SetNonblock(fd.Sysfd, false)
+}
+
+// Darwin and FreeBSD can't read or write 2GB+ files at a time,
+// even on 64-bit systems.
+// The same is true of socket implementations on many systems.
+// See golang.org/issue/7812 and golang.org/issue/16266.
+// Use 1GB instead of, say, 2GB-1, to keep subsequent reads aligned.
+const maxRW = 1 << 30
+
+// Read implements io.Reader.
+func (fd *FD) Read(p []byte) (int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, err
+ }
+ defer fd.readUnlock()
+ if len(p) == 0 {
+ // If the caller wanted a zero byte read, return immediately
+ // without trying (but after acquiring the readLock).
+ // Otherwise syscall.Read returns 0, nil which looks like
+ // io.EOF.
+ // TODO(bradfitz): make it wait for readability? (Issue 15735)
+ return 0, nil
+ }
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, err
+ }
+ if fd.IsStream && len(p) > maxRW {
+ p = p[:maxRW]
+ }
+ for {
+ n, err := ignoringEINTRIO(syscall.Read, fd.Sysfd, p)
+ if err != nil {
+ n = 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, err
+ }
+}
+
+// Pread wraps the pread system call.
+func (fd *FD) Pread(p []byte, off int64) (int, error) {
+ // Call incref, not readLock, because since pread specifies the
+ // offset it is independent from other reads.
+ // Similarly, using the poller doesn't make sense for pread.
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ if fd.IsStream && len(p) > maxRW {
+ p = p[:maxRW]
+ }
+ var (
+ n int
+ err error
+ )
+ for {
+ n, err = syscall.Pread(fd.Sysfd, p, off)
+ if err != syscall.EINTR {
+ break
+ }
+ }
+ if err != nil {
+ n = 0
+ }
+ fd.decref()
+ err = fd.eofError(n, err)
+ return n, err
+}
+
+// ReadFrom wraps the recvfrom network call.
+func (fd *FD) ReadFrom(p []byte) (int, syscall.Sockaddr, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, nil, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, nil, err
+ }
+ for {
+ n, sa, err := syscall.Recvfrom(fd.Sysfd, p, 0)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ n = 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, sa, err
+ }
+}
+
+// ReadFromInet4 wraps the recvfrom network call for IPv4.
+func (fd *FD) ReadFromInet4(p []byte, from *syscall.SockaddrInet4) (int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, err
+ }
+ for {
+ n, err := unix.RecvfromInet4(fd.Sysfd, p, 0, from)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ n = 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, err
+ }
+}
+
+// ReadFromInet6 wraps the recvfrom network call for IPv6.
+func (fd *FD) ReadFromInet6(p []byte, from *syscall.SockaddrInet6) (int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, err
+ }
+ for {
+ n, err := unix.RecvfromInet6(fd.Sysfd, p, 0, from)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ n = 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, err
+ }
+}
+
+// ReadMsg wraps the recvmsg network call.
+func (fd *FD) ReadMsg(p []byte, oob []byte, flags int) (int, int, int, syscall.Sockaddr, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, 0, 0, nil, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, 0, 0, nil, err
+ }
+ for {
+ n, oobn, sysflags, sa, err := syscall.Recvmsg(fd.Sysfd, p, oob, flags)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ // TODO(dfc) should n and oobn be set to 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, oobn, sysflags, sa, err
+ }
+}
+
+// ReadMsgInet4 is ReadMsg, but specialized for syscall.SockaddrInet4.
+func (fd *FD) ReadMsgInet4(p []byte, oob []byte, flags int, sa4 *syscall.SockaddrInet4) (int, int, int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, 0, 0, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, 0, 0, err
+ }
+ for {
+ n, oobn, sysflags, err := unix.RecvmsgInet4(fd.Sysfd, p, oob, flags, sa4)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ // TODO(dfc) should n and oobn be set to 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, oobn, sysflags, err
+ }
+}
+
+// ReadMsgInet6 is ReadMsg, but specialized for syscall.SockaddrInet6.
+func (fd *FD) ReadMsgInet6(p []byte, oob []byte, flags int, sa6 *syscall.SockaddrInet6) (int, int, int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, 0, 0, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, 0, 0, err
+ }
+ for {
+ n, oobn, sysflags, err := unix.RecvmsgInet6(fd.Sysfd, p, oob, flags, sa6)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ // TODO(dfc) should n and oobn be set to 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, oobn, sysflags, err
+ }
+}
+
+// Write implements io.Writer.
+func (fd *FD) Write(p []byte) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, err
+ }
+ var nn int
+ for {
+ max := len(p)
+ if fd.IsStream && max-nn > maxRW {
+ max = nn + maxRW
+ }
+ n, err := ignoringEINTRIO(syscall.Write, fd.Sysfd, p[nn:max])
+ if n > 0 {
+ nn += n
+ }
+ if nn == len(p) {
+ return nn, err
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return nn, err
+ }
+ if n == 0 {
+ return nn, io.ErrUnexpectedEOF
+ }
+ }
+}
+
+// Pwrite wraps the pwrite system call.
+func (fd *FD) Pwrite(p []byte, off int64) (int, error) {
+ // Call incref, not writeLock, because since pwrite specifies the
+ // offset it is independent from other writes.
+ // Similarly, using the poller doesn't make sense for pwrite.
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+ var nn int
+ for {
+ max := len(p)
+ if fd.IsStream && max-nn > maxRW {
+ max = nn + maxRW
+ }
+ n, err := syscall.Pwrite(fd.Sysfd, p[nn:max], off+int64(nn))
+ if err == syscall.EINTR {
+ continue
+ }
+ if n > 0 {
+ nn += n
+ }
+ if nn == len(p) {
+ return nn, err
+ }
+ if err != nil {
+ return nn, err
+ }
+ if n == 0 {
+ return nn, io.ErrUnexpectedEOF
+ }
+ }
+}
+
+// WriteToInet4 wraps the sendto network call for IPv4 addresses.
+func (fd *FD) WriteToInet4(p []byte, sa *syscall.SockaddrInet4) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, err
+ }
+ for {
+ err := unix.SendtoInet4(fd.Sysfd, p, 0, sa)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+ }
+}
+
+// WriteToInet6 wraps the sendto network call for IPv6 addresses.
+func (fd *FD) WriteToInet6(p []byte, sa *syscall.SockaddrInet6) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, err
+ }
+ for {
+ err := unix.SendtoInet6(fd.Sysfd, p, 0, sa)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+ }
+}
+
+// WriteTo wraps the sendto network call.
+func (fd *FD) WriteTo(p []byte, sa syscall.Sockaddr) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, err
+ }
+ for {
+ err := syscall.Sendto(fd.Sysfd, p, 0, sa)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+ }
+}
+
+// WriteMsg wraps the sendmsg network call.
+func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, 0, err
+ }
+ for {
+ n, err := syscall.SendmsgN(fd.Sysfd, p, oob, sa, 0)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return n, 0, err
+ }
+ return n, len(oob), err
+ }
+}
+
+// WriteMsgInet4 is WriteMsg specialized for syscall.SockaddrInet4.
+func (fd *FD) WriteMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (int, int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, 0, err
+ }
+ for {
+ n, err := unix.SendmsgNInet4(fd.Sysfd, p, oob, sa, 0)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return n, 0, err
+ }
+ return n, len(oob), err
+ }
+}
+
+// WriteMsgInet6 is WriteMsg specialized for syscall.SockaddrInet6.
+func (fd *FD) WriteMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (int, int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, 0, err
+ }
+ for {
+ n, err := unix.SendmsgNInet6(fd.Sysfd, p, oob, sa, 0)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return n, 0, err
+ }
+ return n, len(oob), err
+ }
+}
+
+// Accept wraps the accept network call.
+func (fd *FD) Accept() (int, syscall.Sockaddr, string, error) {
+ if err := fd.readLock(); err != nil {
+ return -1, nil, "", err
+ }
+ defer fd.readUnlock()
+
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return -1, nil, "", err
+ }
+ for {
+ s, rsa, errcall, err := accept(fd.Sysfd)
+ if err == nil {
+ return s, rsa, "", err
+ }
+ switch err {
+ case syscall.EINTR:
+ continue
+ case syscall.EAGAIN:
+ if fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ case syscall.ECONNABORTED:
+ // This means that a socket on the listen
+ // queue was closed before we Accept()ed it;
+ // it's a silly error, so try again.
+ continue
+ }
+ return -1, nil, errcall, err
+ }
+}
+
+// Fchmod wraps syscall.Fchmod.
+func (fd *FD) Fchmod(mode uint32) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return ignoringEINTR(func() error {
+ return syscall.Fchmod(fd.Sysfd, mode)
+ })
+}
+
+// Fstat wraps syscall.Fstat
+func (fd *FD) Fstat(s *syscall.Stat_t) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return ignoringEINTR(func() error {
+ return syscall.Fstat(fd.Sysfd, s)
+ })
+}
+
+// dupCloexecUnsupported indicates whether F_DUPFD_CLOEXEC is supported by the kernel.
+var dupCloexecUnsupported atomic.Bool
+
+// DupCloseOnExec dups fd and marks it close-on-exec.
+func DupCloseOnExec(fd int) (int, string, error) {
+ if syscall.F_DUPFD_CLOEXEC != 0 && !dupCloexecUnsupported.Load() {
+ r0, err := unix.Fcntl(fd, syscall.F_DUPFD_CLOEXEC, 0)
+ if err == nil {
+ return r0, "", nil
+ }
+ switch err {
+ case syscall.EINVAL, syscall.ENOSYS:
+ // Old kernel, or js/wasm (which returns
+ // ENOSYS). Fall back to the portable way from
+ // now on.
+ dupCloexecUnsupported.Store(true)
+ default:
+ return -1, "fcntl", err
+ }
+ }
+ return dupCloseOnExecOld(fd)
+}
+
+// Dup duplicates the file descriptor.
+func (fd *FD) Dup() (int, string, error) {
+ if err := fd.incref(); err != nil {
+ return -1, "", err
+ }
+ defer fd.decref()
+ return DupCloseOnExec(fd.Sysfd)
+}
+
+// On Unix variants only, expose the IO event for the net code.
+
+// WaitWrite waits until data can be written to fd.
+func (fd *FD) WaitWrite() error {
+ return fd.pd.waitWrite(fd.isFile)
+}
+
+// WriteOnce is for testing only. It makes a single write call.
+func (fd *FD) WriteOnce(p []byte) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ return ignoringEINTRIO(syscall.Write, fd.Sysfd, p)
+}
+
+// RawRead invokes the user-defined function f for a read operation.
+func (fd *FD) RawRead(f func(uintptr) bool) error {
+ if err := fd.readLock(); err != nil {
+ return err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return err
+ }
+ for {
+ if f(uintptr(fd.Sysfd)) {
+ return nil
+ }
+ if err := fd.pd.waitRead(fd.isFile); err != nil {
+ return err
+ }
+ }
+}
+
+// RawWrite invokes the user-defined function f for a write operation.
+func (fd *FD) RawWrite(f func(uintptr) bool) error {
+ if err := fd.writeLock(); err != nil {
+ return err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return err
+ }
+ for {
+ if f(uintptr(fd.Sysfd)) {
+ return nil
+ }
+ if err := fd.pd.waitWrite(fd.isFile); err != nil {
+ return err
+ }
+ }
+}
+
+// ignoringEINTRIO is like ignoringEINTR, but just for IO calls.
+func ignoringEINTRIO(fn func(fd int, p []byte) (int, error), fd int, p []byte) (int, error) {
+ for {
+ n, err := fn(fd, p)
+ if err != syscall.EINTR {
+ return n, err
+ }
+ }
+}
diff --git a/src/internal/poll/fd_unixjs.go b/src/internal/poll/fd_unixjs.go
new file mode 100644
index 0000000..090974d
--- /dev/null
+++ b/src/internal/poll/fd_unixjs.go
@@ -0,0 +1,79 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package poll
+
+import "syscall"
+
+type SysFile struct {
+ // Writev cache.
+ iovecs *[]syscall.Iovec
+}
+
+func (s *SysFile) init() {}
+
+func (s *SysFile) destroy(fd int) error {
+ // We don't use ignoringEINTR here because POSIX does not define
+ // whether the descriptor is closed if close returns EINTR.
+ // If the descriptor is indeed closed, using a loop would race
+ // with some other goroutine opening a new descriptor.
+ // (The Linux kernel guarantees that it is closed on an EINTR error.)
+ return CloseFunc(fd)
+}
+
+// dupCloseOnExecOld is the traditional way to dup an fd and
+// set its O_CLOEXEC bit, using two system calls.
+func dupCloseOnExecOld(fd int) (int, string, error) {
+ syscall.ForkLock.RLock()
+ defer syscall.ForkLock.RUnlock()
+ newfd, err := syscall.Dup(fd)
+ if err != nil {
+ return -1, "dup", err
+ }
+ syscall.CloseOnExec(newfd)
+ return newfd, "", nil
+}
+
+// Fchdir wraps syscall.Fchdir.
+func (fd *FD) Fchdir() error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.Fchdir(fd.Sysfd)
+}
+
+// ReadDirent wraps syscall.ReadDirent.
+// We treat this like an ordinary system call rather than a call
+// that tries to fill the buffer.
+func (fd *FD) ReadDirent(buf []byte) (int, error) {
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+ for {
+ n, err := ignoringEINTRIO(syscall.ReadDirent, fd.Sysfd, buf)
+ if err != nil {
+ n = 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ // Do not call eofError; caller does not expect to see io.EOF.
+ return n, err
+ }
+}
+
+// Seek wraps syscall.Seek.
+func (fd *FD) Seek(offset int64, whence int) (int64, error) {
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+ return syscall.Seek(fd.Sysfd, offset, whence)
+}
diff --git a/src/internal/poll/fd_wasip1.go b/src/internal/poll/fd_wasip1.go
new file mode 100644
index 0000000..aecd896
--- /dev/null
+++ b/src/internal/poll/fd_wasip1.go
@@ -0,0 +1,239 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import (
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+type SysFile struct {
+ // RefCountPtr is a pointer to the reference count of Sysfd.
+ //
+ // WASI preview 1 lacks a dup(2) system call. When the os and net packages
+ // need to share a file/socket, instead of duplicating the underlying file
+ // descriptor, we instead provide a way to copy FD instances and manage the
+ // underlying file descriptor with reference counting.
+ RefCountPtr *int32
+
+ // RefCount is the reference count of Sysfd. When a copy of an FD is made,
+ // it points to the reference count of the original FD instance.
+ RefCount int32
+
+ // Cache for the file type, lazily initialized when Seek is called.
+ Filetype uint32
+
+ // If the file represents a directory, this field contains the current
+ // readdir position. It is reset to zero if the program calls Seek(0, 0).
+ Dircookie uint64
+
+ // Absolute path of the file, as returned by syscall.PathOpen;
+ // this is used by Fchdir to emulate setting the current directory
+ // to an open file descriptor.
+ Path string
+
+ // TODO(achille): it could be meaningful to move isFile from FD to a method
+ // on this struct type, and expose it as `IsFile() bool` which derives the
+ // result from the Filetype field. We would need to ensure that Filetype is
+ // always set instead of being lazily initialized.
+}
+
+func (s *SysFile) init() {
+ if s.RefCountPtr == nil {
+ s.RefCount = 1
+ s.RefCountPtr = &s.RefCount
+ }
+}
+
+func (s *SysFile) ref() SysFile {
+ atomic.AddInt32(s.RefCountPtr, +1)
+ return SysFile{RefCountPtr: s.RefCountPtr}
+}
+
+func (s *SysFile) destroy(fd int) error {
+ if s.RefCountPtr != nil && atomic.AddInt32(s.RefCountPtr, -1) > 0 {
+ return nil
+ }
+
+ // We don't use ignoringEINTR here because POSIX does not define
+ // whether the descriptor is closed if close returns EINTR.
+ // If the descriptor is indeed closed, using a loop would race
+ // with some other goroutine opening a new descriptor.
+ // (The Linux kernel guarantees that it is closed on an EINTR error.)
+ return CloseFunc(fd)
+}
+
+// Copy creates a copy of the FD.
+//
+// The FD instance points to the same underlying file descriptor. The file
+// descriptor isn't closed until all FD instances that refer to it have been
+// closed/destroyed.
+func (fd *FD) Copy() FD {
+ return FD{
+ Sysfd: fd.Sysfd,
+ SysFile: fd.SysFile.ref(),
+ IsStream: fd.IsStream,
+ ZeroReadIsEOF: fd.ZeroReadIsEOF,
+ isBlocking: fd.isBlocking,
+ isFile: fd.isFile,
+ }
+}
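+
+// A sketch of the resulting sharing semantics (error handling omitted;
+// sysfd stands for an already-opened descriptor):
+//
+//	f1 := &FD{Sysfd: sysfd}
+//	f1.Init("file", false)
+//	f2 := f1.Copy() // shared refcount is now 2, same Sysfd
+//	f1.Close()      // drops one reference; sysfd stays open
+//	f2.Close()      // last reference gone; CloseFunc(sysfd) runs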
+
+// dupCloseOnExecOld always errors on wasip1 because there is no mechanism to
+// duplicate file descriptors.
+func dupCloseOnExecOld(fd int) (int, string, error) {
+ return -1, "dup", syscall.ENOSYS
+}
+
+// Fchdir wraps syscall.Fchdir.
+func (fd *FD) Fchdir() error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.Chdir(fd.Path)
+}
+
+// ReadDir wraps syscall.ReadDir.
+// We treat this like an ordinary system call rather than a call
+// that tries to fill the buffer.
+func (fd *FD) ReadDir(buf []byte, cookie syscall.Dircookie) (int, error) {
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+ for {
+ n, err := syscall.ReadDir(fd.Sysfd, buf, cookie)
+ if err != nil {
+ n = 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ // Do not call eofError; caller does not expect to see io.EOF.
+ return n, err
+ }
+}
+
+func (fd *FD) ReadDirent(buf []byte) (int, error) {
+ n, err := fd.ReadDir(buf, fd.Dircookie)
+ if err != nil {
+ return 0, err
+ }
+ if n <= 0 {
+ return n, nil // EOF
+ }
+
+ // We assume that the caller of ReadDirent will consume the entire buffer
+ // up to the last full entry, so we scan through the buffer looking for the
+ // value of the last next cookie.
+ b := buf[:n]
+
+ for len(b) > 0 {
+ next, ok := direntNext(b)
+ if !ok {
+ break
+ }
+ size, ok := direntReclen(b)
+ if !ok {
+ break
+ }
+ if size > uint64(len(b)) {
+ break
+ }
+ fd.Dircookie = syscall.Dircookie(next)
+ b = b[size:]
+ }
+
+ // Trim a potentially incomplete trailing entry; this is necessary because
+ // the code in src/os/dir_unix.go does not deal well with partial values in
+ // calls to direntReclen, etc... and ends up causing an early EOF before all
+ // directory entries were consumed. ReadDirent is called with a large enough
+	// buffer (8 KiB) that at least one entry should always fit; this is a bit
+	// brittle, but it cannot be addressed without a larger change to the
+	// algorithm in the os.(*File).readdir method.
+ return n - len(b), nil
+}
+
+// Seek wraps syscall.Seek.
+func (fd *FD) Seek(offset int64, whence int) (int64, error) {
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+ // syscall.Filetype is a uint8 but we store it as a uint32 in SysFile in
+ // order to use atomic load/store on the field, which is why we have to
+ // perform this type conversion.
+ fileType := syscall.Filetype(atomic.LoadUint32(&fd.Filetype))
+
+ if fileType == syscall.FILETYPE_UNKNOWN {
+ var stat syscall.Stat_t
+ if err := fd.Fstat(&stat); err != nil {
+ return 0, err
+ }
+ fileType = stat.Filetype
+ atomic.StoreUint32(&fd.Filetype, uint32(fileType))
+ }
+
+ if fileType == syscall.FILETYPE_DIRECTORY {
+ // If the file descriptor is opened on a directory, we reset the readdir
+ // cookie when seeking back to the beginning to allow reusing the file
+ // descriptor to scan the directory again.
+ if offset == 0 && whence == 0 {
+ fd.Dircookie = 0
+ return 0, nil
+ } else {
+ return 0, syscall.EINVAL
+ }
+ }
+
+ return syscall.Seek(fd.Sysfd, offset, whence)
+}
+
+// https://github.com/WebAssembly/WASI/blob/main/legacy/preview1/docs.md#-dirent-record
+const sizeOfDirent = 24
+
+func direntReclen(buf []byte) (uint64, bool) {
+ namelen, ok := direntNamlen(buf)
+ return sizeOfDirent + namelen, ok
+}
+
+func direntNamlen(buf []byte) (uint64, bool) {
+ return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Namlen), unsafe.Sizeof(syscall.Dirent{}.Namlen))
+}
+
+func direntNext(buf []byte) (uint64, bool) {
+ return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Next), unsafe.Sizeof(syscall.Dirent{}.Next))
+}
+
+// readInt returns the size-bytes unsigned integer in native byte order at offset off.
+func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
+ if len(b) < int(off+size) {
+ return 0, false
+ }
+ return readIntLE(b[off:], size), true
+}
+
+func readIntLE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ default:
+ panic("internal/poll: readInt with unsupported size")
+ }
+}
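+
+// For example, decoding the 4-byte little-endian d_namlen field from a raw
+// dirent header laid out as described in the spec above (a sketch; the
+// byte values are made up):
+//
+//	buf := []byte{
+//		/* d_next   */ 1, 0, 0, 0, 0, 0, 0, 0,
+//		/* d_ino    */ 2, 0, 0, 0, 0, 0, 0, 0,
+//		/* d_namlen */ 5, 0, 0, 0,
+//		/* d_type   */ 3, 0, 0, 0,
+//	}
+//	namlen, ok := direntNamlen(buf) // namlen == 5, ok == true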
diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go
new file mode 100644
index 0000000..2095a6a
--- /dev/null
+++ b/src/internal/poll/fd_windows.go
@@ -0,0 +1,1331 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import (
+ "errors"
+ "internal/race"
+ "internal/syscall/windows"
+ "io"
+ "sync"
+ "syscall"
+ "unicode/utf16"
+ "unicode/utf8"
+ "unsafe"
+)
+
+var (
+ initErr error
+ ioSync uint64
+)
+
+// This package uses the SetFileCompletionNotificationModes Windows
+// API to skip calling GetQueuedCompletionStatus if an IO operation
+// completes synchronously. There is a known bug where
+// SetFileCompletionNotificationModes crashes on some systems (see
+// https://support.microsoft.com/kb/2568167 for details).
+
+var useSetFileCompletionNotificationModes bool // whether SetFileCompletionNotificationModes is present and safe to use
+
+// checkSetFileCompletionNotificationModes verifies that
+// SetFileCompletionNotificationModes Windows API is present
+// on the system and is safe to use.
+// See https://support.microsoft.com/kb/2568167 for details.
+func checkSetFileCompletionNotificationModes() {
+ err := syscall.LoadSetFileCompletionNotificationModes()
+ if err != nil {
+ return
+ }
+ protos := [2]int32{syscall.IPPROTO_TCP, 0}
+ var buf [32]syscall.WSAProtocolInfo
+ len := uint32(unsafe.Sizeof(buf))
+ n, err := syscall.WSAEnumProtocols(&protos[0], &buf[0], &len)
+ if err != nil {
+ return
+ }
+ for i := int32(0); i < n; i++ {
+ if buf[i].ServiceFlags1&syscall.XP1_IFS_HANDLES == 0 {
+ return
+ }
+ }
+ useSetFileCompletionNotificationModes = true
+}
+
+func init() {
+ var d syscall.WSAData
+ e := syscall.WSAStartup(uint32(0x202), &d)
+ if e != nil {
+ initErr = e
+ }
+ checkSetFileCompletionNotificationModes()
+}
+
+// operation contains a superset of the data necessary to perform all async IO.
+type operation struct {
+	// Used by the IOCP interface; it must be the first field
+	// of the struct, as our code relies on it.
+ o syscall.Overlapped
+
+ // fields used by runtime.netpoll
+ runtimeCtx uintptr
+ mode int32
+ errno int32
+ qty uint32
+
+ // fields used only by net package
+ fd *FD
+ buf syscall.WSABuf
+ msg windows.WSAMsg
+ sa syscall.Sockaddr
+ rsa *syscall.RawSockaddrAny
+ rsan int32
+ handle syscall.Handle
+ flags uint32
+ bufs []syscall.WSABuf
+}
+
+func (o *operation) InitBuf(buf []byte) {
+ o.buf.Len = uint32(len(buf))
+ o.buf.Buf = nil
+ if len(buf) != 0 {
+ o.buf.Buf = &buf[0]
+ }
+}
+
+func (o *operation) InitBufs(buf *[][]byte) {
+ if o.bufs == nil {
+ o.bufs = make([]syscall.WSABuf, 0, len(*buf))
+ } else {
+ o.bufs = o.bufs[:0]
+ }
+ for _, b := range *buf {
+ if len(b) == 0 {
+ o.bufs = append(o.bufs, syscall.WSABuf{})
+ continue
+ }
+ for len(b) > maxRW {
+ o.bufs = append(o.bufs, syscall.WSABuf{Len: maxRW, Buf: &b[0]})
+ b = b[maxRW:]
+ }
+ if len(b) > 0 {
+ o.bufs = append(o.bufs, syscall.WSABuf{Len: uint32(len(b)), Buf: &b[0]})
+ }
+ }
+}
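+
+// For example, a single buffer larger than maxRW is split into several
+// WSABufs so that no individual Len exceeds the 1 GB limit (a sketch; the
+// size is illustrative and assumes a 64-bit build):
+//
+//	bufs := [][]byte{make([]byte, 2*maxRW+maxRW/2)} // 2.5 GB
+//	var o operation
+//	o.InitBufs(&bufs)
+//	// len(o.bufs) == 3: two chunks of maxRW and one of maxRW/2.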
+
+// ClearBufs clears all pointers to Buffers parameter captured
+// by InitBufs, so it can be released by garbage collector.
+func (o *operation) ClearBufs() {
+ for i := range o.bufs {
+ o.bufs[i].Buf = nil
+ }
+ o.bufs = o.bufs[:0]
+}
+
+func (o *operation) InitMsg(p []byte, oob []byte) {
+ o.InitBuf(p)
+ o.msg.Buffers = &o.buf
+ o.msg.BufferCount = 1
+
+ o.msg.Name = nil
+ o.msg.Namelen = 0
+
+ o.msg.Flags = 0
+ o.msg.Control.Len = uint32(len(oob))
+ o.msg.Control.Buf = nil
+ if len(oob) != 0 {
+ o.msg.Control.Buf = &oob[0]
+ }
+}
+
+// execIO executes a single IO operation o. It submits and cancels
+// IO in the current thread for systems where the Windows CancelIoEx API
+// is available. Alternatively, it passes the request onto
+// runtime netpoll and waits for completion or cancels the request.
+func execIO(o *operation, submit func(o *operation) error) (int, error) {
+ if o.fd.pd.runtimeCtx == 0 {
+ return 0, errors.New("internal error: polling on unsupported descriptor type")
+ }
+
+ fd := o.fd
+ // Notify runtime netpoll about starting IO.
+ err := fd.pd.prepare(int(o.mode), fd.isFile)
+ if err != nil {
+ return 0, err
+ }
+ // Start IO.
+ err = submit(o)
+ switch err {
+ case nil:
+ // IO completed immediately
+ if o.fd.skipSyncNotif {
+ // No completion message will follow, so return immediately.
+ return int(o.qty), nil
+ }
+ // Need to get our completion message anyway.
+ case syscall.ERROR_IO_PENDING:
+ // IO started, and we have to wait for its completion.
+ err = nil
+ default:
+ return 0, err
+ }
+ // Wait for our request to complete.
+ err = fd.pd.wait(int(o.mode), fd.isFile)
+ if err == nil {
+ // All is good. Extract our IO results and return.
+ if o.errno != 0 {
+ err = syscall.Errno(o.errno)
+ // More data available. Return back the size of received data.
+ if err == syscall.ERROR_MORE_DATA || err == windows.WSAEMSGSIZE {
+ return int(o.qty), err
+ }
+ return 0, err
+ }
+ return int(o.qty), nil
+ }
+ // IO is interrupted by "close" or "timeout"
+ netpollErr := err
+ switch netpollErr {
+ case ErrNetClosing, ErrFileClosing, ErrDeadlineExceeded:
+ // will deal with those.
+ default:
+ panic("unexpected runtime.netpoll error: " + netpollErr.Error())
+ }
+ // Cancel our request.
+ err = syscall.CancelIoEx(fd.Sysfd, &o.o)
+	// ERROR_NOT_FOUND is expected if the IO has already completed.
+ if err != nil && err != syscall.ERROR_NOT_FOUND {
+ // TODO(brainman): maybe do something else, but panic.
+ panic(err)
+ }
+ // Wait for cancellation to complete.
+ fd.pd.waitCanceled(int(o.mode))
+ if o.errno != 0 {
+ err = syscall.Errno(o.errno)
+ if err == syscall.ERROR_OPERATION_ABORTED { // IO Canceled
+ err = netpollErr
+ }
+ return 0, err
+ }
+	// We issued a cancellation request, but the IO operation succeeded
+	// before the cancellation request ran. Treat the IO operation as
+	// succeeded (the bytes really were sent/received on the network).
+ return int(o.qty), nil
+}
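+
+// The I/O methods below hand execIO a closure that issues the overlapped
+// call on the pre-filled operation; the read path, for instance, looks
+// roughly like this (a sketch with the surrounding setup omitted):
+//
+//	n, err := execIO(&fd.rop, func(o *operation) error {
+//		return syscall.WSARecv(o.fd.Sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
+//	})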
+
+// FD is a file descriptor. The net and os packages embed this type in
+// a larger type representing a network connection or OS file.
+type FD struct {
+ // Lock sysfd and serialize access to Read and Write methods.
+ fdmu fdMutex
+
+ // System file descriptor. Immutable until Close.
+ Sysfd syscall.Handle
+
+ // Read operation.
+ rop operation
+ // Write operation.
+ wop operation
+
+ // I/O poller.
+ pd pollDesc
+
+ // Used to implement pread/pwrite.
+ l sync.Mutex
+
+ // For console I/O.
+ lastbits []byte // first few bytes of the last incomplete rune in last write
+ readuint16 []uint16 // buffer to hold uint16s obtained with ReadConsole
+ readbyte []byte // buffer to hold decoding of readuint16 from utf16 to utf8
+	readbyteOffset int      // readbyte[readbyteOffset:] is yet to be consumed with file.Read
+
+ // Semaphore signaled when file is closed.
+ csema uint32
+
+ skipSyncNotif bool
+
+ // Whether this is a streaming descriptor, as opposed to a
+ // packet-based descriptor like a UDP socket.
+ IsStream bool
+
+ // Whether a zero byte read indicates EOF. This is false for a
+ // message based socket connection.
+ ZeroReadIsEOF bool
+
+ // Whether this is a file rather than a network socket.
+ isFile bool
+
+ // The kind of this file.
+ kind fileKind
+}
+
+// fileKind describes the kind of file.
+type fileKind byte
+
+const (
+ kindNet fileKind = iota
+ kindFile
+ kindConsole
+ kindPipe
+)
+
+// logInitFD is set by tests to enable file descriptor initialization logging.
+var logInitFD func(net string, fd *FD, err error)
+
+// Init initializes the FD. The Sysfd field should already be set.
+// This can be called multiple times on a single FD.
+// The net argument is a network name from the net package (e.g., "tcp"),
+// or "file" or "console" or "dir".
+// Set pollable to true if fd should be managed by runtime netpoll.
+func (fd *FD) Init(net string, pollable bool) (string, error) {
+ if initErr != nil {
+ return "", initErr
+ }
+
+ switch net {
+ case "file", "dir":
+ fd.kind = kindFile
+ case "console":
+ fd.kind = kindConsole
+ case "pipe":
+ fd.kind = kindPipe
+ case "tcp", "tcp4", "tcp6",
+ "udp", "udp4", "udp6",
+ "ip", "ip4", "ip6",
+ "unix", "unixgram", "unixpacket":
+ fd.kind = kindNet
+ default:
+ return "", errors.New("internal error: unknown network type " + net)
+ }
+ fd.isFile = fd.kind != kindNet
+
+ var err error
+ if pollable {
+ // Only call init for a network socket.
+ // This means that we don't add files to the runtime poller.
+ // Adding files to the runtime poller can confuse matters
+ // if the user is doing their own overlapped I/O.
+ // See issue #21172.
+ //
+ // In general the code below avoids calling the execIO
+ // function for non-network sockets. If some method does
+ // somehow call execIO, then execIO, and therefore the
+ // calling method, will return an error, because
+ // fd.pd.runtimeCtx will be 0.
+ err = fd.pd.init(fd)
+ }
+ if logInitFD != nil {
+ logInitFD(net, fd, err)
+ }
+ if err != nil {
+ return "", err
+ }
+ if pollable && useSetFileCompletionNotificationModes {
+		// We do not use events, so we can always skip them.
+ flags := uint8(syscall.FILE_SKIP_SET_EVENT_ON_HANDLE)
+ switch net {
+ case "tcp", "tcp4", "tcp6",
+ "udp", "udp4", "udp6":
+ flags |= syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS
+ }
+ err := syscall.SetFileCompletionNotificationModes(fd.Sysfd, flags)
+ if err == nil && flags&syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS != 0 {
+ fd.skipSyncNotif = true
+ }
+ }
+ // Disable SIO_UDP_CONNRESET behavior.
+ // http://support.microsoft.com/kb/263823
+ switch net {
+ case "udp", "udp4", "udp6":
+ ret := uint32(0)
+ flag := uint32(0)
+ size := uint32(unsafe.Sizeof(flag))
+ err := syscall.WSAIoctl(fd.Sysfd, syscall.SIO_UDP_CONNRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
+ if err != nil {
+ return "wsaioctl", err
+ }
+ }
+ fd.rop.mode = 'r'
+ fd.wop.mode = 'w'
+ fd.rop.fd = fd
+ fd.wop.fd = fd
+ fd.rop.runtimeCtx = fd.pd.runtimeCtx
+ fd.wop.runtimeCtx = fd.pd.runtimeCtx
+ return "", nil
+}
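+
+// A minimal usage sketch of Init (hypothetical; the real callers live in the
+// net and os packages). It mirrors the pattern used by TestWSASocketConflict
+// in fd_windows_test.go:
+//
+//	fd := &FD{Sysfd: s, IsStream: true, ZeroReadIsEOF: true}
+//	if _, err := fd.Init("tcp", true); err != nil {
+//		syscall.CloseHandle(s)
+//		return err
+//	}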
+
+func (fd *FD) destroy() error {
+ if fd.Sysfd == syscall.InvalidHandle {
+ return syscall.EINVAL
+ }
+	// The poller may want to unregister fd from the readiness notification
+	// mechanism, so this must be executed before CloseFunc.
+ fd.pd.close()
+ var err error
+ switch fd.kind {
+ case kindNet:
+ // The net package uses the CloseFunc variable for testing.
+ err = CloseFunc(fd.Sysfd)
+ default:
+ err = syscall.CloseHandle(fd.Sysfd)
+ }
+ fd.Sysfd = syscall.InvalidHandle
+ runtime_Semrelease(&fd.csema)
+ return err
+}
+
+// Close closes the FD. The underlying file descriptor is closed by
+// the destroy method when there are no remaining references.
+func (fd *FD) Close() error {
+ if !fd.fdmu.increfAndClose() {
+ return errClosing(fd.isFile)
+ }
+ if fd.kind == kindPipe {
+ syscall.CancelIoEx(fd.Sysfd, nil)
+ }
+ // unblock pending reader and writer
+ fd.pd.evict()
+ err := fd.decref()
+ // Wait until the descriptor is closed. If this was the only
+ // reference, it is already closed.
+ runtime_Semacquire(&fd.csema)
+ return err
+}
+
+// Windows ReadFile and WSARecv take a DWORD (uint32) parameter for the buffer length.
+// This prevents us from reading blocks larger than 4GB.
+// See golang.org/issue/26923.
+const maxRW = 1 << 30 // 1GB is large enough and keeps subsequent reads aligned
+
+// Read implements io.Reader.
+func (fd *FD) Read(buf []byte) (int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, err
+ }
+ defer fd.readUnlock()
+
+ if len(buf) > maxRW {
+ buf = buf[:maxRW]
+ }
+
+ var n int
+ var err error
+ if fd.isFile {
+ fd.l.Lock()
+ defer fd.l.Unlock()
+ switch fd.kind {
+ case kindConsole:
+ n, err = fd.readConsole(buf)
+ default:
+ n, err = syscall.Read(fd.Sysfd, buf)
+ if fd.kind == kindPipe && err == syscall.ERROR_OPERATION_ABORTED {
+ // Close uses CancelIoEx to interrupt concurrent I/O for pipes.
+ // If the fd is a pipe and the Read was interrupted by CancelIoEx,
+ // we assume it is interrupted by Close.
+ err = ErrFileClosing
+ }
+ }
+ if err != nil {
+ n = 0
+ }
+ } else {
+ o := &fd.rop
+ o.InitBuf(buf)
+ n, err = execIO(o, func(o *operation) error {
+ return syscall.WSARecv(o.fd.Sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
+ })
+ if race.Enabled {
+ race.Acquire(unsafe.Pointer(&ioSync))
+ }
+ }
+ if len(buf) != 0 {
+ err = fd.eofError(n, err)
+ }
+ return n, err
+}
+
+var ReadConsole = syscall.ReadConsole // changed for testing
+
+// readConsole reads UTF-16 characters from the console File,
+// encodes them into UTF-8, and stores them in buffer b.
+// It returns the number of UTF-8 bytes read and an error, if any.
+func (fd *FD) readConsole(b []byte) (int, error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+
+ if fd.readuint16 == nil {
+ // Note: syscall.ReadConsole fails for very large buffers.
+ // The limit is somewhere around (but not exactly) 16384.
+ // Stay well below.
+ fd.readuint16 = make([]uint16, 0, 10000)
+ fd.readbyte = make([]byte, 0, 4*cap(fd.readuint16))
+ }
+
+ for fd.readbyteOffset >= len(fd.readbyte) {
+ n := cap(fd.readuint16) - len(fd.readuint16)
+ if n > len(b) {
+ n = len(b)
+ }
+ var nw uint32
+ err := ReadConsole(fd.Sysfd, &fd.readuint16[:len(fd.readuint16)+1][len(fd.readuint16)], uint32(n), &nw, nil)
+ if err != nil {
+ return 0, err
+ }
+ uint16s := fd.readuint16[:len(fd.readuint16)+int(nw)]
+ fd.readuint16 = fd.readuint16[:0]
+ buf := fd.readbyte[:0]
+ for i := 0; i < len(uint16s); i++ {
+ r := rune(uint16s[i])
+ if utf16.IsSurrogate(r) {
+ if i+1 == len(uint16s) {
+ if nw > 0 {
+						// Save the first half of the surrogate pair for next time.
+ fd.readuint16 = fd.readuint16[:1]
+ fd.readuint16[0] = uint16(r)
+ break
+ }
+ r = utf8.RuneError
+ } else {
+ r = utf16.DecodeRune(r, rune(uint16s[i+1]))
+ if r != utf8.RuneError {
+ i++
+ }
+ }
+ }
+ buf = utf8.AppendRune(buf, r)
+ }
+ fd.readbyte = buf
+ fd.readbyteOffset = 0
+ if nw == 0 {
+ break
+ }
+ }
+
+ src := fd.readbyte[fd.readbyteOffset:]
+ var i int
+ for i = 0; i < len(src) && i < len(b); i++ {
+ x := src[i]
+ if x == 0x1A { // Ctrl-Z
+ if i == 0 {
+ fd.readbyteOffset++
+ }
+ break
+ }
+ b[i] = x
+ }
+ fd.readbyteOffset += i
+ return i, nil
+}
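+
+// For reference, a standalone sketch of the surrogate-pair decoding performed
+// above (illustrative values only):
+//
+//	hi, lo := rune(0xD83D), rune(0xDE00) // UTF-16 surrogate pair for U+1F600
+//	r := utf16.DecodeRune(hi, lo)        // r == '\U0001F600'
+//	b := utf8.AppendRune(nil, r)         // 4-byte UTF-8 encoding of r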
+
+// Pread emulates the Unix pread system call.
+func (fd *FD) Pread(b []byte, off int64) (int, error) {
+ if fd.kind == kindPipe {
+ // Pread does not work with pipes
+ return 0, syscall.ESPIPE
+ }
+	// Call incref, not readLock, because pread specifies the
+	// offset and is therefore independent of other reads.
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+
+ if len(b) > maxRW {
+ b = b[:maxRW]
+ }
+
+ fd.l.Lock()
+ defer fd.l.Unlock()
+ curoffset, e := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent)
+ if e != nil {
+ return 0, e
+ }
+ defer syscall.Seek(fd.Sysfd, curoffset, io.SeekStart)
+ o := syscall.Overlapped{
+ OffsetHigh: uint32(off >> 32),
+ Offset: uint32(off),
+ }
+ var done uint32
+ e = syscall.ReadFile(fd.Sysfd, b, &done, &o)
+ if e != nil {
+ done = 0
+ if e == syscall.ERROR_HANDLE_EOF {
+ e = io.EOF
+ }
+ }
+ if len(b) != 0 {
+ e = fd.eofError(int(done), e)
+ }
+ return int(done), e
+}
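+
+// For illustration, Pread and Pwrite express the 64-bit offset by splitting it
+// across the two 32-bit fields of syscall.Overlapped (a sketch of the pattern
+// above, using an assumed example offset):
+//
+//	off := int64(5) << 30 // an arbitrary 5 GiB offset
+//	o := syscall.Overlapped{
+//		OffsetHigh: uint32(off >> 32), // high 32 bits: 1
+//		Offset:     uint32(off),       // low 32 bits: 1 << 30
+//	}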
+
+// ReadFrom wraps the recvfrom network call.
+func (fd *FD) ReadFrom(buf []byte) (int, syscall.Sockaddr, error) {
+ if len(buf) == 0 {
+ return 0, nil, nil
+ }
+ if len(buf) > maxRW {
+ buf = buf[:maxRW]
+ }
+ if err := fd.readLock(); err != nil {
+ return 0, nil, err
+ }
+ defer fd.readUnlock()
+ o := &fd.rop
+ o.InitBuf(buf)
+ n, err := execIO(o, func(o *operation) error {
+ if o.rsa == nil {
+ o.rsa = new(syscall.RawSockaddrAny)
+ }
+ o.rsan = int32(unsafe.Sizeof(*o.rsa))
+ return syscall.WSARecvFrom(o.fd.Sysfd, &o.buf, 1, &o.qty, &o.flags, o.rsa, &o.rsan, &o.o, nil)
+ })
+ err = fd.eofError(n, err)
+ if err != nil {
+ return n, nil, err
+ }
+ sa, _ := o.rsa.Sockaddr()
+ return n, sa, nil
+}
+
+// ReadFromInet4 wraps the recvfrom network call for IPv4.
+func (fd *FD) ReadFromInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error) {
+ if len(buf) == 0 {
+ return 0, nil
+ }
+ if len(buf) > maxRW {
+ buf = buf[:maxRW]
+ }
+ if err := fd.readLock(); err != nil {
+ return 0, err
+ }
+ defer fd.readUnlock()
+ o := &fd.rop
+ o.InitBuf(buf)
+ n, err := execIO(o, func(o *operation) error {
+ if o.rsa == nil {
+ o.rsa = new(syscall.RawSockaddrAny)
+ }
+ o.rsan = int32(unsafe.Sizeof(*o.rsa))
+ return syscall.WSARecvFrom(o.fd.Sysfd, &o.buf, 1, &o.qty, &o.flags, o.rsa, &o.rsan, &o.o, nil)
+ })
+ err = fd.eofError(n, err)
+ if err != nil {
+ return n, err
+ }
+ rawToSockaddrInet4(o.rsa, sa4)
+ return n, err
+}
+
+// ReadFromInet6 wraps the recvfrom network call for IPv6.
+func (fd *FD) ReadFromInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) {
+ if len(buf) == 0 {
+ return 0, nil
+ }
+ if len(buf) > maxRW {
+ buf = buf[:maxRW]
+ }
+ if err := fd.readLock(); err != nil {
+ return 0, err
+ }
+ defer fd.readUnlock()
+ o := &fd.rop
+ o.InitBuf(buf)
+ n, err := execIO(o, func(o *operation) error {
+ if o.rsa == nil {
+ o.rsa = new(syscall.RawSockaddrAny)
+ }
+ o.rsan = int32(unsafe.Sizeof(*o.rsa))
+ return syscall.WSARecvFrom(o.fd.Sysfd, &o.buf, 1, &o.qty, &o.flags, o.rsa, &o.rsan, &o.o, nil)
+ })
+ err = fd.eofError(n, err)
+ if err != nil {
+ return n, err
+ }
+ rawToSockaddrInet6(o.rsa, sa6)
+ return n, err
+}
+
+// Write implements io.Writer.
+func (fd *FD) Write(buf []byte) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ if fd.isFile {
+ fd.l.Lock()
+ defer fd.l.Unlock()
+ }
+
+ ntotal := 0
+ for len(buf) > 0 {
+ b := buf
+ if len(b) > maxRW {
+ b = b[:maxRW]
+ }
+ var n int
+ var err error
+ if fd.isFile {
+ switch fd.kind {
+ case kindConsole:
+ n, err = fd.writeConsole(b)
+ default:
+ n, err = syscall.Write(fd.Sysfd, b)
+ if fd.kind == kindPipe && err == syscall.ERROR_OPERATION_ABORTED {
+ // Close uses CancelIoEx to interrupt concurrent I/O for pipes.
+ // If the fd is a pipe and the Write was interrupted by CancelIoEx,
+ // we assume it is interrupted by Close.
+ err = ErrFileClosing
+ }
+ }
+ if err != nil {
+ n = 0
+ }
+ } else {
+ if race.Enabled {
+ race.ReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ o := &fd.wop
+ o.InitBuf(b)
+ n, err = execIO(o, func(o *operation) error {
+ return syscall.WSASend(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, &o.o, nil)
+ })
+ }
+ ntotal += n
+ if err != nil {
+ return ntotal, err
+ }
+ buf = buf[n:]
+ }
+ return ntotal, nil
+}
+
+// writeConsole writes len(b) bytes to the console File.
+// It returns the number of bytes written and an error, if any.
+func (fd *FD) writeConsole(b []byte) (int, error) {
+ n := len(b)
+ runes := make([]rune, 0, 256)
+ if len(fd.lastbits) > 0 {
+ b = append(fd.lastbits, b...)
+		fd.lastbits = nil
+	}
+ for len(b) >= utf8.UTFMax || utf8.FullRune(b) {
+ r, l := utf8.DecodeRune(b)
+ runes = append(runes, r)
+ b = b[l:]
+ }
+ if len(b) > 0 {
+ fd.lastbits = make([]byte, len(b))
+ copy(fd.lastbits, b)
+ }
+	// syscall.WriteConsole seems to fail if given a large buffer.
+ // So limit the buffer to 16000 characters. This number was
+ // discovered by experimenting with syscall.WriteConsole.
+ const maxWrite = 16000
+ for len(runes) > 0 {
+ m := len(runes)
+ if m > maxWrite {
+ m = maxWrite
+ }
+ chunk := runes[:m]
+ runes = runes[m:]
+ uint16s := utf16.Encode(chunk)
+ for len(uint16s) > 0 {
+ var written uint32
+ err := syscall.WriteConsole(fd.Sysfd, &uint16s[0], uint32(len(uint16s)), &written, nil)
+ if err != nil {
+ return 0, err
+ }
+ uint16s = uint16s[written:]
+ }
+ }
+ return n, nil
+}
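+
+// A minimal sketch of the partial-rune handling above (hypothetical byte
+// values): bytes that do not yet form a complete rune are kept in fd.lastbits
+// and prepended to the next Write.
+//
+//	b := []byte{0xE4, 0xB8}    // first two bytes of the 3-byte rune '中'
+//	utf8.FullRune(b)           // false: these bytes would be saved in lastbits
+//	b = append(b, 0xAD)        // a later Write supplies the final byte
+//	r, _ := utf8.DecodeRune(b) // r == '中'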
+
+// Pwrite emulates the Unix pwrite system call.
+func (fd *FD) Pwrite(buf []byte, off int64) (int, error) {
+ if fd.kind == kindPipe {
+ // Pwrite does not work with pipes
+ return 0, syscall.ESPIPE
+ }
+	// Call incref, not writeLock, because pwrite specifies the
+	// offset and is therefore independent of other writes.
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+
+ fd.l.Lock()
+ defer fd.l.Unlock()
+ curoffset, e := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent)
+ if e != nil {
+ return 0, e
+ }
+ defer syscall.Seek(fd.Sysfd, curoffset, io.SeekStart)
+
+ ntotal := 0
+ for len(buf) > 0 {
+ b := buf
+ if len(b) > maxRW {
+ b = b[:maxRW]
+ }
+ var n uint32
+ o := syscall.Overlapped{
+ OffsetHigh: uint32(off >> 32),
+ Offset: uint32(off),
+ }
+ e = syscall.WriteFile(fd.Sysfd, b, &n, &o)
+ ntotal += int(n)
+ if e != nil {
+ return ntotal, e
+ }
+ buf = buf[n:]
+ off += int64(n)
+ }
+ return ntotal, nil
+}
+
+// Writev emulates the Unix writev system call.
+func (fd *FD) Writev(buf *[][]byte) (int64, error) {
+ if len(*buf) == 0 {
+ return 0, nil
+ }
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ if race.Enabled {
+ race.ReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ o := &fd.wop
+ o.InitBufs(buf)
+ n, err := execIO(o, func(o *operation) error {
+ return syscall.WSASend(o.fd.Sysfd, &o.bufs[0], uint32(len(o.bufs)), &o.qty, 0, &o.o, nil)
+ })
+ o.ClearBufs()
+ TestHookDidWritev(n)
+ consume(buf, int64(n))
+ return int64(n), err
+}
+
+// WriteTo wraps the sendto network call.
+func (fd *FD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+
+ if len(buf) == 0 {
+ // handle zero-byte payload
+ o := &fd.wop
+ o.InitBuf(buf)
+ o.sa = sa
+ n, err := execIO(o, func(o *operation) error {
+ return syscall.WSASendto(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, o.sa, &o.o, nil)
+ })
+ return n, err
+ }
+
+ ntotal := 0
+ for len(buf) > 0 {
+ b := buf
+ if len(b) > maxRW {
+ b = b[:maxRW]
+ }
+ o := &fd.wop
+ o.InitBuf(b)
+ o.sa = sa
+ n, err := execIO(o, func(o *operation) error {
+ return syscall.WSASendto(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, o.sa, &o.o, nil)
+ })
+ ntotal += int(n)
+ if err != nil {
+ return ntotal, err
+ }
+ buf = buf[n:]
+ }
+ return ntotal, nil
+}
+
+// WriteToInet4 is WriteTo, specialized for syscall.SockaddrInet4.
+func (fd *FD) WriteToInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+
+ if len(buf) == 0 {
+ // handle zero-byte payload
+ o := &fd.wop
+ o.InitBuf(buf)
+ n, err := execIO(o, func(o *operation) error {
+ return windows.WSASendtoInet4(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, sa4, &o.o, nil)
+ })
+ return n, err
+ }
+
+ ntotal := 0
+ for len(buf) > 0 {
+ b := buf
+ if len(b) > maxRW {
+ b = b[:maxRW]
+ }
+ o := &fd.wop
+ o.InitBuf(b)
+ n, err := execIO(o, func(o *operation) error {
+ return windows.WSASendtoInet4(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, sa4, &o.o, nil)
+ })
+ ntotal += int(n)
+ if err != nil {
+ return ntotal, err
+ }
+ buf = buf[n:]
+ }
+ return ntotal, nil
+}
+
+// WriteToInet6 is WriteTo, specialized for syscall.SockaddrInet6.
+func (fd *FD) WriteToInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+
+ if len(buf) == 0 {
+ // handle zero-byte payload
+ o := &fd.wop
+ o.InitBuf(buf)
+ n, err := execIO(o, func(o *operation) error {
+ return windows.WSASendtoInet6(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, sa6, &o.o, nil)
+ })
+ return n, err
+ }
+
+ ntotal := 0
+ for len(buf) > 0 {
+ b := buf
+ if len(b) > maxRW {
+ b = b[:maxRW]
+ }
+ o := &fd.wop
+ o.InitBuf(b)
+ n, err := execIO(o, func(o *operation) error {
+ return windows.WSASendtoInet6(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, sa6, &o.o, nil)
+ })
+ ntotal += int(n)
+ if err != nil {
+ return ntotal, err
+ }
+ buf = buf[n:]
+ }
+ return ntotal, nil
+}
+
+// Call ConnectEx. This doesn't need any locking, since it is only
+// called when the descriptor is first created. This is here rather
+// than in the net package so that it can use fd.wop.
+func (fd *FD) ConnectEx(ra syscall.Sockaddr) error {
+ o := &fd.wop
+ o.sa = ra
+ _, err := execIO(o, func(o *operation) error {
+ return ConnectExFunc(o.fd.Sysfd, o.sa, nil, 0, nil, &o.o)
+ })
+ return err
+}
+
+func (fd *FD) acceptOne(s syscall.Handle, rawsa []syscall.RawSockaddrAny, o *operation) (string, error) {
+ // Submit accept request.
+ o.handle = s
+ o.rsan = int32(unsafe.Sizeof(rawsa[0]))
+ _, err := execIO(o, func(o *operation) error {
+ return AcceptFunc(o.fd.Sysfd, o.handle, (*byte)(unsafe.Pointer(&rawsa[0])), 0, uint32(o.rsan), uint32(o.rsan), &o.qty, &o.o)
+ })
+ if err != nil {
+ CloseFunc(s)
+ return "acceptex", err
+ }
+
+ // Inherit properties of the listening socket.
+ err = syscall.Setsockopt(s, syscall.SOL_SOCKET, syscall.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&fd.Sysfd)), int32(unsafe.Sizeof(fd.Sysfd)))
+ if err != nil {
+ CloseFunc(s)
+ return "setsockopt", err
+ }
+
+ return "", nil
+}
+
+// Accept handles accepting a socket. The sysSocket parameter is used
+// to allocate the net socket.
+func (fd *FD) Accept(sysSocket func() (syscall.Handle, error)) (syscall.Handle, []syscall.RawSockaddrAny, uint32, string, error) {
+ if err := fd.readLock(); err != nil {
+ return syscall.InvalidHandle, nil, 0, "", err
+ }
+ defer fd.readUnlock()
+
+ o := &fd.rop
+ var rawsa [2]syscall.RawSockaddrAny
+ for {
+ s, err := sysSocket()
+ if err != nil {
+ return syscall.InvalidHandle, nil, 0, "", err
+ }
+
+ errcall, err := fd.acceptOne(s, rawsa[:], o)
+ if err == nil {
+ return s, rawsa[:], uint32(o.rsan), "", nil
+ }
+
+		// Sometimes we see WSAECONNRESET or ERROR_NETNAME_DELETED
+		// returned here. These happen if a connection reset is received
+		// before AcceptEx could complete. These errors relate to the new
+		// connection, not to AcceptEx, so ignore the broken connection
+		// and try AcceptEx again for more connections.
+ errno, ok := err.(syscall.Errno)
+ if !ok {
+ return syscall.InvalidHandle, nil, 0, errcall, err
+ }
+ switch errno {
+ case syscall.ERROR_NETNAME_DELETED, syscall.WSAECONNRESET:
+ // ignore these and try again
+ default:
+ return syscall.InvalidHandle, nil, 0, errcall, err
+ }
+ }
+}
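+
+// A hypothetical usage sketch of Accept (the real caller is the net package):
+// the sysSocket callback allocates the handle that AcceptEx completes into.
+//
+//	h, rawsa, rsaLen, errcall, err := fd.Accept(func() (syscall.Handle, error) {
+//		return windows.WSASocket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP, nil, 0, windows.WSA_FLAG_OVERLAPPED)
+//	})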
+
+// Seek wraps syscall.Seek.
+func (fd *FD) Seek(offset int64, whence int) (int64, error) {
+ if fd.kind == kindPipe {
+ return 0, syscall.ESPIPE
+ }
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+
+ fd.l.Lock()
+ defer fd.l.Unlock()
+
+ return syscall.Seek(fd.Sysfd, offset, whence)
+}
+
+// Fchmod updates syscall.ByHandleFileInformation.FileAttributes when needed.
+func (fd *FD) Fchmod(mode uint32) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+
+ var d syscall.ByHandleFileInformation
+ if err := syscall.GetFileInformationByHandle(fd.Sysfd, &d); err != nil {
+ return err
+ }
+ attrs := d.FileAttributes
+ if mode&syscall.S_IWRITE != 0 {
+ attrs &^= syscall.FILE_ATTRIBUTE_READONLY
+ } else {
+ attrs |= syscall.FILE_ATTRIBUTE_READONLY
+ }
+ if attrs == d.FileAttributes {
+ return nil
+ }
+
+ var du windows.FILE_BASIC_INFO
+ du.FileAttributes = attrs
+ return windows.SetFileInformationByHandle(fd.Sysfd, windows.FileBasicInfo, unsafe.Pointer(&du), uint32(unsafe.Sizeof(du)))
+}
+
+// Fchdir wraps syscall.Fchdir.
+func (fd *FD) Fchdir() error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.Fchdir(fd.Sysfd)
+}
+
+// GetFileType wraps syscall.GetFileType.
+func (fd *FD) GetFileType() (uint32, error) {
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+ return syscall.GetFileType(fd.Sysfd)
+}
+
+// GetFileInformationByHandle wraps GetFileInformationByHandle.
+func (fd *FD) GetFileInformationByHandle(data *syscall.ByHandleFileInformation) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.GetFileInformationByHandle(fd.Sysfd, data)
+}
+
+// RawRead invokes the user-defined function f for a read operation.
+func (fd *FD) RawRead(f func(uintptr) bool) error {
+ if err := fd.readLock(); err != nil {
+ return err
+ }
+ defer fd.readUnlock()
+ for {
+ if f(uintptr(fd.Sysfd)) {
+ return nil
+ }
+
+ // Use a zero-byte read as a way to get notified when this
+ // socket is readable. h/t https://stackoverflow.com/a/42019668/332798
+ o := &fd.rop
+ o.InitBuf(nil)
+ if !fd.IsStream {
+ o.flags |= windows.MSG_PEEK
+ }
+ _, err := execIO(o, func(o *operation) error {
+ return syscall.WSARecv(o.fd.Sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
+ })
+ if err == windows.WSAEMSGSIZE {
+ // expected with a 0-byte peek, ignore.
+ } else if err != nil {
+ return err
+ }
+ }
+}
+
+// RawWrite invokes the user-defined function f for a write operation.
+func (fd *FD) RawWrite(f func(uintptr) bool) error {
+ if err := fd.writeLock(); err != nil {
+ return err
+ }
+ defer fd.writeUnlock()
+
+ if f(uintptr(fd.Sysfd)) {
+ return nil
+ }
+
+ // TODO(tmm1): find a way to detect socket writability
+ return syscall.EWINDOWS
+}
+
+func sockaddrInet4ToRaw(rsa *syscall.RawSockaddrAny, sa *syscall.SockaddrInet4) int32 {
+ *rsa = syscall.RawSockaddrAny{}
+ raw := (*syscall.RawSockaddrInet4)(unsafe.Pointer(rsa))
+ raw.Family = syscall.AF_INET
+ p := (*[2]byte)(unsafe.Pointer(&raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ raw.Addr = sa.Addr
+ return int32(unsafe.Sizeof(*raw))
+}
+
+func sockaddrInet6ToRaw(rsa *syscall.RawSockaddrAny, sa *syscall.SockaddrInet6) int32 {
+ *rsa = syscall.RawSockaddrAny{}
+ raw := (*syscall.RawSockaddrInet6)(unsafe.Pointer(rsa))
+ raw.Family = syscall.AF_INET6
+ p := (*[2]byte)(unsafe.Pointer(&raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ raw.Scope_id = sa.ZoneId
+ raw.Addr = sa.Addr
+ return int32(unsafe.Sizeof(*raw))
+}
+
+func rawToSockaddrInet4(rsa *syscall.RawSockaddrAny, sa *syscall.SockaddrInet4) {
+ pp := (*syscall.RawSockaddrInet4)(unsafe.Pointer(rsa))
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ sa.Addr = pp.Addr
+}
+
+func rawToSockaddrInet6(rsa *syscall.RawSockaddrAny, sa *syscall.SockaddrInet6) {
+ pp := (*syscall.RawSockaddrInet6)(unsafe.Pointer(rsa))
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ sa.ZoneId = pp.Scope_id
+ sa.Addr = pp.Addr
+}
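+
+// For illustration, the helpers above store the 16-bit port in network
+// (big-endian) byte order and read it back the same way (standalone sketch
+// with an assumed port value):
+//
+//	port := 8080 // 0x1f90
+//	var p [2]byte
+//	p[0] = byte(port >> 8)           // 0x1f
+//	p[1] = byte(port)                // 0x90
+//	back := int(p[0])<<8 + int(p[1]) // 8080 again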
+
+func sockaddrToRaw(rsa *syscall.RawSockaddrAny, sa syscall.Sockaddr) (int32, error) {
+ switch sa := sa.(type) {
+ case *syscall.SockaddrInet4:
+ sz := sockaddrInet4ToRaw(rsa, sa)
+ return sz, nil
+ case *syscall.SockaddrInet6:
+ sz := sockaddrInet6ToRaw(rsa, sa)
+ return sz, nil
+ default:
+ return 0, syscall.EWINDOWS
+ }
+}
+
+// ReadMsg wraps the WSARecvMsg network call.
+func (fd *FD) ReadMsg(p []byte, oob []byte, flags int) (int, int, int, syscall.Sockaddr, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, 0, 0, nil, err
+ }
+ defer fd.readUnlock()
+
+ if len(p) > maxRW {
+ p = p[:maxRW]
+ }
+
+ o := &fd.rop
+ o.InitMsg(p, oob)
+ if o.rsa == nil {
+ o.rsa = new(syscall.RawSockaddrAny)
+ }
+ o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ o.msg.Namelen = int32(unsafe.Sizeof(*o.rsa))
+ o.msg.Flags = uint32(flags)
+ n, err := execIO(o, func(o *operation) error {
+ return windows.WSARecvMsg(o.fd.Sysfd, &o.msg, &o.qty, &o.o, nil)
+ })
+ err = fd.eofError(n, err)
+ var sa syscall.Sockaddr
+ if err == nil {
+ sa, err = o.rsa.Sockaddr()
+ }
+ return n, int(o.msg.Control.Len), int(o.msg.Flags), sa, err
+}
+
+// ReadMsgInet4 is ReadMsg, but specialized to return a syscall.SockaddrInet4.
+func (fd *FD) ReadMsgInet4(p []byte, oob []byte, flags int, sa4 *syscall.SockaddrInet4) (int, int, int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, 0, 0, err
+ }
+ defer fd.readUnlock()
+
+ if len(p) > maxRW {
+ p = p[:maxRW]
+ }
+
+ o := &fd.rop
+ o.InitMsg(p, oob)
+ if o.rsa == nil {
+ o.rsa = new(syscall.RawSockaddrAny)
+ }
+ o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ o.msg.Namelen = int32(unsafe.Sizeof(*o.rsa))
+ o.msg.Flags = uint32(flags)
+ n, err := execIO(o, func(o *operation) error {
+ return windows.WSARecvMsg(o.fd.Sysfd, &o.msg, &o.qty, &o.o, nil)
+ })
+ err = fd.eofError(n, err)
+ if err == nil {
+ rawToSockaddrInet4(o.rsa, sa4)
+ }
+ return n, int(o.msg.Control.Len), int(o.msg.Flags), err
+}
+
+// ReadMsgInet6 is ReadMsg, but specialized to return a syscall.SockaddrInet6.
+func (fd *FD) ReadMsgInet6(p []byte, oob []byte, flags int, sa6 *syscall.SockaddrInet6) (int, int, int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, 0, 0, err
+ }
+ defer fd.readUnlock()
+
+ if len(p) > maxRW {
+ p = p[:maxRW]
+ }
+
+ o := &fd.rop
+ o.InitMsg(p, oob)
+ if o.rsa == nil {
+ o.rsa = new(syscall.RawSockaddrAny)
+ }
+ o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ o.msg.Namelen = int32(unsafe.Sizeof(*o.rsa))
+ o.msg.Flags = uint32(flags)
+ n, err := execIO(o, func(o *operation) error {
+ return windows.WSARecvMsg(o.fd.Sysfd, &o.msg, &o.qty, &o.o, nil)
+ })
+ err = fd.eofError(n, err)
+ if err == nil {
+ rawToSockaddrInet6(o.rsa, sa6)
+ }
+ return n, int(o.msg.Control.Len), int(o.msg.Flags), err
+}
+
+// WriteMsg wraps the WSASendMsg network call.
+func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, error) {
+ if len(p) > maxRW {
+ return 0, 0, errors.New("packet is too large (only 1GB is allowed)")
+ }
+
+ if err := fd.writeLock(); err != nil {
+ return 0, 0, err
+ }
+ defer fd.writeUnlock()
+
+ o := &fd.wop
+ o.InitMsg(p, oob)
+ if sa != nil {
+ if o.rsa == nil {
+ o.rsa = new(syscall.RawSockaddrAny)
+ }
+ len, err := sockaddrToRaw(o.rsa, sa)
+ if err != nil {
+ return 0, 0, err
+ }
+ o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ o.msg.Namelen = len
+ }
+ n, err := execIO(o, func(o *operation) error {
+ return windows.WSASendMsg(o.fd.Sysfd, &o.msg, 0, &o.qty, &o.o, nil)
+ })
+ return n, int(o.msg.Control.Len), err
+}
+
+// WriteMsgInet4 is WriteMsg specialized for syscall.SockaddrInet4.
+func (fd *FD) WriteMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (int, int, error) {
+ if len(p) > maxRW {
+ return 0, 0, errors.New("packet is too large (only 1GB is allowed)")
+ }
+
+ if err := fd.writeLock(); err != nil {
+ return 0, 0, err
+ }
+ defer fd.writeUnlock()
+
+ o := &fd.wop
+ o.InitMsg(p, oob)
+ if o.rsa == nil {
+ o.rsa = new(syscall.RawSockaddrAny)
+ }
+ len := sockaddrInet4ToRaw(o.rsa, sa)
+ o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ o.msg.Namelen = len
+ n, err := execIO(o, func(o *operation) error {
+ return windows.WSASendMsg(o.fd.Sysfd, &o.msg, 0, &o.qty, &o.o, nil)
+ })
+ return n, int(o.msg.Control.Len), err
+}
+
+// WriteMsgInet6 is WriteMsg specialized for syscall.SockaddrInet6.
+func (fd *FD) WriteMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (int, int, error) {
+ if len(p) > maxRW {
+ return 0, 0, errors.New("packet is too large (only 1GB is allowed)")
+ }
+
+ if err := fd.writeLock(); err != nil {
+ return 0, 0, err
+ }
+ defer fd.writeUnlock()
+
+ o := &fd.wop
+ o.InitMsg(p, oob)
+ if o.rsa == nil {
+ o.rsa = new(syscall.RawSockaddrAny)
+ }
+ len := sockaddrInet6ToRaw(o.rsa, sa)
+ o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ o.msg.Namelen = len
+ n, err := execIO(o, func(o *operation) error {
+ return windows.WSASendMsg(o.fd.Sysfd, &o.msg, 0, &o.qty, &o.o, nil)
+ })
+ return n, int(o.msg.Control.Len), err
+}
diff --git a/src/internal/poll/fd_windows_test.go b/src/internal/poll/fd_windows_test.go
new file mode 100644
index 0000000..f0697a0
--- /dev/null
+++ b/src/internal/poll/fd_windows_test.go
@@ -0,0 +1,198 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll_test
+
+import (
+ "errors"
+ "fmt"
+ "internal/poll"
+ "internal/syscall/windows"
+ "os"
+ "sync"
+ "syscall"
+ "testing"
+ "unsafe"
+)
+
+type loggedFD struct {
+ Net string
+ FD *poll.FD
+ Err error
+}
+
+var (
+ logMu sync.Mutex
+ loggedFDs map[syscall.Handle]*loggedFD
+)
+
+func logFD(net string, fd *poll.FD, err error) {
+ logMu.Lock()
+ defer logMu.Unlock()
+
+ loggedFDs[fd.Sysfd] = &loggedFD{
+ Net: net,
+ FD: fd,
+ Err: err,
+ }
+}
+
+func init() {
+ loggedFDs = make(map[syscall.Handle]*loggedFD)
+ *poll.LogInitFD = logFD
+}
+
+func findLoggedFD(h syscall.Handle) (lfd *loggedFD, found bool) {
+ logMu.Lock()
+ defer logMu.Unlock()
+
+ lfd, found = loggedFDs[h]
+ return lfd, found
+}
+
+// checkFileIsNotPartOfNetpoll verifies that f is not managed by netpoll.
+// It returns an error if the check fails.
+func checkFileIsNotPartOfNetpoll(f *os.File) error {
+ lfd, found := findLoggedFD(syscall.Handle(f.Fd()))
+ if !found {
+ return fmt.Errorf("%v fd=%v: is not found in the log", f.Name(), f.Fd())
+ }
+ if lfd.FD.IsPartOfNetpoll() {
+ return fmt.Errorf("%v fd=%v: is part of netpoll, but should not be (logged: net=%v err=%v)", f.Name(), f.Fd(), lfd.Net, lfd.Err)
+ }
+ return nil
+}
+
+func TestFileFdsAreInitialised(t *testing.T) {
+ exe, err := os.Executable()
+ if err != nil {
+ t.Fatal(err)
+ }
+ f, err := os.Open(exe)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ err = checkFileIsNotPartOfNetpoll(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSerialFdsAreInitialised(t *testing.T) {
+ for _, name := range []string{"COM1", "COM2", "COM3", "COM4"} {
+ t.Run(name, func(t *testing.T) {
+ h, err := syscall.CreateFile(syscall.StringToUTF16Ptr(name),
+ syscall.GENERIC_READ|syscall.GENERIC_WRITE,
+ 0,
+ nil,
+ syscall.OPEN_EXISTING,
+ syscall.FILE_ATTRIBUTE_NORMAL|syscall.FILE_FLAG_OVERLAPPED,
+ 0)
+ if err != nil {
+ if errno, ok := err.(syscall.Errno); ok {
+ switch errno {
+ case syscall.ERROR_FILE_NOT_FOUND,
+ syscall.ERROR_ACCESS_DENIED:
+ t.Log("Skipping: ", err)
+ return
+ }
+ }
+ t.Fatal(err)
+ }
+ f := os.NewFile(uintptr(h), name)
+ defer f.Close()
+
+ err = checkFileIsNotPartOfNetpoll(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
+
+func TestWSASocketConflict(t *testing.T) {
+ s, err := windows.WSASocket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP, nil, 0, windows.WSA_FLAG_OVERLAPPED)
+ if err != nil {
+ t.Fatal(err)
+ }
+ fd := poll.FD{Sysfd: s, IsStream: true, ZeroReadIsEOF: true}
+ _, err = fd.Init("tcp", true)
+ if err != nil {
+ syscall.CloseHandle(s)
+ t.Fatal(err)
+ }
+ defer fd.Close()
+
+ const SIO_TCP_INFO = syscall.IOC_INOUT | syscall.IOC_VENDOR | 39
+ inbuf := uint32(0)
+ var outbuf _TCP_INFO_v0
+ cbbr := uint32(0)
+
+ var ovs []syscall.Overlapped = make([]syscall.Overlapped, 2)
+ // Attempt to exercise behavior where a user-owned syscall.Overlapped
+ // induces an invalid pointer dereference in the Windows-specific version
+ // of runtime.netpoll.
+ ovs[1].Internal -= 1
+
+ // Create an event so that we can efficiently wait for completion
+ // of a requested overlapped I/O operation.
+ ovs[0].HEvent, _ = windows.CreateEvent(nil, 0, 0, nil)
+ if ovs[0].HEvent == 0 {
+ t.Fatalf("could not create the event!")
+ }
+
+ // Set the low bit of the Event Handle so that the completion
+ // of the overlapped I/O event will not trigger a completion event
+ // on any I/O completion port associated with the handle.
+ ovs[0].HEvent |= 0x1
+
+ if err = fd.WSAIoctl(
+ SIO_TCP_INFO,
+ (*byte)(unsafe.Pointer(&inbuf)),
+ uint32(unsafe.Sizeof(inbuf)),
+ (*byte)(unsafe.Pointer(&outbuf)),
+ uint32(unsafe.Sizeof(outbuf)),
+ &cbbr,
+ &ovs[0],
+ 0,
+ ); err != nil && !errors.Is(err, syscall.ERROR_IO_PENDING) {
+ t.Fatalf("could not perform the WSAIoctl: %v", err)
+ }
+
+ if err != nil && errors.Is(err, syscall.ERROR_IO_PENDING) {
+ // It is possible that the overlapped I/O operation completed
+ // immediately so there is no need to wait for it to complete.
+ if res, err := syscall.WaitForSingleObject(ovs[0].HEvent, syscall.INFINITE); res != 0 {
+ t.Fatalf("waiting for the completion of the overlapped IO failed: %v", err)
+ }
+ }
+
+ if err = syscall.CloseHandle(ovs[0].HEvent); err != nil {
+ t.Fatalf("could not close the event handle: %v", err)
+ }
+}
+
+type _TCP_INFO_v0 struct {
+ State uint32
+ Mss uint32
+ ConnectionTimeMs uint64
+ TimestampsEnabled bool
+ RttUs uint32
+ MinRttUs uint32
+ BytesInFlight uint32
+ Cwnd uint32
+ SndWnd uint32
+ RcvWnd uint32
+ RcvBuf uint32
+ BytesOut uint64
+ BytesIn uint64
+ BytesReordered uint32
+ BytesRetrans uint32
+ FastRetrans uint32
+ DupAcksIn uint32
+ TimeoutEpisodes uint32
+ SynRetrans uint8
+}
diff --git a/src/internal/poll/fd_writev_libc.go b/src/internal/poll/fd_writev_libc.go
new file mode 100644
index 0000000..0a60473
--- /dev/null
+++ b/src/internal/poll/fd_writev_libc.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || (openbsd && !mips64) || solaris
+
+package poll
+
+import (
+ "syscall"
+ _ "unsafe" // for go:linkname
+)
+
+//go:linkname writev syscall.writev
+func writev(fd int, iovecs []syscall.Iovec) (uintptr, error)
diff --git a/src/internal/poll/fd_writev_unix.go b/src/internal/poll/fd_writev_unix.go
new file mode 100644
index 0000000..005638b
--- /dev/null
+++ b/src/internal/poll/fd_writev_unix.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || linux || netbsd || (openbsd && mips64)
+
+package poll
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func writev(fd int, iovecs []syscall.Iovec) (uintptr, error) {
+ var (
+ r uintptr
+ e syscall.Errno
+ )
+ for {
+ r, _, e = syscall.Syscall(syscall.SYS_WRITEV, uintptr(fd), uintptr(unsafe.Pointer(&iovecs[0])), uintptr(len(iovecs)))
+ if e != syscall.EINTR {
+ break
+ }
+ }
+ if e != 0 {
+ return r, e
+ }
+ return r, nil
+}
diff --git a/src/internal/poll/file_plan9.go b/src/internal/poll/file_plan9.go
new file mode 100644
index 0000000..57dc0c6
--- /dev/null
+++ b/src/internal/poll/file_plan9.go
@@ -0,0 +1,42 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+// Expose fdMutex for use by the os package on Plan 9.
+// On Plan 9 we don't want to use async I/O for file operations,
+// but we still want the locking semantics that fdMutex provides.
+
+// FDMutex is an exported fdMutex, only for Plan 9.
+type FDMutex struct {
+ fdmu fdMutex
+}
+
+func (fdmu *FDMutex) Incref() bool {
+ return fdmu.fdmu.incref()
+}
+
+func (fdmu *FDMutex) Decref() bool {
+ return fdmu.fdmu.decref()
+}
+
+func (fdmu *FDMutex) IncrefAndClose() bool {
+ return fdmu.fdmu.increfAndClose()
+}
+
+func (fdmu *FDMutex) ReadLock() bool {
+ return fdmu.fdmu.rwlock(true)
+}
+
+func (fdmu *FDMutex) ReadUnlock() bool {
+ return fdmu.fdmu.rwunlock(true)
+}
+
+func (fdmu *FDMutex) WriteLock() bool {
+ return fdmu.fdmu.rwlock(false)
+}
+
+func (fdmu *FDMutex) WriteUnlock() bool {
+ return fdmu.fdmu.rwunlock(false)
+}
diff --git a/src/internal/poll/hook_cloexec.go b/src/internal/poll/hook_cloexec.go
new file mode 100644
index 0000000..5b3cdce
--- /dev/null
+++ b/src/internal/poll/hook_cloexec.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris
+
+package poll
+
+import "syscall"
+
+// Accept4Func is used to hook the accept4 call.
+var Accept4Func func(int, int) (int, syscall.Sockaddr, error) = syscall.Accept4
diff --git a/src/internal/poll/hook_unix.go b/src/internal/poll/hook_unix.go
new file mode 100644
index 0000000..b3f4f9e
--- /dev/null
+++ b/src/internal/poll/hook_unix.go
@@ -0,0 +1,15 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || wasip1
+
+package poll
+
+import "syscall"
+
+// CloseFunc is used to hook the close call.
+var CloseFunc func(int) error = syscall.Close
+
+// AcceptFunc is used to hook the accept call.
+var AcceptFunc func(int) (int, syscall.Sockaddr, error) = syscall.Accept
diff --git a/src/internal/poll/hook_windows.go b/src/internal/poll/hook_windows.go
new file mode 100644
index 0000000..0bd950e
--- /dev/null
+++ b/src/internal/poll/hook_windows.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import "syscall"
+
+// CloseFunc is used to hook the close call.
+var CloseFunc func(syscall.Handle) error = syscall.Closesocket
+
+// AcceptFunc is used to hook the accept call.
+var AcceptFunc func(syscall.Handle, syscall.Handle, *byte, uint32, uint32, uint32, *uint32, *syscall.Overlapped) error = syscall.AcceptEx
+
+// ConnectExFunc is used to hook the ConnectEx call.
+var ConnectExFunc func(syscall.Handle, syscall.Sockaddr, *byte, uint32, *uint32, *syscall.Overlapped) error = syscall.ConnectEx
diff --git a/src/internal/poll/iovec_solaris.go b/src/internal/poll/iovec_solaris.go
new file mode 100644
index 0000000..e68f833
--- /dev/null
+++ b/src/internal/poll/iovec_solaris.go
@@ -0,0 +1,14 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func newIovecWithBase(base *byte) syscall.Iovec {
+ return syscall.Iovec{Base: (*int8)(unsafe.Pointer(base))}
+}
diff --git a/src/internal/poll/iovec_unix.go b/src/internal/poll/iovec_unix.go
new file mode 100644
index 0000000..3f2833e
--- /dev/null
+++ b/src/internal/poll/iovec_unix.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd
+
+package poll
+
+import "syscall"
+
+func newIovecWithBase(base *byte) syscall.Iovec {
+ return syscall.Iovec{Base: base}
+}
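+
+// A minimal sketch (hypothetical, mirroring the package's writev path) of how
+// an iovec slice could be assembled from byte slices b1 and b2 using the
+// helper above:
+//
+//	var iovecs []syscall.Iovec
+//	for _, chunk := range [][]byte{b1, b2} {
+//		iov := newIovecWithBase(&chunk[0])
+//		iov.SetLen(len(chunk))
+//		iovecs = append(iovecs, iov)
+//	}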
diff --git a/src/internal/poll/read_test.go b/src/internal/poll/read_test.go
new file mode 100644
index 0000000..598a52e
--- /dev/null
+++ b/src/internal/poll/read_test.go
@@ -0,0 +1,61 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll_test
+
+import (
+ "os"
+ "runtime"
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestRead(t *testing.T) {
+ t.Run("SpecialFile", func(t *testing.T) {
+ var wg sync.WaitGroup
+ for _, p := range specialFiles() {
+ for i := 0; i < 4; i++ {
+ wg.Add(1)
+ go func(p string) {
+ defer wg.Done()
+ for i := 0; i < 100; i++ {
+ if _, err := os.ReadFile(p); err != nil {
+ t.Error(err)
+ return
+ }
+ time.Sleep(time.Nanosecond)
+ }
+ }(p)
+ }
+ }
+ wg.Wait()
+ })
+}
+
+func specialFiles() []string {
+ var ps []string
+ switch runtime.GOOS {
+ case "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd":
+ ps = []string{
+ "/dev/null",
+ }
+ case "linux":
+ ps = []string{
+ "/dev/null",
+ "/proc/stat",
+ "/sys/devices/system/cpu/online",
+ }
+ }
+ nps := ps[:0]
+ for _, p := range ps {
+ f, err := os.Open(p)
+ if err != nil {
+ continue
+ }
+ f.Close()
+ nps = append(nps, p)
+ }
+ return nps
+}
diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go
new file mode 100644
index 0000000..89315a8
--- /dev/null
+++ b/src/internal/poll/sendfile_bsd.go
@@ -0,0 +1,59 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || dragonfly || freebsd
+
+package poll
+
+import "syscall"
+
+// maxSendfileSize is the largest chunk size we ask the kernel to copy
+// at a time.
+const maxSendfileSize int = 4 << 20
+
+// SendFile wraps the sendfile system call.
+func SendFile(dstFD *FD, src int, pos, remain int64) (int64, error) {
+ if err := dstFD.writeLock(); err != nil {
+ return 0, err
+ }
+ defer dstFD.writeUnlock()
+ if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil {
+ return 0, err
+ }
+
+ dst := dstFD.Sysfd
+ var written int64
+ var err error
+ for remain > 0 {
+ n := maxSendfileSize
+ if int64(n) > remain {
+ n = int(remain)
+ }
+ pos1 := pos
+ n, err1 := syscall.Sendfile(dst, src, &pos1, n)
+ if n > 0 {
+ pos += int64(n)
+ written += int64(n)
+ remain -= int64(n)
+ } else if n == 0 && err1 == nil {
+ break
+ }
+ if err1 == syscall.EINTR {
+ continue
+ }
+ if err1 == syscall.EAGAIN {
+ if err1 = dstFD.pd.waitWrite(dstFD.isFile); err1 == nil {
+ continue
+ }
+ }
+ if err1 != nil {
+ // This includes syscall.ENOSYS (no kernel
+ // support) and syscall.EINVAL (fd types which
+ // don't implement sendfile)
+ err = err1
+ break
+ }
+ }
+ return written, err
+}
diff --git a/src/internal/poll/sendfile_linux.go b/src/internal/poll/sendfile_linux.go
new file mode 100644
index 0000000..cc31969
--- /dev/null
+++ b/src/internal/poll/sendfile_linux.go
@@ -0,0 +1,59 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import "syscall"
+
+// maxSendfileSize is the largest chunk size we ask the kernel to copy
+// at a time.
+const maxSendfileSize int = 4 << 20
+
+// SendFile wraps the sendfile system call.
+func SendFile(dstFD *FD, src int, remain int64) (int64, error, bool) {
+ if err := dstFD.writeLock(); err != nil {
+ return 0, err, false
+ }
+ defer dstFD.writeUnlock()
+ if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil {
+ return 0, err, false
+ }
+
+ dst := dstFD.Sysfd
+ var (
+ written int64
+ err error
+ handled = true
+ )
+ for remain > 0 {
+ n := maxSendfileSize
+ if int64(n) > remain {
+ n = int(remain)
+ }
+ n, err1 := syscall.Sendfile(dst, src, nil, n)
+ if n > 0 {
+ written += int64(n)
+ remain -= int64(n)
+ } else if n == 0 && err1 == nil {
+ break
+ }
+ if err1 == syscall.EINTR {
+ continue
+ }
+ if err1 == syscall.EAGAIN {
+ if err1 = dstFD.pd.waitWrite(dstFD.isFile); err1 == nil {
+ continue
+ }
+ }
+ if err1 != nil {
+ // This includes syscall.ENOSYS (no kernel
+ // support) and syscall.EINVAL (fd types which
+ // don't implement sendfile)
+ err = err1
+ handled = false
+ break
+ }
+ }
+ return written, err, handled
+}
diff --git a/src/internal/poll/sendfile_solaris.go b/src/internal/poll/sendfile_solaris.go
new file mode 100644
index 0000000..7ae18f4
--- /dev/null
+++ b/src/internal/poll/sendfile_solaris.go
@@ -0,0 +1,66 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import "syscall"
+
+// Not strictly needed, but very helpful for debugging, see issue #10221.
+//
+//go:cgo_import_dynamic _ _ "libsendfile.so"
+//go:cgo_import_dynamic _ _ "libsocket.so"
+
+// maxSendfileSize is the largest chunk size we ask the kernel to copy
+// at a time.
+const maxSendfileSize int = 4 << 20
+
+// SendFile wraps the sendfile system call.
+func SendFile(dstFD *FD, src int, pos, remain int64) (int64, error) {
+ if err := dstFD.writeLock(); err != nil {
+ return 0, err
+ }
+ defer dstFD.writeUnlock()
+ if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil {
+ return 0, err
+ }
+
+ dst := dstFD.Sysfd
+ var written int64
+ var err error
+ for remain > 0 {
+ n := maxSendfileSize
+ if int64(n) > remain {
+ n = int(remain)
+ }
+ pos1 := pos
+ n, err1 := syscall.Sendfile(dst, src, &pos1, n)
+ if err1 == syscall.EAGAIN || err1 == syscall.EINTR {
+ // partial write may have occurred
+ n = int(pos1 - pos)
+ }
+ if n > 0 {
+ pos += int64(n)
+ written += int64(n)
+ remain -= int64(n)
+ } else if n == 0 && err1 == nil {
+ break
+ }
+ if err1 == syscall.EAGAIN {
+ if err1 = dstFD.pd.waitWrite(dstFD.isFile); err1 == nil {
+ continue
+ }
+ }
+ if err1 == syscall.EINTR {
+ continue
+ }
+ if err1 != nil {
+ // This includes syscall.ENOSYS (no kernel
+ // support) and syscall.EINVAL (fd types which
+ // don't implement sendfile)
+ err = err1
+ break
+ }
+ }
+ return written, err
+}
diff --git a/src/internal/poll/sendfile_windows.go b/src/internal/poll/sendfile_windows.go
new file mode 100644
index 0000000..8c3353b
--- /dev/null
+++ b/src/internal/poll/sendfile_windows.go
@@ -0,0 +1,84 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import (
+ "io"
+ "syscall"
+)
+
+// SendFile wraps the TransmitFile call.
+func SendFile(fd *FD, src syscall.Handle, n int64) (written int64, err error) {
+ if fd.kind == kindPipe {
+ // TransmitFile does not work with pipes
+ return 0, syscall.ESPIPE
+ }
+ if ft, _ := syscall.GetFileType(src); ft == syscall.FILE_TYPE_PIPE {
+ return 0, syscall.ESPIPE
+ }
+
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+
+ o := &fd.wop
+ o.handle = src
+
+ // TODO(brainman): skip calling syscall.Seek if OS allows it
+ curpos, err := syscall.Seek(o.handle, 0, io.SeekCurrent)
+ if err != nil {
+ return 0, err
+ }
+
+ if n <= 0 { // We don't know the size of the file so infer it.
+ // Find the number of bytes offset from curpos until the end of the file.
+ n, err = syscall.Seek(o.handle, -curpos, io.SeekEnd)
+ if err != nil {
+ return
+ }
+ // Now seek back to the original position.
+ if _, err = syscall.Seek(o.handle, curpos, io.SeekStart); err != nil {
+ return
+ }
+ }
+
+ // TransmitFile can be invoked in one call with at most
+ // 2,147,483,646 bytes: the maximum value for a 32-bit integer minus 1.
+ // See https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-transmitfile
+ const maxChunkSizePerCall = int64(0x7fffffff - 1)
+
+ for n > 0 {
+ chunkSize := maxChunkSizePerCall
+ if chunkSize > n {
+ chunkSize = n
+ }
+
+ o.qty = uint32(chunkSize)
+ o.o.Offset = uint32(curpos)
+ o.o.OffsetHigh = uint32(curpos >> 32)
+
+ nw, err := execIO(o, func(o *operation) error {
+ return syscall.TransmitFile(o.fd.Sysfd, o.handle, o.qty, 0, &o.o, nil, syscall.TF_WRITE_BEHIND)
+ })
+ if err != nil {
+ return written, err
+ }
+
+ curpos += int64(nw)
+
+		// Some versions of Windows (Windows 10 1803) do not set
+		// the file position after TransmitFile completes.
+		// So just use Seek to set the file position.
+ if _, err = syscall.Seek(o.handle, curpos, io.SeekStart); err != nil {
+ return written, err
+ }
+
+ n -= int64(nw)
+ written += int64(nw)
+ }
+
+ return
+}
diff --git a/src/internal/poll/sock_cloexec.go b/src/internal/poll/sock_cloexec.go
new file mode 100644
index 0000000..361c11b
--- /dev/null
+++ b/src/internal/poll/sock_cloexec.go
@@ -0,0 +1,49 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements accept for platforms that provide a fast path for
+// setting SetNonblock and CloseOnExec.
+
+//go:build dragonfly || freebsd || (linux && !arm) || netbsd || openbsd || solaris
+
+package poll
+
+import "syscall"
+
+// Wrapper around the accept system call that marks the returned file
+// descriptor as nonblocking and close-on-exec.
+func accept(s int) (int, syscall.Sockaddr, string, error) {
+ ns, sa, err := Accept4Func(s, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC)
+ // TODO: We can remove the fallback on Linux and *BSD,
+ // as currently supported versions all support accept4
+ // with SOCK_CLOEXEC, but Solaris does not. See issue #59359.
+ switch err {
+ case nil:
+ return ns, sa, "", nil
+ default: // errors other than the ones listed
+ return -1, sa, "accept4", err
+ case syscall.ENOSYS: // syscall missing
+ case syscall.EINVAL: // some Linux use this instead of ENOSYS
+ case syscall.EACCES: // some Linux use this instead of ENOSYS
+ case syscall.EFAULT: // some Linux use this instead of ENOSYS
+ }
+
+ // See ../syscall/exec_unix.go for description of ForkLock.
+ // It is probably okay to hold the lock across syscall.Accept
+ // because we have put fd.sysfd into non-blocking mode.
+ // However, a call to the File method will put it back into
+ // blocking mode. We can't take that risk, so no use of ForkLock here.
+ ns, sa, err = AcceptFunc(s)
+ if err == nil {
+ syscall.CloseOnExec(ns)
+ }
+ if err != nil {
+ return -1, nil, "accept", err
+ }
+ if err = syscall.SetNonblock(ns, true); err != nil {
+ CloseFunc(ns)
+ return -1, nil, "setnonblock", err
+ }
+ return ns, sa, "", nil
+}
diff --git a/src/internal/poll/sock_cloexec_accept.go b/src/internal/poll/sock_cloexec_accept.go
new file mode 100644
index 0000000..4b86de5
--- /dev/null
+++ b/src/internal/poll/sock_cloexec_accept.go
@@ -0,0 +1,51 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements accept for platforms that provide a fast path for
+// setting SetNonblock and CloseOnExec, but don't necessarily have accept4.
+// This is the code we used for accept in Go 1.17 and earlier.
+// On Linux the accept4 system call was introduced in 2.6.28 kernel,
+// and our minimum requirement is 2.6.32, so we simplified the function.
+// Unfortunately, on ARM accept4 wasn't added until 2.6.36, so for ARM
+// only we continue using the older code.
+
+//go:build linux && arm
+
+package poll
+
+import "syscall"
+
+// Wrapper around the accept system call that marks the returned file
+// descriptor as nonblocking and close-on-exec.
+func accept(s int) (int, syscall.Sockaddr, string, error) {
+ ns, sa, err := Accept4Func(s, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC)
+ switch err {
+ case nil:
+ return ns, sa, "", nil
+ default: // errors other than the ones listed
+ return -1, sa, "accept4", err
+ case syscall.ENOSYS: // syscall missing
+ case syscall.EINVAL: // some Linux use this instead of ENOSYS
+ case syscall.EACCES: // some Linux use this instead of ENOSYS
+ case syscall.EFAULT: // some Linux use this instead of ENOSYS
+ }
+
+ // See ../syscall/exec_unix.go for description of ForkLock.
+ // It is probably okay to hold the lock across syscall.Accept
+ // because we have put fd.sysfd into non-blocking mode.
+ // However, a call to the File method will put it back into
+ // blocking mode. We can't take that risk, so no use of ForkLock here.
+ ns, sa, err = AcceptFunc(s)
+ if err == nil {
+ syscall.CloseOnExec(ns)
+ }
+ if err != nil {
+ return -1, nil, "accept", err
+ }
+ if err = syscall.SetNonblock(ns, true); err != nil {
+ CloseFunc(ns)
+ return -1, nil, "setnonblock", err
+ }
+ return ns, sa, "", nil
+}
diff --git a/src/internal/poll/sockopt.go b/src/internal/poll/sockopt.go
new file mode 100644
index 0000000..a87a9e6
--- /dev/null
+++ b/src/internal/poll/sockopt.go
@@ -0,0 +1,45 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package poll
+
+import "syscall"
+
+// SetsockoptInt wraps the setsockopt network call with an int argument.
+func (fd *FD) SetsockoptInt(level, name, arg int) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptInt(fd.Sysfd, level, name, arg)
+}
+
+// SetsockoptInet4Addr wraps the setsockopt network call with an IPv4 address.
+func (fd *FD) SetsockoptInet4Addr(level, name int, arg [4]byte) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptInet4Addr(fd.Sysfd, level, name, arg)
+}
+
+// SetsockoptLinger wraps the setsockopt network call with a Linger argument.
+func (fd *FD) SetsockoptLinger(level, name int, l *syscall.Linger) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptLinger(fd.Sysfd, level, name, l)
+}
+
+// GetsockoptInt wraps the getsockopt network call with an int argument.
+func (fd *FD) GetsockoptInt(level, name int) (int, error) {
+ if err := fd.incref(); err != nil {
+ return -1, err
+ }
+ defer fd.decref()
+ return syscall.GetsockoptInt(fd.Sysfd, level, name)
+}
diff --git a/src/internal/poll/sockopt_linux.go b/src/internal/poll/sockopt_linux.go
new file mode 100644
index 0000000..bc79c35
--- /dev/null
+++ b/src/internal/poll/sockopt_linux.go
@@ -0,0 +1,16 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import "syscall"
+
+// SetsockoptIPMreqn wraps the setsockopt network call with an IPMreqn argument.
+func (fd *FD) SetsockoptIPMreqn(level, name int, mreq *syscall.IPMreqn) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptIPMreqn(fd.Sysfd, level, name, mreq)
+}
diff --git a/src/internal/poll/sockopt_unix.go b/src/internal/poll/sockopt_unix.go
new file mode 100644
index 0000000..9cba44d
--- /dev/null
+++ b/src/internal/poll/sockopt_unix.go
@@ -0,0 +1,18 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package poll
+
+import "syscall"
+
+// SetsockoptByte wraps the setsockopt network call with a byte argument.
+func (fd *FD) SetsockoptByte(level, name int, arg byte) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptByte(fd.Sysfd, level, name, arg)
+}
diff --git a/src/internal/poll/sockopt_windows.go b/src/internal/poll/sockopt_windows.go
new file mode 100644
index 0000000..dd5fb70
--- /dev/null
+++ b/src/internal/poll/sockopt_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import "syscall"
+
+// Setsockopt wraps the setsockopt network call.
+func (fd *FD) Setsockopt(level, optname int32, optval *byte, optlen int32) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.Setsockopt(fd.Sysfd, level, optname, optval, optlen)
+}
+
+// WSAIoctl wraps the WSAIoctl network call.
+func (fd *FD) WSAIoctl(iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *syscall.Overlapped, completionRoutine uintptr) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.WSAIoctl(fd.Sysfd, iocc, inbuf, cbif, outbuf, cbob, cbbr, overlapped, completionRoutine)
+}
diff --git a/src/internal/poll/sockoptip.go b/src/internal/poll/sockoptip.go
new file mode 100644
index 0000000..41955e1
--- /dev/null
+++ b/src/internal/poll/sockoptip.go
@@ -0,0 +1,27 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package poll
+
+import "syscall"
+
+// SetsockoptIPMreq wraps the setsockopt network call with an IPMreq argument.
+func (fd *FD) SetsockoptIPMreq(level, name int, mreq *syscall.IPMreq) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptIPMreq(fd.Sysfd, level, name, mreq)
+}
+
+// SetsockoptIPv6Mreq wraps the setsockopt network call with an IPv6Mreq argument.
+func (fd *FD) SetsockoptIPv6Mreq(level, name int, mreq *syscall.IPv6Mreq) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptIPv6Mreq(fd.Sysfd, level, name, mreq)
+}
diff --git a/src/internal/poll/splice_linux.go b/src/internal/poll/splice_linux.go
new file mode 100644
index 0000000..72cca34
--- /dev/null
+++ b/src/internal/poll/splice_linux.go
@@ -0,0 +1,250 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import (
+ "internal/syscall/unix"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // spliceNonblock doesn't make the splice itself necessarily nonblocking
+ // (because the actual file descriptors that are spliced from/to may block
+ // unless they have the O_NONBLOCK flag set), but it makes the splice pipe
+ // operations nonblocking.
+ spliceNonblock = 0x2
+
+ // maxSpliceSize is the maximum amount of data Splice asks
+ // the kernel to move in a single call to splice(2).
+ // We use 1MB as Splice writes data through a pipe, and 1MB is the default maximum pipe buffer size,
+ // which is determined by /proc/sys/fs/pipe-max-size.
+ maxSpliceSize = 1 << 20
+)
+
+// Splice transfers at most remain bytes of data from src to dst, using the
+// splice system call to minimize copies of data from and to userspace.
+//
+// Splice gets a pipe buffer from the pool or creates a new one if needed, to serve as a buffer for the data transfer.
+// src and dst must both be stream-oriented sockets.
+//
+// If err != nil, sc is the system call which caused the error.
+func Splice(dst, src *FD, remain int64) (written int64, handled bool, sc string, err error) {
+ p, sc, err := getPipe()
+ if err != nil {
+ return 0, false, sc, err
+ }
+ defer putPipe(p)
+ var inPipe, n int
+ for err == nil && remain > 0 {
+ max := maxSpliceSize
+ if int64(max) > remain {
+ max = int(remain)
+ }
+ inPipe, err = spliceDrain(p.wfd, src, max)
+ // The operation is considered handled if splice returns no
+ // error, or an error other than EINVAL. An EINVAL means the
+ // kernel does not support splice for the socket type of src.
+ // The failed syscall does not consume any data so it is safe
+ // to fall back to a generic copy.
+ //
+ // spliceDrain should never return EAGAIN, so if err != nil,
+ // Splice cannot continue.
+ //
+ // If inPipe == 0 && err == nil, src is at EOF, and the
+ // transfer is complete.
+ handled = handled || (err != syscall.EINVAL)
+ if err != nil || inPipe == 0 {
+ break
+ }
+ p.data += inPipe
+
+ n, err = splicePump(dst, p.rfd, inPipe)
+ if n > 0 {
+ written += int64(n)
+ remain -= int64(n)
+ p.data -= n
+ }
+ }
+ if err != nil {
+ return written, handled, "splice", err
+ }
+ return written, true, "", nil
+}
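
A minimal illustrative sketch (not from this patch) of the drain-then-pump shape used above, written against the public syscall package so it stands alone; it is Linux-only, and the EAGAIN/EINTR and poller handling that spliceDrain and splicePump perform is abbreviated.

package splicesketch

import "syscall"

// spliceOnce moves at most max bytes from srcFD to dstFD through the pipe
// (prfd, pwfd): drain the source socket into the pipe, then pump the pipe
// into the destination socket, mirroring one iteration of the Splice loop.
func spliceOnce(dstFD, srcFD, prfd, pwfd, max int) (int, error) {
	// Drain: socket -> write end of the pipe.
	in, err := syscall.Splice(srcFD, nil, pwfd, nil, max, syscall.SPLICE_F_NONBLOCK)
	if err != nil || in == 0 {
		return 0, err // error, or EOF on the source
	}
	// Pump: read end of the pipe -> socket, until the pipe is empty again.
	written := 0
	for int64(written) < in {
		n, err := syscall.Splice(prfd, nil, dstFD, nil, int(in)-written, syscall.SPLICE_F_NONBLOCK)
		if err != nil {
			return written, err // real code retries EINTR and waits on EAGAIN
		}
		written += int(n)
	}
	return written, nil
}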
+
+// spliceDrain moves data from a socket to a pipe.
+//
+// Invariant: when entering spliceDrain, the pipe is empty. It is either in its
+// initial state, or splicePump has emptied it previously.
+//
+// Given this, spliceDrain can reasonably assume that the pipe is ready for
+// writing, so if splice returns EAGAIN, it must be because the socket is not
+// ready for reading.
+//
+// If spliceDrain returns (0, nil), src is at EOF.
+func spliceDrain(pipefd int, sock *FD, max int) (int, error) {
+ if err := sock.readLock(); err != nil {
+ return 0, err
+ }
+ defer sock.readUnlock()
+ if err := sock.pd.prepareRead(sock.isFile); err != nil {
+ return 0, err
+ }
+ for {
+		// In theory calling splice(2) with SPLICE_F_NONBLOCK could end up in an infinite loop here,
+		// because it could return EAGAIN ceaselessly when the write end of the pipe is full,
+		// but this shouldn't be a concern here, since the pipe buffer is guaranteed to have
+		// enough room for this data transfer, given the workflow in Splice.
+ n, err := splice(pipefd, sock.Sysfd, max, spliceNonblock)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err != syscall.EAGAIN {
+ return n, err
+ }
+ if sock.pd.pollable() {
+ if err := sock.pd.waitRead(sock.isFile); err != nil {
+ return n, err
+ }
+ }
+ }
+}
+
+// splicePump moves all the buffered data from a pipe to a socket.
+//
+// Invariant: when entering splicePump, there are exactly inPipe
+// bytes of data in the pipe, from a previous call to spliceDrain.
+//
+// By analogy to the condition from spliceDrain, splicePump
+// only needs to poll the socket for readiness, if splice returns
+// EAGAIN.
+//
+// If splicePump cannot move all the data in a single call to
+// splice(2), it loops over the buffered data until it has written
+// all of it to the socket. This behavior is similar to the Write
+// step of an io.Copy in userspace.
+func splicePump(sock *FD, pipefd int, inPipe int) (int, error) {
+ if err := sock.writeLock(); err != nil {
+ return 0, err
+ }
+ defer sock.writeUnlock()
+ if err := sock.pd.prepareWrite(sock.isFile); err != nil {
+ return 0, err
+ }
+ written := 0
+ for inPipe > 0 {
+		// In theory calling splice(2) with SPLICE_F_NONBLOCK could end up in an infinite loop here,
+		// because it could return EAGAIN ceaselessly when the read end of the pipe is empty,
+		// but this shouldn't be a concern here, since the pipe buffer is known to contain
+		// inPipe bytes of data, given the workflow in Splice.
+ n, err := splice(sock.Sysfd, pipefd, inPipe, spliceNonblock)
+ if err == syscall.EINTR {
+ continue
+ }
+ // Here, the condition n == 0 && err == nil should never be
+ // observed, since Splice controls the write side of the pipe.
+ if n > 0 {
+ inPipe -= n
+ written += n
+ continue
+ }
+ if err != syscall.EAGAIN {
+ return written, err
+ }
+ if sock.pd.pollable() {
+ if err := sock.pd.waitWrite(sock.isFile); err != nil {
+ return written, err
+ }
+ }
+ }
+ return written, nil
+}
+
+// splice wraps the splice system call. Since the current implementation
+// only uses splice on sockets and pipes, the offset arguments are unused.
+// splice returns int instead of int64, because callers never ask it to
+// move more data in a single call than can fit in an int32.
+func splice(out int, in int, max int, flags int) (int, error) {
+ n, err := syscall.Splice(in, nil, out, nil, max, flags)
+ return int(n), err
+}
+
+type splicePipeFields struct {
+ rfd int
+ wfd int
+ data int
+}
+
+type splicePipe struct {
+ splicePipeFields
+
+ // We want to use a finalizer, so ensure that the size is
+ // large enough to not use the tiny allocator.
+ _ [24 - unsafe.Sizeof(splicePipeFields{})%24]byte
+}
+
+// splicePipePool caches pipes to avoid high-frequency construction and destruction of pipe buffers.
+// The garbage collector will free all pipes in the sync.Pool periodically, thus we need to set up
+// a finalizer for each pipe to close its file descriptors before the actual GC.
+var splicePipePool = sync.Pool{New: newPoolPipe}
+
+func newPoolPipe() any {
+	// Discard any error from creating the pipe buffer; the caller will fall back
+	// to the conventional data path using read() + write() instead.
+ p := newPipe()
+ if p == nil {
+ return nil
+ }
+ runtime.SetFinalizer(p, destroyPipe)
+ return p
+}
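
A minimal illustrative sketch (not from this patch) of the pattern splicePipePool relies on: a sync.Pool caches the objects, and a finalizer releases the underlying OS resource if the garbage collector drops a pooled object. The resource type and the temp-file stand-in are invented for the example.

package poolsketch

import (
	"os"
	"runtime"
	"sync"
)

// resource wraps an OS-level object (a temp file stands in for the pipe).
type resource struct {
	f *os.File
}

var pool = sync.Pool{
	New: func() any {
		f, err := os.CreateTemp("", "pooled-*")
		if err != nil {
			return nil // callers must tolerate a nil resource, as getPipe does
		}
		r := &resource{f: f}
		// If the GC frees a pooled *resource, close its file first.
		runtime.SetFinalizer(r, func(r *resource) { r.f.Close() })
		return r
	},
}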
+
+// getPipe tries to acquire a pipe buffer from the pool, creating a new one with newPipe() if the cache returns nil.
+//
+// Note that creating a new pipe buffer with newPipe() may fail, in which case getPipe() returns a generic error
+// along with the system call name "splice" as the indication.
+func getPipe() (*splicePipe, string, error) {
+ v := splicePipePool.Get()
+ if v == nil {
+ return nil, "splice", syscall.EINVAL
+ }
+ return v.(*splicePipe), "", nil
+}
+
+func putPipe(p *splicePipe) {
+ // If there is still data left in the pipe,
+ // then close and discard it instead of putting it back into the pool.
+ if p.data != 0 {
+ runtime.SetFinalizer(p, nil)
+ destroyPipe(p)
+ return
+ }
+ splicePipePool.Put(p)
+}
+
+// newPipe sets up a pipe for a splice operation.
+func newPipe() *splicePipe {
+ var fds [2]int
+ if err := syscall.Pipe2(fds[:], syscall.O_CLOEXEC|syscall.O_NONBLOCK); err != nil {
+ return nil
+ }
+
+ // Splice will loop writing maxSpliceSize bytes from the source to the pipe,
+ // and then write those bytes from the pipe to the destination.
+ // Set the pipe buffer size to maxSpliceSize to optimize that.
+ // Ignore errors here, as a smaller buffer size will work,
+ // although it will require more system calls.
+ unix.Fcntl(fds[0], syscall.F_SETPIPE_SZ, maxSpliceSize)
+
+ return &splicePipe{splicePipeFields: splicePipeFields{rfd: fds[0], wfd: fds[1]}}
+}
+
+// destroyPipe destroys a pipe.
+func destroyPipe(p *splicePipe) {
+ CloseFunc(p.rfd)
+ CloseFunc(p.wfd)
+}
diff --git a/src/internal/poll/splice_linux_test.go b/src/internal/poll/splice_linux_test.go
new file mode 100644
index 0000000..29bcaab
--- /dev/null
+++ b/src/internal/poll/splice_linux_test.go
@@ -0,0 +1,136 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll_test
+
+import (
+ "internal/poll"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+var closeHook atomic.Value // func(fd int)
+
+func init() {
+ closeFunc := poll.CloseFunc
+ poll.CloseFunc = func(fd int) (err error) {
+ if v := closeHook.Load(); v != nil {
+ if hook := v.(func(int)); hook != nil {
+ hook(fd)
+ }
+ }
+ return closeFunc(fd)
+ }
+}
+
+func TestSplicePipePool(t *testing.T) {
+ const N = 64
+ var (
+ p *poll.SplicePipe
+ ps []*poll.SplicePipe
+ allFDs []int
+ pendingFDs sync.Map // fd → struct{}{}
+ err error
+ )
+
+ closeHook.Store(func(fd int) { pendingFDs.Delete(fd) })
+ t.Cleanup(func() { closeHook.Store((func(int))(nil)) })
+
+ for i := 0; i < N; i++ {
+ p, _, err = poll.GetPipe()
+ if err != nil {
+ t.Skipf("failed to create pipe due to error(%v), skip this test", err)
+ }
+ _, pwfd := poll.GetPipeFds(p)
+ allFDs = append(allFDs, pwfd)
+ pendingFDs.Store(pwfd, struct{}{})
+ ps = append(ps, p)
+ }
+ for _, p = range ps {
+ poll.PutPipe(p)
+ }
+ ps = nil
+ p = nil
+
+ // Exploit the timeout of "go test" as a timer for the subsequent verification.
+ timeout := 5 * time.Minute
+ if deadline, ok := t.Deadline(); ok {
+ timeout = deadline.Sub(time.Now())
+ timeout -= timeout / 10 // Leave 10% headroom for cleanup.
+ }
+ expiredTime := time.NewTimer(timeout)
+ defer expiredTime.Stop()
+
+ // Trigger garbage collection repeatedly, waiting for all pipes in sync.Pool
+ // to either be deallocated and closed, or to time out.
+ for {
+ runtime.GC()
+ time.Sleep(10 * time.Millisecond)
+
+ // Detect whether all pipes are closed properly.
+ var leakedFDs []int
+ pendingFDs.Range(func(k, v any) bool {
+ leakedFDs = append(leakedFDs, k.(int))
+ return true
+ })
+ if len(leakedFDs) == 0 {
+ break
+ }
+
+ select {
+ case <-expiredTime.C:
+ t.Logf("all descriptors: %v", allFDs)
+ t.Fatalf("leaked descriptors: %v", leakedFDs)
+ default:
+ }
+ }
+}
+
+func BenchmarkSplicePipe(b *testing.B) {
+ b.Run("SplicePipeWithPool", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ p, _, err := poll.GetPipe()
+ if err != nil {
+ continue
+ }
+ poll.PutPipe(p)
+ }
+ })
+ b.Run("SplicePipeWithoutPool", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ p := poll.NewPipe()
+ if p == nil {
+ b.Skip("newPipe returned nil")
+ }
+ poll.DestroyPipe(p)
+ }
+ })
+}
+
+func BenchmarkSplicePipePoolParallel(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ p, _, err := poll.GetPipe()
+ if err != nil {
+ continue
+ }
+ poll.PutPipe(p)
+ }
+ })
+}
+
+func BenchmarkSplicePipeNativeParallel(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ p := poll.NewPipe()
+ if p == nil {
+ b.Skip("newPipe returned nil")
+ }
+ poll.DestroyPipe(p)
+ }
+ })
+}
diff --git a/src/internal/poll/strconv.go b/src/internal/poll/strconv.go
new file mode 100644
index 0000000..2b052fa
--- /dev/null
+++ b/src/internal/poll/strconv.go
@@ -0,0 +1,13 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9
+
+package poll
+
+// stringsHasSuffix is strings.HasSuffix. It reports whether s ends in
+// suffix.
+func stringsHasSuffix(s, suffix string) bool {
+ return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
+}
diff --git a/src/internal/poll/sys_cloexec.go b/src/internal/poll/sys_cloexec.go
new file mode 100644
index 0000000..2c5da7d
--- /dev/null
+++ b/src/internal/poll/sys_cloexec.go
@@ -0,0 +1,36 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements accept for platforms that do not provide a fast path for
+// setting SetNonblock and CloseOnExec.
+
+//go:build aix || darwin || (js && wasm) || wasip1
+
+package poll
+
+import (
+ "syscall"
+)
+
+// Wrapper around the accept system call that marks the returned file
+// descriptor as nonblocking and close-on-exec.
+func accept(s int) (int, syscall.Sockaddr, string, error) {
+ // See ../syscall/exec_unix.go for description of ForkLock.
+ // It is probably okay to hold the lock across syscall.Accept
+ // because we have put fd.sysfd into non-blocking mode.
+ // However, a call to the File method will put it back into
+ // blocking mode. We can't take that risk, so no use of ForkLock here.
+ ns, sa, err := AcceptFunc(s)
+ if err == nil {
+ syscall.CloseOnExec(ns)
+ }
+ if err != nil {
+ return -1, nil, "accept", err
+ }
+ if err = syscall.SetNonblock(ns, true); err != nil {
+ CloseFunc(ns)
+ return -1, nil, "setnonblock", err
+ }
+ return ns, sa, "", nil
+}
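
For contrast, platforms with accept4(2) set both flags atomically in the kernel and skip the extra syscalls above. A minimal Linux-only sketch of that fast path (not from this patch), using the public syscall package:

package acceptsketch

import "syscall"

// acceptFast accepts a connection with the nonblocking and close-on-exec
// flags applied atomically by the kernel, so there is no window in which the
// new descriptor could leak to a forked child in blocking mode.
func acceptFast(s int) (int, syscall.Sockaddr, string, error) {
	ns, sa, err := syscall.Accept4(s, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC)
	if err != nil {
		return -1, nil, "accept4", err
	}
	return ns, sa, "", nil
}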
diff --git a/src/internal/poll/writev.go b/src/internal/poll/writev.go
new file mode 100644
index 0000000..75c8b64
--- /dev/null
+++ b/src/internal/poll/writev.go
@@ -0,0 +1,92 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package poll
+
+import (
+ "io"
+ "runtime"
+ "syscall"
+)
+
+// Writev wraps the writev system call.
+func (fd *FD) Writev(v *[][]byte) (int64, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, err
+ }
+
+ var iovecs []syscall.Iovec
+ if fd.iovecs != nil {
+ iovecs = *fd.iovecs
+ }
+ // TODO: read from sysconf(_SC_IOV_MAX)? The Linux default is
+ // 1024 and this seems conservative enough for now. Darwin's
+ // UIO_MAXIOV also seems to be 1024.
+ maxVec := 1024
+ if runtime.GOOS == "aix" || runtime.GOOS == "solaris" {
+ // IOV_MAX is set to XOPEN_IOV_MAX on AIX and Solaris.
+ maxVec = 16
+ }
+
+ var n int64
+ var err error
+ for len(*v) > 0 {
+ iovecs = iovecs[:0]
+ for _, chunk := range *v {
+ if len(chunk) == 0 {
+ continue
+ }
+ iovecs = append(iovecs, newIovecWithBase(&chunk[0]))
+ if fd.IsStream && len(chunk) > 1<<30 {
+ iovecs[len(iovecs)-1].SetLen(1 << 30)
+ break // continue chunk on next writev
+ }
+ iovecs[len(iovecs)-1].SetLen(len(chunk))
+ if len(iovecs) == maxVec {
+ break
+ }
+ }
+ if len(iovecs) == 0 {
+ break
+ }
+ if fd.iovecs == nil {
+ fd.iovecs = new([]syscall.Iovec)
+ }
+ *fd.iovecs = iovecs // cache
+
+ var wrote uintptr
+ wrote, err = writev(fd.Sysfd, iovecs)
+ if wrote == ^uintptr(0) {
+ wrote = 0
+ }
+ TestHookDidWritev(int(wrote))
+ n += int64(wrote)
+ consume(v, int64(wrote))
+ for i := range iovecs {
+ iovecs[i] = syscall.Iovec{}
+ }
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ break
+ }
+ if n == 0 {
+ err = io.ErrUnexpectedEOF
+ break
+ }
+ }
+ return n, err
+}
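
internal/poll.Writev is not importable outside the standard library; its public counterpart is net.Buffers, whose Write/WriteTo methods consume the buffer slice in place as data is written, much as consume does above. A small illustrative sketch (not from this patch):

package writevsketch

import "net"

// sendAll writes every chunk to the connection. net.Buffers uses a batched
// write such as writev where the platform supports it, and advances/empties
// bufs as it goes.
func sendAll(c net.Conn, chunks [][]byte) (int64, error) {
	bufs := net.Buffers(chunks)
	return bufs.WriteTo(c)
}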
diff --git a/src/internal/poll/writev_test.go b/src/internal/poll/writev_test.go
new file mode 100644
index 0000000..b46657c
--- /dev/null
+++ b/src/internal/poll/writev_test.go
@@ -0,0 +1,62 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll_test
+
+import (
+ "internal/poll"
+ "reflect"
+ "testing"
+)
+
+func TestConsume(t *testing.T) {
+ tests := []struct {
+ in [][]byte
+ consume int64
+ want [][]byte
+ }{
+ {
+ in: [][]byte{[]byte("foo"), []byte("bar")},
+ consume: 0,
+ want: [][]byte{[]byte("foo"), []byte("bar")},
+ },
+ {
+ in: [][]byte{[]byte("foo"), []byte("bar")},
+ consume: 2,
+ want: [][]byte{[]byte("o"), []byte("bar")},
+ },
+ {
+ in: [][]byte{[]byte("foo"), []byte("bar")},
+ consume: 3,
+ want: [][]byte{[]byte("bar")},
+ },
+ {
+ in: [][]byte{[]byte("foo"), []byte("bar")},
+ consume: 4,
+ want: [][]byte{[]byte("ar")},
+ },
+ {
+ in: [][]byte{nil, nil, nil, []byte("bar")},
+ consume: 1,
+ want: [][]byte{[]byte("ar")},
+ },
+ {
+ in: [][]byte{nil, nil, nil, []byte("foo")},
+ consume: 0,
+ want: [][]byte{[]byte("foo")},
+ },
+ {
+ in: [][]byte{nil, nil, nil},
+ consume: 0,
+ want: [][]byte{},
+ },
+ }
+ for i, tt := range tests {
+ in := tt.in
+ poll.Consume(&in, tt.consume)
+ if !reflect.DeepEqual(in, tt.want) {
+ t.Errorf("%d. after consume(%d) = %+v, want %+v", i, tt.consume, in, tt.want)
+ }
+ }
+}
diff --git a/src/internal/profile/encode.go b/src/internal/profile/encode.go
new file mode 100644
index 0000000..77d77f1
--- /dev/null
+++ b/src/internal/profile/encode.go
@@ -0,0 +1,482 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package profile
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+)
+
+func (p *Profile) decoder() []decoder {
+ return profileDecoder
+}
+
+// preEncode populates the unexported fields to be used by encode
+// (with suffix X) from the corresponding exported fields. The
+// exported fields are cleared up to facilitate testing.
+func (p *Profile) preEncode() {
+ strings := make(map[string]int)
+ addString(strings, "")
+
+ for _, st := range p.SampleType {
+ st.typeX = addString(strings, st.Type)
+ st.unitX = addString(strings, st.Unit)
+ }
+
+ for _, s := range p.Sample {
+ s.labelX = nil
+ var keys []string
+ for k := range s.Label {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := s.Label[k]
+ for _, v := range vs {
+ s.labelX = append(s.labelX,
+ Label{
+ keyX: addString(strings, k),
+ strX: addString(strings, v),
+ },
+ )
+ }
+ }
+ var numKeys []string
+ for k := range s.NumLabel {
+ numKeys = append(numKeys, k)
+ }
+ sort.Strings(numKeys)
+ for _, k := range numKeys {
+ vs := s.NumLabel[k]
+ for _, v := range vs {
+ s.labelX = append(s.labelX,
+ Label{
+ keyX: addString(strings, k),
+ numX: v,
+ },
+ )
+ }
+ }
+ s.locationIDX = nil
+ for _, l := range s.Location {
+ s.locationIDX = append(s.locationIDX, l.ID)
+ }
+ }
+
+ for _, m := range p.Mapping {
+ m.fileX = addString(strings, m.File)
+ m.buildIDX = addString(strings, m.BuildID)
+ }
+
+ for _, l := range p.Location {
+ for i, ln := range l.Line {
+ if ln.Function != nil {
+ l.Line[i].functionIDX = ln.Function.ID
+ } else {
+ l.Line[i].functionIDX = 0
+ }
+ }
+ if l.Mapping != nil {
+ l.mappingIDX = l.Mapping.ID
+ } else {
+ l.mappingIDX = 0
+ }
+ }
+ for _, f := range p.Function {
+ f.nameX = addString(strings, f.Name)
+ f.systemNameX = addString(strings, f.SystemName)
+ f.filenameX = addString(strings, f.Filename)
+ }
+
+ p.dropFramesX = addString(strings, p.DropFrames)
+ p.keepFramesX = addString(strings, p.KeepFrames)
+
+ if pt := p.PeriodType; pt != nil {
+ pt.typeX = addString(strings, pt.Type)
+ pt.unitX = addString(strings, pt.Unit)
+ }
+
+ p.stringTable = make([]string, len(strings))
+ for s, i := range strings {
+ p.stringTable[i] = s
+ }
+}
+
+func (p *Profile) encode(b *buffer) {
+ for _, x := range p.SampleType {
+ encodeMessage(b, 1, x)
+ }
+ for _, x := range p.Sample {
+ encodeMessage(b, 2, x)
+ }
+ for _, x := range p.Mapping {
+ encodeMessage(b, 3, x)
+ }
+ for _, x := range p.Location {
+ encodeMessage(b, 4, x)
+ }
+ for _, x := range p.Function {
+ encodeMessage(b, 5, x)
+ }
+ encodeStrings(b, 6, p.stringTable)
+ encodeInt64Opt(b, 7, p.dropFramesX)
+ encodeInt64Opt(b, 8, p.keepFramesX)
+ encodeInt64Opt(b, 9, p.TimeNanos)
+ encodeInt64Opt(b, 10, p.DurationNanos)
+ if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
+ encodeMessage(b, 11, p.PeriodType)
+ }
+ encodeInt64Opt(b, 12, p.Period)
+}
+
+var profileDecoder = []decoder{
+ nil, // 0
+ // repeated ValueType sample_type = 1
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.SampleType = append(pp.SampleType, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Sample sample = 2
+ func(b *buffer, m message) error {
+ x := new(Sample)
+ pp := m.(*Profile)
+ pp.Sample = append(pp.Sample, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Mapping mapping = 3
+ func(b *buffer, m message) error {
+ x := new(Mapping)
+ pp := m.(*Profile)
+ pp.Mapping = append(pp.Mapping, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Location location = 4
+ func(b *buffer, m message) error {
+ x := new(Location)
+ pp := m.(*Profile)
+ pp.Location = append(pp.Location, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Function function = 5
+ func(b *buffer, m message) error {
+ x := new(Function)
+ pp := m.(*Profile)
+ pp.Function = append(pp.Function, x)
+ return decodeMessage(b, x)
+ },
+ // repeated string string_table = 6
+ func(b *buffer, m message) error {
+ err := decodeStrings(b, &m.(*Profile).stringTable)
+ if err != nil {
+ return err
+ }
+ if m.(*Profile).stringTable[0] != "" {
+ return errors.New("string_table[0] must be ''")
+ }
+ return nil
+ },
+ // repeated int64 drop_frames = 7
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
+ // repeated int64 keep_frames = 8
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
+ // repeated int64 time_nanos = 9
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).TimeNanos) },
+ // repeated int64 duration_nanos = 10
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
+ // optional string period_type = 11
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.PeriodType = x
+ return decodeMessage(b, x)
+ },
+ // repeated int64 period = 12
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
+ // repeated int64 comment = 13
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
+ // int64 defaultSampleType = 14
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
+}
+
+// postDecode takes the unexported fields populated by decode (with
+// suffix X) and populates the corresponding exported fields.
+// The unexported fields are cleared up to facilitate testing.
+func (p *Profile) postDecode() error {
+ var err error
+
+ mappings := make(map[uint64]*Mapping)
+ for _, m := range p.Mapping {
+ m.File, err = getString(p.stringTable, &m.fileX, err)
+ m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
+ mappings[m.ID] = m
+ }
+
+ functions := make(map[uint64]*Function)
+ for _, f := range p.Function {
+ f.Name, err = getString(p.stringTable, &f.nameX, err)
+ f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
+ f.Filename, err = getString(p.stringTable, &f.filenameX, err)
+ functions[f.ID] = f
+ }
+
+ locations := make(map[uint64]*Location)
+ for _, l := range p.Location {
+ l.Mapping = mappings[l.mappingIDX]
+ l.mappingIDX = 0
+ for i, ln := range l.Line {
+ if id := ln.functionIDX; id != 0 {
+ l.Line[i].Function = functions[id]
+ if l.Line[i].Function == nil {
+ return fmt.Errorf("Function ID %d not found", id)
+ }
+ l.Line[i].functionIDX = 0
+ }
+ }
+ locations[l.ID] = l
+ }
+
+ for _, st := range p.SampleType {
+ st.Type, err = getString(p.stringTable, &st.typeX, err)
+ st.Unit, err = getString(p.stringTable, &st.unitX, err)
+ }
+
+ for _, s := range p.Sample {
+ labels := make(map[string][]string)
+ numLabels := make(map[string][]int64)
+ for _, l := range s.labelX {
+ var key, value string
+ key, err = getString(p.stringTable, &l.keyX, err)
+ if l.strX != 0 {
+ value, err = getString(p.stringTable, &l.strX, err)
+ labels[key] = append(labels[key], value)
+ } else {
+ numLabels[key] = append(numLabels[key], l.numX)
+ }
+ }
+ if len(labels) > 0 {
+ s.Label = labels
+ }
+ if len(numLabels) > 0 {
+ s.NumLabel = numLabels
+ }
+ s.Location = nil
+ for _, lid := range s.locationIDX {
+ s.Location = append(s.Location, locations[lid])
+ }
+ s.locationIDX = nil
+ }
+
+ p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
+ p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)
+
+ if pt := p.PeriodType; pt == nil {
+ p.PeriodType = &ValueType{}
+ }
+
+ if pt := p.PeriodType; pt != nil {
+ pt.Type, err = getString(p.stringTable, &pt.typeX, err)
+ pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
+ }
+ for _, i := range p.commentX {
+ var c string
+ c, err = getString(p.stringTable, &i, err)
+ p.Comments = append(p.Comments, c)
+ }
+
+ p.commentX = nil
+ p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
+ p.stringTable = nil
+ return nil
+}
+
+func (p *ValueType) decoder() []decoder {
+ return valueTypeDecoder
+}
+
+func (p *ValueType) encode(b *buffer) {
+ encodeInt64Opt(b, 1, p.typeX)
+ encodeInt64Opt(b, 2, p.unitX)
+}
+
+var valueTypeDecoder = []decoder{
+ nil, // 0
+ // optional int64 type = 1
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
+ // optional int64 unit = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
+}
+
+func (p *Sample) decoder() []decoder {
+ return sampleDecoder
+}
+
+func (p *Sample) encode(b *buffer) {
+ encodeUint64s(b, 1, p.locationIDX)
+ for _, x := range p.Value {
+ encodeInt64(b, 2, x)
+ }
+ for _, x := range p.labelX {
+ encodeMessage(b, 3, x)
+ }
+}
+
+var sampleDecoder = []decoder{
+ nil, // 0
+ // repeated uint64 location = 1
+ func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
+ // repeated int64 value = 2
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
+ // repeated Label label = 3
+ func(b *buffer, m message) error {
+ s := m.(*Sample)
+ n := len(s.labelX)
+ s.labelX = append(s.labelX, Label{})
+ return decodeMessage(b, &s.labelX[n])
+ },
+}
+
+func (p Label) decoder() []decoder {
+ return labelDecoder
+}
+
+func (p Label) encode(b *buffer) {
+ encodeInt64Opt(b, 1, p.keyX)
+ encodeInt64Opt(b, 2, p.strX)
+ encodeInt64Opt(b, 3, p.numX)
+}
+
+var labelDecoder = []decoder{
+ nil, // 0
+ // optional int64 key = 1
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).keyX) },
+ // optional int64 str = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).strX) },
+ // optional int64 num = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).numX) },
+}
+
+func (p *Mapping) decoder() []decoder {
+ return mappingDecoder
+}
+
+func (p *Mapping) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeUint64Opt(b, 2, p.Start)
+ encodeUint64Opt(b, 3, p.Limit)
+ encodeUint64Opt(b, 4, p.Offset)
+ encodeInt64Opt(b, 5, p.fileX)
+ encodeInt64Opt(b, 6, p.buildIDX)
+ encodeBoolOpt(b, 7, p.HasFunctions)
+ encodeBoolOpt(b, 8, p.HasFilenames)
+ encodeBoolOpt(b, 9, p.HasLineNumbers)
+ encodeBoolOpt(b, 10, p.HasInlineFrames)
+}
+
+var mappingDecoder = []decoder{
+ nil, // 0
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
+}
+
+func (p *Location) decoder() []decoder {
+ return locationDecoder
+}
+
+func (p *Location) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeUint64Opt(b, 2, p.mappingIDX)
+ encodeUint64Opt(b, 3, p.Address)
+ for i := range p.Line {
+ encodeMessage(b, 4, &p.Line[i])
+ }
+}
+
+var locationDecoder = []decoder{
+ nil, // 0
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1;
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3;
+ func(b *buffer, m message) error { // repeated Line line = 4
+ pp := m.(*Location)
+ n := len(pp.Line)
+ pp.Line = append(pp.Line, Line{})
+ return decodeMessage(b, &pp.Line[n])
+ },
+}
+
+func (p *Line) decoder() []decoder {
+ return lineDecoder
+}
+
+func (p *Line) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.functionIDX)
+ encodeInt64Opt(b, 2, p.Line)
+}
+
+var lineDecoder = []decoder{
+ nil, // 0
+ // optional uint64 function_id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
+ // optional int64 line = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+}
+
+func (p *Function) decoder() []decoder {
+ return functionDecoder
+}
+
+func (p *Function) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeInt64Opt(b, 2, p.nameX)
+ encodeInt64Opt(b, 3, p.systemNameX)
+ encodeInt64Opt(b, 4, p.filenameX)
+ encodeInt64Opt(b, 5, p.StartLine)
+}
+
+var functionDecoder = []decoder{
+ nil, // 0
+ // optional uint64 id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
+ // optional int64 function_name = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
+ // optional int64 function_system_name = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
+ // repeated int64 filename = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
+ // optional int64 start_line = 5
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
+}
+
+func addString(strings map[string]int, s string) int64 {
+ i, ok := strings[s]
+ if !ok {
+ i = len(strings)
+ strings[s] = i
+ }
+ return int64(i)
+}
+
+func getString(strings []string, strng *int64, err error) (string, error) {
+ if err != nil {
+ return "", err
+ }
+ s := int(*strng)
+ if s < 0 || s >= len(strings) {
+ return "", errMalformed
+ }
+ *strng = 0
+ return strings[s], nil
+}
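
A small illustrative sketch (not from this patch) of the string-table indirection that preEncode, addString and getString implement, reduced to a self-contained example; index 0 is reserved for the empty string, matching the check in the string_table decoder above.

package stringtablesketch

import "fmt"

// demo interns two strings, builds the flat table the way preEncode does,
// and resolves the indices back the way postDecode/getString does.
func demo() {
	strings := map[string]int{"": 0} // index 0 is always ""
	intern := func(s string) int64 {
		i, ok := strings[s]
		if !ok {
			i = len(strings)
			strings[s] = i
		}
		return int64(i)
	}

	typeX := intern("cpu")         // -> 1
	unitX := intern("nanoseconds") // -> 2

	table := make([]string, len(strings))
	for s, i := range strings {
		table[i] = s
	}
	fmt.Println(table[typeX], table[unitX]) // prints: cpu nanoseconds
}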
diff --git a/src/internal/profile/filter.go b/src/internal/profile/filter.go
new file mode 100644
index 0000000..141dd1f
--- /dev/null
+++ b/src/internal/profile/filter.go
@@ -0,0 +1,158 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Implements methods to filter samples from profiles.
+
+package profile
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Returns true if the corresponding regexp matched at least one sample.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide *regexp.Regexp) (fm, im, hm bool) {
+ focusOrIgnore := make(map[uint64]bool)
+ hidden := make(map[uint64]bool)
+ for _, l := range p.Location {
+ if ignore != nil && l.matchesName(ignore) {
+ im = true
+ focusOrIgnore[l.ID] = false
+ } else if focus == nil || l.matchesName(focus) {
+ fm = true
+ focusOrIgnore[l.ID] = true
+ }
+ if hide != nil && l.matchesName(hide) {
+ hm = true
+ l.Line = l.unmatchedLines(hide)
+ if len(l.Line) == 0 {
+ hidden[l.ID] = true
+ }
+ }
+ }
+
+ s := make([]*Sample, 0, len(p.Sample))
+ for _, sample := range p.Sample {
+ if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+ if len(hidden) > 0 {
+ var locs []*Location
+ for _, loc := range sample.Location {
+ if !hidden[loc.ID] {
+ locs = append(locs, loc)
+ }
+ }
+ if len(locs) == 0 {
+ // Remove sample with no locations (by not adding it to s).
+ continue
+ }
+ sample.Location = locs
+ }
+ s = append(s, sample)
+ }
+ }
+ p.Sample = s
+
+ return
+}
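
A sketch of the call shape of FilterSamplesByName (not from this patch). internal/profile is only importable from within the standard library, so this is purely illustrative, and the regular expressions are made-up examples.

package filtersketch

import (
	"internal/profile"
	"regexp"
)

// keepMalloc drops every sample that does not pass through runtime.mallocgc,
// and additionally removes samples touching the background GC worker.
// It reports whether the focus expression matched at least one sample.
func keepMalloc(p *profile.Profile) bool {
	focus := regexp.MustCompile(`runtime\.mallocgc`)
	ignore := regexp.MustCompile(`runtime\.gcBgMarkWorker`)
	fm, _, _ := p.FilterSamplesByName(focus, ignore, nil)
	return fm
}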
+
+// matchesName reports whether the function name or file in the
+// location matches the regular expression.
+func (loc *Location) matchesName(re *regexp.Regexp) bool {
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if re.MatchString(fn.Name) {
+ return true
+ }
+ if re.MatchString(fn.Filename) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// unmatchedLines returns the lines in the location that do not match
+// the regular expression.
+func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
+ var lines []Line
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if re.MatchString(fn.Name) {
+ continue
+ }
+ if re.MatchString(fn.Filename) {
+ continue
+ }
+ }
+ lines = append(lines, ln)
+ }
+ return lines
+}
+
+// focusedAndNotIgnored looks up a slice of ids against a map of
+// focused/ignored locations. The map only contains locations that are
+// explicitly focused or ignored. Returns whether there is at least
+// one focused location but no ignored locations.
+func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
+ var f bool
+ for _, loc := range locs {
+ if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
+ if focus {
+ // Found focused location. Must keep searching in case there
+ // is an ignored one as well.
+ f = true
+ } else {
+ // Found ignored location. Can return false right away.
+ return false
+ }
+ }
+ }
+ return f
+}
+
+// TagMatch selects tags for filtering
+type TagMatch func(key, val string, nval int64) bool
+
+// FilterSamplesByTag removes all samples from the profile, except
+// those whose tags match the focus matcher and do not match the
+// ignore matcher.
+func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
+ samples := make([]*Sample, 0, len(p.Sample))
+ for _, s := range p.Sample {
+ focused, ignored := focusedSample(s, focus, ignore)
+ fm = fm || focused
+ im = im || ignored
+ if focused && !ignored {
+ samples = append(samples, s)
+ }
+ }
+ p.Sample = samples
+ return
+}
+
+// focusedSample checks a sample against the focus and ignore tag matchers.
+// Returns whether the focus/ignore matchers match any tags.
+func focusedSample(s *Sample, focus, ignore TagMatch) (fm, im bool) {
+ fm = focus == nil
+ for key, vals := range s.Label {
+ for _, val := range vals {
+ if ignore != nil && ignore(key, val, 0) {
+ im = true
+ }
+ if !fm && focus(key, val, 0) {
+ fm = true
+ }
+ }
+ }
+ for key, vals := range s.NumLabel {
+ for _, val := range vals {
+ if ignore != nil && ignore(key, "", val) {
+ im = true
+ }
+ if !fm && focus(key, "", val) {
+ fm = true
+ }
+ }
+ }
+ return fm, im
+}
diff --git a/src/internal/profile/legacy_profile.go b/src/internal/profile/legacy_profile.go
new file mode 100644
index 0000000..373a6c0
--- /dev/null
+++ b/src/internal/profile/legacy_profile.go
@@ -0,0 +1,1268 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements parsers to convert legacy profiles into the
+// profile.proto format.
+
+package profile
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "internal/lazyregexp"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+)
+
+var (
+ countStartRE = lazyregexp.New(`\A(\w+) profile: total \d+\n\z`)
+ countRE = lazyregexp.New(`\A(\d+) @(( 0x[0-9a-f]+)+)\n\z`)
+
+ heapHeaderRE = lazyregexp.New(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`)
+ heapSampleRE = lazyregexp.New(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`)
+
+ contentionSampleRE = lazyregexp.New(`(\d+) *(\d+) @([ x0-9a-f]*)`)
+
+ hexNumberRE = lazyregexp.New(`0x[0-9a-f]+`)
+
+ growthHeaderRE = lazyregexp.New(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz`)
+
+ fragmentationHeaderRE = lazyregexp.New(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz`)
+
+ threadzStartRE = lazyregexp.New(`--- threadz \d+ ---`)
+ threadStartRE = lazyregexp.New(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`)
+
+ procMapsRE = lazyregexp.New(`([[:xdigit:]]+)-([[:xdigit:]]+)\s+([-rwxp]+)\s+([[:xdigit:]]+)\s+([[:xdigit:]]+):([[:xdigit:]]+)\s+([[:digit:]]+)\s*(\S+)?`)
+
+ briefMapsRE = lazyregexp.New(`\s*([[:xdigit:]]+)-([[:xdigit:]]+):\s*(\S+)(\s.*@)?([[:xdigit:]]+)?`)
+
+ // LegacyHeapAllocated instructs the heapz parsers to use the
+ // allocated memory stats instead of the default in-use memory. Note
+ // that tcmalloc doesn't provide all allocated memory, only in-use
+ // stats.
+ LegacyHeapAllocated bool
+)
+
+func isSpaceOrComment(line string) bool {
+ trimmed := strings.TrimSpace(line)
+ return len(trimmed) == 0 || trimmed[0] == '#'
+}
+
+// parseGoCount parses a Go count profile (e.g., threadcreate or
+// goroutine) and returns a new Profile.
+func parseGoCount(b []byte) (*Profile, error) {
+ r := bytes.NewBuffer(b)
+
+ var line string
+ var err error
+ for {
+ // Skip past comments and empty lines seeking a real header.
+ line, err = r.ReadString('\n')
+ if err != nil {
+ return nil, err
+ }
+ if !isSpaceOrComment(line) {
+ break
+ }
+ }
+
+ m := countStartRE.FindStringSubmatch(line)
+ if m == nil {
+ return nil, errUnrecognized
+ }
+ profileType := m[1]
+ p := &Profile{
+ PeriodType: &ValueType{Type: profileType, Unit: "count"},
+ Period: 1,
+ SampleType: []*ValueType{{Type: profileType, Unit: "count"}},
+ }
+ locations := make(map[uint64]*Location)
+ for {
+ line, err = r.ReadString('\n')
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, err
+ }
+ if isSpaceOrComment(line) {
+ continue
+ }
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ m := countRE.FindStringSubmatch(line)
+ if m == nil {
+ return nil, errMalformed
+ }
+ n, err := strconv.ParseInt(m[1], 0, 64)
+ if err != nil {
+ return nil, errMalformed
+ }
+ fields := strings.Fields(m[2])
+ locs := make([]*Location, 0, len(fields))
+ for _, stk := range fields {
+ addr, err := strconv.ParseUint(stk, 0, 64)
+ if err != nil {
+ return nil, errMalformed
+ }
+ // Adjust all frames by -1 to land on the call instruction.
+ addr--
+ loc := locations[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ locations[addr] = loc
+ p.Location = append(p.Location, loc)
+ }
+ locs = append(locs, loc)
+ }
+ p.Sample = append(p.Sample, &Sample{
+ Location: locs,
+ Value: []int64{n},
+ })
+ }
+
+ if err = parseAdditionalSections(strings.TrimSpace(line), r, p); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
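
An illustrative sample (not from this patch) of the text parseGoCount accepts, matching countStartRE and countRE above; the addresses are invented.

package gocountsketch

// sampleGoroutineProfile is a two-sample goroutine count profile in the
// legacy text format: a header line, then "<count> @ <addr> <addr> ..." rows.
const sampleGoroutineProfile = `goroutine profile: total 2
1 @ 0x42e15a 0x42e20b 0x403fa1
1 @ 0x42e15a 0x45f30e 0x403fa1
`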
+
+// remapLocationIDs ensures there is a location for each address
+// referenced by a sample, and remaps the samples to point to the new
+// location ids.
+func (p *Profile) remapLocationIDs() {
+ seen := make(map[*Location]bool, len(p.Location))
+ var locs []*Location
+
+ for _, s := range p.Sample {
+ for _, l := range s.Location {
+ if seen[l] {
+ continue
+ }
+ l.ID = uint64(len(locs) + 1)
+ locs = append(locs, l)
+ seen[l] = true
+ }
+ }
+ p.Location = locs
+}
+
+func (p *Profile) remapFunctionIDs() {
+ seen := make(map[*Function]bool, len(p.Function))
+ var fns []*Function
+
+ for _, l := range p.Location {
+ for _, ln := range l.Line {
+ fn := ln.Function
+ if fn == nil || seen[fn] {
+ continue
+ }
+ fn.ID = uint64(len(fns) + 1)
+ fns = append(fns, fn)
+ seen[fn] = true
+ }
+ }
+ p.Function = fns
+}
+
+// remapMappingIDs matches location addresses with existing mappings
+// and updates them appropriately. This is O(N*M); if this ever shows
+// up as a bottleneck, evaluate sorting the mappings and doing a
+// binary search, which would make it O(N*log(M)).
+func (p *Profile) remapMappingIDs() {
+ if len(p.Mapping) == 0 {
+ return
+ }
+
+ // Some profile handlers will incorrectly set regions for the main
+ // executable if its section is remapped. Fix them through heuristics.
+
+ // Remove the initial mapping if named '/anon_hugepage' and has a
+ // consecutive adjacent mapping.
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") {
+ if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start {
+ p.Mapping = p.Mapping[1:]
+ }
+ }
+
+ for _, l := range p.Location {
+ if a := l.Address; a != 0 {
+ for _, m := range p.Mapping {
+ if m.Start <= a && a < m.Limit {
+ l.Mapping = m
+ break
+ }
+ }
+ }
+ }
+
+ // Reset all mapping IDs.
+ for i, m := range p.Mapping {
+ m.ID = uint64(i + 1)
+ }
+}
+
+var cpuInts = []func([]byte) (uint64, []byte){
+ get32l,
+ get32b,
+ get64l,
+ get64b,
+}
+
+func get32l(b []byte) (uint64, []byte) {
+ if len(b) < 4 {
+ return 0, nil
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:]
+}
+
+func get32b(b []byte) (uint64, []byte) {
+ if len(b) < 4 {
+ return 0, nil
+ }
+ return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:]
+}
+
+func get64l(b []byte) (uint64, []byte) {
+ if len(b) < 8 {
+ return 0, nil
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:]
+}
+
+func get64b(b []byte) (uint64, []byte) {
+ if len(b) < 8 {
+ return 0, nil
+ }
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:]
+}
+
+// ParseTracebacks parses a set of tracebacks and returns a newly
+// populated profile. It will accept any text file and generate a
+// Profile out of it with any hex addresses it can identify, including
+// a process map if it can recognize one. Each sample will include a
+// tag "source" with the addresses recognized in string format.
+func ParseTracebacks(b []byte) (*Profile, error) {
+ r := bytes.NewBuffer(b)
+
+ p := &Profile{
+ PeriodType: &ValueType{Type: "trace", Unit: "count"},
+ Period: 1,
+ SampleType: []*ValueType{
+ {Type: "trace", Unit: "count"},
+ },
+ }
+
+ var sources []string
+ var sloc []*Location
+
+ locs := make(map[uint64]*Location)
+ for {
+ l, err := r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return nil, err
+ }
+ if l == "" {
+ break
+ }
+ }
+ if sectionTrigger(l) == memoryMapSection {
+ break
+ }
+ if s, addrs := extractHexAddresses(l); len(s) > 0 {
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+
+ sources = append(sources, s...)
+ } else {
+ if len(sources) > 0 || len(sloc) > 0 {
+ addTracebackSample(sloc, sources, p)
+ sloc, sources = nil, nil
+ }
+ }
+ }
+
+ // Add final sample to save any leftover data.
+ if len(sources) > 0 || len(sloc) > 0 {
+ addTracebackSample(sloc, sources, p)
+ }
+
+ if err := p.ParseMemoryMap(r); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+func addTracebackSample(l []*Location, s []string, p *Profile) {
+ p.Sample = append(p.Sample,
+ &Sample{
+ Value: []int64{1},
+ Location: l,
+ Label: map[string][]string{"source": s},
+ })
+}
+
+// parseCPU parses a profilez legacy profile and returns a newly
+// populated Profile.
+//
+// The general format for profilez samples is a sequence of words in
+// binary format. The first words are a header with the following data:
+//
+// 1st word -- 0
+// 2nd word -- 3
+// 3rd word -- 0 if a c++ application, 1 if a java application.
+// 4th word -- Sampling period (in microseconds).
+// 5th word -- Padding.
+func parseCPU(b []byte) (*Profile, error) {
+ var parse func([]byte) (uint64, []byte)
+ var n1, n2, n3, n4, n5 uint64
+ for _, parse = range cpuInts {
+ var tmp []byte
+ n1, tmp = parse(b)
+ n2, tmp = parse(tmp)
+ n3, tmp = parse(tmp)
+ n4, tmp = parse(tmp)
+ n5, tmp = parse(tmp)
+
+ if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 {
+ b = tmp
+ return cpuProfile(b, int64(n4), parse)
+ }
+ }
+ return nil, errUnrecognized
+}
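
A small illustrative sketch (not from this patch) of the five-word profilez header described above, built by hand as little-endian 64-bit words to match the get64l parser; the 100-microsecond period is an arbitrary example value.

package cpuheadersketch

import "encoding/binary"

// syntheticHeader builds the header words 0, 3, 0, period, 0 that parseCPU
// probes for with each of the candidate word parsers.
func syntheticHeader() []byte {
	words := []uint64{0, 3, 0, 100 /* sampling period in microseconds */, 0}
	b := make([]byte, 0, 8*len(words))
	for _, w := range words {
		b = binary.LittleEndian.AppendUint64(b, w)
	}
	return b
}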
+
+// cpuProfile returns a new Profile from C++ profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+ p := &Profile{
+ Period: period * 1000,
+ PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+ SampleType: []*ValueType{
+ {Type: "samples", Unit: "count"},
+ {Type: "cpu", Unit: "nanoseconds"},
+ },
+ }
+ var err error
+ if b, _, err = parseCPUSamples(b, parse, true, p); err != nil {
+ return nil, err
+ }
+
+ // If all samples have the same second-to-the-bottom frame, it
+ // strongly suggests that it is an uninteresting artifact of
+ // measurement -- a stack frame pushed by the signal handler. The
+ // bottom frame is always correct as it is picked up from the signal
+ // structure, not the stack. Check if this is the case and if so,
+ // remove.
+ if len(p.Sample) > 1 && len(p.Sample[0].Location) > 1 {
+ allSame := true
+ id1 := p.Sample[0].Location[1].Address
+ for _, s := range p.Sample {
+ if len(s.Location) < 2 || id1 != s.Location[1].Address {
+ allSame = false
+ break
+ }
+ }
+ if allSame {
+ for _, s := range p.Sample {
+ s.Location = append(s.Location[:1], s.Location[2:]...)
+ }
+ }
+ }
+
+ if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// parseCPUSamples parses a collection of profilez samples from a
+// profile.
+//
+// profilez samples are a repeated sequence of stack frames of the
+// form:
+//
+// 1st word -- The number of times this stack was encountered.
+// 2nd word -- The size of the stack (StackSize).
+// 3rd word -- The first address on the stack.
+// ...
+// StackSize + 2 -- The last address on the stack
+//
+// The last stack trace is of the form:
+//
+// 1st word -- 0
+// 2nd word -- 1
+// 3rd word -- 0
+//
+// Addresses from stack traces may point to the next instruction after
+// each call. Optionally adjust by -1 to land somewhere on the actual
+// call (except for the leaf, which is not a call).
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) {
+ locs := make(map[uint64]*Location)
+ for len(b) > 0 {
+ var count, nstk uint64
+ count, b = parse(b)
+ nstk, b = parse(b)
+ if b == nil || nstk > uint64(len(b)/4) {
+ return nil, nil, errUnrecognized
+ }
+ var sloc []*Location
+ addrs := make([]uint64, nstk)
+ for i := 0; i < int(nstk); i++ {
+ addrs[i], b = parse(b)
+ }
+
+ if count == 0 && nstk == 1 && addrs[0] == 0 {
+ // End of data marker
+ break
+ }
+ for i, addr := range addrs {
+ if adjust && i > 0 {
+ addr--
+ }
+ loc := locs[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ locs[addr] = loc
+ p.Location = append(p.Location, loc)
+ }
+ sloc = append(sloc, loc)
+ }
+ p.Sample = append(p.Sample,
+ &Sample{
+ Value: []int64{int64(count), int64(count) * p.Period},
+ Location: sloc,
+ })
+ }
+ // Reached the end without finding the EOD marker.
+ return b, locs, nil
+}
+
+// parseHeap parses a heapz legacy or a growthz profile and
+// returns a newly populated Profile.
+func parseHeap(b []byte) (p *Profile, err error) {
+ r := bytes.NewBuffer(b)
+ l, err := r.ReadString('\n')
+ if err != nil {
+ return nil, errUnrecognized
+ }
+
+ sampling := ""
+
+ if header := heapHeaderRE.FindStringSubmatch(l); header != nil {
+ p = &Profile{
+ SampleType: []*ValueType{
+ {Type: "objects", Unit: "count"},
+ {Type: "space", Unit: "bytes"},
+ },
+ PeriodType: &ValueType{Type: "objects", Unit: "bytes"},
+ }
+
+ var period int64
+ if len(header[6]) > 0 {
+ if period, err = strconv.ParseInt(header[6], 10, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ }
+
+ switch header[5] {
+ case "heapz_v2", "heap_v2":
+ sampling, p.Period = "v2", period
+ case "heapprofile":
+ sampling, p.Period = "", 1
+ case "heap":
+ sampling, p.Period = "v2", period/2
+ default:
+ return nil, errUnrecognized
+ }
+ } else if header = growthHeaderRE.FindStringSubmatch(l); header != nil {
+ p = &Profile{
+ SampleType: []*ValueType{
+ {Type: "objects", Unit: "count"},
+ {Type: "space", Unit: "bytes"},
+ },
+ PeriodType: &ValueType{Type: "heapgrowth", Unit: "count"},
+ Period: 1,
+ }
+ } else if header = fragmentationHeaderRE.FindStringSubmatch(l); header != nil {
+ p = &Profile{
+ SampleType: []*ValueType{
+ {Type: "objects", Unit: "count"},
+ {Type: "space", Unit: "bytes"},
+ },
+ PeriodType: &ValueType{Type: "allocations", Unit: "count"},
+ Period: 1,
+ }
+ } else {
+ return nil, errUnrecognized
+ }
+
+ if LegacyHeapAllocated {
+ for _, st := range p.SampleType {
+ st.Type = "alloc_" + st.Type
+ }
+ } else {
+ for _, st := range p.SampleType {
+ st.Type = "inuse_" + st.Type
+ }
+ }
+
+ locs := make(map[uint64]*Location)
+ for {
+ l, err = r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return nil, err
+ }
+
+ if l == "" {
+ break
+ }
+ }
+
+ if isSpaceOrComment(l) {
+ continue
+ }
+ l = strings.TrimSpace(l)
+
+ if sectionTrigger(l) != unrecognizedSection {
+ break
+ }
+
+ value, blocksize, addrs, err := parseHeapSample(l, p.Period, sampling)
+ if err != nil {
+ return nil, err
+ }
+ var sloc []*Location
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+
+ p.Sample = append(p.Sample, &Sample{
+ Value: value,
+ Location: sloc,
+ NumLabel: map[string][]int64{"bytes": {blocksize}},
+ })
+ }
+
+ if err = parseAdditionalSections(l, r, p); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// parseHeapSample parses a single row from a heap profile into a new Sample.
+func parseHeapSample(line string, rate int64, sampling string) (value []int64, blocksize int64, addrs []uint64, err error) {
+ sampleData := heapSampleRE.FindStringSubmatch(line)
+ if len(sampleData) != 6 {
+ return value, blocksize, addrs, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData))
+ }
+
+	// Use the first two values by default; tcmalloc sampling generates the
+	// same value for both, and only the older heap-profile format collects
+	// separate stats for in-use and allocated objects.
+ valueIndex := 1
+ if LegacyHeapAllocated {
+ valueIndex = 3
+ }
+
+ var v1, v2 int64
+ if v1, err = strconv.ParseInt(sampleData[valueIndex], 10, 64); err != nil {
+ return value, blocksize, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ if v2, err = strconv.ParseInt(sampleData[valueIndex+1], 10, 64); err != nil {
+ return value, blocksize, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ if v1 == 0 {
+ if v2 != 0 {
+ return value, blocksize, addrs, fmt.Errorf("allocation count was 0 but allocation bytes was %d", v2)
+ }
+ } else {
+ blocksize = v2 / v1
+ if sampling == "v2" {
+ v1, v2 = scaleHeapSample(v1, v2, rate)
+ }
+ }
+
+ value = []int64{v1, v2}
+ addrs = parseHexAddresses(sampleData[5])
+
+ return value, blocksize, addrs, nil
+}
+
+// extractHexAddresses extracts hex numbers from a string and returns
+// them, together with their numeric value, in a slice.
+func extractHexAddresses(s string) ([]string, []uint64) {
+ hexStrings := hexNumberRE.FindAllString(s, -1)
+ var ids []uint64
+ for _, s := range hexStrings {
+ if id, err := strconv.ParseUint(s, 0, 64); err == nil {
+ ids = append(ids, id)
+ } else {
+ // Do not expect any parsing failures due to the regexp matching.
+ panic("failed to parse hex value:" + s)
+ }
+ }
+ return hexStrings, ids
+}
+
+// parseHexAddresses parses hex numbers from a string and returns them
+// in a slice.
+func parseHexAddresses(s string) []uint64 {
+ _, ids := extractHexAddresses(s)
+ return ids
+}
+
+// scaleHeapSample adjusts the data from a heapz Sample to
+// account for its probability of appearing in the collected
+// data. heapz profiles are a sampling of the memory allocation
+// requests in a program. We estimate the unsampled value by dividing
+// each collected sample by its probability of appearing in the
+// profile. heapz v2 profiles rely on a Poisson process to determine
+// which samples to collect, based on the desired average collection
+// rate R. The probability that a sample of size S appears in that
+// profile is 1-exp(-S/R).
+func scaleHeapSample(count, size, rate int64) (int64, int64) {
+ if count == 0 || size == 0 {
+ return 0, 0
+ }
+
+ if rate <= 1 {
+ // if rate==1 all samples were collected so no adjustment is needed.
+ // if rate<1 treat as unknown and skip scaling.
+ return count, size
+ }
+
+ avgSize := float64(size) / float64(count)
+ scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
+
+ return int64(float64(count) * scale), int64(float64(size) * scale)
+}
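
A worked example (not from this patch) of the unsampling arithmetic: with a sampling rate R of 512 KiB and one collected 64 KiB object, the factor 1/(1-exp(-S/R)) is about 8.5, so the single sample is inflated to roughly 8 objects and 545 KiB of estimated allocation.

package heapscalesketch

import "math"

// scale mirrors the core of scaleHeapSample for rate > 1.
func scale(count, size, rate int64) (int64, int64) {
	avg := float64(size) / float64(count)
	factor := 1 / (1 - math.Exp(-avg/float64(rate)))
	return int64(float64(count) * factor), int64(float64(size) * factor)
}

// scale(1, 64<<10, 512<<10) is roughly (8, 557738): a factor of about 8.5.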
+
+// parseContention parses a mutex or contention profile. There are 2 cases:
+// "--- contentionz " for legacy C++ profiles (and backwards compatibility)
+// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime.
+// This code converts the text output from the runtime into a *Profile. (In the future
+// the runtime might write a serialized Profile directly, making this unnecessary.)
+func parseContention(b []byte) (*Profile, error) {
+ r := bytes.NewBuffer(b)
+ var l string
+ var err error
+ for {
+ // Skip past comments and empty lines seeking a real header.
+ l, err = r.ReadString('\n')
+ if err != nil {
+ return nil, err
+ }
+ if !isSpaceOrComment(l) {
+ break
+ }
+ }
+
+ if strings.HasPrefix(l, "--- contentionz ") {
+ return parseCppContention(r)
+ } else if strings.HasPrefix(l, "--- mutex:") {
+ return parseCppContention(r)
+ } else if strings.HasPrefix(l, "--- contention:") {
+ return parseCppContention(r)
+ }
+ return nil, errUnrecognized
+}
+
+// parseCppContention parses the output from synchronization_profiling.cc
+// for backward compatibility, and the compatible (non-debug) block profile
+// output from the Go runtime.
+func parseCppContention(r *bytes.Buffer) (*Profile, error) {
+ p := &Profile{
+ PeriodType: &ValueType{Type: "contentions", Unit: "count"},
+ Period: 1,
+ SampleType: []*ValueType{
+ {Type: "contentions", Unit: "count"},
+ {Type: "delay", Unit: "nanoseconds"},
+ },
+ }
+
+ var cpuHz int64
+ var l string
+ var err error
+ // Parse text of the form "attribute = value" before the samples.
+ const delimiter = '='
+ for {
+ l, err = r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return nil, err
+ }
+
+ if l == "" {
+ break
+ }
+ }
+ if isSpaceOrComment(l) {
+ continue
+ }
+
+ if l = strings.TrimSpace(l); l == "" {
+ continue
+ }
+
+ if strings.HasPrefix(l, "---") {
+ break
+ }
+
+ index := strings.IndexByte(l, delimiter)
+ if index < 0 {
+ break
+ }
+ key := l[:index]
+ val := l[index+1:]
+
+ key, val = strings.TrimSpace(key), strings.TrimSpace(val)
+ var err error
+ switch key {
+ case "cycles/second":
+ if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ case "sampling period":
+ if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ case "ms since reset":
+ ms, err := strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return nil, errUnrecognized
+ }
+ p.DurationNanos = ms * 1000 * 1000
+ case "format":
+ // CPP contentionz profiles don't have format.
+ return nil, errUnrecognized
+ case "resolution":
+ // CPP contentionz profiles don't have resolution.
+ return nil, errUnrecognized
+ case "discarded samples":
+ default:
+ return nil, errUnrecognized
+ }
+ }
+
+ locs := make(map[uint64]*Location)
+ for {
+ if !isSpaceOrComment(l) {
+ if l = strings.TrimSpace(l); strings.HasPrefix(l, "---") {
+ break
+ }
+ value, addrs, err := parseContentionSample(l, p.Period, cpuHz)
+ if err != nil {
+ return nil, err
+ }
+ var sloc []*Location
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+ p.Sample = append(p.Sample, &Sample{
+ Value: value,
+ Location: sloc,
+ })
+ }
+
+ if l, err = r.ReadString('\n'); err != nil {
+ if err != io.EOF {
+ return nil, err
+ }
+ if l == "" {
+ break
+ }
+ }
+ }
+
+ if err = parseAdditionalSections(l, r, p); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseContentionSample parses a single row from a contention profile
+// into a new Sample.
+func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) {
+ sampleData := contentionSampleRE.FindStringSubmatch(line)
+ if sampleData == nil {
+ return value, addrs, errUnrecognized
+ }
+
+ v1, err := strconv.ParseInt(sampleData[1], 10, 64)
+ if err != nil {
+ return value, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ v2, err := strconv.ParseInt(sampleData[2], 10, 64)
+ if err != nil {
+ return value, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ // Unsample values if period and cpuHz are available.
+ // - Delays are scaled to cycles and then to nanoseconds.
+ // - Contentions are scaled to cycles.
+ if period > 0 {
+ if cpuHz > 0 {
+ cpuGHz := float64(cpuHz) / 1e9
+ v1 = int64(float64(v1) * float64(period) / cpuGHz)
+ }
+ v2 = v2 * period
+ }
+
+ value = []int64{v2, v1}
+ addrs = parseHexAddresses(sampleData[3])
+
+ return value, addrs, nil
+}
+
+// parseThread parses a Threadz profile and returns a new Profile.
+func parseThread(b []byte) (*Profile, error) {
+ r := bytes.NewBuffer(b)
+
+ var line string
+ var err error
+ for {
+ // Skip past comments and empty lines seeking a real header.
+ line, err = r.ReadString('\n')
+ if err != nil {
+ return nil, err
+ }
+ if !isSpaceOrComment(line) {
+ break
+ }
+ }
+
+ if m := threadzStartRE.FindStringSubmatch(line); m != nil {
+ // Advance over initial comments until first stack trace.
+ for {
+ line, err = r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return nil, err
+ }
+
+ if line == "" {
+ break
+ }
+ }
+ if sectionTrigger(line) != unrecognizedSection || line[0] == '-' {
+ break
+ }
+ }
+ } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+ return nil, errUnrecognized
+ }
+
+ p := &Profile{
+ SampleType: []*ValueType{{Type: "thread", Unit: "count"}},
+ PeriodType: &ValueType{Type: "thread", Unit: "count"},
+ Period: 1,
+ }
+
+ locs := make(map[uint64]*Location)
+ // Recognize each thread and populate profile samples.
+ for sectionTrigger(line) == unrecognizedSection {
+ if strings.HasPrefix(line, "---- no stack trace for") {
+ line = ""
+ break
+ }
+ if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+ return nil, errUnrecognized
+ }
+
+ var addrs []uint64
+ line, addrs, err = parseThreadSample(r)
+ if err != nil {
+ return nil, errUnrecognized
+ }
+ if len(addrs) == 0 {
+ // We got a --same as previous threads--. Bump counters.
+ if len(p.Sample) > 0 {
+ s := p.Sample[len(p.Sample)-1]
+ s.Value[0]++
+ }
+ continue
+ }
+
+ var sloc []*Location
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+
+ p.Sample = append(p.Sample, &Sample{
+ Value: []int64{1},
+ Location: sloc,
+ })
+ }
+
+ if err = parseAdditionalSections(line, r, p); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseThreadSample parses a symbolized or unsymbolized stack trace.
+// Returns the first line after the traceback, the sample (or nil if
+// it hits a 'same-as-previous' marker) and an error.
+func parseThreadSample(b *bytes.Buffer) (nextl string, addrs []uint64, err error) {
+ var l string
+ sameAsPrevious := false
+ for {
+ if l, err = b.ReadString('\n'); err != nil {
+ if err != io.EOF {
+ return "", nil, err
+ }
+ if l == "" {
+ break
+ }
+ }
+ if l = strings.TrimSpace(l); l == "" {
+ continue
+ }
+
+ if strings.HasPrefix(l, "---") {
+ break
+ }
+ if strings.Contains(l, "same as previous thread") {
+ sameAsPrevious = true
+ continue
+ }
+
+ addrs = append(addrs, parseHexAddresses(l)...)
+ }
+
+ if sameAsPrevious {
+ return l, nil, nil
+ }
+ return l, addrs, nil
+}
+
+// parseAdditionalSections parses any additional sections in the
+// profile, ignoring any unrecognized sections.
+func parseAdditionalSections(l string, b *bytes.Buffer, p *Profile) (err error) {
+ for {
+ if sectionTrigger(l) == memoryMapSection {
+ break
+ }
+ // Ignore any unrecognized sections.
+ if l, err := b.ReadString('\n'); err != nil {
+ if err != io.EOF {
+ return err
+ }
+ if l == "" {
+ break
+ }
+ }
+ }
+ return p.ParseMemoryMap(b)
+}
+
+// ParseMemoryMap parses a memory map in the format of
+// /proc/self/maps, and overrides the mappings in the current profile.
+// It renumbers the samples and locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMap(rd io.Reader) error {
+ b := bufio.NewReader(rd)
+
+ var attrs []string
+ var r *strings.Replacer
+ const delimiter = '='
+ for {
+ l, err := b.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ if l == "" {
+ break
+ }
+ }
+ if l = strings.TrimSpace(l); l == "" {
+ continue
+ }
+
+ if r != nil {
+ l = r.Replace(l)
+ }
+ m, err := parseMappingEntry(l)
+ if err != nil {
+ if err == errUnrecognized {
+ // Recognize assignments of the form: attr=value, and replace
+ // $attr with value on subsequent mappings.
+ idx := strings.IndexByte(l, delimiter)
+ if idx >= 0 {
+ attr := l[:idx]
+ value := l[idx+1:]
+ attrs = append(attrs, "$"+strings.TrimSpace(attr), strings.TrimSpace(value))
+ r = strings.NewReplacer(attrs...)
+ }
+ // Ignore any unrecognized entries
+ continue
+ }
+ return err
+ }
+ if m == nil || (m.File == "" && len(p.Mapping) != 0) {
+ // In some cases the first entry may include the address range
+ // but not the name of the file. It should be followed by
+ // another entry with the name.
+ continue
+ }
+ if len(p.Mapping) == 1 && p.Mapping[0].File == "" {
+ // Update the name if this is the entry following that empty one.
+ p.Mapping[0].File = m.File
+ continue
+ }
+ p.Mapping = append(p.Mapping, m)
+ }
+ p.remapLocationIDs()
+ p.remapFunctionIDs()
+ p.remapMappingIDs()
+ return nil
+}
+
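+// parseMappingEntry parses a single mapping line, accepting either the full
+// /proc/self/maps format or the brief "start-limit: file" format; in the
+// /proc format, non-executable entries are skipped by returning a nil mapping.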
+func parseMappingEntry(l string) (*Mapping, error) {
+ mapping := &Mapping{}
+ var err error
+ if me := procMapsRE.FindStringSubmatch(l); len(me) == 9 {
+ if !strings.Contains(me[3], "x") {
+ // Skip non-executable entries.
+ return nil, nil
+ }
+ if mapping.Start, err = strconv.ParseUint(me[1], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ if mapping.Limit, err = strconv.ParseUint(me[2], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ if me[4] != "" {
+ if mapping.Offset, err = strconv.ParseUint(me[4], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ }
+ mapping.File = me[8]
+ return mapping, nil
+ }
+
+ if me := briefMapsRE.FindStringSubmatch(l); len(me) == 6 {
+ if mapping.Start, err = strconv.ParseUint(me[1], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ if mapping.Limit, err = strconv.ParseUint(me[2], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ mapping.File = me[3]
+ if me[5] != "" {
+ if mapping.Offset, err = strconv.ParseUint(me[5], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ }
+ return mapping, nil
+ }
+
+ return nil, errUnrecognized
+}
+
+type sectionType int
+
+const (
+ unrecognizedSection sectionType = iota
+ memoryMapSection
+)
+
+var memoryMapTriggers = []string{
+ "--- Memory map: ---",
+ "MAPPED_LIBRARIES:",
+}
+
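+// sectionTrigger reports which known section, if any, the given line starts;
+// currently only the memory map section is recognized.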
+func sectionTrigger(line string) sectionType {
+ for _, trigger := range memoryMapTriggers {
+ if strings.Contains(line, trigger) {
+ return memoryMapSection
+ }
+ }
+ return unrecognizedSection
+}
+
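+// addLegacyFrameInfo sets the profile's DropFrames/KeepFrames expressions
+// based on the detected legacy profile type.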
+func (p *Profile) addLegacyFrameInfo() {
+ switch {
+ case isProfileType(p, heapzSampleTypes) ||
+ isProfileType(p, heapzInUseSampleTypes) ||
+ isProfileType(p, heapzAllocSampleTypes):
+ p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr
+ case isProfileType(p, contentionzSampleTypes):
+ p.DropFrames, p.KeepFrames = lockRxStr, ""
+ default:
+ p.DropFrames, p.KeepFrames = cpuProfilerRxStr, ""
+ }
+}
+
+var heapzSampleTypes = []string{"allocations", "size"} // early Go pprof profiles
+var heapzInUseSampleTypes = []string{"inuse_objects", "inuse_space"}
+var heapzAllocSampleTypes = []string{"alloc_objects", "alloc_space"}
+var contentionzSampleTypes = []string{"contentions", "delay"}
+
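+// isProfileType reports whether the profile's sample types match t, in order.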
+func isProfileType(p *Profile, t []string) bool {
+ st := p.SampleType
+ if len(st) != len(t) {
+ return false
+ }
+
+ for i := range st {
+ if st[i].Type != t[i] {
+ return false
+ }
+ }
+ return true
+}
+
+var allocRxStr = strings.Join([]string{
+ // POSIX entry points.
+ `calloc`,
+ `cfree`,
+ `malloc`,
+ `free`,
+ `memalign`,
+ `do_memalign`,
+ `(__)?posix_memalign`,
+ `pvalloc`,
+ `valloc`,
+ `realloc`,
+
+ // TC malloc.
+ `tcmalloc::.*`,
+ `tc_calloc`,
+ `tc_cfree`,
+ `tc_malloc`,
+ `tc_free`,
+ `tc_memalign`,
+ `tc_posix_memalign`,
+ `tc_pvalloc`,
+ `tc_valloc`,
+ `tc_realloc`,
+ `tc_new`,
+ `tc_delete`,
+ `tc_newarray`,
+ `tc_deletearray`,
+ `tc_new_nothrow`,
+ `tc_newarray_nothrow`,
+
+ // Memory-allocation routines on OS X.
+ `malloc_zone_malloc`,
+ `malloc_zone_calloc`,
+ `malloc_zone_valloc`,
+ `malloc_zone_realloc`,
+ `malloc_zone_memalign`,
+ `malloc_zone_free`,
+
+ // Go runtime
+ `runtime\..*`,
+
+ // Other misc. memory allocation routines
+ `BaseArena::.*`,
+ `(::)?do_malloc_no_errno`,
+ `(::)?do_malloc_pages`,
+ `(::)?do_malloc`,
+ `DoSampledAllocation`,
+ `MallocedMemBlock::MallocedMemBlock`,
+ `_M_allocate`,
+ `__builtin_(vec_)?delete`,
+ `__builtin_(vec_)?new`,
+ `__gnu_cxx::new_allocator::allocate`,
+ `__libc_malloc`,
+ `__malloc_alloc_template::allocate`,
+ `allocate`,
+ `cpp_alloc`,
+ `operator new(\[\])?`,
+ `simple_alloc::allocate`,
+}, `|`)
+
+var allocSkipRxStr = strings.Join([]string{
+ // Preserve Go runtime frames that appear in the middle/bottom of
+ // the stack.
+ `runtime\.panic`,
+ `runtime\.reflectcall`,
+ `runtime\.call[0-9]*`,
+}, `|`)
+
+var cpuProfilerRxStr = strings.Join([]string{
+ `ProfileData::Add`,
+ `ProfileData::prof_handler`,
+ `CpuProfiler::prof_handler`,
+ `__pthread_sighandler`,
+ `__restore`,
+}, `|`)
+
+var lockRxStr = strings.Join([]string{
+ `RecordLockProfileData`,
+ `(base::)?RecordLockProfileData.*`,
+ `(base::)?SubmitMutexProfileData.*`,
+ `(base::)?SubmitSpinLockProfileData.*`,
+ `(Mutex::)?AwaitCommon.*`,
+ `(Mutex::)?Unlock.*`,
+ `(Mutex::)?UnlockSlow.*`,
+ `(Mutex::)?ReaderUnlock.*`,
+ `(MutexLock::)?~MutexLock.*`,
+ `(SpinLock::)?Unlock.*`,
+ `(SpinLock::)?SlowUnlock.*`,
+ `(SpinLockHolder::)?~SpinLockHolder.*`,
+}, `|`)
diff --git a/src/internal/profile/merge.go b/src/internal/profile/merge.go
new file mode 100644
index 0000000..3ea7d4c
--- /dev/null
+++ b/src/internal/profile/merge.go
@@ -0,0 +1,461 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package profile
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Merge merges all the profiles in profs into a single Profile.
+// Returns a new profile independent of the input profiles. The merged
+// profile is compacted to eliminate unused samples, locations,
+// functions and mappings. Profiles must have identical profile sample
+// and period types or the merge will fail. profile.Period of the
+// resulting profile will be the maximum of all profiles, and
+// profile.TimeNanos will be the earliest nonzero one.
+func Merge(srcs []*Profile) (*Profile, error) {
+ if len(srcs) == 0 {
+ return nil, fmt.Errorf("no profiles to merge")
+ }
+ p, err := combineHeaders(srcs)
+ if err != nil {
+ return nil, err
+ }
+
+ pm := &profileMerger{
+ p: p,
+ samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)),
+ locations: make(map[locationKey]*Location, len(srcs[0].Location)),
+ functions: make(map[functionKey]*Function, len(srcs[0].Function)),
+ mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
+ }
+
+ for _, src := range srcs {
+ // Clear the profile-specific hash tables
+ pm.locationsByID = make(map[uint64]*Location, len(src.Location))
+ pm.functionsByID = make(map[uint64]*Function, len(src.Function))
+ pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
+
+ if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
+ // The Mapping list has the property that the first mapping
+ // represents the main binary. Take the first Mapping we see,
+ // otherwise the operations below will add mappings in an
+ // arbitrary order.
+ pm.mapMapping(src.Mapping[0])
+ }
+
+ for _, s := range src.Sample {
+ if !isZeroSample(s) {
+ pm.mapSample(s)
+ }
+ }
+ }
+
+ for _, s := range p.Sample {
+ if isZeroSample(s) {
+ // If there are any zero samples, re-merge the profile to GC
+ // them.
+ return Merge([]*Profile{p})
+ }
+ }
+
+ return p, nil
+}
+
+// Normalize normalizes the profile p by multiplying each sample value by the
+// ratio of the base profile pb's total for that sample type to p's total for
+// that sample type.
+func (p *Profile) Normalize(pb *Profile) error {
+
+ if err := p.compatible(pb); err != nil {
+ return err
+ }
+
+ baseVals := make([]int64, len(p.SampleType))
+ for _, s := range pb.Sample {
+ for i, v := range s.Value {
+ baseVals[i] += v
+ }
+ }
+
+ srcVals := make([]int64, len(p.SampleType))
+ for _, s := range p.Sample {
+ for i, v := range s.Value {
+ srcVals[i] += v
+ }
+ }
+
+ normScale := make([]float64, len(baseVals))
+ for i := range baseVals {
+ if srcVals[i] == 0 {
+ normScale[i] = 0.0
+ } else {
+ normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
+ }
+ }
+ p.ScaleN(normScale)
+ return nil
+}
+
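+// isZeroSample reports whether every value in the sample is zero.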
+func isZeroSample(s *Sample) bool {
+ for _, v := range s.Value {
+ if v != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+type profileMerger struct {
+ p *Profile
+
+ // Memoization tables within a profile.
+ locationsByID map[uint64]*Location
+ functionsByID map[uint64]*Function
+ mappingsByID map[uint64]mapInfo
+
+ // Memoization tables for profile entities.
+ samples map[sampleKey]*Sample
+ locations map[locationKey]*Location
+ functions map[functionKey]*Function
+ mappings map[mappingKey]*Mapping
+}
+
+type mapInfo struct {
+ m *Mapping
+ offset int64
+}
+
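+// mapSample translates src into the merged profile, remapping its locations
+// and copying its labels; if an equivalent sample already exists, src's
+// values are accumulated into it instead of adding a duplicate.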
+func (pm *profileMerger) mapSample(src *Sample) *Sample {
+ s := &Sample{
+ Location: make([]*Location, len(src.Location)),
+ Value: make([]int64, len(src.Value)),
+ Label: make(map[string][]string, len(src.Label)),
+ NumLabel: make(map[string][]int64, len(src.NumLabel)),
+ NumUnit: make(map[string][]string, len(src.NumLabel)),
+ }
+ for i, l := range src.Location {
+ s.Location[i] = pm.mapLocation(l)
+ }
+ for k, v := range src.Label {
+ vv := make([]string, len(v))
+ copy(vv, v)
+ s.Label[k] = vv
+ }
+ for k, v := range src.NumLabel {
+ u := src.NumUnit[k]
+ vv := make([]int64, len(v))
+ uu := make([]string, len(u))
+ copy(vv, v)
+ copy(uu, u)
+ s.NumLabel[k] = vv
+ s.NumUnit[k] = uu
+ }
+ // Check memoization table. Must be done on the remapped location to
+ // account for the remapped mapping. Add current values to the
+ // existing sample.
+ k := s.key()
+ if ss, ok := pm.samples[k]; ok {
+ for i, v := range src.Value {
+ ss.Value[i] += v
+ }
+ return ss
+ }
+ copy(s.Value, src.Value)
+ pm.samples[k] = s
+ pm.p.Sample = append(pm.p.Sample, s)
+ return s
+}
+
+// key generates sampleKey to be used as a key for maps.
+func (sample *Sample) key() sampleKey {
+ ids := make([]string, len(sample.Location))
+ for i, l := range sample.Location {
+ ids[i] = strconv.FormatUint(l.ID, 16)
+ }
+
+ labels := make([]string, 0, len(sample.Label))
+ for k, v := range sample.Label {
+ labels = append(labels, fmt.Sprintf("%q%q", k, v))
+ }
+ sort.Strings(labels)
+
+ numlabels := make([]string, 0, len(sample.NumLabel))
+ for k, v := range sample.NumLabel {
+ numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
+ }
+ sort.Strings(numlabels)
+
+ return sampleKey{
+ strings.Join(ids, "|"),
+ strings.Join(labels, ""),
+ strings.Join(numlabels, ""),
+ }
+}
+
+type sampleKey struct {
+ locations string
+ labels string
+ numlabels string
+}
+
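+// mapLocation translates src into the merged profile, memoizing the result
+// both by the source location ID and by the location's content key.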
+func (pm *profileMerger) mapLocation(src *Location) *Location {
+ if src == nil {
+ return nil
+ }
+
+ if l, ok := pm.locationsByID[src.ID]; ok {
+ pm.locationsByID[src.ID] = l
+ return l
+ }
+
+ mi := pm.mapMapping(src.Mapping)
+ l := &Location{
+ ID: uint64(len(pm.p.Location) + 1),
+ Mapping: mi.m,
+ Address: uint64(int64(src.Address) + mi.offset),
+ Line: make([]Line, len(src.Line)),
+ IsFolded: src.IsFolded,
+ }
+ for i, ln := range src.Line {
+ l.Line[i] = pm.mapLine(ln)
+ }
+ // Check memoization table. Must be done on the remapped location to
+ // account for the remapped mapping ID.
+ k := l.key()
+ if ll, ok := pm.locations[k]; ok {
+ pm.locationsByID[src.ID] = ll
+ return ll
+ }
+ pm.locationsByID[src.ID] = l
+ pm.locations[k] = l
+ pm.p.Location = append(pm.p.Location, l)
+ return l
+}
+
+// key generates locationKey to be used as a key for maps.
+func (l *Location) key() locationKey {
+ key := locationKey{
+ addr: l.Address,
+ isFolded: l.IsFolded,
+ }
+ if l.Mapping != nil {
+ // Normalizes address to handle address space randomization.
+ key.addr -= l.Mapping.Start
+ key.mappingID = l.Mapping.ID
+ }
+ lines := make([]string, len(l.Line)*2)
+ for i, line := range l.Line {
+ if line.Function != nil {
+ lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
+ }
+ lines[i*2+1] = strconv.FormatInt(line.Line, 16)
+ }
+ key.lines = strings.Join(lines, "|")
+ return key
+}
+
+type locationKey struct {
+ addr, mappingID uint64
+ lines string
+ isFolded bool
+}
+
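+// mapMapping translates src into the merged profile and returns it together
+// with the address offset to apply when remapping locations from src.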
+func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
+ if src == nil {
+ return mapInfo{}
+ }
+
+ if mi, ok := pm.mappingsByID[src.ID]; ok {
+ return mi
+ }
+
+ // Check memoization tables.
+ mk := src.key()
+ if m, ok := pm.mappings[mk]; ok {
+ mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
+ pm.mappingsByID[src.ID] = mi
+ return mi
+ }
+ m := &Mapping{
+ ID: uint64(len(pm.p.Mapping) + 1),
+ Start: src.Start,
+ Limit: src.Limit,
+ Offset: src.Offset,
+ File: src.File,
+ BuildID: src.BuildID,
+ HasFunctions: src.HasFunctions,
+ HasFilenames: src.HasFilenames,
+ HasLineNumbers: src.HasLineNumbers,
+ HasInlineFrames: src.HasInlineFrames,
+ }
+ pm.p.Mapping = append(pm.p.Mapping, m)
+
+ // Update memoization tables.
+ pm.mappings[mk] = m
+ mi := mapInfo{m, 0}
+ pm.mappingsByID[src.ID] = mi
+ return mi
+}
+
+// key generates encoded strings of Mapping to be used as a key for
+// maps.
+func (m *Mapping) key() mappingKey {
+ // Normalize addresses to handle address space randomization.
+ // Round up to next 4K boundary to avoid minor discrepancies.
+ const mapsizeRounding = 0x1000
+
+ size := m.Limit - m.Start
+ size = size + mapsizeRounding - 1
+ size = size - (size % mapsizeRounding)
+ key := mappingKey{
+ size: size,
+ offset: m.Offset,
+ }
+
+ switch {
+ case m.BuildID != "":
+ key.buildIDOrFile = m.BuildID
+ case m.File != "":
+ key.buildIDOrFile = m.File
+ default:
+ // A mapping containing neither build ID nor file name is a fake mapping. A
+ // key with empty buildIDOrFile is used for fake mappings so that they are
+ // treated as the same mapping during merging.
+ }
+ return key
+}
+
+type mappingKey struct {
+ size, offset uint64
+ buildIDOrFile string
+}
+
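+// mapLine translates a Line into the merged profile by remapping its function.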
+func (pm *profileMerger) mapLine(src Line) Line {
+ ln := Line{
+ Function: pm.mapFunction(src.Function),
+ Line: src.Line,
+ }
+ return ln
+}
+
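+// mapFunction translates src into the merged profile, memoizing the result by
+// source ID and by the function's content key.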
+func (pm *profileMerger) mapFunction(src *Function) *Function {
+ if src == nil {
+ return nil
+ }
+ if f, ok := pm.functionsByID[src.ID]; ok {
+ return f
+ }
+ k := src.key()
+ if f, ok := pm.functions[k]; ok {
+ pm.functionsByID[src.ID] = f
+ return f
+ }
+ f := &Function{
+ ID: uint64(len(pm.p.Function) + 1),
+ Name: src.Name,
+ SystemName: src.SystemName,
+ Filename: src.Filename,
+ StartLine: src.StartLine,
+ }
+ pm.functions[k] = f
+ pm.functionsByID[src.ID] = f
+ pm.p.Function = append(pm.p.Function, f)
+ return f
+}
+
+// key generates a struct to be used as a key for maps.
+func (f *Function) key() functionKey {
+ return functionKey{
+ f.StartLine,
+ f.Name,
+ f.SystemName,
+ f.Filename,
+ }
+}
+
+type functionKey struct {
+ startLine int64
+ name, systemName, fileName string
+}
+
+// combineHeaders checks that all profiles can be merged and returns
+// their combined profile.
+func combineHeaders(srcs []*Profile) (*Profile, error) {
+ for _, s := range srcs[1:] {
+ if err := srcs[0].compatible(s); err != nil {
+ return nil, err
+ }
+ }
+
+ var timeNanos, durationNanos, period int64
+ var comments []string
+ seenComments := map[string]bool{}
+ var defaultSampleType string
+ for _, s := range srcs {
+ if timeNanos == 0 || s.TimeNanos < timeNanos {
+ timeNanos = s.TimeNanos
+ }
+ durationNanos += s.DurationNanos
+ if period == 0 || period < s.Period {
+ period = s.Period
+ }
+ for _, c := range s.Comments {
+ if seen := seenComments[c]; !seen {
+ comments = append(comments, c)
+ seenComments[c] = true
+ }
+ }
+ if defaultSampleType == "" {
+ defaultSampleType = s.DefaultSampleType
+ }
+ }
+
+ p := &Profile{
+ SampleType: make([]*ValueType, len(srcs[0].SampleType)),
+
+ DropFrames: srcs[0].DropFrames,
+ KeepFrames: srcs[0].KeepFrames,
+
+ TimeNanos: timeNanos,
+ DurationNanos: durationNanos,
+ PeriodType: srcs[0].PeriodType,
+ Period: period,
+
+ Comments: comments,
+ DefaultSampleType: defaultSampleType,
+ }
+ copy(p.SampleType, srcs[0].SampleType)
+ return p, nil
+}
+
+// compatible determines if two profiles can be compared/merged.
+// It returns nil if the profiles are compatible; otherwise an error with
+// details on the incompatibility.
+func (p *Profile) compatible(pb *Profile) error {
+ if !equalValueType(p.PeriodType, pb.PeriodType) {
+ return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
+ }
+
+ if len(p.SampleType) != len(pb.SampleType) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+
+ for i := range p.SampleType {
+ if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+ }
+ return nil
+}
+
+// equalValueType returns true if the two value types are semantically
+// equal. It ignores the internal fields used during encode/decode.
+func equalValueType(st1, st2 *ValueType) bool {
+ return st1.Type == st2.Type && st1.Unit == st2.Unit
+}
diff --git a/src/internal/profile/profile.go b/src/internal/profile/profile.go
new file mode 100644
index 0000000..c779bb2
--- /dev/null
+++ b/src/internal/profile/profile.go
@@ -0,0 +1,613 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package profile provides a representation of
+// github.com/google/pprof/proto/profile.proto and
+// methods to encode/decode/merge profiles in this format.
+package profile
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "internal/lazyregexp"
+ "io"
+ "strings"
+ "time"
+)
+
+// Profile is an in-memory representation of profile.proto.
+type Profile struct {
+ SampleType []*ValueType
+ DefaultSampleType string
+ Sample []*Sample
+ Mapping []*Mapping
+ Location []*Location
+ Function []*Function
+ Comments []string
+
+ DropFrames string
+ KeepFrames string
+
+ TimeNanos int64
+ DurationNanos int64
+ PeriodType *ValueType
+ Period int64
+
+ commentX []int64
+ dropFramesX int64
+ keepFramesX int64
+ stringTable []string
+ defaultSampleTypeX int64
+}
+
+// ValueType corresponds to Profile.ValueType
+type ValueType struct {
+ Type string // cpu, wall, inuse_space, etc
+ Unit string // seconds, nanoseconds, bytes, etc
+
+ typeX int64
+ unitX int64
+}
+
+// Sample corresponds to Profile.Sample
+type Sample struct {
+ Location []*Location
+ Value []int64
+ Label map[string][]string
+ NumLabel map[string][]int64
+ NumUnit map[string][]string
+
+ locationIDX []uint64
+ labelX []Label
+}
+
+// Label corresponds to Profile.Label
+type Label struct {
+ keyX int64
+ // Exactly one of the two following values must be set
+ strX int64
+ numX int64 // Integer value for this label
+}
+
+// Mapping corresponds to Profile.Mapping
+type Mapping struct {
+ ID uint64
+ Start uint64
+ Limit uint64
+ Offset uint64
+ File string
+ BuildID string
+ HasFunctions bool
+ HasFilenames bool
+ HasLineNumbers bool
+ HasInlineFrames bool
+
+ fileX int64
+ buildIDX int64
+}
+
+// Location corresponds to Profile.Location
+type Location struct {
+ ID uint64
+ Mapping *Mapping
+ Address uint64
+ Line []Line
+ IsFolded bool
+
+ mappingIDX uint64
+}
+
+// Line corresponds to Profile.Line
+type Line struct {
+ Function *Function
+ Line int64
+
+ functionIDX uint64
+}
+
+// Function corresponds to Profile.Function
+type Function struct {
+ ID uint64
+ Name string
+ SystemName string
+ Filename string
+ StartLine int64
+
+ nameX int64
+ systemNameX int64
+ filenameX int64
+}
+
+// Parse parses a profile and checks for its validity. The input
+// may be a gzip-compressed encoded protobuf or one of many legacy
+// profile formats which may be unsupported in the future.
+func Parse(r io.Reader) (*Profile, error) {
+ orig, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ var p *Profile
+ if len(orig) >= 2 && orig[0] == 0x1f && orig[1] == 0x8b {
+ gz, err := gzip.NewReader(bytes.NewBuffer(orig))
+ if err != nil {
+ return nil, fmt.Errorf("decompressing profile: %v", err)
+ }
+ data, err := io.ReadAll(gz)
+ if err != nil {
+ return nil, fmt.Errorf("decompressing profile: %v", err)
+ }
+ orig = data
+ }
+ if p, err = parseUncompressed(orig); err != nil {
+ if p, err = parseLegacy(orig); err != nil {
+ return nil, fmt.Errorf("parsing profile: %v", err)
+ }
+ }
+
+ if err := p.CheckValid(); err != nil {
+ return nil, fmt.Errorf("malformed profile: %v", err)
+ }
+ return p, nil
+}
+
+var errUnrecognized = fmt.Errorf("unrecognized profile format")
+var errMalformed = fmt.Errorf("malformed profile format")
+
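+// parseLegacy tries each legacy text-format parser in turn until one
+// recognizes the data.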
+func parseLegacy(data []byte) (*Profile, error) {
+ parsers := []func([]byte) (*Profile, error){
+ parseCPU,
+ parseHeap,
+ parseGoCount, // goroutine, threadcreate
+ parseThread,
+ parseContention,
+ }
+
+ for _, parser := range parsers {
+ p, err := parser(data)
+ if err == nil {
+ p.setMain()
+ p.addLegacyFrameInfo()
+ return p, nil
+ }
+ if err != errUnrecognized {
+ return nil, err
+ }
+ }
+ return nil, errUnrecognized
+}
+
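+// parseUncompressed unmarshals an uncompressed protobuf-encoded profile and
+// performs the post-decode fixups.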
+func parseUncompressed(data []byte) (*Profile, error) {
+ p := &Profile{}
+ if err := unmarshal(data, p); err != nil {
+ return nil, err
+ }
+
+ if err := p.postDecode(); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+var libRx = lazyregexp.New(`([.]so$|[.]so[._][0-9]+)`)
+
+// setMain scans Mapping entries and guesses which entry is main
+// because legacy profiles don't obey the convention of putting main
+// first.
+func (p *Profile) setMain() {
+ for i := 0; i < len(p.Mapping); i++ {
+ file := strings.TrimSpace(strings.ReplaceAll(p.Mapping[i].File, "(deleted)", ""))
+ if len(file) == 0 {
+ continue
+ }
+ if len(libRx.FindStringSubmatch(file)) > 0 {
+ continue
+ }
+ if strings.HasPrefix(file, "[") {
+ continue
+ }
+ // Swap what we guess is main to position 0.
+ p.Mapping[i], p.Mapping[0] = p.Mapping[0], p.Mapping[i]
+ break
+ }
+}
+
+// Write writes the profile as a gzip-compressed marshaled protobuf.
+func (p *Profile) Write(w io.Writer) error {
+ p.preEncode()
+ b := marshal(p)
+ zw := gzip.NewWriter(w)
+ defer zw.Close()
+ _, err := zw.Write(b)
+ return err
+}
+
+// CheckValid tests whether the profile is valid. Checks include, but are
+// not limited to:
+// - len(Profile.Sample[n].value) == len(Profile.value_unit)
+// - Sample.id has a corresponding Profile.Location
+func (p *Profile) CheckValid() error {
+ // Check that sample values are consistent
+ sampleLen := len(p.SampleType)
+ if sampleLen == 0 && len(p.Sample) != 0 {
+ return fmt.Errorf("missing sample type information")
+ }
+ for _, s := range p.Sample {
+ if len(s.Value) != sampleLen {
+ return fmt.Errorf("mismatch: sample has: %d values vs. %d types", len(s.Value), len(p.SampleType))
+ }
+ }
+
+ // Check that all mappings/locations/functions are in the tables
+ // Check that there are no duplicate ids
+ mappings := make(map[uint64]*Mapping, len(p.Mapping))
+ for _, m := range p.Mapping {
+ if m.ID == 0 {
+ return fmt.Errorf("found mapping with reserved ID=0")
+ }
+ if mappings[m.ID] != nil {
+ return fmt.Errorf("multiple mappings with same id: %d", m.ID)
+ }
+ mappings[m.ID] = m
+ }
+ functions := make(map[uint64]*Function, len(p.Function))
+ for _, f := range p.Function {
+ if f.ID == 0 {
+ return fmt.Errorf("found function with reserved ID=0")
+ }
+ if functions[f.ID] != nil {
+ return fmt.Errorf("multiple functions with same id: %d", f.ID)
+ }
+ functions[f.ID] = f
+ }
+ locations := make(map[uint64]*Location, len(p.Location))
+ for _, l := range p.Location {
+ if l.ID == 0 {
+ return fmt.Errorf("found location with reserved id=0")
+ }
+ if locations[l.ID] != nil {
+ return fmt.Errorf("multiple locations with same id: %d", l.ID)
+ }
+ locations[l.ID] = l
+ if m := l.Mapping; m != nil {
+ if m.ID == 0 || mappings[m.ID] != m {
+ return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
+ }
+ }
+ for _, ln := range l.Line {
+ if f := ln.Function; f != nil {
+ if f.ID == 0 || functions[f.ID] != f {
+ return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Aggregate merges the locations in the profile into equivalence
+// classes preserving the request attributes. It also updates the
+// samples to point to the merged locations.
+func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
+ for _, m := range p.Mapping {
+ m.HasInlineFrames = m.HasInlineFrames && inlineFrame
+ m.HasFunctions = m.HasFunctions && function
+ m.HasFilenames = m.HasFilenames && filename
+ m.HasLineNumbers = m.HasLineNumbers && linenumber
+ }
+
+ // Aggregate functions
+ if !function || !filename {
+ for _, f := range p.Function {
+ if !function {
+ f.Name = ""
+ f.SystemName = ""
+ }
+ if !filename {
+ f.Filename = ""
+ }
+ }
+ }
+
+ // Aggregate locations
+ if !inlineFrame || !address || !linenumber {
+ for _, l := range p.Location {
+ if !inlineFrame && len(l.Line) > 1 {
+ l.Line = l.Line[len(l.Line)-1:]
+ }
+ if !linenumber {
+ for i := range l.Line {
+ l.Line[i].Line = 0
+ }
+ }
+ if !address {
+ l.Address = 0
+ }
+ }
+ }
+
+ return p.CheckValid()
+}
+
+// String dumps a text representation of a profile. Intended mainly
+// for debugging purposes.
+func (p *Profile) String() string {
+
+ ss := make([]string, 0, len(p.Sample)+len(p.Mapping)+len(p.Location))
+ if pt := p.PeriodType; pt != nil {
+ ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
+ }
+ ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
+ if p.TimeNanos != 0 {
+ ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
+ }
+ if p.DurationNanos != 0 {
+ ss = append(ss, fmt.Sprintf("Duration: %v", time.Duration(p.DurationNanos)))
+ }
+
+ ss = append(ss, "Samples:")
+ var sh1 string
+ for _, s := range p.SampleType {
+ sh1 = sh1 + fmt.Sprintf("%s/%s ", s.Type, s.Unit)
+ }
+ ss = append(ss, strings.TrimSpace(sh1))
+ for _, s := range p.Sample {
+ var sv string
+ for _, v := range s.Value {
+ sv = fmt.Sprintf("%s %10d", sv, v)
+ }
+ sv = sv + ": "
+ for _, l := range s.Location {
+ sv = sv + fmt.Sprintf("%d ", l.ID)
+ }
+ ss = append(ss, sv)
+ const labelHeader = " "
+ if len(s.Label) > 0 {
+ ls := labelHeader
+ for k, v := range s.Label {
+ ls = ls + fmt.Sprintf("%s:%v ", k, v)
+ }
+ ss = append(ss, ls)
+ }
+ if len(s.NumLabel) > 0 {
+ ls := labelHeader
+ for k, v := range s.NumLabel {
+ ls = ls + fmt.Sprintf("%s:%v ", k, v)
+ }
+ ss = append(ss, ls)
+ }
+ }
+
+ ss = append(ss, "Locations")
+ for _, l := range p.Location {
+ locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
+ if m := l.Mapping; m != nil {
+ locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
+ }
+ if len(l.Line) == 0 {
+ ss = append(ss, locStr)
+ }
+ for li := range l.Line {
+ lnStr := "??"
+ if fn := l.Line[li].Function; fn != nil {
+ lnStr = fmt.Sprintf("%s %s:%d s=%d",
+ fn.Name,
+ fn.Filename,
+ l.Line[li].Line,
+ fn.StartLine)
+ if fn.Name != fn.SystemName {
+ lnStr = lnStr + "(" + fn.SystemName + ")"
+ }
+ }
+ ss = append(ss, locStr+lnStr)
+ // Do not print location details past the first line
+ locStr = " "
+ }
+ }
+
+ ss = append(ss, "Mappings")
+ for _, m := range p.Mapping {
+ bits := ""
+ if m.HasFunctions {
+ bits += "[FN]"
+ }
+ if m.HasFilenames {
+ bits += "[FL]"
+ }
+ if m.HasLineNumbers {
+ bits += "[LN]"
+ }
+ if m.HasInlineFrames {
+ bits += "[IN]"
+ }
+ ss = append(ss, fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
+ m.ID,
+ m.Start, m.Limit, m.Offset,
+ m.File,
+ m.BuildID,
+ bits))
+ }
+
+ return strings.Join(ss, "\n") + "\n"
+}
+
+// Merge adds profile pb adjusted by ratio r into profile p. Profiles
+// must be compatible (same Type and SampleType).
+// TODO(rsilvera): consider normalizing the profiles based on the
+// total samples collected.
+func (p *Profile) Merge(pb *Profile, r float64) error {
+ if err := p.Compatible(pb); err != nil {
+ return err
+ }
+
+ pb = pb.Copy()
+
+ // Keep the largest of the two periods.
+ if pb.Period > p.Period {
+ p.Period = pb.Period
+ }
+
+ p.DurationNanos += pb.DurationNanos
+
+ p.Mapping = append(p.Mapping, pb.Mapping...)
+ for i, m := range p.Mapping {
+ m.ID = uint64(i + 1)
+ }
+ p.Location = append(p.Location, pb.Location...)
+ for i, l := range p.Location {
+ l.ID = uint64(i + 1)
+ }
+ p.Function = append(p.Function, pb.Function...)
+ for i, f := range p.Function {
+ f.ID = uint64(i + 1)
+ }
+
+ if r != 1.0 {
+ for _, s := range pb.Sample {
+ for i, v := range s.Value {
+ s.Value[i] = int64((float64(v) * r))
+ }
+ }
+ }
+ p.Sample = append(p.Sample, pb.Sample...)
+ return p.CheckValid()
+}
+
+// Compatible determines if two profiles can be compared/merged.
+// It returns nil if the profiles are compatible; otherwise an error with
+// details on the incompatibility.
+func (p *Profile) Compatible(pb *Profile) error {
+ if !compatibleValueTypes(p.PeriodType, pb.PeriodType) {
+ return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
+ }
+
+ if len(p.SampleType) != len(pb.SampleType) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+
+ for i := range p.SampleType {
+ if !compatibleValueTypes(p.SampleType[i], pb.SampleType[i]) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+ }
+
+ return nil
+}
+
+// HasFunctions determines if all locations in this profile have
+// symbolized function information.
+func (p *Profile) HasFunctions() bool {
+ for _, l := range p.Location {
+ if l.Mapping == nil || !l.Mapping.HasFunctions {
+ return false
+ }
+ }
+ return true
+}
+
+// HasFileLines determines if all locations in this profile have
+// symbolized file and line number information.
+func (p *Profile) HasFileLines() bool {
+ for _, l := range p.Location {
+ if l.Mapping == nil || (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
+ return false
+ }
+ }
+ return true
+}
+
+func compatibleValueTypes(v1, v2 *ValueType) bool {
+ if v1 == nil || v2 == nil {
+ return true // No grounds to disqualify.
+ }
+ return v1.Type == v2.Type && v1.Unit == v2.Unit
+}
+
+// Copy makes a fully independent copy of a profile.
+func (p *Profile) Copy() *Profile {
+ p.preEncode()
+ b := marshal(p)
+
+ pp := &Profile{}
+ if err := unmarshal(b, pp); err != nil {
+ panic(err)
+ }
+ if err := pp.postDecode(); err != nil {
+ panic(err)
+ }
+
+ return pp
+}
+
+// Demangler maps symbol names to a human-readable form. This may
+// include C++ demangling and additional simplification. Names that
+// are not demangled may be missing from the resulting map.
+type Demangler func(name []string) (map[string]string, error)
+
+// Demangle attempts to demangle and optionally simplify any function
+// names referenced in the profile. It works on a best-effort basis:
+// it will silently preserve the original names in case of any errors.
+func (p *Profile) Demangle(d Demangler) error {
+ // Collect names to demangle.
+ var names []string
+ for _, fn := range p.Function {
+ names = append(names, fn.SystemName)
+ }
+
+ // Update profile with demangled names.
+ demangled, err := d(names)
+ if err != nil {
+ return err
+ }
+ for _, fn := range p.Function {
+ if dd, ok := demangled[fn.SystemName]; ok {
+ fn.Name = dd
+ }
+ }
+ return nil
+}
+
+// Empty reports whether the profile contains no samples.
+func (p *Profile) Empty() bool {
+ return len(p.Sample) == 0
+}
+
+// Scale multiplies all sample values in a profile by a constant.
+func (p *Profile) Scale(ratio float64) {
+ if ratio == 1 {
+ return
+ }
+ ratios := make([]float64, len(p.SampleType))
+ for i := range p.SampleType {
+ ratios[i] = ratio
+ }
+ p.ScaleN(ratios)
+}
+
+// ScaleN multiplies each value in a sample by a different amount, one ratio
+// per sample type.
+func (p *Profile) ScaleN(ratios []float64) error {
+ if len(p.SampleType) != len(ratios) {
+ return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
+ }
+ allOnes := true
+ for _, r := range ratios {
+ if r != 1 {
+ allOnes = false
+ break
+ }
+ }
+ if allOnes {
+ return nil
+ }
+ for _, s := range p.Sample {
+ for i, v := range s.Value {
+ if ratios[i] != 1 {
+ s.Value[i] = int64(float64(v) * ratios[i])
+ }
+ }
+ }
+ return nil
+}
diff --git a/src/internal/profile/profile_test.go b/src/internal/profile/profile_test.go
new file mode 100644
index 0000000..e1963f3
--- /dev/null
+++ b/src/internal/profile/profile_test.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package profile
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestEmptyProfile(t *testing.T) {
+ var buf bytes.Buffer
+ p, err := Parse(&buf)
+ if err != nil {
+ t.Error("Want no error, got", err)
+ }
+ if p == nil {
+ t.Fatal("Want a valid profile, got <nil>")
+ }
+ if !p.Empty() {
+ t.Errorf("Profile should be empty, got %#v", p)
+ }
+}
+
+func TestParseContention(t *testing.T) {
+ tests := []struct {
+ name string
+ in string
+ wantErr bool
+ }{
+ {
+ name: "valid",
+ in: `--- mutex:
+cycles/second=3491920901
+sampling period=1
+43227965305 1659640 @ 0x45e851 0x45f764 0x4a2be1 0x44ea31
+34035731690 15760 @ 0x45e851 0x45f764 0x4a2b17 0x44ea31
+`,
+ },
+ {
+ name: "valid with comment",
+ in: `--- mutex:
+cycles/second=3491920901
+sampling period=1
+43227965305 1659640 @ 0x45e851 0x45f764 0x4a2be1 0x44ea31
+# 0x45e850 sync.(*Mutex).Unlock+0x80 /go/src/sync/mutex.go:126
+# 0x45f763 sync.(*RWMutex).Unlock+0x83 /go/src/sync/rwmutex.go:125
+# 0x4a2be0 main.main.func3+0x70 /go/src/internal/pprof/profile/a_binary.go:58
+
+34035731690 15760 @ 0x45e851 0x45f764 0x4a2b17 0x44ea31
+# 0x45e850 sync.(*Mutex).Unlock+0x80 /go/src/sync/mutex.go:126
+# 0x45f763 sync.(*RWMutex).Unlock+0x83 /go/src/sync/rwmutex.go:125
+# 0x4a2b16 main.main.func2+0xd6 /go/src/internal/pprof/profile/a_binary.go:48
+`,
+ },
+ {
+ name: "empty",
+ in: `--- mutex:`,
+ wantErr: true,
+ },
+ {
+ name: "invalid header",
+ in: `--- channel:
+43227965305 1659640 @ 0x45e851 0x45f764 0x4a2be1 0x44ea31`,
+ wantErr: true,
+ },
+ }
+ for _, tc := range tests {
+ _, err := parseContention([]byte(tc.in))
+ if tc.wantErr && err == nil {
+ t.Errorf("parseContention(%q) succeeded unexpectedly", tc.name)
+ }
+ if !tc.wantErr && err != nil {
+ t.Errorf("parseContention(%q) failed unexpectedly: %v", tc.name, err)
+ }
+ }
+
+}
diff --git a/src/internal/profile/proto.go b/src/internal/profile/proto.go
new file mode 100644
index 0000000..58ff0ad
--- /dev/null
+++ b/src/internal/profile/proto.go
@@ -0,0 +1,356 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a simple protocol buffer encoder and decoder.
+//
+// A protocol message must implement the message interface:
+// decoder() []decoder
+// encode(*buffer)
+//
+// The decoder method returns a slice indexed by field number that gives the
+// function to decode that field.
+// The encode method encodes its receiver into the given buffer.
+//
+// The two methods are simple enough to be implemented by hand rather than
+// by using a protocol compiler.
+//
+// See profile.go for examples of messages implementing this interface.
+//
+// There is no support for groups, message sets, or "has" bits.
+
+package profile
+
+import (
+ "errors"
+ "fmt"
+)
+
+type buffer struct {
+ field int
+ typ int
+ u64 uint64
+ data []byte
+ tmp [16]byte
+}
+
+type decoder func(*buffer, message) error
+
+type message interface {
+ decoder() []decoder
+ encode(*buffer)
+}
+
+func marshal(m message) []byte {
+ var b buffer
+ m.encode(&b)
+ return b.data
+}
+
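+// encodeVarint appends x to b.data using base-128 varint encoding.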
+func encodeVarint(b *buffer, x uint64) {
+ for x >= 128 {
+ b.data = append(b.data, byte(x)|0x80)
+ x >>= 7
+ }
+ b.data = append(b.data, byte(x))
+}
+
+func encodeLength(b *buffer, tag int, len int) {
+ encodeVarint(b, uint64(tag)<<3|2)
+ encodeVarint(b, uint64(len))
+}
+
+func encodeUint64(b *buffer, tag int, x uint64) {
+ // append varint to b.data
+ encodeVarint(b, uint64(tag)<<3|0)
+ encodeVarint(b, x)
+}
+
+func encodeUint64s(b *buffer, tag int, x []uint64) {
+ if len(x) > 2 {
+ // Use packed encoding
+ n1 := len(b.data)
+ for _, u := range x {
+ encodeVarint(b, u)
+ }
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
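+ // Rotate the tag/length header appended at b.data[n2:n3] so that it
+ // precedes the packed values originally at b.data[n1:n2].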
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ encodeUint64(b, tag, u)
+ }
+}
+
+func encodeUint64Opt(b *buffer, tag int, x uint64) {
+ if x == 0 {
+ return
+ }
+ encodeUint64(b, tag, x)
+}
+
+func encodeInt64(b *buffer, tag int, x int64) {
+ u := uint64(x)
+ encodeUint64(b, tag, u)
+}
+
+func encodeInt64Opt(b *buffer, tag int, x int64) {
+ if x == 0 {
+ return
+ }
+ encodeInt64(b, tag, x)
+}
+
+func encodeInt64s(b *buffer, tag int, x []int64) {
+ if len(x) > 2 {
+ // Use packed encoding
+ n1 := len(b.data)
+ for _, u := range x {
+ encodeVarint(b, uint64(u))
+ }
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ encodeInt64(b, tag, u)
+ }
+}
+
+func encodeString(b *buffer, tag int, x string) {
+ encodeLength(b, tag, len(x))
+ b.data = append(b.data, x...)
+}
+
+func encodeStrings(b *buffer, tag int, x []string) {
+ for _, s := range x {
+ encodeString(b, tag, s)
+ }
+}
+
+func encodeBool(b *buffer, tag int, x bool) {
+ if x {
+ encodeUint64(b, tag, 1)
+ } else {
+ encodeUint64(b, tag, 0)
+ }
+}
+
+func encodeBoolOpt(b *buffer, tag int, x bool) {
+ if !x {
+ return
+ }
+ encodeBool(b, tag, x)
+}
+
+func encodeMessage(b *buffer, tag int, m message) {
+ n1 := len(b.data)
+ m.encode(b)
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+}
+
+func unmarshal(data []byte, m message) (err error) {
+ b := buffer{data: data, typ: 2}
+ return decodeMessage(&b, m)
+}
+
+func le64(p []byte) uint64 {
+ return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
+}
+
+func le32(p []byte) uint32 {
+ return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+}
+
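+// decodeVarint decodes a base-128 varint from data, returning the value and
+// the remaining bytes.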
+func decodeVarint(data []byte) (uint64, []byte, error) {
+ var i int
+ var u uint64
+ for i = 0; ; i++ {
+ if i >= 10 || i >= len(data) {
+ return 0, nil, errors.New("bad varint")
+ }
+ u |= uint64(data[i]&0x7F) << uint(7*i)
+ if data[i]&0x80 == 0 {
+ return u, data[i+1:], nil
+ }
+ }
+}
+
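+// decodeField decodes the next field from data into b, recording its field
+// number, wire type, and payload, and returns the remaining bytes.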
+func decodeField(b *buffer, data []byte) ([]byte, error) {
+ x, data, err := decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ b.field = int(x >> 3)
+ b.typ = int(x & 7)
+ b.data = nil
+ b.u64 = 0
+ switch b.typ {
+ case 0:
+ b.u64, data, err = decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ case 1:
+ if len(data) < 8 {
+ return nil, errors.New("not enough data")
+ }
+ b.u64 = le64(data[:8])
+ data = data[8:]
+ case 2:
+ var n uint64
+ n, data, err = decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ if n > uint64(len(data)) {
+ return nil, errors.New("too much data")
+ }
+ b.data = data[:n]
+ data = data[n:]
+ case 5:
+ if len(data) < 4 {
+ return nil, errors.New("not enough data")
+ }
+ b.u64 = uint64(le32(data[:4]))
+ data = data[4:]
+ default:
+ return nil, fmt.Errorf("unknown wire type: %d", b.typ)
+ }
+
+ return data, nil
+}
+
+func checkType(b *buffer, typ int) error {
+ if b.typ != typ {
+ return errors.New("type mismatch")
+ }
+ return nil
+}
+
+func decodeMessage(b *buffer, m message) error {
+ if err := checkType(b, 2); err != nil {
+ return err
+ }
+ dec := m.decoder()
+ data := b.data
+ for len(data) > 0 {
+ // pull varint field# + type
+ var err error
+ data, err = decodeField(b, data)
+ if err != nil {
+ return err
+ }
+ if b.field >= len(dec) || dec[b.field] == nil {
+ continue
+ }
+ if err := dec[b.field](b, m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func decodeInt64(b *buffer, x *int64) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ *x = int64(b.u64)
+ return nil
+}
+
+func decodeInt64s(b *buffer, x *[]int64) error {
+ if b.typ == 2 {
+ // Packed encoding
+ data := b.data
+ for len(data) > 0 {
+ var u uint64
+ var err error
+
+ if u, data, err = decodeVarint(data); err != nil {
+ return err
+ }
+ *x = append(*x, int64(u))
+ }
+ return nil
+ }
+ var i int64
+ if err := decodeInt64(b, &i); err != nil {
+ return err
+ }
+ *x = append(*x, i)
+ return nil
+}
+
+func decodeUint64(b *buffer, x *uint64) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ *x = b.u64
+ return nil
+}
+
+func decodeUint64s(b *buffer, x *[]uint64) error {
+ if b.typ == 2 {
+ data := b.data
+ // Packed encoding
+ for len(data) > 0 {
+ var u uint64
+ var err error
+
+ if u, data, err = decodeVarint(data); err != nil {
+ return err
+ }
+ *x = append(*x, u)
+ }
+ return nil
+ }
+ var u uint64
+ if err := decodeUint64(b, &u); err != nil {
+ return err
+ }
+ *x = append(*x, u)
+ return nil
+}
+
+func decodeString(b *buffer, x *string) error {
+ if err := checkType(b, 2); err != nil {
+ return err
+ }
+ *x = string(b.data)
+ return nil
+}
+
+func decodeStrings(b *buffer, x *[]string) error {
+ var s string
+ if err := decodeString(b, &s); err != nil {
+ return err
+ }
+ *x = append(*x, s)
+ return nil
+}
+
+func decodeBool(b *buffer, x *bool) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ if int64(b.u64) == 0 {
+ *x = false
+ } else {
+ *x = true
+ }
+ return nil
+}
diff --git a/src/internal/profile/proto_test.go b/src/internal/profile/proto_test.go
new file mode 100644
index 0000000..46c6d83
--- /dev/null
+++ b/src/internal/profile/proto_test.go
@@ -0,0 +1,71 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package profile
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestPackedEncoding(t *testing.T) {
+
+ type testcase struct {
+ uint64s []uint64
+ int64s []int64
+ encoded []byte
+ }
+ for i, tc := range []testcase{
+ {
+ []uint64{0, 1, 10, 100, 1000, 10000},
+ []int64{1000, 0, 1000},
+ []byte{10, 8, 0, 1, 10, 100, 232, 7, 144, 78, 18, 5, 232, 7, 0, 232, 7},
+ },
+ {
+ []uint64{10000},
+ nil,
+ []byte{8, 144, 78},
+ },
+ {
+ nil,
+ []int64{-10000},
+ []byte{16, 240, 177, 255, 255, 255, 255, 255, 255, 255, 1},
+ },
+ } {
+ source := &packedInts{tc.uint64s, tc.int64s}
+ if got, want := marshal(source), tc.encoded; !reflect.DeepEqual(got, want) {
+ t.Errorf("failed encode %d, got %v, want %v", i, got, want)
+ }
+
+ dest := new(packedInts)
+ if err := unmarshal(tc.encoded, dest); err != nil {
+ t.Errorf("failed decode %d: %v", i, err)
+ continue
+ }
+ if got, want := dest.uint64s, tc.uint64s; !reflect.DeepEqual(got, want) {
+ t.Errorf("failed decode uint64s %d, got %v, want %v", i, got, want)
+ }
+ if got, want := dest.int64s, tc.int64s; !reflect.DeepEqual(got, want) {
+ t.Errorf("failed decode int64s %d, got %v, want %v", i, got, want)
+ }
+ }
+}
+
+type packedInts struct {
+ uint64s []uint64
+ int64s []int64
+}
+
+func (u *packedInts) decoder() []decoder {
+ return []decoder{
+ nil,
+ func(b *buffer, m message) error { return decodeUint64s(b, &m.(*packedInts).uint64s) },
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*packedInts).int64s) },
+ }
+}
+
+func (u *packedInts) encode(b *buffer) {
+ encodeUint64s(b, 1, u.uint64s)
+ encodeInt64s(b, 2, u.int64s)
+}
diff --git a/src/internal/profile/prune.go b/src/internal/profile/prune.go
new file mode 100644
index 0000000..1924fad
--- /dev/null
+++ b/src/internal/profile/prune.go
@@ -0,0 +1,97 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Implements methods to remove frames from profiles.
+
+package profile
+
+import (
+ "fmt"
+ "regexp"
+)
+
+// Prune removes all nodes beneath a node matching dropRx, and not
+// matching keepRx. If the root node of a Sample matches, the sample
+// will have an empty stack.
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
+ prune := make(map[uint64]bool)
+ pruneBeneath := make(map[uint64]bool)
+
+ for _, loc := range p.Location {
+ var i int
+ for i = len(loc.Line) - 1; i >= 0; i-- {
+ if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+ funcName := fn.Name
+ // Account for leading '.' on the PPC ELF v1 ABI.
+ if funcName[0] == '.' {
+ funcName = funcName[1:]
+ }
+ if dropRx.MatchString(funcName) {
+ if keepRx == nil || !keepRx.MatchString(funcName) {
+ break
+ }
+ }
+ }
+ }
+
+ if i >= 0 {
+ // Found matching entry to prune.
+ pruneBeneath[loc.ID] = true
+
+ // Remove the matching location.
+ if i == len(loc.Line)-1 {
+ // Matched the top entry: prune the whole location.
+ prune[loc.ID] = true
+ } else {
+ loc.Line = loc.Line[i+1:]
+ }
+ }
+ }
+
+ // Prune locs from each Sample
+ for _, sample := range p.Sample {
+ // Scan from the root to the leaves to find the prune location.
+ // Do not prune frames before the first user frame, to avoid
+ // pruning everything.
+ foundUser := false
+ for i := len(sample.Location) - 1; i >= 0; i-- {
+ id := sample.Location[i].ID
+ if !prune[id] && !pruneBeneath[id] {
+ foundUser = true
+ continue
+ }
+ if !foundUser {
+ continue
+ }
+ if prune[id] {
+ sample.Location = sample.Location[i+1:]
+ break
+ }
+ if pruneBeneath[id] {
+ sample.Location = sample.Location[i:]
+ break
+ }
+ }
+ }
+}
+
+// RemoveUninteresting prunes and elides profiles using built-in
+// tables of uninteresting function names.
+func (p *Profile) RemoveUninteresting() error {
+ var keep, drop *regexp.Regexp
+ var err error
+
+ if p.DropFrames != "" {
+ if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
+ return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
+ }
+ if p.KeepFrames != "" {
+ if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
+ return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
+ }
+ }
+ p.Prune(drop, keep)
+ }
+ return nil
+}
diff --git a/src/internal/race/doc.go b/src/internal/race/doc.go
new file mode 100644
index 0000000..8fa44ce
--- /dev/null
+++ b/src/internal/race/doc.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package race contains helper functions for manually instrumenting code for the race detector.
+
+The runtime package intentionally exports these functions only in the race build;
+this package exports them unconditionally but without the "race" build tag they are no-ops.
+*/
+package race
diff --git a/src/internal/race/norace.go b/src/internal/race/norace.go
new file mode 100644
index 0000000..67b1305
--- /dev/null
+++ b/src/internal/race/norace.go
@@ -0,0 +1,43 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+// +build !race
+
+package race
+
+import (
+ "unsafe"
+)
+
+const Enabled = false
+
+func Acquire(addr unsafe.Pointer) {
+}
+
+func Release(addr unsafe.Pointer) {
+}
+
+func ReleaseMerge(addr unsafe.Pointer) {
+}
+
+func Disable() {
+}
+
+func Enable() {
+}
+
+func Read(addr unsafe.Pointer) {
+}
+
+func Write(addr unsafe.Pointer) {
+}
+
+func ReadRange(addr unsafe.Pointer, len int) {
+}
+
+func WriteRange(addr unsafe.Pointer, len int) {
+}
+
+func Errors() int { return 0 }
diff --git a/src/internal/race/race.go b/src/internal/race/race.go
new file mode 100644
index 0000000..40f2c99
--- /dev/null
+++ b/src/internal/race/race.go
@@ -0,0 +1,55 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+// +build race
+
+package race
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+const Enabled = true
+
+func Acquire(addr unsafe.Pointer) {
+ runtime.RaceAcquire(addr)
+}
+
+func Release(addr unsafe.Pointer) {
+ runtime.RaceRelease(addr)
+}
+
+func ReleaseMerge(addr unsafe.Pointer) {
+ runtime.RaceReleaseMerge(addr)
+}
+
+func Disable() {
+ runtime.RaceDisable()
+}
+
+func Enable() {
+ runtime.RaceEnable()
+}
+
+func Read(addr unsafe.Pointer) {
+ runtime.RaceRead(addr)
+}
+
+func Write(addr unsafe.Pointer) {
+ runtime.RaceWrite(addr)
+}
+
+func ReadRange(addr unsafe.Pointer, len int) {
+ runtime.RaceReadRange(addr, len)
+}
+
+func WriteRange(addr unsafe.Pointer, len int) {
+ runtime.RaceWriteRange(addr, len)
+}
+
+func Errors() int {
+ return runtime.RaceErrors()
+}
diff --git a/src/internal/reflectlite/all_test.go b/src/internal/reflectlite/all_test.go
new file mode 100644
index 0000000..820b4ae
--- /dev/null
+++ b/src/internal/reflectlite/all_test.go
@@ -0,0 +1,1039 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectlite_test
+
+import (
+ "encoding/base64"
+ "fmt"
+ "internal/abi"
+ . "internal/reflectlite"
+ "math"
+ "reflect"
+ "runtime"
+ "testing"
+ "unsafe"
+)
+
+func ToValue(v Value) reflect.Value {
+ return reflect.ValueOf(ToInterface(v))
+}
+
+func TypeString(t Type) string {
+ return fmt.Sprintf("%T", ToInterface(Zero(t)))
+}
+
+type integer int
+type T struct {
+ a int
+ b float64
+ c string
+ d *int
+}
+
+type pair struct {
+ i any
+ s string
+}
+
+func assert(t *testing.T, s, want string) {
+ t.Helper()
+ if s != want {
+ t.Errorf("have %#q want %#q", s, want)
+ }
+}
+
+var typeTests = []pair{
+ {struct{ x int }{}, "int"},
+ {struct{ x int8 }{}, "int8"},
+ {struct{ x int16 }{}, "int16"},
+ {struct{ x int32 }{}, "int32"},
+ {struct{ x int64 }{}, "int64"},
+ {struct{ x uint }{}, "uint"},
+ {struct{ x uint8 }{}, "uint8"},
+ {struct{ x uint16 }{}, "uint16"},
+ {struct{ x uint32 }{}, "uint32"},
+ {struct{ x uint64 }{}, "uint64"},
+ {struct{ x float32 }{}, "float32"},
+ {struct{ x float64 }{}, "float64"},
+ {struct{ x int8 }{}, "int8"},
+ {struct{ x (**int8) }{}, "**int8"},
+ {struct{ x (**integer) }{}, "**reflectlite_test.integer"},
+ {struct{ x ([32]int32) }{}, "[32]int32"},
+ {struct{ x ([]int8) }{}, "[]int8"},
+ {struct{ x (map[string]int32) }{}, "map[string]int32"},
+ {struct{ x (chan<- string) }{}, "chan<- string"},
+ {struct {
+ x struct {
+ c chan *int32
+ d float32
+ }
+ }{},
+ "struct { c chan *int32; d float32 }",
+ },
+ {struct{ x (func(a int8, b int32)) }{}, "func(int8, int32)"},
+ {struct {
+ x struct {
+ c func(chan *integer, *int8)
+ }
+ }{},
+ "struct { c func(chan *reflectlite_test.integer, *int8) }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int32
+ }
+ }{},
+ "struct { a int8; b int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int32
+ }
+ }{},
+ "struct { a int8; b int8; c int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int8
+ d int32
+ }
+ }{},
+ "struct { a int8; b int8; c int8; d int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int8
+ d int8
+ e int32
+ }
+ }{},
+ "struct { a int8; b int8; c int8; d int8; e int32 }",
+ },
+ {struct {
+ x struct {
+ a int8
+ b int8
+ c int8
+ d int8
+ e int8
+ f int32
+ }
+ }{},
+ "struct { a int8; b int8; c int8; d int8; e int8; f int32 }",
+ },
+ {struct {
+ x struct {
+ a int8 `reflect:"hi there"`
+ }
+ }{},
+ `struct { a int8 "reflect:\"hi there\"" }`,
+ },
+ {struct {
+ x struct {
+ a int8 `reflect:"hi \x00there\t\n\"\\"`
+ }
+ }{},
+ `struct { a int8 "reflect:\"hi \\x00there\\t\\n\\\"\\\\\"" }`,
+ },
+ {struct {
+ x struct {
+ f func(args ...int)
+ }
+ }{},
+ "struct { f func(...int) }",
+ },
+ // {struct {
+ // x (interface {
+ // a(func(func(int) int) func(func(int)) int)
+ // b()
+ // })
+ // }{},
+ // "interface { reflectlite_test.a(func(func(int) int) func(func(int)) int); reflectlite_test.b() }",
+ // },
+ {struct {
+ x struct {
+ int32
+ int64
+ }
+ }{},
+ "struct { int32; int64 }",
+ },
+}
+
+var valueTests = []pair{
+ {new(int), "132"},
+ {new(int8), "8"},
+ {new(int16), "16"},
+ {new(int32), "32"},
+ {new(int64), "64"},
+ {new(uint), "132"},
+ {new(uint8), "8"},
+ {new(uint16), "16"},
+ {new(uint32), "32"},
+ {new(uint64), "64"},
+ {new(float32), "256.25"},
+ {new(float64), "512.125"},
+ {new(complex64), "532.125+10i"},
+ {new(complex128), "564.25+1i"},
+ {new(string), "stringy cheese"},
+ {new(bool), "true"},
+ {new(*int8), "*int8(0)"},
+ {new(**int8), "**int8(0)"},
+ {new([5]int32), "[5]int32{0, 0, 0, 0, 0}"},
+ {new(**integer), "**reflectlite_test.integer(0)"},
+ {new(map[string]int32), "map[string]int32{<can't iterate on maps>}"},
+ {new(chan<- string), "chan<- string"},
+ {new(func(a int8, b int32)), "func(int8, int32)(arg)"},
+ {new(struct {
+ c chan *int32
+ d float32
+ }),
+ "struct { c chan *int32; d float32 }{chan *int32, 0}",
+ },
+ {new(struct{ c func(chan *integer, *int8) }),
+ "struct { c func(chan *reflectlite_test.integer, *int8) }{func(chan *reflectlite_test.integer, *int8)(arg)}",
+ },
+ {new(struct {
+ a int8
+ b int32
+ }),
+ "struct { a int8; b int32 }{0, 0}",
+ },
+ {new(struct {
+ a int8
+ b int8
+ c int32
+ }),
+ "struct { a int8; b int8; c int32 }{0, 0, 0}",
+ },
+}
+
+func testType(t *testing.T, i int, typ Type, want string) {
+ s := TypeString(typ)
+ if s != want {
+ t.Errorf("#%d: have %#q, want %#q", i, s, want)
+ }
+}
+
+func testReflectType(t *testing.T, i int, typ Type, want string) {
+ s := TypeString(typ)
+ if s != want {
+ t.Errorf("#%d: have %#q, want %#q", i, s, want)
+ }
+}
+
+func TestTypes(t *testing.T) {
+ for i, tt := range typeTests {
+ testReflectType(t, i, Field(ValueOf(tt.i), 0).Type(), tt.s)
+ }
+}
+
+func TestSetValue(t *testing.T) {
+ for i, tt := range valueTests {
+ v := ValueOf(tt.i).Elem()
+ switch v.Kind() {
+ case abi.Int:
+ v.Set(ValueOf(int(132)))
+ case abi.Int8:
+ v.Set(ValueOf(int8(8)))
+ case abi.Int16:
+ v.Set(ValueOf(int16(16)))
+ case abi.Int32:
+ v.Set(ValueOf(int32(32)))
+ case abi.Int64:
+ v.Set(ValueOf(int64(64)))
+ case abi.Uint:
+ v.Set(ValueOf(uint(132)))
+ case abi.Uint8:
+ v.Set(ValueOf(uint8(8)))
+ case abi.Uint16:
+ v.Set(ValueOf(uint16(16)))
+ case abi.Uint32:
+ v.Set(ValueOf(uint32(32)))
+ case abi.Uint64:
+ v.Set(ValueOf(uint64(64)))
+ case abi.Float32:
+ v.Set(ValueOf(float32(256.25)))
+ case abi.Float64:
+ v.Set(ValueOf(512.125))
+ case abi.Complex64:
+ v.Set(ValueOf(complex64(532.125 + 10i)))
+ case abi.Complex128:
+ v.Set(ValueOf(complex128(564.25 + 1i)))
+ case abi.String:
+ v.Set(ValueOf("stringy cheese"))
+ case abi.Bool:
+ v.Set(ValueOf(true))
+ }
+ s := valueToString(v)
+ if s != tt.s {
+ t.Errorf("#%d: have %#q, want %#q", i, s, tt.s)
+ }
+ }
+}
+
+func TestCanSetField(t *testing.T) {
+ type embed struct{ x, X int }
+ type Embed struct{ x, X int }
+ type S1 struct {
+ embed
+ x, X int
+ }
+ type S2 struct {
+ *embed
+ x, X int
+ }
+ type S3 struct {
+ Embed
+ x, X int
+ }
+ type S4 struct {
+ *Embed
+ x, X int
+ }
+
+ type testCase struct {
+ index []int
+ canSet bool
+ }
+ tests := []struct {
+ val Value
+ cases []testCase
+ }{{
+ val: ValueOf(&S1{}),
+ cases: []testCase{
+ {[]int{0}, false},
+ {[]int{0, 0}, false},
+ {[]int{0, 1}, true},
+ {[]int{1}, false},
+ {[]int{2}, true},
+ },
+ }, {
+ val: ValueOf(&S2{embed: &embed{}}),
+ cases: []testCase{
+ {[]int{0}, false},
+ {[]int{0, 0}, false},
+ {[]int{0, 1}, true},
+ {[]int{1}, false},
+ {[]int{2}, true},
+ },
+ }, {
+ val: ValueOf(&S3{}),
+ cases: []testCase{
+ {[]int{0}, true},
+ {[]int{0, 0}, false},
+ {[]int{0, 1}, true},
+ {[]int{1}, false},
+ {[]int{2}, true},
+ },
+ }, {
+ val: ValueOf(&S4{Embed: &Embed{}}),
+ cases: []testCase{
+ {[]int{0}, true},
+ {[]int{0, 0}, false},
+ {[]int{0, 1}, true},
+ {[]int{1}, false},
+ {[]int{2}, true},
+ },
+ }}
+
+ for _, tt := range tests {
+ t.Run(tt.val.Type().Name(), func(t *testing.T) {
+ for _, tc := range tt.cases {
+ f := tt.val
+ for _, i := range tc.index {
+ if f.Kind() == Ptr {
+ f = f.Elem()
+ }
+ f = Field(f, i)
+ }
+ if got := f.CanSet(); got != tc.canSet {
+ t.Errorf("CanSet() = %v, want %v", got, tc.canSet)
+ }
+ }
+ })
+ }
+}
+
+var _i = 7
+
+var valueToStringTests = []pair{
+ {123, "123"},
+ {123.5, "123.5"},
+ {byte(123), "123"},
+ {"abc", "abc"},
+ {T{123, 456.75, "hello", &_i}, "reflectlite_test.T{123, 456.75, hello, *int(&7)}"},
+ {new(chan *T), "*chan *reflectlite_test.T(&chan *reflectlite_test.T)"},
+ {[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}"},
+ {&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "*[10]int(&[10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})"},
+ {[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}"},
+ {&[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, "*[]int(&[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})"},
+}
+
+func TestValueToString(t *testing.T) {
+ for i, test := range valueToStringTests {
+ s := valueToString(ValueOf(test.i))
+ if s != test.s {
+ t.Errorf("#%d: have %#q, want %#q", i, s, test.s)
+ }
+ }
+}
+
+func TestPtrSetNil(t *testing.T) {
+ var i int32 = 1234
+ ip := &i
+ vip := ValueOf(&ip)
+ vip.Elem().Set(Zero(vip.Elem().Type()))
+ if ip != nil {
+ t.Errorf("got non-nil (%d), want nil", *ip)
+ }
+}
+
+func TestMapSetNil(t *testing.T) {
+ m := make(map[string]int)
+ vm := ValueOf(&m)
+ vm.Elem().Set(Zero(vm.Elem().Type()))
+ if m != nil {
+ t.Errorf("got non-nil (%p), want nil", m)
+ }
+}
+
+func TestAll(t *testing.T) {
+ testType(t, 1, TypeOf((int8)(0)), "int8")
+ testType(t, 2, TypeOf((*int8)(nil)).Elem(), "int8")
+
+ typ := TypeOf((*struct {
+ c chan *int32
+ d float32
+ })(nil))
+ testType(t, 3, typ, "*struct { c chan *int32; d float32 }")
+ etyp := typ.Elem()
+ testType(t, 4, etyp, "struct { c chan *int32; d float32 }")
+}
+
+func TestInterfaceValue(t *testing.T) {
+ var inter struct {
+ E any
+ }
+ inter.E = 123.456
+ v1 := ValueOf(&inter)
+ v2 := Field(v1.Elem(), 0)
+ // assert(t, TypeString(v2.Type()), "interface {}")
+ v3 := v2.Elem()
+ assert(t, TypeString(v3.Type()), "float64")
+
+ i3 := ToInterface(v2)
+ if _, ok := i3.(float64); !ok {
+ t.Error("v2.Interface() did not return float64, got ", TypeOf(i3))
+ }
+}
+
+func TestFunctionValue(t *testing.T) {
+ var x any = func() {}
+ v := ValueOf(x)
+ if fmt.Sprint(ToInterface(v)) != fmt.Sprint(x) {
+ t.Fatalf("TestFunction returned wrong pointer")
+ }
+ assert(t, TypeString(v.Type()), "func()")
+}
+
+var appendTests = []struct {
+ orig, extra []int
+}{
+ {make([]int, 2, 4), []int{22}},
+ {make([]int, 2, 4), []int{22, 33, 44}},
+}
+
+func sameInts(x, y []int) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ for i, xx := range x {
+ if xx != y[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func TestBigUnnamedStruct(t *testing.T) {
+ b := struct{ a, b, c, d int64 }{1, 2, 3, 4}
+ v := ValueOf(b)
+ b1 := ToInterface(v).(struct {
+ a, b, c, d int64
+ })
+ if b1.a != b.a || b1.b != b.b || b1.c != b.c || b1.d != b.d {
+ t.Errorf("ValueOf(%v).Interface().(*Big) = %v", b, b1)
+ }
+}
+
+type big struct {
+ a, b, c, d, e int64
+}
+
+func TestBigStruct(t *testing.T) {
+ b := big{1, 2, 3, 4, 5}
+ v := ValueOf(b)
+ b1 := ToInterface(v).(big)
+ if b1.a != b.a || b1.b != b.b || b1.c != b.c || b1.d != b.d || b1.e != b.e {
+ t.Errorf("ValueOf(%v).Interface().(big) = %v", b, b1)
+ }
+}
+
+type Basic struct {
+ x int
+ y float32
+}
+
+type NotBasic Basic
+
+type DeepEqualTest struct {
+ a, b any
+ eq bool
+}
+
+// Simple functions for DeepEqual tests.
+var (
+ fn1 func() // nil.
+ fn2 func() // nil.
+ fn3 = func() { fn1() } // Not nil.
+)
+
+type self struct{}
+
+type Loop *Loop
+type Loopy any
+
+var loop1, loop2 Loop
+var loopy1, loopy2 Loopy
+
+func init() {
+ loop1 = &loop2
+ loop2 = &loop1
+
+ loopy1 = &loopy2
+ loopy2 = &loopy1
+}
+
+var typeOfTests = []DeepEqualTest{
+ // Equalities
+ {nil, nil, true},
+ {1, 1, true},
+ {int32(1), int32(1), true},
+ {0.5, 0.5, true},
+ {float32(0.5), float32(0.5), true},
+ {"hello", "hello", true},
+ {make([]int, 10), make([]int, 10), true},
+ {&[3]int{1, 2, 3}, &[3]int{1, 2, 3}, true},
+ {Basic{1, 0.5}, Basic{1, 0.5}, true},
+ {error(nil), error(nil), true},
+ {map[int]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, true},
+ {fn1, fn2, true},
+
+ // Inequalities
+ {1, 2, false},
+ {int32(1), int32(2), false},
+ {0.5, 0.6, false},
+ {float32(0.5), float32(0.6), false},
+ {"hello", "hey", false},
+ {make([]int, 10), make([]int, 11), false},
+ {&[3]int{1, 2, 3}, &[3]int{1, 2, 4}, false},
+ {Basic{1, 0.5}, Basic{1, 0.6}, false},
+ {Basic{1, 0}, Basic{2, 0}, false},
+ {map[int]string{1: "one", 3: "two"}, map[int]string{2: "two", 1: "one"}, false},
+ {map[int]string{1: "one", 2: "txo"}, map[int]string{2: "two", 1: "one"}, false},
+ {map[int]string{1: "one"}, map[int]string{2: "two", 1: "one"}, false},
+ {map[int]string{2: "two", 1: "one"}, map[int]string{1: "one"}, false},
+ {nil, 1, false},
+ {1, nil, false},
+ {fn1, fn3, false},
+ {fn3, fn3, false},
+ {[][]int{{1}}, [][]int{{2}}, false},
+ {math.NaN(), math.NaN(), false},
+ {&[1]float64{math.NaN()}, &[1]float64{math.NaN()}, false},
+ {&[1]float64{math.NaN()}, self{}, true},
+ {[]float64{math.NaN()}, []float64{math.NaN()}, false},
+ {[]float64{math.NaN()}, self{}, true},
+ {map[float64]float64{math.NaN(): 1}, map[float64]float64{1: 2}, false},
+ {map[float64]float64{math.NaN(): 1}, self{}, true},
+
+ // Nil vs empty: not the same.
+ {[]int{}, []int(nil), false},
+ {[]int{}, []int{}, true},
+ {[]int(nil), []int(nil), true},
+ {map[int]int{}, map[int]int(nil), false},
+ {map[int]int{}, map[int]int{}, true},
+ {map[int]int(nil), map[int]int(nil), true},
+
+ // Mismatched types
+ {1, 1.0, false},
+ {int32(1), int64(1), false},
+ {0.5, "hello", false},
+ {[]int{1, 2, 3}, [3]int{1, 2, 3}, false},
+ {&[3]any{1, 2, 4}, &[3]any{1, 2, "s"}, false},
+ {Basic{1, 0.5}, NotBasic{1, 0.5}, false},
+ {map[uint]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, false},
+
+ // Possible loops.
+ {&loop1, &loop1, true},
+ {&loop1, &loop2, true},
+ {&loopy1, &loopy1, true},
+ {&loopy1, &loopy2, true},
+}
+
+func TestTypeOf(t *testing.T) {
+ // Special case for nil
+ if typ := TypeOf(nil); typ != nil {
+ t.Errorf("expected nil type for nil value; got %v", typ)
+ }
+ for _, test := range typeOfTests {
+ v := ValueOf(test.a)
+ if !v.IsValid() {
+ continue
+ }
+ typ := TypeOf(test.a)
+ if typ != v.Type() {
+ t.Errorf("TypeOf(%v) = %v, but ValueOf(%v).Type() = %v", test.a, typ, test.a, v.Type())
+ }
+ }
+}
+
+func Nil(a any, t *testing.T) {
+ n := Field(ValueOf(a), 0)
+ if !n.IsNil() {
+ t.Errorf("%v should be nil", a)
+ }
+}
+
+func NotNil(a any, t *testing.T) {
+ n := Field(ValueOf(a), 0)
+ if n.IsNil() {
+ t.Errorf("value of type %v should not be nil", TypeString(ValueOf(a).Type()))
+ }
+}
+
+func TestIsNil(t *testing.T) {
+ // These implement IsNil.
+ // Wrap in extra struct to hide interface type.
+ doNil := []any{
+ struct{ x *int }{},
+ struct{ x any }{},
+ struct{ x map[string]int }{},
+ struct{ x func() bool }{},
+ struct{ x chan int }{},
+ struct{ x []string }{},
+ struct{ x unsafe.Pointer }{},
+ }
+ for _, ts := range doNil {
+ ty := TField(TypeOf(ts), 0)
+ v := Zero(ty)
+ v.IsNil() // panics if not okay to call
+ }
+
+ // Check the implementations
+ var pi struct {
+ x *int
+ }
+ Nil(pi, t)
+ pi.x = new(int)
+ NotNil(pi, t)
+
+ var si struct {
+ x []int
+ }
+ Nil(si, t)
+ si.x = make([]int, 10)
+ NotNil(si, t)
+
+ var ci struct {
+ x chan int
+ }
+ Nil(ci, t)
+ ci.x = make(chan int)
+ NotNil(ci, t)
+
+ var mi struct {
+ x map[int]int
+ }
+ Nil(mi, t)
+ mi.x = make(map[int]int)
+ NotNil(mi, t)
+
+ var ii struct {
+ x any
+ }
+ Nil(ii, t)
+ ii.x = 2
+ NotNil(ii, t)
+
+ var fi struct {
+ x func(t *testing.T)
+ }
+ Nil(fi, t)
+ fi.x = TestIsNil
+ NotNil(fi, t)
+}
+
+// Indirect returns the value that v points to.
+// If v is a nil pointer, Indirect returns a zero Value.
+// If v is not a pointer, Indirect returns v.
+func Indirect(v Value) Value {
+ if v.Kind() != Ptr {
+ return v
+ }
+ return v.Elem()
+}
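+
+// Editor's note: a hedged illustration, not part of the original test file;
+// the helper name is hypothetical. It exercises the two behaviours documented
+// for Indirect above: following one level of pointer, and returning a
+// non-pointer value unchanged.
+func indirectSketch() (viaPtr, direct int) {
+ x := 7
+ viaPtr = ToInterface(Indirect(ValueOf(&x))).(int) // pointer: follow it to 7
+ direct = ToInterface(Indirect(ValueOf(x))).(int) // non-pointer: returned as-is
+ return viaPtr, direct
+}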
+
+func TestNilPtrValueSub(t *testing.T) {
+ var pi *int
+ if pv := ValueOf(pi); pv.Elem().IsValid() {
+ t.Error("ValueOf((*int)(nil)).Elem().IsValid()")
+ }
+}
+
+type Point struct {
+ x, y int
+}
+
+// This will be index 0.
+func (p Point) AnotherMethod(scale int) int {
+ return -1
+}
+
+// This will be index 1.
+func (p Point) Dist(scale int) int {
+ //println("Point.Dist", p.x, p.y, scale)
+ return p.x*p.x*scale + p.y*p.y*scale
+}
+
+// This will be index 2.
+func (p Point) GCMethod(k int) int {
+ runtime.GC()
+ return k + p.x
+}
+
+// This will be index 3.
+func (p Point) NoArgs() {
+ // Exercise no-argument/no-result paths.
+}
+
+// This will be index 4.
+func (p Point) TotalDist(points ...Point) int {
+ tot := 0
+ for _, q := range points {
+ dx := q.x - p.x
+ dy := q.y - p.y
+ tot += dx*dx + dy*dy // Should call Sqrt, but it's just a test.
+
+ }
+ return tot
+}
+
+type D1 struct {
+ d int
+}
+type D2 struct {
+ d int
+}
+
+func TestImportPath(t *testing.T) {
+ tests := []struct {
+ t Type
+ path string
+ }{
+ {TypeOf(&base64.Encoding{}).Elem(), "encoding/base64"},
+ {TypeOf(int(0)), ""},
+ {TypeOf(int8(0)), ""},
+ {TypeOf(int16(0)), ""},
+ {TypeOf(int32(0)), ""},
+ {TypeOf(int64(0)), ""},
+ {TypeOf(uint(0)), ""},
+ {TypeOf(uint8(0)), ""},
+ {TypeOf(uint16(0)), ""},
+ {TypeOf(uint32(0)), ""},
+ {TypeOf(uint64(0)), ""},
+ {TypeOf(uintptr(0)), ""},
+ {TypeOf(float32(0)), ""},
+ {TypeOf(float64(0)), ""},
+ {TypeOf(complex64(0)), ""},
+ {TypeOf(complex128(0)), ""},
+ {TypeOf(byte(0)), ""},
+ {TypeOf(rune(0)), ""},
+ {TypeOf([]byte(nil)), ""},
+ {TypeOf([]rune(nil)), ""},
+ {TypeOf(string("")), ""},
+ {TypeOf((*any)(nil)).Elem(), ""},
+ {TypeOf((*byte)(nil)), ""},
+ {TypeOf((*rune)(nil)), ""},
+ {TypeOf((*int64)(nil)), ""},
+ {TypeOf(map[string]int{}), ""},
+ {TypeOf((*error)(nil)).Elem(), ""},
+ {TypeOf((*Point)(nil)), ""},
+ {TypeOf((*Point)(nil)).Elem(), "internal/reflectlite_test"},
+ }
+ for _, test := range tests {
+ if path := test.t.PkgPath(); path != test.path {
+ t.Errorf("%v.PkgPath() = %q, want %q", test.t, path, test.path)
+ }
+ }
+}
+
+func noAlloc(t *testing.T, n int, f func(int)) {
+ if testing.Short() {
+ t.Skip("skipping malloc count in short mode")
+ }
+ if runtime.GOMAXPROCS(0) > 1 {
+ t.Skip("skipping; GOMAXPROCS>1")
+ }
+ i := -1
+ allocs := testing.AllocsPerRun(n, func() {
+ f(i)
+ i++
+ })
+ if allocs > 0 {
+ t.Errorf("%d iterations: got %v mallocs, want 0", n, allocs)
+ }
+}
+
+func TestAllocations(t *testing.T) {
+ noAlloc(t, 100, func(j int) {
+ var i any
+ var v Value
+
+ // We can uncomment this when compiler escape analysis
+ // is good enough to see that the integer assigned to i
+ // does not escape and therefore need not be allocated.
+ //
+ // i = 42 + j
+ // v = ValueOf(i)
+ // if int(v.Int()) != 42+j {
+ // panic("wrong int")
+ // }
+
+ i = func(j int) int { return j }
+ v = ValueOf(i)
+ if ToInterface(v).(func(int) int)(j) != j {
+ panic("wrong result")
+ }
+ })
+}
+
+func TestSetPanic(t *testing.T) {
+ ok := func(f func()) { f() }
+ bad := shouldPanic
+ clear := func(v Value) { v.Set(Zero(v.Type())) }
+
+ type t0 struct {
+ W int
+ }
+
+ type t1 struct {
+ Y int
+ t0
+ }
+
+ type T2 struct {
+ Z int
+ namedT0 t0
+ }
+
+ type T struct {
+ X int
+ t1
+ T2
+ NamedT1 t1
+ NamedT2 T2
+ namedT1 t1
+ namedT2 T2
+ }
+
+ // not addressable
+ v := ValueOf(T{})
+ bad(func() { clear(Field(v, 0)) }) // .X
+ bad(func() { clear(Field(v, 1)) }) // .t1
+ bad(func() { clear(Field(Field(v, 1), 0)) }) // .t1.Y
+ bad(func() { clear(Field(Field(v, 1), 1)) }) // .t1.t0
+ bad(func() { clear(Field(Field(Field(v, 1), 1), 0)) }) // .t1.t0.W
+ bad(func() { clear(Field(v, 2)) }) // .T2
+ bad(func() { clear(Field(Field(v, 2), 0)) }) // .T2.Z
+ bad(func() { clear(Field(Field(v, 2), 1)) }) // .T2.namedT0
+ bad(func() { clear(Field(Field(Field(v, 2), 1), 0)) }) // .T2.namedT0.W
+ bad(func() { clear(Field(v, 3)) }) // .NamedT1
+ bad(func() { clear(Field(Field(v, 3), 0)) }) // .NamedT1.Y
+ bad(func() { clear(Field(Field(v, 3), 1)) }) // .NamedT1.t0
+ bad(func() { clear(Field(Field(Field(v, 3), 1), 0)) }) // .NamedT1.t0.W
+ bad(func() { clear(Field(v, 4)) }) // .NamedT2
+ bad(func() { clear(Field(Field(v, 4), 0)) }) // .NamedT2.Z
+ bad(func() { clear(Field(Field(v, 4), 1)) }) // .NamedT2.namedT0
+ bad(func() { clear(Field(Field(Field(v, 4), 1), 0)) }) // .NamedT2.namedT0.W
+ bad(func() { clear(Field(v, 5)) }) // .namedT1
+ bad(func() { clear(Field(Field(v, 5), 0)) }) // .namedT1.Y
+ bad(func() { clear(Field(Field(v, 5), 1)) }) // .namedT1.t0
+ bad(func() { clear(Field(Field(Field(v, 5), 1), 0)) }) // .namedT1.t0.W
+ bad(func() { clear(Field(v, 6)) }) // .namedT2
+ bad(func() { clear(Field(Field(v, 6), 0)) }) // .namedT2.Z
+ bad(func() { clear(Field(Field(v, 6), 1)) }) // .namedT2.namedT0
+ bad(func() { clear(Field(Field(Field(v, 6), 1), 0)) }) // .namedT2.namedT0.W
+
+ // addressable
+ v = ValueOf(&T{}).Elem()
+ ok(func() { clear(Field(v, 0)) }) // .X
+ bad(func() { clear(Field(v, 1)) }) // .t1
+ ok(func() { clear(Field(Field(v, 1), 0)) }) // .t1.Y
+ bad(func() { clear(Field(Field(v, 1), 1)) }) // .t1.t0
+ ok(func() { clear(Field(Field(Field(v, 1), 1), 0)) }) // .t1.t0.W
+ ok(func() { clear(Field(v, 2)) }) // .T2
+ ok(func() { clear(Field(Field(v, 2), 0)) }) // .T2.Z
+ bad(func() { clear(Field(Field(v, 2), 1)) }) // .T2.namedT0
+ bad(func() { clear(Field(Field(Field(v, 2), 1), 0)) }) // .T2.namedT0.W
+ ok(func() { clear(Field(v, 3)) }) // .NamedT1
+ ok(func() { clear(Field(Field(v, 3), 0)) }) // .NamedT1.Y
+ bad(func() { clear(Field(Field(v, 3), 1)) }) // .NamedT1.t0
+ ok(func() { clear(Field(Field(Field(v, 3), 1), 0)) }) // .NamedT1.t0.W
+ ok(func() { clear(Field(v, 4)) }) // .NamedT2
+ ok(func() { clear(Field(Field(v, 4), 0)) }) // .NamedT2.Z
+ bad(func() { clear(Field(Field(v, 4), 1)) }) // .NamedT2.namedT0
+ bad(func() { clear(Field(Field(Field(v, 4), 1), 0)) }) // .NamedT2.namedT0.W
+ bad(func() { clear(Field(v, 5)) }) // .namedT1
+ bad(func() { clear(Field(Field(v, 5), 0)) }) // .namedT1.Y
+ bad(func() { clear(Field(Field(v, 5), 1)) }) // .namedT1.t0
+ bad(func() { clear(Field(Field(Field(v, 5), 1), 0)) }) // .namedT1.t0.W
+ bad(func() { clear(Field(v, 6)) }) // .namedT2
+ bad(func() { clear(Field(Field(v, 6), 0)) }) // .namedT2.Z
+ bad(func() { clear(Field(Field(v, 6), 1)) }) // .namedT2.namedT0
+ bad(func() { clear(Field(Field(Field(v, 6), 1), 0)) }) // .namedT2.namedT0.W
+}
+
+func shouldPanic(f func()) {
+ defer func() {
+ if recover() == nil {
+ panic("did not panic")
+ }
+ }()
+ f()
+}
+
+type S struct {
+ i1 int64
+ i2 int64
+}
+
+func TestBigZero(t *testing.T) {
+ const size = 1 << 10
+ var v [size]byte
+ z := ToInterface(Zero(ValueOf(v).Type())).([size]byte)
+ for i := 0; i < size; i++ {
+ if z[i] != 0 {
+ t.Fatalf("Zero object not all zero, index %d", i)
+ }
+ }
+}
+
+func TestInvalid(t *testing.T) {
+ // Used to have inconsistency between IsValid() and Kind() != Invalid.
+ type T struct{ v any }
+
+ v := Field(ValueOf(T{}), 0)
+ if v.IsValid() != true || v.Kind() != Interface {
+ t.Errorf("field: IsValid=%v, Kind=%v, want true, Interface", v.IsValid(), v.Kind())
+ }
+ v = v.Elem()
+ if v.IsValid() != false || v.Kind() != abi.Invalid {
+ t.Errorf("field elem: IsValid=%v, Kind=%v, want false, Invalid", v.IsValid(), v.Kind())
+ }
+}
+
+type TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678 int
+
+type nameTest struct {
+ v any
+ want string
+}
+
+type A struct{}
+type B[T any] struct{}
+
+var nameTests = []nameTest{
+ {(*int32)(nil), "int32"},
+ {(*D1)(nil), "D1"},
+ {(*[]D1)(nil), ""},
+ {(*chan D1)(nil), ""},
+ {(*func() D1)(nil), ""},
+ {(*<-chan D1)(nil), ""},
+ {(*chan<- D1)(nil), ""},
+ {(*any)(nil), ""},
+ {(*interface {
+ F()
+ })(nil), ""},
+ {(*TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678)(nil), "TheNameOfThisTypeIsExactly255BytesLongSoWhenTheCompilerPrependsTheReflectTestPackageNameAndExtraStarTheLinkerRuntimeAndReflectPackagesWillHaveToCorrectlyDecodeTheSecondLengthByte0123456789_0123456789_0123456789_0123456789_0123456789_012345678"},
+ {(*B[A])(nil), "B[internal/reflectlite_test.A]"},
+ {(*B[B[A]])(nil), "B[internal/reflectlite_test.B[internal/reflectlite_test.A]]"},
+}
+
+func TestNames(t *testing.T) {
+ for _, test := range nameTests {
+ typ := TypeOf(test.v).Elem()
+ if got := typ.Name(); got != test.want {
+ t.Errorf("%v Name()=%q, want %q", typ, got, test.want)
+ }
+ }
+}
+
+// TestUnaddressableField tests that the reflect package will not allow
+// a type from another package to be used as a named type with an
+// unexported field.
+//
+// This ensures that unexported fields cannot be modified by other packages.
+func TestUnaddressableField(t *testing.T) {
+ var b Buffer // type defined in reflect, a different package
+ var localBuffer struct {
+ buf []byte
+ }
+ lv := ValueOf(&localBuffer).Elem()
+ rv := ValueOf(b)
+ shouldPanic(func() {
+ lv.Set(rv)
+ })
+}
+
+type Tint int
+
+type Tint2 = Tint
+
+type Talias1 struct {
+ byte
+ uint8
+ int
+ int32
+ rune
+}
+
+type Talias2 struct {
+ Tint
+ Tint2
+}
+
+func TestAliasNames(t *testing.T) {
+ t1 := Talias1{byte: 1, uint8: 2, int: 3, int32: 4, rune: 5}
+ out := fmt.Sprintf("%#v", t1)
+ want := "reflectlite_test.Talias1{byte:0x1, uint8:0x2, int:3, int32:4, rune:5}"
+ if out != want {
+ t.Errorf("Talias1 print:\nhave: %s\nwant: %s", out, want)
+ }
+
+ t2 := Talias2{Tint: 1, Tint2: 2}
+ out = fmt.Sprintf("%#v", t2)
+ want = "reflectlite_test.Talias2{Tint:1, Tint2:2}"
+ if out != want {
+ t.Errorf("Talias2 print:\nhave: %s\nwant: %s", out, want)
+ }
+}
diff --git a/src/internal/reflectlite/asm.s b/src/internal/reflectlite/asm.s
new file mode 100644
index 0000000..a7b69b6
--- /dev/null
+++ b/src/internal/reflectlite/asm.s
@@ -0,0 +1,5 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Trigger build without complete flag.
\ No newline at end of file
diff --git a/src/internal/reflectlite/export_test.go b/src/internal/reflectlite/export_test.go
new file mode 100644
index 0000000..88be6e2
--- /dev/null
+++ b/src/internal/reflectlite/export_test.go
@@ -0,0 +1,117 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectlite
+
+import (
+ "unsafe"
+)
+
+// Field returns the i'th field of the struct v.
+// It panics if v's Kind is not Struct or i is out of range.
+func Field(v Value, i int) Value {
+ if v.kind() != Struct {
+ panic(&ValueError{"reflect.Value.Field", v.kind()})
+ }
+ tt := (*structType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(len(tt.Fields)) {
+ panic("reflect: Field index out of range")
+ }
+ field := &tt.Fields[i]
+ typ := field.Typ
+
+ // Inherit permission bits from v, but clear flagEmbedRO.
+ fl := v.flag&(flagStickyRO|flagIndir|flagAddr) | flag(typ.Kind())
+ // Using an unexported field forces flagRO.
+ if !field.Name.IsExported() {
+ if field.Embedded() {
+ fl |= flagEmbedRO
+ } else {
+ fl |= flagStickyRO
+ }
+ }
+ // Either flagIndir is set and v.ptr points at struct,
+ // or flagIndir is not set and v.ptr is the actual struct data.
+ // In the former case, we want v.ptr + offset.
+ // In the latter case, we must have field.offset = 0,
+ // so v.ptr + field.offset is still the correct address.
+ ptr := add(v.ptr, field.Offset, "same as non-reflect &v.field")
+ return Value{typ, ptr, fl}
+}
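+
+// Editor's note: a minimal, hedged sketch, not part of the original file; the
+// helper name is hypothetical. It shows what the exported Field helper above
+// gives a caller: the i'th field of a struct Value, readable via ToInterface.
+func fieldSketch() int {
+ p := struct{ A int }{A: 7}
+ return ToInterface(Field(ValueOf(p), 0)).(int) // 7
+}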
+
+func TField(typ Type, i int) Type {
+ t := typ.(rtype)
+ if t.Kind() != Struct {
+ panic("reflect: Field of non-struct type")
+ }
+ tt := (*structType)(unsafe.Pointer(t.Type))
+
+ return StructFieldType(tt, i)
+}
+
+// StructFieldType returns the type of the i'th struct field.
+func StructFieldType(t *structType, i int) Type {
+ if i < 0 || i >= len(t.Fields) {
+ panic("reflect: Field index out of bounds")
+ }
+ p := &t.Fields[i]
+ return toType(p.Typ)
+}
+
+// Zero returns a Value representing the zero value for the specified type.
+// The result is different from the zero value of the Value struct,
+// which represents no value at all.
+// For example, Zero(TypeOf(42)) returns a Value with Kind Int and value 0.
+// The returned value is neither addressable nor settable.
+func Zero(typ Type) Value {
+ if typ == nil {
+ panic("reflect: Zero(nil)")
+ }
+ t := typ.common()
+ fl := flag(t.Kind())
+ if ifaceIndir(t) {
+ return Value{t, unsafe_New(t), fl | flagIndir}
+ }
+ return Value{t, nil, fl}
+}
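+
+// Editor's note: a hedged usage sketch, not part of the original file; the
+// helper name is hypothetical. Zero yields a readable but non-addressable,
+// non-settable zero Value for the given type.
+func zeroSketch() int {
+ v := Zero(TypeOf(0)) // Kind Int, value 0
+ return ToInterface(v).(int)
+}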
+
+// ToInterface returns v's current value as an interface{}.
+// It is equivalent to:
+//
+// var i interface{} = (v's underlying value)
+//
+// It panics if the Value was obtained by accessing
+// unexported struct fields.
+func ToInterface(v Value) (i any) {
+ return valueInterface(v)
+}
+
+type EmbedWithUnexpMeth struct{}
+
+func (EmbedWithUnexpMeth) f() {}
+
+type pinUnexpMeth interface {
+ f()
+}
+
+var pinUnexpMethI = pinUnexpMeth(EmbedWithUnexpMeth{})
+
+func FirstMethodNameBytes(t Type) *byte {
+ _ = pinUnexpMethI
+
+ ut := t.uncommon()
+ if ut == nil {
+ panic("type has no methods")
+ }
+ m := ut.Methods()[0]
+ mname := t.(rtype).nameOff(m.Name)
+ if *mname.DataChecked(0, "name flag field")&(1<<2) == 0 {
+ panic("method name does not have pkgPath *string")
+ }
+ return mname.Bytes
+}
+
+type Buffer struct {
+ buf []byte
+}
diff --git a/src/internal/reflectlite/reflect_mirror_test.go b/src/internal/reflectlite/reflect_mirror_test.go
new file mode 100644
index 0000000..0fd004e
--- /dev/null
+++ b/src/internal/reflectlite/reflect_mirror_test.go
@@ -0,0 +1,133 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectlite_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+)
+
+var typeNames = []string{
+ "uncommonType",
+ "arrayType",
+ "chanType",
+ "funcType",
+ "interfaceType",
+ "mapType",
+ "ptrType",
+ "sliceType",
+ "structType",
+}
+
+type visitor struct {
+ m map[string]map[string]bool
+}
+
+func newVisitor() visitor {
+ v := visitor{}
+ v.m = make(map[string]map[string]bool)
+
+ return v
+}
+func (v visitor) filter(name string) bool {
+ for _, typeName := range typeNames {
+ if typeName == name {
+ return true
+ }
+ }
+ return false
+}
+
+func (v visitor) Visit(n ast.Node) ast.Visitor {
+ switch x := n.(type) {
+ case *ast.TypeSpec:
+ if v.filter(x.Name.String()) {
+ if st, ok := x.Type.(*ast.StructType); ok {
+ v.m[x.Name.String()] = make(map[string]bool)
+ for _, field := range st.Fields.List {
+ k := fmt.Sprintf("%s", field.Type)
+ if len(field.Names) > 0 {
+ k = field.Names[0].Name
+ }
+ v.m[x.Name.String()][k] = true
+ }
+ }
+ }
+ }
+ return v
+}
+
+func loadTypes(path, pkgName string, v visitor) {
+ fset := token.NewFileSet()
+
+ filter := func(fi fs.FileInfo) bool {
+ return strings.HasSuffix(fi.Name(), ".go")
+ }
+ pkgs, err := parser.ParseDir(fset, path, filter, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ pkg := pkgs[pkgName]
+
+ for _, f := range pkg.Files {
+ ast.Walk(v, f)
+ }
+}
+
+func TestMirrorWithReflect(t *testing.T) {
+ // TODO when the dust clears, figure out what this should actually test.
+ t.Skipf("reflect and reflectlite are out of sync for now")
+ reflectDir := filepath.Join(runtime.GOROOT(), "src", "reflect")
+ if _, err := os.Stat(reflectDir); os.IsNotExist(err) {
+ // On some mobile builders, the test binary executes on a machine without a
+ // complete GOROOT source tree.
+ t.Skipf("GOROOT source not present")
+ }
+
+ var wg sync.WaitGroup
+ rl, r := newVisitor(), newVisitor()
+
+ for _, tc := range []struct {
+ path, pkg string
+ v visitor
+ }{
+ {".", "reflectlite", rl},
+ {reflectDir, "reflect", r},
+ } {
+ tc := tc
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ loadTypes(tc.path, tc.pkg, tc.v)
+ }()
+ }
+ wg.Wait()
+
+ if len(rl.m) != len(r.m) {
+ t.Fatalf("number of types mismatch, reflect: %d, reflectlite: %d (%+v, %+v)", len(r.m), len(rl.m), r.m, rl.m)
+ }
+
+ for typName := range r.m {
+ if len(r.m[typName]) != len(rl.m[typName]) {
+ t.Errorf("type %s number of fields mismatch, reflect: %d, reflectlite: %d", typName, len(r.m[typName]), len(rl.m[typName]))
+ continue
+ }
+ for field := range r.m[typName] {
+ if _, ok := rl.m[typName][field]; !ok {
+ t.Errorf(`Field mismatch, reflect has "%s", reflectlite does not.`, field)
+ }
+ }
+ }
+}
diff --git a/src/internal/reflectlite/set_test.go b/src/internal/reflectlite/set_test.go
new file mode 100644
index 0000000..ca7ea9b
--- /dev/null
+++ b/src/internal/reflectlite/set_test.go
@@ -0,0 +1,101 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectlite_test
+
+import (
+ "bytes"
+ "go/ast"
+ "go/token"
+ . "internal/reflectlite"
+ "io"
+ "testing"
+)
+
+func TestImplicitSetConversion(t *testing.T) {
+ // Assume TestImplicitMapConversion covered the basics.
+ // Just make sure conversions are being applied at all.
+ var r io.Reader
+ b := new(bytes.Buffer)
+ rv := ValueOf(&r).Elem()
+ rv.Set(ValueOf(b))
+ if r != b {
+ t.Errorf("after Set: r=%T(%v)", r, r)
+ }
+}
+
+var implementsTests = []struct {
+ x any
+ t any
+ b bool
+}{
+ {new(*bytes.Buffer), new(io.Reader), true},
+ {new(bytes.Buffer), new(io.Reader), false},
+ {new(*bytes.Buffer), new(io.ReaderAt), false},
+ {new(*ast.Ident), new(ast.Expr), true},
+ {new(*notAnExpr), new(ast.Expr), false},
+ {new(*ast.Ident), new(notASTExpr), false},
+ {new(notASTExpr), new(ast.Expr), false},
+ {new(ast.Expr), new(notASTExpr), false},
+ {new(*notAnExpr), new(notASTExpr), true},
+ {new(mapError), new(error), true},
+ {new(*mapError), new(error), true},
+}
+
+type notAnExpr struct{}
+
+func (notAnExpr) Pos() token.Pos { return token.NoPos }
+func (notAnExpr) End() token.Pos { return token.NoPos }
+func (notAnExpr) exprNode() {}
+
+type notASTExpr interface {
+ Pos() token.Pos
+ End() token.Pos
+ exprNode()
+}
+
+type mapError map[string]string
+
+func (mapError) Error() string { return "mapError" }
+
+var _ error = mapError{}
+var _ error = new(mapError)
+
+func TestImplements(t *testing.T) {
+ for _, tt := range implementsTests {
+ xv := TypeOf(tt.x).Elem()
+ xt := TypeOf(tt.t).Elem()
+ if b := xv.Implements(xt); b != tt.b {
+ t.Errorf("(%s).Implements(%s) = %v, want %v", TypeString(xv), TypeString(xt), b, tt.b)
+ }
+ }
+}
+
+var assignableTests = []struct {
+ x any
+ t any
+ b bool
+}{
+ {new(chan int), new(<-chan int), true},
+ {new(<-chan int), new(chan int), false},
+ {new(*int), new(IntPtr), true},
+ {new(IntPtr), new(*int), true},
+ {new(IntPtr), new(IntPtr1), false},
+ {new(Ch), new(<-chan any), true},
+ // test runs implementsTests too
+}
+
+type IntPtr *int
+type IntPtr1 *int
+type Ch <-chan any
+
+func TestAssignableTo(t *testing.T) {
+ for i, tt := range append(assignableTests, implementsTests...) {
+ xv := TypeOf(tt.x).Elem()
+ xt := TypeOf(tt.t).Elem()
+ if b := xv.AssignableTo(xt); b != tt.b {
+ t.Errorf("%d:AssignableTo: got %v, want %v", i, b, tt.b)
+ }
+ }
+}
diff --git a/src/internal/reflectlite/swapper.go b/src/internal/reflectlite/swapper.go
new file mode 100644
index 0000000..ac17d9b
--- /dev/null
+++ b/src/internal/reflectlite/swapper.go
@@ -0,0 +1,78 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectlite
+
+import (
+ "internal/goarch"
+ "internal/unsafeheader"
+ "unsafe"
+)
+
+// Swapper returns a function that swaps the elements in the provided
+// slice.
+//
+// Swapper panics if the provided interface is not a slice.
+func Swapper(slice any) func(i, j int) {
+ v := ValueOf(slice)
+ if v.Kind() != Slice {
+ panic(&ValueError{Method: "Swapper", Kind: v.Kind()})
+ }
+ // Fast path for slices of size 0 and 1. Nothing to swap.
+ switch v.Len() {
+ case 0:
+ return func(i, j int) { panic("reflect: slice index out of range") }
+ case 1:
+ return func(i, j int) {
+ if i != 0 || j != 0 {
+ panic("reflect: slice index out of range")
+ }
+ }
+ }
+
+ typ := v.Type().Elem().common()
+ size := typ.Size()
+ hasPtr := typ.PtrBytes != 0
+
+ // Some common & small cases, without using memmove:
+ if hasPtr {
+ if size == goarch.PtrSize {
+ ps := *(*[]unsafe.Pointer)(v.ptr)
+ return func(i, j int) { ps[i], ps[j] = ps[j], ps[i] }
+ }
+ if typ.Kind() == String {
+ ss := *(*[]string)(v.ptr)
+ return func(i, j int) { ss[i], ss[j] = ss[j], ss[i] }
+ }
+ } else {
+ switch size {
+ case 8:
+ is := *(*[]int64)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ case 4:
+ is := *(*[]int32)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ case 2:
+ is := *(*[]int16)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ case 1:
+ is := *(*[]int8)(v.ptr)
+ return func(i, j int) { is[i], is[j] = is[j], is[i] }
+ }
+ }
+
+ s := (*unsafeheader.Slice)(v.ptr)
+ tmp := unsafe_New(typ) // swap scratch space
+
+ return func(i, j int) {
+ if uint(i) >= uint(s.Len) || uint(j) >= uint(s.Len) {
+ panic("reflect: slice index out of range")
+ }
+ val1 := arrayAt(s.Data, i, size, "i < s.Len")
+ val2 := arrayAt(s.Data, j, size, "j < s.Len")
+ typedmemmove(typ, tmp, val1)
+ typedmemmove(typ, val1, val2)
+ typedmemmove(typ, val2, tmp)
+ }
+}
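+
+// Editor's note: an illustrative sketch, not part of the upstream file; the
+// helper name is hypothetical. It shows how a caller typically uses the swap
+// function returned by Swapper above, here to reverse a slice in place.
+func reverseWithSwapper(s []int) {
+ swap := Swapper(s)
+ for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+ swap(i, j)
+ }
+}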
diff --git a/src/internal/reflectlite/tostring_test.go b/src/internal/reflectlite/tostring_test.go
new file mode 100644
index 0000000..966b0bd
--- /dev/null
+++ b/src/internal/reflectlite/tostring_test.go
@@ -0,0 +1,98 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Formatting of reflection types and values for debugging.
+// Not defined as methods so they do not need to be linked into most binaries;
+// the functions are not used by the library itself, only in tests.
+
+package reflectlite_test
+
+import (
+ . "internal/reflectlite"
+ "reflect"
+ "strconv"
+)
+
+// valueToString returns a textual representation of the reflection value val.
+// For debugging only.
+func valueToString(v Value) string {
+ return valueToStringImpl(reflect.ValueOf(ToInterface(v)))
+}
+
+func valueToStringImpl(val reflect.Value) string {
+ var str string
+ if !val.IsValid() {
+ return "<zero Value>"
+ }
+ typ := val.Type()
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(val.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(val.Uint(), 10)
+ case reflect.Float32, reflect.Float64:
+ return strconv.FormatFloat(val.Float(), 'g', -1, 64)
+ case reflect.Complex64, reflect.Complex128:
+ c := val.Complex()
+ return strconv.FormatFloat(real(c), 'g', -1, 64) + "+" + strconv.FormatFloat(imag(c), 'g', -1, 64) + "i"
+ case reflect.String:
+ return val.String()
+ case reflect.Bool:
+ if val.Bool() {
+ return "true"
+ } else {
+ return "false"
+ }
+ case reflect.Pointer:
+ v := val
+ str = typ.String() + "("
+ if v.IsNil() {
+ str += "0"
+ } else {
+ str += "&" + valueToStringImpl(v.Elem())
+ }
+ str += ")"
+ return str
+ case reflect.Array, reflect.Slice:
+ v := val
+ str += typ.String()
+ str += "{"
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ str += ", "
+ }
+ str += valueToStringImpl(v.Index(i))
+ }
+ str += "}"
+ return str
+ case reflect.Map:
+ str += typ.String()
+ str += "{"
+ str += "<can't iterate on maps>"
+ str += "}"
+ return str
+ case reflect.Chan:
+ str = typ.String()
+ return str
+ case reflect.Struct:
+ t := typ
+ v := val
+ str += t.String()
+ str += "{"
+ for i, n := 0, v.NumField(); i < n; i++ {
+ if i > 0 {
+ str += ", "
+ }
+ str += valueToStringImpl(v.Field(i))
+ }
+ str += "}"
+ return str
+ case reflect.Interface:
+ return typ.String() + "(" + valueToStringImpl(val.Elem()) + ")"
+ case reflect.Func:
+ return typ.String() + "(arg)"
+ default:
+ panic("valueToString: can't print type " + typ.String())
+ }
+}
diff --git a/src/internal/reflectlite/type.go b/src/internal/reflectlite/type.go
new file mode 100644
index 0000000..f13ce8f
--- /dev/null
+++ b/src/internal/reflectlite/type.go
@@ -0,0 +1,659 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package reflectlite implements a lightweight version of reflect, not using
+// any package except "runtime", "unsafe", and "internal/abi".
+package reflectlite
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+// Type is the representation of a Go type.
+//
+// Not all methods apply to all kinds of types. Restrictions,
+// if any, are noted in the documentation for each method.
+// Use the Kind method to find out the kind of type before
+// calling kind-specific methods. Calling a method
+// inappropriate to the kind of type causes a run-time panic.
+//
+// Type values are comparable, such as with the == operator,
+// so they can be used as map keys.
+// Two Type values are equal if they represent identical types.
+type Type interface {
+ // Methods applicable to all types.
+
+ // Name returns the type's name within its package for a defined type.
+ // For other (non-defined) types it returns the empty string.
+ Name() string
+
+ // PkgPath returns a defined type's package path, that is, the import path
+ // that uniquely identifies the package, such as "encoding/base64".
+ // If the type was predeclared (string, error) or not defined (*T, struct{},
+ // []int, or A where A is an alias for a non-defined type), the package path
+ // will be the empty string.
+ PkgPath() string
+
+ // Size returns the number of bytes needed to store
+ // a value of the given type; it is analogous to unsafe.Sizeof.
+ Size() uintptr
+
+ // Kind returns the specific kind of this type.
+ Kind() Kind
+
+ // Implements reports whether the type implements the interface type u.
+ Implements(u Type) bool
+
+ // AssignableTo reports whether a value of the type is assignable to type u.
+ AssignableTo(u Type) bool
+
+ // Comparable reports whether values of this type are comparable.
+ Comparable() bool
+
+ // String returns a string representation of the type.
+ // The string representation may use shortened package names
+ // (e.g., base64 instead of "encoding/base64") and is not
+ // guaranteed to be unique among types. To test for type identity,
+ // compare the Types directly.
+ String() string
+
+ // Elem returns a type's element type.
+ // It panics if the type's Kind is not Ptr.
+ Elem() Type
+
+ common() *abi.Type
+ uncommon() *uncommonType
+}
+
+/*
+ * These data structures are known to the compiler (../../cmd/internal/reflectdata/reflect.go).
+ * A few are also known to ../runtime/type.go, to convey to debuggers.
+ */
+
+// A Kind represents the specific kind of type that a Type represents.
+// The zero Kind is not a valid kind.
+type Kind = abi.Kind
+
+const Ptr = abi.Pointer
+
+const (
+ // Import-and-export these constants as necessary
+ Interface = abi.Interface
+ Slice = abi.Slice
+ String = abi.String
+ Struct = abi.Struct
+)
+
+type nameOff = abi.NameOff
+type typeOff = abi.TypeOff
+type textOff = abi.TextOff
+
+type rtype struct {
+ *abi.Type
+}
+
+// uncommonType is present only for defined types or types with methods
+// (if T is a defined type, the uncommonTypes for T and *T have methods).
+// Using a pointer to this struct reduces the overall size required
+// to describe a non-defined type with no methods.
+type uncommonType = abi.UncommonType
+
+// arrayType represents a fixed array type.
+type arrayType = abi.ArrayType
+
+// chanType represents a channel type.
+type chanType = abi.ChanType
+
+type funcType = abi.FuncType
+
+type interfaceType = abi.InterfaceType
+
+// mapType represents a map type.
+type mapType struct {
+ rtype
+ Key *abi.Type // map key type
+ Elem *abi.Type // map element (value) type
+ Bucket *abi.Type // internal bucket structure
+ // function for hashing keys (ptr to key, seed) -> hash
+ Hasher func(unsafe.Pointer, uintptr) uintptr
+ KeySize uint8 // size of key slot
+ ValueSize uint8 // size of value slot
+ BucketSize uint16 // size of bucket
+ Flags uint32
+}
+
+// ptrType represents a pointer type.
+type ptrType = abi.PtrType
+
+// sliceType represents a slice type.
+type sliceType = abi.SliceType
+
+// structType represents a struct type.
+type structType = abi.StructType
+
+// name is an encoded type name with optional extra data.
+//
+// The first byte is a bit field containing:
+//
+// 1<<0 the name is exported
+// 1<<1 tag data follows the name
+// 1<<2 pkgPath nameOff follows the name and tag
+// 1<<3 the name is of an embedded (a.k.a. anonymous) field
+//
+// Following that is a varint-encoded length of the name,
+// followed by that many bytes of name data.
+//
+// If tag data follows, it is likewise a varint-encoded length
+// followed by that many bytes of tag data.
+//
+// If the import path follows, then 4 bytes at the end of
+// the data form a nameOff. The import path is only set for concrete
+// methods that are defined in a different package than their type.
+//
+// If a name starts with "*", then the exported bit represents
+// whether the pointed to type is exported.
+type name struct {
+ bytes *byte
+}
+
+func (n name) data(off int, whySafe string) *byte {
+ return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
+}
+
+func (n name) isExported() bool {
+ return (*n.bytes)&(1<<0) != 0
+}
+
+func (n name) hasTag() bool {
+ return (*n.bytes)&(1<<1) != 0
+}
+
+func (n name) embedded() bool {
+ return (*n.bytes)&(1<<3) != 0
+}
+
+// readVarint parses a varint as encoded by encoding/binary.
+// It returns the number of encoded bytes and the encoded value.
+func (n name) readVarint(off int) (int, int) {
+ v := 0
+ for i := 0; ; i++ {
+ x := *n.data(off+i, "read varint")
+ v += int(x&0x7f) << (7 * i)
+ if x&0x80 == 0 {
+ return i + 1, v
+ }
+ }
+}
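+
+// Editor's note: a hedged restatement, not part of the upstream file; the
+// helper name is hypothetical. It spells out the same varint decoding as
+// readVarint above over an ordinary byte slice, to make the format explicit.
+func readVarintSketch(data []byte) (n, v int) {
+ for i := 0; ; i++ {
+ x := data[i]
+ v += int(x&0x7f) << (7 * i) // the low seven bits carry payload
+ if x&0x80 == 0 { // a clear high bit marks the final byte
+ return i + 1, v
+ }
+ }
+}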
+
+func (n name) name() string {
+ if n.bytes == nil {
+ return ""
+ }
+ i, l := n.readVarint(1)
+ return unsafe.String(n.data(1+i, "non-empty string"), l)
+}
+
+func (n name) tag() string {
+ if !n.hasTag() {
+ return ""
+ }
+ i, l := n.readVarint(1)
+ i2, l2 := n.readVarint(1 + i + l)
+ return unsafe.String(n.data(1+i+l+i2, "non-empty string"), l2)
+}
+
+func pkgPath(n abi.Name) string {
+ if n.Bytes == nil || *n.DataChecked(0, "name flag field")&(1<<2) == 0 {
+ return ""
+ }
+ i, l := n.ReadVarint(1)
+ off := 1 + i + l
+ if n.HasTag() {
+ i2, l2 := n.ReadVarint(off)
+ off += i2 + l2
+ }
+ var nameOff int32
+ // Note that this field may not be aligned in memory,
+ // so we cannot use a direct int32 assignment here.
+ copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.DataChecked(off, "name offset field")))[:])
+ pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.Bytes), nameOff))}
+ return pkgPathName.name()
+}
+
+/*
+ * The compiler knows the exact layout of all the data structures above.
+ * The compiler does not know about the data structures and methods below.
+ */
+
+// resolveNameOff resolves a name offset from a base pointer.
+// The (*rtype).nameOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
+
+// resolveTypeOff resolves an *rtype offset from a base type.
+// The (*rtype).typeOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+func (t rtype) nameOff(off nameOff) abi.Name {
+ return abi.Name{Bytes: (*byte)(resolveNameOff(unsafe.Pointer(t.Type), int32(off)))}
+}
+
+func (t rtype) typeOff(off typeOff) *abi.Type {
+ return (*abi.Type)(resolveTypeOff(unsafe.Pointer(t.Type), int32(off)))
+}
+
+func (t rtype) uncommon() *uncommonType {
+ return t.Uncommon()
+}
+
+func (t rtype) String() string {
+ s := t.nameOff(t.Str).Name()
+ if t.TFlag&abi.TFlagExtraStar != 0 {
+ return s[1:]
+ }
+ return s
+}
+
+func (t rtype) common() *abi.Type { return t.Type }
+
+func (t rtype) exportedMethods() []abi.Method {
+ ut := t.uncommon()
+ if ut == nil {
+ return nil
+ }
+ return ut.ExportedMethods()
+}
+
+func (t rtype) NumMethod() int {
+ tt := t.Type.InterfaceType()
+ if tt != nil {
+ return tt.NumMethod()
+ }
+ return len(t.exportedMethods())
+}
+
+func (t rtype) PkgPath() string {
+ if t.TFlag&abi.TFlagNamed == 0 {
+ return ""
+ }
+ ut := t.uncommon()
+ if ut == nil {
+ return ""
+ }
+ return t.nameOff(ut.PkgPath).Name()
+}
+
+func (t rtype) Name() string {
+ if !t.HasName() {
+ return ""
+ }
+ s := t.String()
+ i := len(s) - 1
+ sqBrackets := 0
+ for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
+ switch s[i] {
+ case ']':
+ sqBrackets++
+ case '[':
+ sqBrackets--
+ }
+ i--
+ }
+ return s[i+1:]
+}
+
+func toRType(t *abi.Type) rtype {
+ return rtype{t}
+}
+
+func elem(t *abi.Type) *abi.Type {
+ et := t.Elem()
+ if et != nil {
+ return et
+ }
+ panic("reflect: Elem of invalid type " + toRType(t).String())
+}
+
+func (t rtype) Elem() Type {
+ return toType(elem(t.common()))
+}
+
+func (t rtype) In(i int) Type {
+ tt := t.Type.FuncType()
+ if tt == nil {
+ panic("reflect: In of non-func type")
+ }
+ return toType(tt.InSlice()[i])
+}
+
+func (t rtype) Key() Type {
+ tt := t.Type.MapType()
+ if tt == nil {
+ panic("reflect: Key of non-map type")
+ }
+ return toType(tt.Key)
+}
+
+func (t rtype) Len() int {
+ tt := t.Type.ArrayType()
+ if tt == nil {
+ panic("reflect: Len of non-array type")
+ }
+ return int(tt.Len)
+}
+
+func (t rtype) NumField() int {
+ tt := t.Type.StructType()
+ if tt == nil {
+ panic("reflect: NumField of non-struct type")
+ }
+ return len(tt.Fields)
+}
+
+func (t rtype) NumIn() int {
+ tt := t.Type.FuncType()
+ if tt == nil {
+ panic("reflect: NumIn of non-func type")
+ }
+ return int(tt.InCount)
+}
+
+func (t rtype) NumOut() int {
+ tt := t.Type.FuncType()
+ if tt == nil {
+ panic("reflect: NumOut of non-func type")
+ }
+ return tt.NumOut()
+}
+
+func (t rtype) Out(i int) Type {
+ tt := t.Type.FuncType()
+ if tt == nil {
+ panic("reflect: Out of non-func type")
+ }
+ return toType(tt.OutSlice()[i])
+}
+
+// add returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(p) + x)
+}
+
+// TypeOf returns the reflection Type that represents the dynamic type of i.
+// If i is a nil interface value, TypeOf returns nil.
+func TypeOf(i any) Type {
+ eface := *(*emptyInterface)(unsafe.Pointer(&i))
+ return toType(eface.typ)
+}
+
+func (t rtype) Implements(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.Implements")
+ }
+ if u.Kind() != Interface {
+ panic("reflect: non-interface type passed to Type.Implements")
+ }
+ return implements(u.common(), t.common())
+}
+
+func (t rtype) AssignableTo(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.AssignableTo")
+ }
+ uu := u.common()
+ tt := t.common()
+ return directlyAssignable(uu, tt) || implements(uu, tt)
+}
+
+func (t rtype) Comparable() bool {
+ return t.Equal != nil
+}
+
+// implements reports whether the type V implements the interface type T.
+func implements(T, V *abi.Type) bool {
+ t := T.InterfaceType()
+ if t == nil {
+ return false
+ }
+ if len(t.Methods) == 0 {
+ return true
+ }
+ rT := toRType(T)
+ rV := toRType(V)
+
+ // The same algorithm applies in both cases, but the
+ // method tables for an interface type and a concrete type
+ // are different, so the code is duplicated.
+ // In both cases the algorithm is a linear scan over the two
+ // lists - T's methods and V's methods - simultaneously.
+ // Since method tables are stored in a unique sorted order
+ // (alphabetical, with no duplicate method names), the scan
+ // through V's methods must hit a match for each of T's
+ // methods along the way, or else V does not implement T.
+ // This lets us run the scan in overall linear time instead of
+ // the quadratic time a naive search would require.
+ // See also ../runtime/iface.go.
+ if V.Kind() == Interface {
+ v := (*interfaceType)(unsafe.Pointer(V))
+ i := 0
+ for j := 0; j < len(v.Methods); j++ {
+ tm := &t.Methods[i]
+ tmName := rT.nameOff(tm.Name)
+ vm := &v.Methods[j]
+ vmName := rV.nameOff(vm.Name)
+ if vmName.Name() == tmName.Name() && rV.typeOff(vm.Typ) == rT.typeOff(tm.Typ) {
+ if !tmName.IsExported() {
+ tmPkgPath := pkgPath(tmName)
+ if tmPkgPath == "" {
+ tmPkgPath = t.PkgPath.Name()
+ }
+ vmPkgPath := pkgPath(vmName)
+ if vmPkgPath == "" {
+ vmPkgPath = v.PkgPath.Name()
+ }
+ if tmPkgPath != vmPkgPath {
+ continue
+ }
+ }
+ if i++; i >= len(t.Methods) {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ v := V.Uncommon()
+ if v == nil {
+ return false
+ }
+ i := 0
+ vmethods := v.Methods()
+ for j := 0; j < int(v.Mcount); j++ {
+ tm := &t.Methods[i]
+ tmName := rT.nameOff(tm.Name)
+ vm := vmethods[j]
+ vmName := rV.nameOff(vm.Name)
+ if vmName.Name() == tmName.Name() && rV.typeOff(vm.Mtyp) == rT.typeOff(tm.Typ) {
+ if !tmName.IsExported() {
+ tmPkgPath := pkgPath(tmName)
+ if tmPkgPath == "" {
+ tmPkgPath = t.PkgPath.Name()
+ }
+ vmPkgPath := pkgPath(vmName)
+ if vmPkgPath == "" {
+ vmPkgPath = rV.nameOff(v.PkgPath).Name()
+ }
+ if tmPkgPath != vmPkgPath {
+ continue
+ }
+ }
+ if i++; i >= len(t.Methods) {
+ return true
+ }
+ }
+ }
+ return false
+}
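+
+// Editor's note: a hedged illustration, not part of the upstream file; the
+// helper name is hypothetical. It shows the exported behaviour the scan above
+// implements: *ValueError has an exported Error method, so it satisfies error.
+func implementsSketch() bool {
+ errType := TypeOf((*error)(nil)).Elem() // the interface type error
+ return TypeOf(&ValueError{}).Implements(errType) // true
+}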
+
+// directlyAssignable reports whether a value x of type V can be directly
+// assigned (using memmove) to a value of type T.
+// https://golang.org/doc/go_spec.html#Assignability
+// Ignoring the interface rules (implemented elsewhere)
+// and the ideal constant rules (no ideal constants at run time).
+func directlyAssignable(T, V *abi.Type) bool {
+ // x's type V is identical to T?
+ if T == V {
+ return true
+ }
+
+ // Otherwise at least one of T and V must not be defined
+ // and they must have the same kind.
+ if T.HasName() && V.HasName() || T.Kind() != V.Kind() {
+ return false
+ }
+
+ // x's type T and V must have identical underlying types.
+ return haveIdenticalUnderlyingType(T, V, true)
+}
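+
+// Editor's note: a hedged example, not part of the upstream file; the helper
+// name is hypothetical. A defined slice type and the unnamed []int share an
+// underlying type and only one of them is defined, so the check above holds.
+func assignableSketch() bool {
+ type mySlice []int
+ return TypeOf(mySlice(nil)).AssignableTo(TypeOf([]int(nil))) // true
+}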
+
+func haveIdenticalType(T, V *abi.Type, cmpTags bool) bool {
+ if cmpTags {
+ return T == V
+ }
+
+ if toRType(T).Name() != toRType(V).Name() || T.Kind() != V.Kind() {
+ return false
+ }
+
+ return haveIdenticalUnderlyingType(T, V, false)
+}
+
+func haveIdenticalUnderlyingType(T, V *abi.Type, cmpTags bool) bool {
+ if T == V {
+ return true
+ }
+
+ kind := T.Kind()
+ if kind != V.Kind() {
+ return false
+ }
+
+ // Non-composite types of equal kind have same underlying type
+ // (the predefined instance of the type).
+ if abi.Bool <= kind && kind <= abi.Complex128 || kind == abi.String || kind == abi.UnsafePointer {
+ return true
+ }
+
+ // Composite types.
+ switch kind {
+ case abi.Array:
+ return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case abi.Chan:
+ // Special case:
+ // x is a bidirectional channel value, T is a channel type,
+ // and x's type V and T have identical element types.
+ if V.ChanDir() == abi.BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
+ return true
+ }
+
+ // Otherwise continue test for identical underlying type.
+ return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case abi.Func:
+ t := (*funcType)(unsafe.Pointer(T))
+ v := (*funcType)(unsafe.Pointer(V))
+ if t.OutCount != v.OutCount || t.InCount != v.InCount {
+ return false
+ }
+ for i := 0; i < t.NumIn(); i++ {
+ if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
+ return false
+ }
+ }
+ for i := 0; i < t.NumOut(); i++ {
+ if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
+ return false
+ }
+ }
+ return true
+
+ case Interface:
+ t := (*interfaceType)(unsafe.Pointer(T))
+ v := (*interfaceType)(unsafe.Pointer(V))
+ if len(t.Methods) == 0 && len(v.Methods) == 0 {
+ return true
+ }
+ // Might have the same methods but still
+ // need a run time conversion.
+ return false
+
+ case abi.Map:
+ return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Ptr, abi.Slice:
+ return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case abi.Struct:
+ t := (*structType)(unsafe.Pointer(T))
+ v := (*structType)(unsafe.Pointer(V))
+ if len(t.Fields) != len(v.Fields) {
+ return false
+ }
+ if t.PkgPath.Name() != v.PkgPath.Name() {
+ return false
+ }
+ for i := range t.Fields {
+ tf := &t.Fields[i]
+ vf := &v.Fields[i]
+ if tf.Name.Name() != vf.Name.Name() {
+ return false
+ }
+ if !haveIdenticalType(tf.Typ, vf.Typ, cmpTags) {
+ return false
+ }
+ if cmpTags && tf.Name.Tag() != vf.Name.Tag() {
+ return false
+ }
+ if tf.Offset != vf.Offset {
+ return false
+ }
+ if tf.Embedded() != vf.Embedded() {
+ return false
+ }
+ }
+ return true
+ }
+
+ return false
+}
+
+// toType converts from a *rtype to a Type that can be returned
+// to the client of package reflect. In gc, the only concern is that
+// a nil *rtype must be replaced by a nil Type, but in gccgo this
+// function takes care of ensuring that multiple *rtype for the same
+// type are coalesced into a single Type.
+func toType(t *abi.Type) Type {
+ if t == nil {
+ return nil
+ }
+ return toRType(t)
+}
+
+// ifaceIndir reports whether t is stored indirectly in an interface value.
+func ifaceIndir(t *abi.Type) bool {
+ return t.Kind_&abi.KindDirectIface == 0
+}
diff --git a/src/internal/reflectlite/value.go b/src/internal/reflectlite/value.go
new file mode 100644
index 0000000..eb79894
--- /dev/null
+++ b/src/internal/reflectlite/value.go
@@ -0,0 +1,478 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectlite
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "internal/unsafeheader"
+ "runtime"
+ "unsafe"
+)
+
+// Value is the reflection interface to a Go value.
+//
+// Not all methods apply to all kinds of values. Restrictions,
+// if any, are noted in the documentation for each method.
+// Use the Kind method to find out the kind of value before
+// calling kind-specific methods. Calling a method
+// inappropriate to the kind of type causes a run time panic.
+//
+// The zero Value represents no value.
+// Its IsValid method returns false, its Kind method returns Invalid,
+// its String method returns "<invalid Value>", and all other methods panic.
+// Most functions and methods never return an invalid value.
+// If one does, its documentation states the conditions explicitly.
+//
+// A Value can be used concurrently by multiple goroutines provided that
+// the underlying Go value can be used concurrently for the equivalent
+// direct operations.
+//
+// To compare two Values, compare the results of the Interface method.
+// Using == on two Values does not compare the underlying values
+// they represent.
+type Value struct {
+ // typ holds the type of the value represented by a Value.
+ typ *abi.Type
+
+ // Pointer-valued data or, if flagIndir is set, pointer to data.
+ // Valid when either flagIndir is set or typ.pointers() is true.
+ ptr unsafe.Pointer
+
+ // flag holds metadata about the value.
+ // The lowest bits are flag bits:
+ // - flagStickyRO: obtained via unexported not embedded field, so read-only
+ // - flagEmbedRO: obtained via unexported embedded field, so read-only
+ // - flagIndir: val holds a pointer to the data
+ // - flagAddr: v.CanAddr is true (implies flagIndir)
+ // Value cannot represent method values.
+ // The next five bits give the Kind of the value.
+ // This repeats typ.Kind() except for method values.
+ // The remaining 23+ bits give a method number for method values.
+ // If flag.kind() != Func, code can assume that flagMethod is unset.
+ // If ifaceIndir(typ), code can assume that flagIndir is set.
+ flag
+
+ // A method value represents a curried method invocation
+ // like r.Read for some receiver r. The typ+val+flag bits describe
+ // the receiver r, but the flag's Kind bits say Func (methods are
+ // functions), and the top bits of the flag give the method number
+ // in r's type's method table.
+}
+
+type flag uintptr
+
+const (
+ flagKindWidth = 5 // there are 27 kinds
+ flagKindMask flag = 1<<flagKindWidth - 1
+ flagStickyRO flag = 1 << 5
+ flagEmbedRO flag = 1 << 6
+ flagIndir flag = 1 << 7
+ flagAddr flag = 1 << 8
+ flagMethod flag = 1 << 9
+ flagMethodShift = 10
+ flagRO flag = flagStickyRO | flagEmbedRO
+)
+
+func (f flag) kind() Kind {
+ return Kind(f & flagKindMask)
+}
+
+func (f flag) ro() flag {
+ if f&flagRO != 0 {
+ return flagStickyRO
+ }
+ return 0
+}
+
+// pointer returns the underlying pointer represented by v.
+// v.Kind() must be Pointer, Map, Chan, Func, or UnsafePointer
+func (v Value) pointer() unsafe.Pointer {
+ if v.typ.Size() != goarch.PtrSize || !v.typ.Pointers() {
+ panic("can't call pointer on a non-pointer Value")
+ }
+ if v.flag&flagIndir != 0 {
+ return *(*unsafe.Pointer)(v.ptr)
+ }
+ return v.ptr
+}
+
+// packEface converts v to the empty interface.
+func packEface(v Value) any {
+ t := v.typ
+ var i any
+ e := (*emptyInterface)(unsafe.Pointer(&i))
+ // First, fill in the data portion of the interface.
+ switch {
+ case ifaceIndir(t):
+ if v.flag&flagIndir == 0 {
+ panic("bad indir")
+ }
+ // Value is indirect, and so is the interface we're making.
+ ptr := v.ptr
+ if v.flag&flagAddr != 0 {
+ // TODO: pass safe boolean from valueInterface so
+ // we don't need to copy if safe==true?
+ c := unsafe_New(t)
+ typedmemmove(t, c, ptr)
+ ptr = c
+ }
+ e.word = ptr
+ case v.flag&flagIndir != 0:
+ // Value is indirect, but interface is direct. We need
+ // to load the data at v.ptr into the interface data word.
+ e.word = *(*unsafe.Pointer)(v.ptr)
+ default:
+ // Value is direct, and so is the interface.
+ e.word = v.ptr
+ }
+ // Now, fill in the type portion. We're very careful here not
+ // to have any operation between the e.word and e.typ assignments
+ // that would let the garbage collector observe the partially-built
+ // interface value.
+ e.typ = t
+ return i
+}
+
+// unpackEface converts the empty interface i to a Value.
+func unpackEface(i any) Value {
+ e := (*emptyInterface)(unsafe.Pointer(&i))
+ // NOTE: don't read e.word until we know whether it is really a pointer or not.
+ t := e.typ
+ if t == nil {
+ return Value{}
+ }
+ f := flag(t.Kind())
+ if ifaceIndir(t) {
+ f |= flagIndir
+ }
+ return Value{t, e.word, f}
+}
+
+// A ValueError occurs when a Value method is invoked on
+// a Value that does not support it. Such cases are documented
+// in the description of each method.
+type ValueError struct {
+ Method string
+ Kind Kind
+}
+
+func (e *ValueError) Error() string {
+ if e.Kind == 0 {
+ return "reflect: call of " + e.Method + " on zero Value"
+ }
+ return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
+}
+
+// methodName returns the name of the calling method,
+// assumed to be two stack frames above.
+func methodName() string {
+ pc, _, _, _ := runtime.Caller(2)
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return "unknown method"
+ }
+ return f.Name()
+}
+
+// emptyInterface is the header for an interface{} value.
+type emptyInterface struct {
+ typ *abi.Type
+ word unsafe.Pointer
+}
+
+// mustBeExported panics if f records that the value was obtained using
+// an unexported field.
+func (f flag) mustBeExported() {
+ if f == 0 {
+ panic(&ValueError{methodName(), 0})
+ }
+ if f&flagRO != 0 {
+ panic("reflect: " + methodName() + " using value obtained using unexported field")
+ }
+}
+
+// mustBeAssignable panics if f records that the value is not assignable,
+// which is to say that either it was obtained using an unexported field
+// or it is not addressable.
+func (f flag) mustBeAssignable() {
+ if f == 0 {
+ panic(&ValueError{methodName(), abi.Invalid})
+ }
+ // Assignable if addressable and not read-only.
+ if f&flagRO != 0 {
+ panic("reflect: " + methodName() + " using value obtained using unexported field")
+ }
+ if f&flagAddr == 0 {
+ panic("reflect: " + methodName() + " using unaddressable value")
+ }
+}
+
+// CanSet reports whether the value of v can be changed.
+// A Value can be changed only if it is addressable and was not
+// obtained by the use of unexported struct fields.
+// If CanSet returns false, calling Set or any type-specific
+// setter (e.g., SetBool, SetInt) will panic.
+func (v Value) CanSet() bool {
+ return v.flag&(flagAddr|flagRO) == flagAddr
+}
+
+// Elem returns the value that the interface v contains
+// or that the pointer v points to.
+// It panics if v's Kind is not Interface or Pointer.
+// It returns the zero Value if v is nil.
+func (v Value) Elem() Value {
+ k := v.kind()
+ switch k {
+ case abi.Interface:
+ var eface any
+ if v.typ.NumMethod() == 0 {
+ eface = *(*any)(v.ptr)
+ } else {
+ eface = (any)(*(*interface {
+ M()
+ })(v.ptr))
+ }
+ x := unpackEface(eface)
+ if x.flag != 0 {
+ x.flag |= v.flag.ro()
+ }
+ return x
+ case abi.Pointer:
+ ptr := v.ptr
+ if v.flag&flagIndir != 0 {
+ ptr = *(*unsafe.Pointer)(ptr)
+ }
+ // The returned value's address is v's value.
+ if ptr == nil {
+ return Value{}
+ }
+ tt := (*ptrType)(unsafe.Pointer(v.typ))
+ typ := tt.Elem
+ fl := v.flag&flagRO | flagIndir | flagAddr
+ fl |= flag(typ.Kind())
+ return Value{typ, ptr, fl}
+ }
+ panic(&ValueError{"reflectlite.Value.Elem", v.kind()})
+}
+
+func valueInterface(v Value) any {
+ if v.flag == 0 {
+ panic(&ValueError{"reflectlite.Value.Interface", 0})
+ }
+
+ if v.kind() == abi.Interface {
+ // Special case: return the element inside the interface.
+ // Empty interface has one layout, all interfaces with
+ // methods have a second layout.
+ if v.numMethod() == 0 {
+ return *(*any)(v.ptr)
+ }
+ return *(*interface {
+ M()
+ })(v.ptr)
+ }
+
+ // TODO: pass safe to packEface so we don't need to copy if safe==true?
+ return packEface(v)
+}
+
+// IsNil reports whether its argument v is nil. The argument must be
+// a chan, func, interface, map, pointer, or slice value; if it is
+// not, IsNil panics. Note that IsNil is not always equivalent to a
+// regular comparison with nil in Go. For example, if v was created
+// by calling ValueOf with an uninitialized interface variable i,
+// i==nil will be true but v.IsNil will panic as v will be the zero
+// Value.
+func (v Value) IsNil() bool {
+ k := v.kind()
+ switch k {
+ case abi.Chan, abi.Func, abi.Map, abi.Pointer, abi.UnsafePointer:
+ // if v.flag&flagMethod != 0 {
+ // return false
+ // }
+ ptr := v.ptr
+ if v.flag&flagIndir != 0 {
+ ptr = *(*unsafe.Pointer)(ptr)
+ }
+ return ptr == nil
+ case abi.Interface, abi.Slice:
+ // Both interface and slice are nil if first word is 0.
+ // Both are always bigger than a word; assume flagIndir.
+ return *(*unsafe.Pointer)(v.ptr) == nil
+ }
+ panic(&ValueError{"reflectlite.Value.IsNil", v.kind()})
+}
+
+// IsValid reports whether v represents a value.
+// It returns false if v is the zero Value.
+// If IsValid returns false, all other methods except String panic.
+// Most functions and methods never return an invalid Value.
+// If one does, its documentation states the conditions explicitly.
+func (v Value) IsValid() bool {
+ return v.flag != 0
+}
+
+// Kind returns v's Kind.
+// If v is the zero Value (IsValid returns false), Kind returns Invalid.
+func (v Value) Kind() Kind {
+ return v.kind()
+}
+
+// implemented in runtime:
+func chanlen(unsafe.Pointer) int
+func maplen(unsafe.Pointer) int
+
+// Len returns v's length.
+// It panics if v's Kind is not Array, Chan, Map, Slice, or String.
+func (v Value) Len() int {
+ k := v.kind()
+ switch k {
+ case abi.Array:
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ return int(tt.Len)
+ case abi.Chan:
+ return chanlen(v.pointer())
+ case abi.Map:
+ return maplen(v.pointer())
+ case abi.Slice:
+ // Slice is bigger than a word; assume flagIndir.
+ return (*unsafeheader.Slice)(v.ptr).Len
+ case abi.String:
+ // String is bigger than a word; assume flagIndir.
+ return (*unsafeheader.String)(v.ptr).Len
+ }
+	panic(&ValueError{"reflectlite.Value.Len", v.kind()})
+}
+
+// numMethod returns the number of exported methods in the value's method set.
+func (v Value) numMethod() int {
+ if v.typ == nil {
+ panic(&ValueError{"reflectlite.Value.NumMethod", abi.Invalid})
+ }
+ return v.typ.NumMethod()
+}
+
+// Set assigns x to the value v.
+// It panics if CanSet returns false.
+// As in Go, x's value must be assignable to v's type.
+func (v Value) Set(x Value) {
+ v.mustBeAssignable()
+ x.mustBeExported() // do not let unexported x leak
+ var target unsafe.Pointer
+ if v.kind() == abi.Interface {
+ target = v.ptr
+ }
+ x = x.assignTo("reflectlite.Set", v.typ, target)
+ if x.flag&flagIndir != 0 {
+ typedmemmove(v.typ, v.ptr, x.ptr)
+ } else {
+ *(*unsafe.Pointer)(v.ptr) = x.ptr
+ }
+}
+
+// Type returns v's type.
+func (v Value) Type() Type {
+ f := v.flag
+ if f == 0 {
+ panic(&ValueError{"reflectlite.Value.Type", abi.Invalid})
+ }
+ // Method values not supported.
+ return toRType(v.typ)
+}
+
+/*
+ * constructors
+ */
+
+// implemented in package runtime
+func unsafe_New(*abi.Type) unsafe.Pointer
+
+// ValueOf returns a new Value initialized to the concrete value
+// stored in the interface i. ValueOf(nil) returns the zero Value.
+func ValueOf(i any) Value {
+ if i == nil {
+ return Value{}
+ }
+
+ // TODO: Maybe allow contents of a Value to live on the stack.
+ // For now we make the contents always escape to the heap. It
+ // makes life easier in a few places (see chanrecv/mapassign
+ // comment below).
+ escapes(i)
+
+ return unpackEface(i)
+}
+
+// assignTo returns a value v that can be assigned directly to typ.
+// It panics if v is not assignable to typ.
+// For a conversion to an interface type, target is a suggested scratch space to use.
+func (v Value) assignTo(context string, dst *abi.Type, target unsafe.Pointer) Value {
+ // if v.flag&flagMethod != 0 {
+ // v = makeMethodValue(context, v)
+ // }
+
+ switch {
+ case directlyAssignable(dst, v.typ):
+ // Overwrite type so that they match.
+ // Same memory layout, so no harm done.
+ fl := v.flag&(flagAddr|flagIndir) | v.flag.ro()
+ fl |= flag(dst.Kind())
+ return Value{dst, v.ptr, fl}
+
+ case implements(dst, v.typ):
+ if target == nil {
+ target = unsafe_New(dst)
+ }
+ if v.Kind() == abi.Interface && v.IsNil() {
+ // A nil ReadWriter passed to nil Reader is OK,
+ // but using ifaceE2I below will panic.
+ // Avoid the panic by returning a nil dst (e.g., Reader) explicitly.
+ return Value{dst, nil, flag(abi.Interface)}
+ }
+ x := valueInterface(v)
+ if dst.NumMethod() == 0 {
+ *(*any)(target) = x
+ } else {
+ ifaceE2I(dst, x, target)
+ }
+ return Value{dst, target, flagIndir | flag(abi.Interface)}
+ }
+
+ // Failed.
+ panic(context + ": value of type " + toRType(v.typ).String() + " is not assignable to type " + toRType(dst).String())
+}
+
+// arrayAt returns the i-th element of p,
+// an array whose elements are eltSize bytes wide.
+// The array pointed at by p must have at least i+1 elements:
+// it is invalid (but impossible to check here) to pass i >= len,
+// because then the result will point outside the array.
+// whySafe must explain why i < len. (Passing "i < len" is fine;
+// the benefit is to surface this assumption at the call site.)
+func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
+ return add(p, uintptr(i)*eltSize, "i < len")
+}
+
+func ifaceE2I(t *abi.Type, src any, dst unsafe.Pointer)
+
+// typedmemmove copies a value of type t to dst from src.
+//
+//go:noescape
+func typedmemmove(t *abi.Type, dst, src unsafe.Pointer)
+
+// Dummy annotation marking that the value x escapes,
+// for use in cases where the reflect code is so clever that
+// the compiler cannot follow.
+func escapes(x any) {
+ if dummy.b {
+ dummy.x = x
+ }
+}
+
+var dummy struct {
+ b bool
+ x any
+}
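
The reflectlite Value above mirrors a small subset of the exported reflect API, so its settability and zero-Value rules can be illustrated with the public package (a hedged sketch; reflectlite itself is internal and cannot be imported from user code):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Settability follows the CanSet rule above: the Value must be
	// addressable and must not come from an unexported struct field.
	x := 3
	v := reflect.ValueOf(&x).Elem()
	fmt.Println(v.CanSet()) // true
	v.SetInt(7)
	fmt.Println(x) // 7

	// The zero Value: IsValid reports false and Kind reports Invalid.
	var zero reflect.Value
	fmt.Println(zero.IsValid(), zero.Kind()) // false invalid
}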
diff --git a/src/internal/safefilepath/path.go b/src/internal/safefilepath/path.go
new file mode 100644
index 0000000..0f0a270
--- /dev/null
+++ b/src/internal/safefilepath/path.go
@@ -0,0 +1,21 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package safefilepath manipulates operating-system file paths.
+package safefilepath
+
+import (
+ "errors"
+)
+
+var errInvalidPath = errors.New("invalid path")
+
+// FromFS converts a slash-separated path into an operating-system path.
+//
+// FromFS returns an error if the path cannot be represented by the operating
+// system. For example, paths containing '\' and ':' characters are rejected
+// on Windows.
+func FromFS(path string) (string, error) {
+ return fromFS(path)
+}
diff --git a/src/internal/safefilepath/path_other.go b/src/internal/safefilepath/path_other.go
new file mode 100644
index 0000000..974e775
--- /dev/null
+++ b/src/internal/safefilepath/path_other.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package safefilepath
+
+import "runtime"
+
+func fromFS(path string) (string, error) {
+ if runtime.GOOS == "plan9" {
+ if len(path) > 0 && path[0] == '#' {
+ return "", errInvalidPath
+ }
+ }
+ for i := range path {
+ if path[i] == 0 {
+ return "", errInvalidPath
+ }
+ }
+ return path, nil
+}
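
As a hedged, self-contained sketch of the non-Windows rules above (reject embedded NUL bytes, reject '#'-prefixed names on Plan 9, otherwise pass the path through unchanged); the internal package itself cannot be imported, so the helper name fromFSSketch is illustrative:

package main

import (
	"errors"
	"fmt"
	"runtime"
	"strings"
)

var errInvalidPath = errors.New("invalid path")

// fromFSSketch mirrors the non-Windows fromFS: slashes are already the
// OS separator, so only NUL bytes (and Plan 9 device names) are rejected.
func fromFSSketch(path string) (string, error) {
	if runtime.GOOS == "plan9" && strings.HasPrefix(path, "#") {
		return "", errInvalidPath
	}
	if strings.IndexByte(path, 0) >= 0 {
		return "", errInvalidPath
	}
	return path, nil
}

func main() {
	fmt.Println(fromFSSketch("a/b/c"))  // a/b/c <nil>
	fmt.Println(fromFSSketch("a\x00b")) // "" invalid path
}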
diff --git a/src/internal/safefilepath/path_test.go b/src/internal/safefilepath/path_test.go
new file mode 100644
index 0000000..dc662c1
--- /dev/null
+++ b/src/internal/safefilepath/path_test.go
@@ -0,0 +1,88 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package safefilepath_test
+
+import (
+ "internal/safefilepath"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+type PathTest struct {
+ path, result string
+}
+
+const invalid = ""
+
+var fspathtests = []PathTest{
+ {".", "."},
+ {"/a/b/c", "/a/b/c"},
+ {"a\x00b", invalid},
+}
+
+var winreservedpathtests = []PathTest{
+ {`a\b`, `a\b`},
+ {`a:b`, `a:b`},
+ {`a/b:c`, `a/b:c`},
+ {`NUL`, `NUL`},
+ {`./com1`, `./com1`},
+ {`a/nul/b`, `a/nul/b`},
+}
+
+// Whether a reserved name with an extension is reserved or not varies by
+// Windows version.
+var winreservedextpathtests = []PathTest{
+ {"nul.txt", "nul.txt"},
+ {"a/nul.txt/b", "a/nul.txt/b"},
+}
+
+var plan9reservedpathtests = []PathTest{
+ {`#c`, `#c`},
+}
+
+func TestFromFS(t *testing.T) {
+ switch runtime.GOOS {
+ case "windows":
+ if canWriteFile(t, "NUL") {
+ t.Errorf("can unexpectedly write a file named NUL on Windows")
+ }
+ if canWriteFile(t, "nul.txt") {
+ fspathtests = append(fspathtests, winreservedextpathtests...)
+ } else {
+ winreservedpathtests = append(winreservedpathtests, winreservedextpathtests...)
+ }
+ for i := range winreservedpathtests {
+ winreservedpathtests[i].result = invalid
+ }
+ for i := range fspathtests {
+ fspathtests[i].result = filepath.FromSlash(fspathtests[i].result)
+ }
+ case "plan9":
+ for i := range plan9reservedpathtests {
+ plan9reservedpathtests[i].result = invalid
+ }
+ }
+ tests := fspathtests
+ tests = append(tests, winreservedpathtests...)
+ tests = append(tests, plan9reservedpathtests...)
+ for _, test := range tests {
+ got, err := safefilepath.FromFS(test.path)
+ if (got == "") != (err != nil) {
+ t.Errorf(`FromFS(%q) = %q, %v; want "" only if err != nil`, test.path, got, err)
+ }
+ if got != test.result {
+ t.Errorf("FromFS(%q) = %q, %v; want %q", test.path, got, err, test.result)
+ }
+ }
+}
+
+func canWriteFile(t *testing.T, name string) bool {
+ path := filepath.Join(t.TempDir(), name)
+ os.WriteFile(path, []byte("ok"), 0666)
+ b, _ := os.ReadFile(path)
+ return string(b) == "ok"
+}
diff --git a/src/internal/safefilepath/path_windows.go b/src/internal/safefilepath/path_windows.go
new file mode 100644
index 0000000..7cfd6ce
--- /dev/null
+++ b/src/internal/safefilepath/path_windows.go
@@ -0,0 +1,141 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package safefilepath
+
+import (
+ "syscall"
+ "unicode/utf8"
+)
+
+func fromFS(path string) (string, error) {
+ if !utf8.ValidString(path) {
+ return "", errInvalidPath
+ }
+ for len(path) > 1 && path[0] == '/' && path[1] == '/' {
+ path = path[1:]
+ }
+ containsSlash := false
+ for p := path; p != ""; {
+ // Find the next path element.
+ i := 0
+ for i < len(p) && p[i] != '/' {
+ switch p[i] {
+ case 0, '\\', ':':
+ return "", errInvalidPath
+ }
+ i++
+ }
+ part := p[:i]
+ if i < len(p) {
+ containsSlash = true
+ p = p[i+1:]
+ } else {
+ p = ""
+ }
+ if IsReservedName(part) {
+ return "", errInvalidPath
+ }
+ }
+ if containsSlash {
+ // We can't depend on strings, so substitute \ for / manually.
+ buf := []byte(path)
+ for i, b := range buf {
+ if b == '/' {
+ buf[i] = '\\'
+ }
+ }
+ path = string(buf)
+ }
+ return path, nil
+}
+
+// IsReservedName reports whether name is a Windows reserved device name.
+// It does not detect names with an extension, which are also reserved on some Windows versions.
+//
+// For details, search for PRN in
+// https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file.
+func IsReservedName(name string) bool {
+ // Device names can have arbitrary trailing characters following a dot or colon.
+ base := name
+ for i := 0; i < len(base); i++ {
+ switch base[i] {
+ case ':', '.':
+ base = base[:i]
+ }
+ }
+ // Trailing spaces in the last path element are ignored.
+ for len(base) > 0 && base[len(base)-1] == ' ' {
+ base = base[:len(base)-1]
+ }
+ if !isReservedBaseName(base) {
+ return false
+ }
+ if len(base) == len(name) {
+ return true
+ }
+ // The path element is a reserved name with an extension.
+ // Some Windows versions consider this a reserved name,
+ // while others do not. Use FullPath to see if the name is
+ // reserved.
+ if p, _ := syscall.FullPath(name); len(p) >= 4 && p[:4] == `\\.\` {
+ return true
+ }
+ return false
+}
+
+func isReservedBaseName(name string) bool {
+ if len(name) == 3 {
+ switch string([]byte{toUpper(name[0]), toUpper(name[1]), toUpper(name[2])}) {
+ case "CON", "PRN", "AUX", "NUL":
+ return true
+ }
+ }
+ if len(name) >= 4 {
+ switch string([]byte{toUpper(name[0]), toUpper(name[1]), toUpper(name[2])}) {
+ case "COM", "LPT":
+ if len(name) == 4 && '1' <= name[3] && name[3] <= '9' {
+ return true
+ }
+ // Superscript ¹, ², and ³ are considered numbers as well.
+ switch name[3:] {
+ case "\u00b2", "\u00b3", "\u00b9":
+ return true
+ }
+ return false
+ }
+ }
+
+ // Passing CONIN$ or CONOUT$ to CreateFile opens a console handle.
+ // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#consoles
+ //
+ // While CONIN$ and CONOUT$ aren't documented as being files,
+ // they behave the same as CON. For example, ./CONIN$ also opens the console input.
+ if len(name) == 6 && name[5] == '$' && equalFold(name, "CONIN$") {
+ return true
+ }
+ if len(name) == 7 && name[6] == '$' && equalFold(name, "CONOUT$") {
+ return true
+ }
+ return false
+}
+
+func equalFold(a, b string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if toUpper(a[i]) != toUpper(b[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+func toUpper(c byte) byte {
+ if 'a' <= c && c <= 'z' {
+ return c - ('a' - 'A')
+ }
+ return c
+}
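
A simplified, portable sketch of the base-name check performed by isReservedBaseName above (ignoring the superscript digits, CONIN$/CONOUT$, and the FullPath probe for names with extensions; the helper name is illustrative):

package main

import (
	"fmt"
	"strings"
)

// isReservedBaseSketch reports whether a path element matches the core
// reserved device names: CON, PRN, AUX, NUL, COM1-COM9 and LPT1-LPT9.
func isReservedBaseSketch(name string) bool {
	up := strings.ToUpper(name)
	switch up {
	case "CON", "PRN", "AUX", "NUL":
		return true
	}
	if len(up) == 4 && (strings.HasPrefix(up, "COM") || strings.HasPrefix(up, "LPT")) {
		return up[3] >= '1' && up[3] <= '9'
	}
	return false
}

func main() {
	for _, n := range []string{"NUL", "com1", "lpt0", "readme"} {
		fmt.Println(n, isReservedBaseSketch(n))
	}
}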
diff --git a/src/internal/saferio/io.go b/src/internal/saferio/io.go
new file mode 100644
index 0000000..66cc044
--- /dev/null
+++ b/src/internal/saferio/io.go
@@ -0,0 +1,135 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package saferio provides I/O functions that avoid allocating large
+// amounts of memory unnecessarily. This is intended for packages that
+// read data from an [io.Reader] where the size is part of the input
+// data but the input may be corrupt, or may be provided by an
+// untrustworthy attacker.
+package saferio
+
+import (
+ "io"
+ "reflect"
+)
+
+// chunk is an arbitrary limit on how much memory we are willing
+// to allocate without concern.
+const chunk = 10 << 20 // 10M
+
+// ReadData reads n bytes from the input stream, but avoids allocating
+// all n bytes if n is large. This avoids crashing the program by
+// allocating all n bytes in cases where n is incorrect.
+//
+// The error is io.EOF only if no bytes were read.
+// If an io.EOF happens after reading some but not all the bytes,
+// ReadData returns io.ErrUnexpectedEOF.
+func ReadData(r io.Reader, n uint64) ([]byte, error) {
+ if int64(n) < 0 || n != uint64(int(n)) {
+ // n is too large to fit in int, so we can't allocate
+ // a buffer large enough. Treat this as a read failure.
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if n < chunk {
+ buf := make([]byte, n)
+ _, err := io.ReadFull(r, buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf, nil
+ }
+
+ var buf []byte
+ buf1 := make([]byte, chunk)
+ for n > 0 {
+ next := n
+ if next > chunk {
+ next = chunk
+ }
+ _, err := io.ReadFull(r, buf1[:next])
+ if err != nil {
+ if len(buf) > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return nil, err
+ }
+ buf = append(buf, buf1[:next]...)
+ n -= next
+ }
+ return buf, nil
+}
+
+// ReadDataAt reads n bytes from the input stream at off, but avoids
+// allocating all n bytes if n is large. This avoids crashing the program
+// by allocating all n bytes in cases where n is incorrect.
+func ReadDataAt(r io.ReaderAt, n uint64, off int64) ([]byte, error) {
+ if int64(n) < 0 || n != uint64(int(n)) {
+ // n is too large to fit in int, so we can't allocate
+ // a buffer large enough. Treat this as a read failure.
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if n < chunk {
+ buf := make([]byte, n)
+ _, err := r.ReadAt(buf, off)
+ if err != nil {
+ // io.SectionReader can return EOF for n == 0,
+ // but for our purposes that is a success.
+ if err != io.EOF || n > 0 {
+ return nil, err
+ }
+ }
+ return buf, nil
+ }
+
+ var buf []byte
+ buf1 := make([]byte, chunk)
+ for n > 0 {
+ next := n
+ if next > chunk {
+ next = chunk
+ }
+ _, err := r.ReadAt(buf1[:next], off)
+ if err != nil {
+ return nil, err
+ }
+ buf = append(buf, buf1[:next]...)
+ n -= next
+ off += int64(next)
+ }
+ return buf, nil
+}
+
+// SliceCap returns the capacity to use when allocating a slice.
+// After the slice is allocated with the capacity, it should be
+// built using append. This will avoid allocating too much memory
+// if the capacity is large and incorrect.
+//
+// A negative result means that the value is always too big.
+//
+// The element type is described by passing a pointer to a value of that type.
+// This would ideally use generics, but this code is built with
+// the bootstrap compiler which need not support generics.
+// We use a pointer so that we can handle slices of interface type.
+func SliceCap(v any, c uint64) int {
+ if int64(c) < 0 || c != uint64(int(c)) {
+ return -1
+ }
+ typ := reflect.TypeOf(v)
+ if typ.Kind() != reflect.Ptr {
+ panic("SliceCap called with non-pointer type")
+ }
+ size := uint64(typ.Elem().Size())
+ if size > 0 && c > (1<<64-1)/size {
+ return -1
+ }
+ if c*size > chunk {
+ c = uint64(chunk / size)
+ if c == 0 {
+ c = 1
+ }
+ }
+ return int(c)
+}
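
A hedged sketch of the SliceCap idea for a fixed element size (the real function derives the size through reflect from a pointer value): allocate with a capped capacity, then let append grow the slice as data actually arrives.

package main

import "fmt"

const chunk = 10 << 20 // mirror the 10M threshold above

// sliceCapSketch caps the up-front allocation when a header claims an
// implausibly large element count c for elements of elemSize bytes.
func sliceCapSketch(elemSize, c uint64) int {
	if int64(c) < 0 || c != uint64(int(c)) {
		return -1
	}
	if elemSize > 0 && c > (1<<64-1)/elemSize {
		return -1
	}
	if c*elemSize > chunk {
		c = chunk / elemSize
		if c == 0 {
			c = 1
		}
	}
	return int(c)
}

func main() {
	// An untrusted header claims 1<<40 eight-byte records.
	capHint := sliceCapSketch(8, 1<<40)
	records := make([]uint64, 0, capHint)
	fmt.Println(capHint, cap(records)) // bounded, not 1<<40
}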
diff --git a/src/internal/saferio/io_test.go b/src/internal/saferio/io_test.go
new file mode 100644
index 0000000..356c9eb
--- /dev/null
+++ b/src/internal/saferio/io_test.go
@@ -0,0 +1,136 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package saferio
+
+import (
+ "bytes"
+ "io"
+ "testing"
+)
+
+func TestReadData(t *testing.T) {
+ const count = 100
+ input := bytes.Repeat([]byte{'a'}, count)
+
+ t.Run("small", func(t *testing.T) {
+ got, err := ReadData(bytes.NewReader(input), count)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(got, input) {
+ t.Errorf("got %v, want %v", got, input)
+ }
+ })
+
+ t.Run("large", func(t *testing.T) {
+ _, err := ReadData(bytes.NewReader(input), 10<<30)
+ if err == nil {
+ t.Error("large read succeeded unexpectedly")
+ }
+ })
+
+ t.Run("maxint", func(t *testing.T) {
+ _, err := ReadData(bytes.NewReader(input), 1<<62)
+ if err == nil {
+ t.Error("large read succeeded unexpectedly")
+ }
+ })
+
+ t.Run("small-EOF", func(t *testing.T) {
+ _, err := ReadData(bytes.NewReader(nil), chunk-1)
+ if err != io.EOF {
+ t.Errorf("ReadData = %v, want io.EOF", err)
+ }
+ })
+
+ t.Run("large-EOF", func(t *testing.T) {
+ _, err := ReadData(bytes.NewReader(nil), chunk+1)
+ if err != io.EOF {
+ t.Errorf("ReadData = %v, want io.EOF", err)
+ }
+ })
+
+ t.Run("large-UnexpectedEOF", func(t *testing.T) {
+ _, err := ReadData(bytes.NewReader(make([]byte, chunk)), chunk+1)
+ if err != io.ErrUnexpectedEOF {
+ t.Errorf("ReadData = %v, want io.ErrUnexpectedEOF", err)
+ }
+ })
+}
+
+func TestReadDataAt(t *testing.T) {
+ const count = 100
+ input := bytes.Repeat([]byte{'a'}, count)
+
+ t.Run("small", func(t *testing.T) {
+ got, err := ReadDataAt(bytes.NewReader(input), count, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(got, input) {
+ t.Errorf("got %v, want %v", got, input)
+ }
+ })
+
+ t.Run("large", func(t *testing.T) {
+ _, err := ReadDataAt(bytes.NewReader(input), 10<<30, 0)
+ if err == nil {
+ t.Error("large read succeeded unexpectedly")
+ }
+ })
+
+ t.Run("maxint", func(t *testing.T) {
+ _, err := ReadDataAt(bytes.NewReader(input), 1<<62, 0)
+ if err == nil {
+ t.Error("large read succeeded unexpectedly")
+ }
+ })
+
+ t.Run("SectionReader", func(t *testing.T) {
+ // Reading 0 bytes from an io.SectionReader at the end
+ // of the section will return EOF, but ReadDataAt
+ // should succeed and return 0 bytes.
+ sr := io.NewSectionReader(bytes.NewReader(input), 0, 0)
+ got, err := ReadDataAt(sr, 0, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(got) > 0 {
+ t.Errorf("got %d bytes, expected 0", len(got))
+ }
+ })
+}
+
+func TestSliceCap(t *testing.T) {
+ t.Run("small", func(t *testing.T) {
+ c := SliceCap((*int)(nil), 10)
+ if c != 10 {
+ t.Errorf("got capacity %d, want %d", c, 10)
+ }
+ })
+
+ t.Run("large", func(t *testing.T) {
+ c := SliceCap((*byte)(nil), 1<<30)
+ if c < 0 {
+ t.Error("SliceCap failed unexpectedly")
+ } else if c == 1<<30 {
+ t.Errorf("got capacity %d which is too high", c)
+ }
+ })
+
+ t.Run("maxint", func(t *testing.T) {
+ c := SliceCap((*byte)(nil), 1<<63)
+ if c >= 0 {
+ t.Errorf("SliceCap returned %d, expected failure", c)
+ }
+ })
+
+ t.Run("overflow", func(t *testing.T) {
+ c := SliceCap((*int64)(nil), 1<<62)
+ if c >= 0 {
+ t.Errorf("SliceCap returned %d, expected failure", c)
+ }
+ })
+}
diff --git a/src/internal/singleflight/singleflight.go b/src/internal/singleflight/singleflight.go
new file mode 100644
index 0000000..d0e6d2f
--- /dev/null
+++ b/src/internal/singleflight/singleflight.go
@@ -0,0 +1,123 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package singleflight provides a duplicate function call suppression
+// mechanism.
+package singleflight
+
+import "sync"
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+ wg sync.WaitGroup
+
+ // These fields are written once before the WaitGroup is done
+ // and are only read after the WaitGroup is done.
+ val any
+ err error
+
+ // These fields are read and written with the singleflight
+ // mutex held before the WaitGroup is done, and are read but
+ // not written after the WaitGroup is done.
+ dups int
+ chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group struct {
+ mu sync.Mutex // protects m
+ m map[string]*call // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+ Val any
+ Err error
+ Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group) Do(key string, fn func() (any, error)) (v any, err error, shared bool) {
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ g.mu.Unlock()
+ c.wg.Wait()
+ return c.val, c.err, true
+ }
+ c := new(call)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ g.doCall(c, key, fn)
+ return c.val, c.err, c.dups > 0
+}
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+func (g *Group) DoChan(key string, fn func() (any, error)) <-chan Result {
+ ch := make(chan Result, 1)
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ c.chans = append(c.chans, ch)
+ g.mu.Unlock()
+ return ch
+ }
+ c := &call{chans: []chan<- Result{ch}}
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ go g.doCall(c, key, fn)
+
+ return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group) doCall(c *call, key string, fn func() (any, error)) {
+ c.val, c.err = fn()
+
+ g.mu.Lock()
+ c.wg.Done()
+ if g.m[key] == c {
+ delete(g.m, key)
+ }
+ for _, ch := range c.chans {
+ ch <- Result{c.val, c.err, c.dups > 0}
+ }
+ g.mu.Unlock()
+}
+
+// ForgetUnshared tells the singleflight to forget about a key if it is not
+// shared with any other goroutines. Future calls to Do for a forgotten key
+// will call the function rather than waiting for an earlier call to complete.
+// Returns whether the key was forgotten or unknown--that is, whether no
+// other goroutines are waiting for the result.
+func (g *Group) ForgetUnshared(key string) bool {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ c, ok := g.m[key]
+ if !ok {
+ return true
+ }
+ if c.dups == 0 {
+ delete(g.m, key)
+ return true
+ }
+ return false
+}
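
Typical use of the duplicate-suppression pattern, shown with the exported golang.org/x/sync/singleflight module, which offers the same Do shape as this internal copy (assuming that module is available; internal/singleflight itself cannot be imported):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var calls atomic.Int32

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Overlapping callers of the same key share one execution.
			v, _, shared := g.Do("config", func() (interface{}, error) {
				calls.Add(1)
				return "loaded", nil
			})
			fmt.Println(v, shared)
		}()
	}
	wg.Wait()
	fmt.Println("executions:", calls.Load()) // usually 1
}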
diff --git a/src/internal/singleflight/singleflight_test.go b/src/internal/singleflight/singleflight_test.go
new file mode 100644
index 0000000..279e1be
--- /dev/null
+++ b/src/internal/singleflight/singleflight_test.go
@@ -0,0 +1,186 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package singleflight
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+func TestDo(t *testing.T) {
+ var g Group
+ v, err, _ := g.Do("key", func() (any, error) {
+ return "bar", nil
+ })
+ if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want {
+ t.Errorf("Do = %v; want %v", got, want)
+ }
+ if err != nil {
+ t.Errorf("Do error = %v", err)
+ }
+}
+
+func TestDoErr(t *testing.T) {
+ var g Group
+ someErr := errors.New("some error")
+ v, err, _ := g.Do("key", func() (any, error) {
+ return nil, someErr
+ })
+ if err != someErr {
+ t.Errorf("Do error = %v; want someErr %v", err, someErr)
+ }
+ if v != nil {
+ t.Errorf("unexpected non-nil value %#v", v)
+ }
+}
+
+func TestDoDupSuppress(t *testing.T) {
+ var g Group
+ var wg1, wg2 sync.WaitGroup
+ c := make(chan string, 1)
+ var calls atomic.Int32
+ fn := func() (any, error) {
+ if calls.Add(1) == 1 {
+ // First invocation.
+ wg1.Done()
+ }
+ v := <-c
+ c <- v // pump; make available for any future calls
+
+ time.Sleep(10 * time.Millisecond) // let more goroutines enter Do
+
+ return v, nil
+ }
+
+ const n = 10
+ wg1.Add(1)
+ for i := 0; i < n; i++ {
+ wg1.Add(1)
+ wg2.Add(1)
+ go func() {
+ defer wg2.Done()
+ wg1.Done()
+ v, err, _ := g.Do("key", fn)
+ if err != nil {
+ t.Errorf("Do error: %v", err)
+ return
+ }
+ if s, _ := v.(string); s != "bar" {
+ t.Errorf("Do = %T %v; want %q", v, v, "bar")
+ }
+ }()
+ }
+ wg1.Wait()
+ // At least one goroutine is in fn now and all of them have at
+ // least reached the line before the Do.
+ c <- "bar"
+ wg2.Wait()
+ if got := calls.Load(); got <= 0 || got >= n {
+ t.Errorf("number of calls = %d; want over 0 and less than %d", got, n)
+ }
+}
+
+func TestForgetUnshared(t *testing.T) {
+ var g Group
+
+ var firstStarted, firstFinished sync.WaitGroup
+
+ firstStarted.Add(1)
+ firstFinished.Add(1)
+
+ key := "key"
+ firstCh := make(chan struct{})
+ go func() {
+ g.Do(key, func() (i interface{}, e error) {
+ firstStarted.Done()
+ <-firstCh
+ return
+ })
+ firstFinished.Done()
+ }()
+
+ firstStarted.Wait()
+	g.ForgetUnshared(key) // from this point, no two functions using the same key should run concurrently
+
+ secondCh := make(chan struct{})
+ go func() {
+ g.Do(key, func() (i interface{}, e error) {
+ // Notify that we started
+ secondCh <- struct{}{}
+ <-secondCh
+ return 2, nil
+ })
+ }()
+
+ <-secondCh
+
+ resultCh := g.DoChan(key, func() (i interface{}, e error) {
+ panic("third must not be started")
+ })
+
+ if g.ForgetUnshared(key) {
+ t.Errorf("Before first goroutine finished, key %q is shared, should return false", key)
+ }
+
+ close(firstCh)
+ firstFinished.Wait()
+
+ if g.ForgetUnshared(key) {
+ t.Errorf("After first goroutine finished, key %q is still shared, should return false", key)
+ }
+
+ secondCh <- struct{}{}
+
+ if result := <-resultCh; result.Val != 2 {
+ t.Errorf("We should receive result produced by second call, expected: 2, got %d", result.Val)
+ }
+}
+
+func TestDoAndForgetUnsharedRace(t *testing.T) {
+ t.Parallel()
+
+ var g Group
+ key := "key"
+ d := time.Millisecond
+ for {
+ var calls, shared atomic.Int64
+ const n = 1000
+ var wg sync.WaitGroup
+ wg.Add(n)
+ for i := 0; i < n; i++ {
+ go func() {
+ g.Do(key, func() (interface{}, error) {
+ time.Sleep(d)
+ return calls.Add(1), nil
+ })
+ if !g.ForgetUnshared(key) {
+ shared.Add(1)
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ if calls.Load() != 1 {
+ // The goroutines didn't park in g.Do in time,
+ // so the key was re-added and may have been shared after the call.
+ // Try again with more time to park.
+ d *= 2
+ continue
+ }
+
+ // All of the Do calls ended up sharing the first
+ // invocation, so the key should have been unused
+ // (and therefore unshared) when they returned.
+ if shared.Load() > 0 {
+ t.Errorf("after a single shared Do, ForgetUnshared returned false %d times", shared.Load())
+ }
+ break
+ }
+}
diff --git a/src/internal/syscall/execenv/execenv_default.go b/src/internal/syscall/execenv/execenv_default.go
new file mode 100644
index 0000000..335647c
--- /dev/null
+++ b/src/internal/syscall/execenv/execenv_default.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package execenv
+
+import "syscall"
+
+// Default will return the default environment
+// variables based on the process attributes
+// provided.
+//
+// Defaults to syscall.Environ() on all platforms
+// other than Windows.
+func Default(sys *syscall.SysProcAttr) ([]string, error) {
+ return syscall.Environ(), nil
+}
diff --git a/src/internal/syscall/execenv/execenv_windows.go b/src/internal/syscall/execenv/execenv_windows.go
new file mode 100644
index 0000000..2a89ed1
--- /dev/null
+++ b/src/internal/syscall/execenv/execenv_windows.go
@@ -0,0 +1,47 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package execenv
+
+import (
+ "internal/syscall/windows"
+ "syscall"
+ "unsafe"
+)
+
+// Default will return the default environment
+// variables based on the process attributes
+// provided.
+//
+// If the process attributes contain a token, then
+// the environment variables will be sourced from
+// the defaults for that user token, otherwise they
+// will be sourced from syscall.Environ().
+func Default(sys *syscall.SysProcAttr) (env []string, err error) {
+ if sys == nil || sys.Token == 0 {
+ return syscall.Environ(), nil
+ }
+ var blockp *uint16
+ err = windows.CreateEnvironmentBlock(&blockp, sys.Token, false)
+ if err != nil {
+ return nil, err
+ }
+ defer windows.DestroyEnvironmentBlock(blockp)
+
+ const size = unsafe.Sizeof(*blockp)
+ for *blockp != 0 { // environment block ends with empty string
+ // find NUL terminator
+ end := unsafe.Add(unsafe.Pointer(blockp), size)
+ for *(*uint16)(end) != 0 {
+ end = unsafe.Add(end, size)
+ }
+
+ entry := unsafe.Slice(blockp, (uintptr(end)-uintptr(unsafe.Pointer(blockp)))/2)
+ env = append(env, syscall.UTF16ToString(entry))
+ blockp = (*uint16)(unsafe.Add(end, size))
+ }
+ return
+}
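
The loop above walks a UTF-16 environment block that ends with an empty string. A portable sketch of the same parsing over a []uint16, without the unsafe pointer arithmetic or the Windows API call:

package main

import (
	"fmt"
	"unicode/utf16"
)

// parseEnvBlock splits "VAR1=a\x00VAR2=b\x00\x00"-style UTF-16 blocks,
// the layout CreateEnvironmentBlock produces, into KEY=VALUE strings.
func parseEnvBlock(block []uint16) []string {
	var env []string
	start := 0
	for i := 0; i < len(block); i++ {
		if block[i] != 0 {
			continue
		}
		if i == start { // an empty string terminates the block
			break
		}
		env = append(env, string(utf16.Decode(block[start:i])))
		start = i + 1
	}
	return env
}

func main() {
	block := utf16.Encode([]rune("PATH=C:\\Windows\x00USER=gopher\x00\x00"))
	fmt.Println(parseEnvBlock(block)) // [PATH=C:\Windows USER=gopher]
}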
diff --git a/src/internal/syscall/unix/asm_aix_ppc64.s b/src/internal/syscall/unix/asm_aix_ppc64.s
new file mode 100644
index 0000000..9e82e3e
--- /dev/null
+++ b/src/internal/syscall/unix/asm_aix_ppc64.s
@@ -0,0 +1,12 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+//
+// System calls for aix/ppc64 are implemented in syscall/syscall_aix.go
+//
+
+TEXT ·syscall6(SB),NOSPLIT,$0
+ JMP syscall·syscall6(SB)
diff --git a/src/internal/syscall/unix/asm_darwin.s b/src/internal/syscall/unix/asm_darwin.s
new file mode 100644
index 0000000..8662c28
--- /dev/null
+++ b/src/internal/syscall/unix/asm_darwin.s
@@ -0,0 +1,24 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·libc_getentropy_trampoline(SB),NOSPLIT,$0-0; JMP libc_getentropy(SB)
+TEXT ·libc_getaddrinfo_trampoline(SB),NOSPLIT,$0-0; JMP libc_getaddrinfo(SB)
+TEXT ·libc_freeaddrinfo_trampoline(SB),NOSPLIT,$0-0; JMP libc_freeaddrinfo(SB)
+TEXT ·libc_getnameinfo_trampoline(SB),NOSPLIT,$0-0; JMP libc_getnameinfo(SB)
+TEXT ·libc_gai_strerror_trampoline(SB),NOSPLIT,$0-0; JMP libc_gai_strerror(SB)
+TEXT ·libresolv_res_9_ninit_trampoline(SB),NOSPLIT,$0-0; JMP libresolv_res_9_ninit(SB)
+TEXT ·libresolv_res_9_nclose_trampoline(SB),NOSPLIT,$0-0; JMP libresolv_res_9_nclose(SB)
+TEXT ·libresolv_res_9_nsearch_trampoline(SB),NOSPLIT,$0-0; JMP libresolv_res_9_nsearch(SB)
+TEXT ·libc_grantpt_trampoline(SB),NOSPLIT,$0-0; JMP libc_grantpt(SB)
+TEXT ·libc_unlockpt_trampoline(SB),NOSPLIT,$0-0; JMP libc_unlockpt(SB)
+TEXT ·libc_ptsname_r_trampoline(SB),NOSPLIT,$0-0; JMP libc_ptsname_r(SB)
+TEXT ·libc_posix_openpt_trampoline(SB),NOSPLIT,$0-0; JMP libc_posix_openpt(SB)
+TEXT ·libc_getgrouplist_trampoline(SB),NOSPLIT,$0-0; JMP libc_getgrouplist(SB)
+TEXT ·libc_getpwnam_r_trampoline(SB),NOSPLIT,$0-0; JMP libc_getpwnam_r(SB)
+TEXT ·libc_getpwuid_r_trampoline(SB),NOSPLIT,$0-0; JMP libc_getpwuid_r(SB)
+TEXT ·libc_getgrnam_r_trampoline(SB),NOSPLIT,$0-0; JMP libc_getgrnam_r(SB)
+TEXT ·libc_getgrgid_r_trampoline(SB),NOSPLIT,$0-0; JMP libc_getgrgid_r(SB)
+TEXT ·libc_sysconf_trampoline(SB),NOSPLIT,$0-0; JMP libc_sysconf(SB)
diff --git a/src/internal/syscall/unix/asm_solaris.s b/src/internal/syscall/unix/asm_solaris.s
new file mode 100644
index 0000000..2057338
--- /dev/null
+++ b/src/internal/syscall/unix/asm_solaris.s
@@ -0,0 +1,10 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// System calls for Solaris are implemented in runtime/syscall_solaris.go
+
+TEXT ·syscall6(SB),NOSPLIT,$0-88
+ JMP syscall·sysvicall6(SB)
diff --git a/src/internal/syscall/unix/at.go b/src/internal/syscall/unix/at.go
new file mode 100644
index 0000000..cfb6e41
--- /dev/null
+++ b/src/internal/syscall/unix/at.go
@@ -0,0 +1,40 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || linux || netbsd || (openbsd && mips64)
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Unlinkat(dirfd int, path string, flags int) error {
+ p, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ _, _, errno := syscall.Syscall(unlinkatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags))
+ if errno != 0 {
+ return errno
+ }
+
+ return nil
+}
+
+func Openat(dirfd int, path string, flags int, perm uint32) (int, error) {
+ p, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return 0, err
+ }
+
+ fd, _, errno := syscall.Syscall6(openatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags), uintptr(perm), 0, 0)
+ if errno != 0 {
+ return 0, errno
+ }
+
+ return int(fd), nil
+}
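
A Linux-only sketch of the same dirfd-relative open, issued through the frozen syscall package because internal/syscall/unix cannot be imported (the helper name openatSketch is illustrative; the real code selects the trap number per OS):

//go:build linux

package main

import (
	"fmt"
	"os"
	"syscall"
	"unsafe"
)

// openatSketch opens path relative to the directory fd dirfd, mirroring Openat above.
func openatSketch(dirfd int, path string, flags int, perm uint32) (int, error) {
	p, err := syscall.BytePtrFromString(path)
	if err != nil {
		return 0, err
	}
	fd, _, errno := syscall.Syscall6(syscall.SYS_OPENAT, uintptr(dirfd),
		uintptr(unsafe.Pointer(p)), uintptr(flags), uintptr(perm), 0, 0)
	if errno != 0 {
		return 0, errno
	}
	return int(fd), nil
}

func main() {
	dir, err := os.Open(os.TempDir())
	if err != nil {
		fmt.Println(err)
		return
	}
	defer dir.Close()

	fd, err := openatSketch(int(dir.Fd()), "probe.txt", os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		fmt.Println(err)
		return
	}
	syscall.Close(fd)
	fmt.Println("created probe.txt relative to the directory fd")
}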
diff --git a/src/internal/syscall/unix/at_aix.go b/src/internal/syscall/unix/at_aix.go
new file mode 100644
index 0000000..3fe3285
--- /dev/null
+++ b/src/internal/syscall/unix/at_aix.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+//go:cgo_import_dynamic libc_fstatat fstatat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_openat openat "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.a/shr_64.o"
+
+const (
+ AT_REMOVEDIR = 0x1
+ AT_SYMLINK_NOFOLLOW = 0x1
+ UTIME_OMIT = -0x3
+)
diff --git a/src/internal/syscall/unix/at_fstatat.go b/src/internal/syscall/unix/at_fstatat.go
new file mode 100644
index 0000000..8f25fe9
--- /dev/null
+++ b/src/internal/syscall/unix/at_fstatat.go
@@ -0,0 +1,28 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || (linux && !loong64) || netbsd || (openbsd && mips64)
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error {
+ var p *byte
+ p, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ _, _, errno := syscall.Syscall6(fstatatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if errno != 0 {
+ return errno
+ }
+
+ return nil
+
+}
diff --git a/src/internal/syscall/unix/at_fstatat2.go b/src/internal/syscall/unix/at_fstatat2.go
new file mode 100644
index 0000000..8d20e1a
--- /dev/null
+++ b/src/internal/syscall/unix/at_fstatat2.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd || (linux && loong64)
+
+package unix
+
+import "syscall"
+
+func Fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error {
+ return syscall.Fstatat(dirfd, path, stat, flags)
+}
diff --git a/src/internal/syscall/unix/at_js.go b/src/internal/syscall/unix/at_js.go
new file mode 100644
index 0000000..d05ccce
--- /dev/null
+++ b/src/internal/syscall/unix/at_js.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const (
+ // UTIME_OMIT is the sentinel value to indicate that a time value should not
+	// be changed. It is useful, for example, with UtimesNano to avoid
+	// changing AccessTime or ModifiedTime.
+ // Its value must match syscall/fs_js.go
+ UTIME_OMIT = -0x2
+)
diff --git a/src/internal/syscall/unix/at_libc.go b/src/internal/syscall/unix/at_libc.go
new file mode 100644
index 0000000..f48d379
--- /dev/null
+++ b/src/internal/syscall/unix/at_libc.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || solaris
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//go:linkname procFstatat libc_fstatat
+//go:linkname procOpenat libc_openat
+//go:linkname procUnlinkat libc_unlinkat
+
+var (
+ procFstatat,
+ procOpenat,
+ procUnlinkat uintptr
+)
+
+func Unlinkat(dirfd int, path string, flags int) error {
+ p, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ _, _, errno := syscall6(uintptr(unsafe.Pointer(&procUnlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags), 0, 0, 0)
+ if errno != 0 {
+ return errno
+ }
+
+ return nil
+}
+
+func Openat(dirfd int, path string, flags int, perm uint32) (int, error) {
+ p, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return 0, err
+ }
+
+ fd, _, errno := syscall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags), uintptr(perm), 0, 0)
+ if errno != 0 {
+ return 0, errno
+ }
+
+ return int(fd), nil
+}
+
+func Fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error {
+ p, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ _, _, errno := syscall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if errno != 0 {
+ return errno
+ }
+
+ return nil
+}
diff --git a/src/internal/syscall/unix/at_libc2.go b/src/internal/syscall/unix/at_libc2.go
new file mode 100644
index 0000000..93d0cf4
--- /dev/null
+++ b/src/internal/syscall/unix/at_libc2.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || (openbsd && !mips64)
+
+package unix
+
+import (
+ "syscall"
+ _ "unsafe" // for linkname
+)
+
+func Unlinkat(dirfd int, path string, flags int) error {
+ return unlinkat(dirfd, path, flags)
+}
+
+func Openat(dirfd int, path string, flags int, perm uint32) (int, error) {
+ return openat(dirfd, path, flags, perm)
+}
+
+func Fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error {
+ return fstatat(dirfd, path, stat, flags)
+}
+
+//go:linkname unlinkat syscall.unlinkat
+func unlinkat(dirfd int, path string, flags int) error
+
+//go:linkname openat syscall.openat
+func openat(dirfd int, path string, flags int, perm uint32) (int, error)
+
+//go:linkname fstatat syscall.fstatat
+func fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error
diff --git a/src/internal/syscall/unix/at_solaris.go b/src/internal/syscall/unix/at_solaris.go
new file mode 100644
index 0000000..4ab224d
--- /dev/null
+++ b/src/internal/syscall/unix/at_solaris.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "syscall"
+
+// Implemented as sysvicall6 in runtime/syscall_solaris.go.
+func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+//go:cgo_import_dynamic libc_fstatat fstatat "libc.so"
+//go:cgo_import_dynamic libc_openat openat "libc.so"
+//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so"
+
+const (
+ AT_REMOVEDIR = 0x1
+ AT_SYMLINK_NOFOLLOW = 0x1000
+
+ UTIME_OMIT = -0x2
+)
diff --git a/src/internal/syscall/unix/at_sysnum_darwin.go b/src/internal/syscall/unix/at_sysnum_darwin.go
new file mode 100644
index 0000000..208ff34
--- /dev/null
+++ b/src/internal/syscall/unix/at_sysnum_darwin.go
@@ -0,0 +1,10 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const AT_REMOVEDIR = 0x80
+const AT_SYMLINK_NOFOLLOW = 0x0020
+
+const UTIME_OMIT = -0x2
diff --git a/src/internal/syscall/unix/at_sysnum_dragonfly.go b/src/internal/syscall/unix/at_sysnum_dragonfly.go
new file mode 100644
index 0000000..b7ed3f7
--- /dev/null
+++ b/src/internal/syscall/unix/at_sysnum_dragonfly.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "syscall"
+
+const unlinkatTrap uintptr = syscall.SYS_UNLINKAT
+const openatTrap uintptr = syscall.SYS_OPENAT
+const fstatatTrap uintptr = syscall.SYS_FSTATAT
+
+const AT_REMOVEDIR = 0x2
+const AT_SYMLINK_NOFOLLOW = 0x1
+
+const UTIME_OMIT = -0x1
diff --git a/src/internal/syscall/unix/at_sysnum_freebsd.go b/src/internal/syscall/unix/at_sysnum_freebsd.go
new file mode 100644
index 0000000..9cd5da6
--- /dev/null
+++ b/src/internal/syscall/unix/at_sysnum_freebsd.go
@@ -0,0 +1,18 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "syscall"
+
+const (
+ AT_REMOVEDIR = 0x800
+ AT_SYMLINK_NOFOLLOW = 0x200
+
+ UTIME_OMIT = -0x2
+
+ unlinkatTrap uintptr = syscall.SYS_UNLINKAT
+ openatTrap uintptr = syscall.SYS_OPENAT
+ posixFallocateTrap uintptr = syscall.SYS_POSIX_FALLOCATE
+)
diff --git a/src/internal/syscall/unix/at_sysnum_fstatat64_linux.go b/src/internal/syscall/unix/at_sysnum_fstatat64_linux.go
new file mode 100644
index 0000000..445b0c3
--- /dev/null
+++ b/src/internal/syscall/unix/at_sysnum_fstatat64_linux.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm || mips || mipsle || 386
+
+package unix
+
+import "syscall"
+
+const fstatatTrap uintptr = syscall.SYS_FSTATAT64
diff --git a/src/internal/syscall/unix/at_sysnum_fstatat_linux.go b/src/internal/syscall/unix/at_sysnum_fstatat_linux.go
new file mode 100644
index 0000000..73a3da5
--- /dev/null
+++ b/src/internal/syscall/unix/at_sysnum_fstatat_linux.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 || riscv64
+
+package unix
+
+import "syscall"
+
+const fstatatTrap uintptr = syscall.SYS_FSTATAT
diff --git a/src/internal/syscall/unix/at_sysnum_linux.go b/src/internal/syscall/unix/at_sysnum_linux.go
new file mode 100644
index 0000000..7c3b15c
--- /dev/null
+++ b/src/internal/syscall/unix/at_sysnum_linux.go
@@ -0,0 +1,19 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "syscall"
+
+const unlinkatTrap uintptr = syscall.SYS_UNLINKAT
+const openatTrap uintptr = syscall.SYS_OPENAT
+
+const (
+ AT_EACCESS = 0x200
+ AT_FDCWD = -0x64
+ AT_REMOVEDIR = 0x200
+ AT_SYMLINK_NOFOLLOW = 0x100
+
+ UTIME_OMIT = 0x3ffffffe
+)
diff --git a/src/internal/syscall/unix/at_sysnum_netbsd.go b/src/internal/syscall/unix/at_sysnum_netbsd.go
new file mode 100644
index 0000000..becc1bd
--- /dev/null
+++ b/src/internal/syscall/unix/at_sysnum_netbsd.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "syscall"
+
+const unlinkatTrap uintptr = syscall.SYS_UNLINKAT
+const openatTrap uintptr = syscall.SYS_OPENAT
+const fstatatTrap uintptr = syscall.SYS_FSTATAT
+
+const AT_REMOVEDIR = 0x800
+const AT_SYMLINK_NOFOLLOW = 0x200
+
+const UTIME_OMIT = (1 << 30) - 2
diff --git a/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go b/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go
new file mode 100644
index 0000000..76edf67
--- /dev/null
+++ b/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || mips64 || mips64le || ppc64 || ppc64le || s390x
+
+package unix
+
+import "syscall"
+
+const fstatatTrap uintptr = syscall.SYS_NEWFSTATAT
diff --git a/src/internal/syscall/unix/at_sysnum_openbsd.go b/src/internal/syscall/unix/at_sysnum_openbsd.go
new file mode 100644
index 0000000..fd38947
--- /dev/null
+++ b/src/internal/syscall/unix/at_sysnum_openbsd.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "syscall"
+
+const unlinkatTrap uintptr = syscall.SYS_UNLINKAT
+const openatTrap uintptr = syscall.SYS_OPENAT
+const fstatatTrap uintptr = syscall.SYS_FSTATAT
+
+const AT_REMOVEDIR = 0x08
+const AT_SYMLINK_NOFOLLOW = 0x02
+
+const UTIME_OMIT = -0x1
diff --git a/src/internal/syscall/unix/at_wasip1.go b/src/internal/syscall/unix/at_wasip1.go
new file mode 100644
index 0000000..3d47d7e
--- /dev/null
+++ b/src/internal/syscall/unix/at_wasip1.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const (
+ // UTIME_OMIT is the sentinel value to indicate that a time value should not
+	// be changed. It is useful, for example, with UtimesNano to avoid
+	// changing AccessTime or ModifiedTime.
+ // Its value must match syscall/fs_wasip1.go
+ UTIME_OMIT = -0x2
+)
diff --git a/src/internal/syscall/unix/constants.go b/src/internal/syscall/unix/constants.go
new file mode 100644
index 0000000..e324589
--- /dev/null
+++ b/src/internal/syscall/unix/constants.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package unix
+
+const (
+ R_OK = 0x4
+ W_OK = 0x2
+ X_OK = 0x1
+)
diff --git a/src/internal/syscall/unix/copy_file_range_linux.go b/src/internal/syscall/unix/copy_file_range_linux.go
new file mode 100644
index 0000000..cf0a279
--- /dev/null
+++ b/src/internal/syscall/unix/copy_file_range_linux.go
@@ -0,0 +1,26 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) {
+ r1, _, errno := syscall.Syscall6(copyFileRangeTrap,
+ uintptr(rfd),
+ uintptr(unsafe.Pointer(roff)),
+ uintptr(wfd),
+ uintptr(unsafe.Pointer(woff)),
+ uintptr(len),
+ uintptr(flags),
+ )
+ n = int(r1)
+ if errno != 0 {
+ err = errno
+ }
+ return
+}
diff --git a/src/internal/syscall/unix/eaccess_linux.go b/src/internal/syscall/unix/eaccess_linux.go
new file mode 100644
index 0000000..5695a5e
--- /dev/null
+++ b/src/internal/syscall/unix/eaccess_linux.go
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "syscall"
+
+func Eaccess(path string, mode uint32) error {
+ return syscall.Faccessat(AT_FDCWD, path, mode, AT_EACCESS)
+}
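
The same effective-ID access check can be exercised directly through the exported syscall package on Linux (constants written out literally because the ones defined above are internal):

//go:build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// AT_FDCWD = -0x64, X_OK = 0x1, AT_EACCESS = 0x200: check whether the
	// effective (not real) user and group IDs may execute /bin/sh.
	err := syscall.Faccessat(-0x64, "/bin/sh", 0x1, 0x200)
	fmt.Println("effective uid may execute /bin/sh:", err == nil)
}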
diff --git a/src/internal/syscall/unix/eaccess_other.go b/src/internal/syscall/unix/eaccess_other.go
new file mode 100644
index 0000000..23be118
--- /dev/null
+++ b/src/internal/syscall/unix/eaccess_other.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix && !linux
+
+package unix
+
+import "syscall"
+
+func Eaccess(path string, mode uint32) error {
+ return syscall.ENOSYS
+}
diff --git a/src/internal/syscall/unix/fallocate_freebsd_386.go b/src/internal/syscall/unix/fallocate_freebsd_386.go
new file mode 100644
index 0000000..535b23d
--- /dev/null
+++ b/src/internal/syscall/unix/fallocate_freebsd_386.go
@@ -0,0 +1,17 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "syscall"
+
+func PosixFallocate(fd int, off int64, size int64) error {
+ // If successful, posix_fallocate() returns zero. It returns an error on failure, without
+ // setting errno. See https://man.freebsd.org/cgi/man.cgi?query=posix_fallocate&sektion=2&n=1
+ r1, _, _ := syscall.Syscall6(posixFallocateTrap, uintptr(fd), uintptr(off), uintptr(off>>32), uintptr(size), uintptr(size>>32), 0)
+ if r1 != 0 {
+ return syscall.Errno(r1)
+ }
+ return nil
+}
diff --git a/src/internal/syscall/unix/fallocate_freebsd_64bit.go b/src/internal/syscall/unix/fallocate_freebsd_64bit.go
new file mode 100644
index 0000000..a9d5228
--- /dev/null
+++ b/src/internal/syscall/unix/fallocate_freebsd_64bit.go
@@ -0,0 +1,19 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd && (amd64 || arm64 || riscv64)
+
+package unix
+
+import "syscall"
+
+func PosixFallocate(fd int, off int64, size int64) error {
+ // If successful, posix_fallocate() returns zero. It returns an error on failure, without
+ // setting errno. See https://man.freebsd.org/cgi/man.cgi?query=posix_fallocate&sektion=2&n=1
+ r1, _, _ := syscall.Syscall(posixFallocateTrap, uintptr(fd), uintptr(off), uintptr(size))
+ if r1 != 0 {
+ return syscall.Errno(r1)
+ }
+ return nil
+}
diff --git a/src/internal/syscall/unix/fallocate_freebsd_arm.go b/src/internal/syscall/unix/fallocate_freebsd_arm.go
new file mode 100644
index 0000000..1ded50f
--- /dev/null
+++ b/src/internal/syscall/unix/fallocate_freebsd_arm.go
@@ -0,0 +1,22 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "syscall"
+
+func PosixFallocate(fd int, off int64, size int64) error {
+ // If successful, posix_fallocate() returns zero. It returns an error on failure, without
+ // setting errno. See https://man.freebsd.org/cgi/man.cgi?query=posix_fallocate&sektion=2&n=1
+ //
+ // The padding 0 argument is needed because the ARM calling convention requires that if an
+ // argument (off in this case) needs double-word alignment (8-byte), the NCRN (next core
+ // register number) is rounded up to the next even register number.
+ // See https://github.com/ARM-software/abi-aa/blob/2bcab1e3b22d55170c563c3c7940134089176746/aapcs32/aapcs32.rst#parameter-passing
+ r1, _, _ := syscall.Syscall6(posixFallocateTrap, uintptr(fd), 0, uintptr(off), uintptr(off>>32), uintptr(size), uintptr(size>>32))
+ if r1 != 0 {
+ return syscall.Errno(r1)
+ }
+ return nil
+}
diff --git a/src/internal/syscall/unix/fcntl_js.go b/src/internal/syscall/unix/fcntl_js.go
new file mode 100644
index 0000000..bdfb8e0
--- /dev/null
+++ b/src/internal/syscall/unix/fcntl_js.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js && wasm
+
+package unix
+
+import "syscall"
+
+func Fcntl(fd int, cmd int, arg int) (int, error) {
+ return 0, syscall.ENOSYS
+}
diff --git a/src/internal/syscall/unix/fcntl_unix.go b/src/internal/syscall/unix/fcntl_unix.go
new file mode 100644
index 0000000..6f9e124
--- /dev/null
+++ b/src/internal/syscall/unix/fcntl_unix.go
@@ -0,0 +1,25 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package unix
+
+import (
+ "syscall"
+ _ "unsafe" // for go:linkname
+)
+
+// Implemented in the runtime package.
+//
+//go:linkname fcntl runtime.fcntl
+func fcntl(fd int32, cmd int32, arg int32) (int32, int32)
+
+func Fcntl(fd int, cmd int, arg int) (int, error) {
+ val, errno := fcntl(int32(fd), int32(cmd), int32(arg))
+ if val == -1 {
+ return int(val), syscall.Errno(errno)
+ }
+ return int(val), nil
+}
diff --git a/src/internal/syscall/unix/fcntl_wasip1.go b/src/internal/syscall/unix/fcntl_wasip1.go
new file mode 100644
index 0000000..e70cd74
--- /dev/null
+++ b/src/internal/syscall/unix/fcntl_wasip1.go
@@ -0,0 +1,17 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build wasip1
+
+package unix
+
+import "syscall"
+
+func Fcntl(fd int, cmd int, arg int) (int, error) {
+ if cmd == syscall.F_GETFL {
+ flags, err := fd_fdstat_get_flags(fd)
+ return int(flags), err
+ }
+ return 0, syscall.ENOSYS
+}
diff --git a/src/internal/syscall/unix/getentropy_darwin.go b/src/internal/syscall/unix/getentropy_darwin.go
new file mode 100644
index 0000000..834099f
--- /dev/null
+++ b/src/internal/syscall/unix/getentropy_darwin.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin && !ios
+
+package unix
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+//go:cgo_import_dynamic libc_getentropy getentropy "/usr/lib/libSystem.B.dylib"
+
+func libc_getentropy_trampoline()
+
+// GetEntropy calls the macOS getentropy system call.
+func GetEntropy(p []byte) error {
+ _, _, errno := syscall_syscall(abi.FuncPCABI0(libc_getentropy_trampoline),
+ uintptr(unsafe.Pointer(&p[0])),
+ uintptr(len(p)),
+ 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
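
getentropy(2) rejects requests larger than 256 bytes, so callers have to chunk larger buffers. A hedged sketch of that pattern (the helper name and package are illustrative, not taken from the Go tree):

package example // hypothetical consumer of internal/syscall/unix

import "internal/syscall/unix"

// fillEntropy fills b via GetEntropy in chunks of at most 256 bytes,
// the maximum a single getentropy(2) call accepts.
func fillEntropy(b []byte) error {
	for len(b) > 0 {
		n := len(b)
		if n > 256 {
			n = 256
		}
		if err := unix.GetEntropy(b[:n]); err != nil {
			return err
		}
		b = b[n:]
	}
	return nil
}
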
diff --git a/src/internal/syscall/unix/getentropy_openbsd.go b/src/internal/syscall/unix/getentropy_openbsd.go
new file mode 100644
index 0000000..ad0914d
--- /dev/null
+++ b/src/internal/syscall/unix/getentropy_openbsd.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build openbsd && !mips64
+
+package unix
+
+import _ "unsafe" // for linkname
+
+// GetEntropy calls the OpenBSD getentropy system call.
+func GetEntropy(p []byte) error {
+ return getentropy(p)
+}
+
+//go:linkname getentropy syscall.getentropy
+func getentropy(p []byte) error
diff --git a/src/internal/syscall/unix/getentropy_openbsd_mips64.go b/src/internal/syscall/unix/getentropy_openbsd_mips64.go
new file mode 100644
index 0000000..d5caa80
--- /dev/null
+++ b/src/internal/syscall/unix/getentropy_openbsd_mips64.go
@@ -0,0 +1,25 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// getentropy(2)'s syscall number, from /usr/src/sys/kern/syscalls.master
+const entropyTrap uintptr = 7
+
+// GetEntropy calls the OpenBSD getentropy system call.
+func GetEntropy(p []byte) error {
+ _, _, errno := syscall.Syscall(entropyTrap,
+ uintptr(unsafe.Pointer(&p[0])),
+ uintptr(len(p)),
+ 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
diff --git a/src/internal/syscall/unix/getrandom.go b/src/internal/syscall/unix/getrandom.go
new file mode 100644
index 0000000..e83f0cd
--- /dev/null
+++ b/src/internal/syscall/unix/getrandom.go
@@ -0,0 +1,39 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || linux
+
+package unix
+
+import (
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+var getrandomUnsupported atomic.Bool
+
+// GetRandomFlag is a flag supported by the getrandom system call.
+type GetRandomFlag uintptr
+
+// GetRandom calls the getrandom system call.
+func GetRandom(p []byte, flags GetRandomFlag) (n int, err error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ if getrandomUnsupported.Load() {
+ return 0, syscall.ENOSYS
+ }
+ r1, _, errno := syscall.Syscall(getrandomTrap,
+ uintptr(unsafe.Pointer(&p[0])),
+ uintptr(len(p)),
+ uintptr(flags))
+ if errno != 0 {
+ if errno == syscall.ENOSYS {
+ getrandomUnsupported.Store(true)
+ }
+ return 0, errno
+ }
+ return int(r1), nil
+}
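
A minimal usage sketch for GetRandom, assuming code that lives inside the standard library (internal packages are not importable elsewhere); the /dev/urandom fallback that crypto/rand layers on top of ENOSYS is intentionally left out, and the helper name is made up.

package example // hypothetical; crypto/rand is the real consumer

import (
	"internal/syscall/unix"
	"syscall"
)

// fillRandom fills b from the kernel's getrandom source, looping because
// a single call may return fewer bytes than requested.
func fillRandom(b []byte) error {
	for len(b) > 0 {
		n, err := unix.GetRandom(b, 0) // flags 0: block until the pool is initialized
		if err == syscall.EINTR {
			continue // interrupted by a signal; retry
		}
		if err != nil {
			return err // e.g. ENOSYS on kernels without getrandom(2)
		}
		b = b[n:]
	}
	return nil
}
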
diff --git a/src/internal/syscall/unix/getrandom_dragonfly.go b/src/internal/syscall/unix/getrandom_dragonfly.go
new file mode 100644
index 0000000..fbf78f9
--- /dev/null
+++ b/src/internal/syscall/unix/getrandom_dragonfly.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+// DragonFlyBSD getrandom system call number.
+const getrandomTrap uintptr = 550
+
+const (
+ // GRND_RANDOM is defined only for portability purposes; it is a no-op on DragonFlyBSD.
+ GRND_RANDOM GetRandomFlag = 0x0001
+
+ // GRND_NONBLOCK means return EAGAIN rather than blocking.
+ GRND_NONBLOCK GetRandomFlag = 0x0002
+)
diff --git a/src/internal/syscall/unix/getrandom_freebsd.go b/src/internal/syscall/unix/getrandom_freebsd.go
new file mode 100644
index 0000000..8c4f3df
--- /dev/null
+++ b/src/internal/syscall/unix/getrandom_freebsd.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+// FreeBSD getrandom system call number.
+const getrandomTrap uintptr = 563
+
+const (
+ // GRND_NONBLOCK means return EAGAIN rather than blocking.
+ GRND_NONBLOCK GetRandomFlag = 0x0001
+
+ // GRND_RANDOM is defined only for portability purposes; it is a no-op on FreeBSD.
+ GRND_RANDOM GetRandomFlag = 0x0002
+)
diff --git a/src/internal/syscall/unix/getrandom_linux.go b/src/internal/syscall/unix/getrandom_linux.go
new file mode 100644
index 0000000..8ccd8d3
--- /dev/null
+++ b/src/internal/syscall/unix/getrandom_linux.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const (
+ // GRND_NONBLOCK means return EAGAIN rather than blocking.
+ GRND_NONBLOCK GetRandomFlag = 0x0001
+
+ // GRND_RANDOM means use the /dev/random pool instead of /dev/urandom.
+ GRND_RANDOM GetRandomFlag = 0x0002
+)
diff --git a/src/internal/syscall/unix/getrandom_netbsd.go b/src/internal/syscall/unix/getrandom_netbsd.go
new file mode 100644
index 0000000..c83e3b2
--- /dev/null
+++ b/src/internal/syscall/unix/getrandom_netbsd.go
@@ -0,0 +1,56 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+// NetBSD getrandom system call number.
+const getrandomTrap uintptr = 91
+
+var getrandomUnsupported atomic.Bool
+
+// GetRandomFlag is a flag supported by the getrandom system call.
+type GetRandomFlag uintptr
+
+// GetRandom calls the getrandom system call.
+func GetRandom(p []byte, flags GetRandomFlag) (n int, err error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ if getrandomUnsupported.Load() {
+ return 0, syscall.ENOSYS
+ }
+ // getrandom(2) was added in NetBSD 10.0
+ if getOSRevision() < 1000000000 {
+ getrandomUnsupported.Store(true)
+ return 0, syscall.ENOSYS
+ }
+ r1, _, errno := syscall.Syscall(getrandomTrap,
+ uintptr(unsafe.Pointer(&p[0])),
+ uintptr(len(p)),
+ uintptr(flags))
+ if errno != 0 {
+ if errno == syscall.ENOSYS {
+ getrandomUnsupported.Store(true)
+ }
+ return 0, errno
+ }
+ return int(r1), nil
+}
+
+var (
+ osrevisionOnce sync.Once
+ osrevision uint32
+)
+
+func getOSRevision() uint32 {
+ osrevisionOnce.Do(func() { osrevision, _ = syscall.SysctlUint32("kern.osrevision") })
+ return osrevision
+}
diff --git a/src/internal/syscall/unix/getrandom_solaris.go b/src/internal/syscall/unix/getrandom_solaris.go
new file mode 100644
index 0000000..cf4f35a
--- /dev/null
+++ b/src/internal/syscall/unix/getrandom_solaris.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+//go:cgo_import_dynamic libc_getrandom getrandom "libc.so"
+
+//go:linkname procGetrandom libc_getrandom
+
+var procGetrandom uintptr
+
+var getrandomUnsupported atomic.Bool
+
+// GetRandomFlag is a flag supported by the getrandom system call.
+type GetRandomFlag uintptr
+
+const (
+ // GRND_NONBLOCK means return EAGAIN rather than blocking.
+ GRND_NONBLOCK GetRandomFlag = 0x0001
+
+ // GRND_RANDOM means use the /dev/random pool instead of /dev/urandom.
+ GRND_RANDOM GetRandomFlag = 0x0002
+)
+
+// GetRandom calls the getrandom system call.
+func GetRandom(p []byte, flags GetRandomFlag) (n int, err error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ if getrandomUnsupported.Load() {
+ return 0, syscall.ENOSYS
+ }
+ r1, _, errno := syscall6(uintptr(unsafe.Pointer(&procGetrandom)),
+ 3,
+ uintptr(unsafe.Pointer(&p[0])),
+ uintptr(len(p)),
+ uintptr(flags),
+ 0, 0, 0)
+ if errno != 0 {
+ if errno == syscall.ENOSYS {
+ getrandomUnsupported.Store(true)
+ }
+ return 0, errno
+ }
+ return int(r1), nil
+}
diff --git a/src/internal/syscall/unix/ioctl_aix.go b/src/internal/syscall/unix/ioctl_aix.go
new file mode 100644
index 0000000..d361533
--- /dev/null
+++ b/src/internal/syscall/unix/ioctl_aix.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.a/shr_64.o"
+//go:linkname libc_ioctl libc_ioctl
+var libc_ioctl uintptr
+
+// Implemented in syscall/syscall_aix.go.
+func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+func Ioctl(fd int, cmd int, args unsafe.Pointer) (err error) {
+ _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_ioctl)), 3, uintptr(fd), uintptr(cmd), uintptr(args), 0, 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/src/internal/syscall/unix/kernel_version_linux.go b/src/internal/syscall/unix/kernel_version_linux.go
new file mode 100644
index 0000000..71e8aa4
--- /dev/null
+++ b/src/internal/syscall/unix/kernel_version_linux.go
@@ -0,0 +1,42 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "syscall"
+)
+
+// KernelVersion returns major and minor kernel version numbers, parsed from
+// the Release field reported by syscall.Uname, or 0, 0 if the version can't be obtained
+// or parsed.
+//
+// Currently only implemented for Linux.
+func KernelVersion() (major, minor int) {
+ var uname syscall.Utsname
+ if err := syscall.Uname(&uname); err != nil {
+ return
+ }
+
+ var (
+ values [2]int
+ value, vi int
+ )
+ for _, c := range uname.Release {
+ if '0' <= c && c <= '9' {
+ value = (value * 10) + int(c-'0')
+ } else {
+ // Note that we're assuming N.N.N here.
+ // If we see anything else, we are likely to mis-parse it.
+ values[vi] = value
+ vi++
+ if vi >= len(values) {
+ break
+ }
+ value = 0
+ }
+ }
+
+ return values[0], values[1]
+}
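
To make the digit-scanning loop above concrete, here is a small standalone sketch that applies the same logic to a plain string. Note that the real function ranges over the NUL-padded Utsname.Release array, so the trailing zero byte always flushes the minor number; a bare Go string has no such terminator.

package main

import "fmt"

// parseKernelVersion mirrors KernelVersion's loop: accumulate decimal
// digits, treat any non-digit as a separator, stop after two numbers.
func parseKernelVersion(release string) (major, minor int) {
	var (
		values    [2]int
		value, vi int
	)
	for _, c := range release {
		if '0' <= c && c <= '9' {
			value = value*10 + int(c-'0')
		} else {
			values[vi] = value
			vi++
			if vi >= len(values) {
				break
			}
			value = 0
		}
	}
	return values[0], values[1]
}

func main() {
	fmt.Println(parseKernelVersion("6.1.55-generic")) // 6 1
	fmt.Println(parseKernelVersion("5.10.0"))         // 5 10
}
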
diff --git a/src/internal/syscall/unix/kernel_version_other.go b/src/internal/syscall/unix/kernel_version_other.go
new file mode 100644
index 0000000..00af9f2
--- /dev/null
+++ b/src/internal/syscall/unix/kernel_version_other.go
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux
+
+package unix
+
+func KernelVersion() (major int, minor int) {
+ return 0, 0
+}
diff --git a/src/internal/syscall/unix/net.go b/src/internal/syscall/unix/net.go
new file mode 100644
index 0000000..5618d40
--- /dev/null
+++ b/src/internal/syscall/unix/net.go
@@ -0,0 +1,44 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package unix
+
+import (
+ "syscall"
+ _ "unsafe"
+)
+
+//go:linkname RecvfromInet4 syscall.recvfromInet4
+//go:noescape
+func RecvfromInet4(fd int, p []byte, flags int, from *syscall.SockaddrInet4) (int, error)
+
+//go:linkname RecvfromInet6 syscall.recvfromInet6
+//go:noescape
+func RecvfromInet6(fd int, p []byte, flags int, from *syscall.SockaddrInet6) (n int, err error)
+
+//go:linkname SendtoInet4 syscall.sendtoInet4
+//go:noescape
+func SendtoInet4(fd int, p []byte, flags int, to *syscall.SockaddrInet4) (err error)
+
+//go:linkname SendtoInet6 syscall.sendtoInet6
+//go:noescape
+func SendtoInet6(fd int, p []byte, flags int, to *syscall.SockaddrInet6) (err error)
+
+//go:linkname SendmsgNInet4 syscall.sendmsgNInet4
+//go:noescape
+func SendmsgNInet4(fd int, p, oob []byte, to *syscall.SockaddrInet4, flags int) (n int, err error)
+
+//go:linkname SendmsgNInet6 syscall.sendmsgNInet6
+//go:noescape
+func SendmsgNInet6(fd int, p, oob []byte, to *syscall.SockaddrInet6, flags int) (n int, err error)
+
+//go:linkname RecvmsgInet4 syscall.recvmsgInet4
+//go:noescape
+func RecvmsgInet4(fd int, p, oob []byte, flags int, from *syscall.SockaddrInet4) (n, oobn int, recvflags int, err error)
+
+//go:linkname RecvmsgInet6 syscall.recvmsgInet6
+//go:noescape
+func RecvmsgInet6(fd int, p, oob []byte, flags int, from *syscall.SockaddrInet6) (n, oobn int, recvflags int, err error)
diff --git a/src/internal/syscall/unix/net_darwin.go b/src/internal/syscall/unix/net_darwin.go
new file mode 100644
index 0000000..5601b49
--- /dev/null
+++ b/src/internal/syscall/unix/net_darwin.go
@@ -0,0 +1,162 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "internal/abi"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ AI_CANONNAME = 0x2
+ AI_ALL = 0x100
+ AI_V4MAPPED = 0x800
+ AI_MASK = 0x1407
+
+ EAI_AGAIN = 2
+ EAI_NODATA = 7
+ EAI_NONAME = 8
+ EAI_SYSTEM = 11
+ EAI_OVERFLOW = 14
+
+ NI_NAMEREQD = 4
+)
+
+type Addrinfo struct {
+ Flags int32
+ Family int32
+ Socktype int32
+ Protocol int32
+ Addrlen uint32
+ Canonname *byte
+ Addr *syscall.RawSockaddr
+ Next *Addrinfo
+}
+
+//go:cgo_ldflag "-lresolv"
+
+//go:cgo_import_dynamic libc_getaddrinfo getaddrinfo "/usr/lib/libSystem.B.dylib"
+func libc_getaddrinfo_trampoline()
+
+func Getaddrinfo(hostname, servname *byte, hints *Addrinfo, res **Addrinfo) (int, error) {
+ gerrno, _, errno := syscall_syscall6(abi.FuncPCABI0(libc_getaddrinfo_trampoline),
+ uintptr(unsafe.Pointer(hostname)),
+ uintptr(unsafe.Pointer(servname)),
+ uintptr(unsafe.Pointer(hints)),
+ uintptr(unsafe.Pointer(res)),
+ 0,
+ 0)
+ var err error
+ if errno != 0 {
+ err = errno
+ }
+ return int(gerrno), err
+}
+
+//go:cgo_import_dynamic libc_freeaddrinfo freeaddrinfo "/usr/lib/libSystem.B.dylib"
+func libc_freeaddrinfo_trampoline()
+
+func Freeaddrinfo(ai *Addrinfo) {
+ syscall_syscall6(abi.FuncPCABI0(libc_freeaddrinfo_trampoline),
+ uintptr(unsafe.Pointer(ai)),
+ 0, 0, 0, 0, 0)
+}
+
+//go:cgo_import_dynamic libc_getnameinfo getnameinfo "/usr/lib/libSystem.B.dylib"
+func libc_getnameinfo_trampoline()
+
+func Getnameinfo(sa *syscall.RawSockaddr, salen int, host *byte, hostlen int, serv *byte, servlen int, flags int) (int, error) {
+ gerrno, _, errno := syscall_syscall9(abi.FuncPCABI0(libc_getnameinfo_trampoline),
+ uintptr(unsafe.Pointer(sa)),
+ uintptr(salen),
+ uintptr(unsafe.Pointer(host)),
+ uintptr(hostlen),
+ uintptr(unsafe.Pointer(serv)),
+ uintptr(servlen),
+ uintptr(flags),
+ 0,
+ 0)
+ var err error
+ if errno != 0 {
+ err = errno
+ }
+ return int(gerrno), err
+}
+
+//go:cgo_import_dynamic libc_gai_strerror gai_strerror "/usr/lib/libSystem.B.dylib"
+func libc_gai_strerror_trampoline()
+
+func GaiStrerror(ecode int) string {
+ r1, _, _ := syscall_syscall(abi.FuncPCABI0(libc_gai_strerror_trampoline),
+ uintptr(ecode),
+ 0, 0)
+ return GoString((*byte)(unsafe.Pointer(r1)))
+}
+
+// Implemented in the runtime package.
+func gostring(*byte) string
+
+func GoString(p *byte) string {
+ return gostring(p)
+}
+
+//go:linkname syscall_syscall syscall.syscall
+func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+//go:linkname syscall_syscallPtr syscall.syscallPtr
+func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+//go:linkname syscall_syscall6 syscall.syscall6
+func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+//go:linkname syscall_syscall6X syscall.syscall6X
+func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+//go:linkname syscall_syscall9 syscall.syscall9
+func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+type ResState struct {
+ unexported [69]uintptr
+}
+
+//go:cgo_import_dynamic libresolv_res_9_ninit res_9_ninit "/usr/lib/libresolv.9.dylib"
+func libresolv_res_9_ninit_trampoline()
+
+func ResNinit(state *ResState) error {
+ _, _, errno := syscall_syscall(abi.FuncPCABI0(libresolv_res_9_ninit_trampoline),
+ uintptr(unsafe.Pointer(state)),
+ 0, 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+//go:cgo_import_dynamic libresolv_res_9_nclose res_9_nclose "/usr/lib/libresolv.9.dylib"
+func libresolv_res_9_nclose_trampoline()
+
+func ResNclose(state *ResState) {
+ syscall_syscall(abi.FuncPCABI0(libresolv_res_9_nclose_trampoline),
+ uintptr(unsafe.Pointer(state)),
+ 0, 0)
+}
+
+//go:cgo_import_dynamic libresolv_res_9_nsearch res_9_nsearch "/usr/lib/libresolv.9.dylib"
+func libresolv_res_9_nsearch_trampoline()
+
+func ResNsearch(state *ResState, dname *byte, class, typ int, ans *byte, anslen int) (int, error) {
+ r1, _, errno := syscall_syscall6(abi.FuncPCABI0(libresolv_res_9_nsearch_trampoline),
+ uintptr(unsafe.Pointer(state)),
+ uintptr(unsafe.Pointer(dname)),
+ uintptr(class),
+ uintptr(typ),
+ uintptr(unsafe.Pointer(ans)),
+ uintptr(anslen))
+ if errno != 0 {
+ return 0, errno
+ }
+ return int(int32(r1)), nil
+}
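
A rough sketch of how these bindings compose for a name lookup, in the spirit of the cgo-free macOS resolver in the net package; the helper below is illustrative only, with simplified handling of the EAI_* codes.

package example // hypothetical; the net package is the real caller

import (
	"errors"
	"syscall"

	"internal/syscall/unix"
)

// lookupCanonicalName resolves host with getaddrinfo and returns the
// canonical name from the first entry that carries one.
func lookupCanonicalName(host string) (string, error) {
	hostp, err := syscall.BytePtrFromString(host)
	if err != nil {
		return "", err
	}
	hints := unix.Addrinfo{Flags: unix.AI_CANONNAME}
	var res *unix.Addrinfo
	gerrno, err := unix.Getaddrinfo(hostp, nil, &hints, &res)
	if gerrno != 0 {
		if gerrno == unix.EAI_SYSTEM {
			return "", err // the real cause is in errno
		}
		return "", errors.New(unix.GaiStrerror(gerrno))
	}
	defer unix.Freeaddrinfo(res)
	for ai := res; ai != nil; ai = ai.Next {
		if ai.Canonname != nil {
			return unix.GoString(ai.Canonname), nil
		}
	}
	return host, nil
}
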
diff --git a/src/internal/syscall/unix/net_js.go b/src/internal/syscall/unix/net_js.go
new file mode 100644
index 0000000..622fc8e
--- /dev/null
+++ b/src/internal/syscall/unix/net_js.go
@@ -0,0 +1,44 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js
+
+package unix
+
+import (
+ "syscall"
+ _ "unsafe"
+)
+
+func RecvfromInet4(fd int, p []byte, flags int, from *syscall.SockaddrInet4) (int, error) {
+ return 0, syscall.ENOSYS
+}
+
+func RecvfromInet6(fd int, p []byte, flags int, from *syscall.SockaddrInet6) (n int, err error) {
+ return 0, syscall.ENOSYS
+}
+
+func SendtoInet4(fd int, p []byte, flags int, to *syscall.SockaddrInet4) (err error) {
+ return syscall.ENOSYS
+}
+
+func SendtoInet6(fd int, p []byte, flags int, to *syscall.SockaddrInet6) (err error) {
+ return syscall.ENOSYS
+}
+
+func SendmsgNInet4(fd int, p, oob []byte, to *syscall.SockaddrInet4, flags int) (n int, err error) {
+ return 0, syscall.ENOSYS
+}
+
+func SendmsgNInet6(fd int, p, oob []byte, to *syscall.SockaddrInet6, flags int) (n int, err error) {
+ return 0, syscall.ENOSYS
+}
+
+func RecvmsgInet4(fd int, p, oob []byte, flags int, from *syscall.SockaddrInet4) (n, oobn int, recvflags int, err error) {
+ return 0, 0, 0, syscall.ENOSYS
+}
+
+func RecvmsgInet6(fd int, p, oob []byte, flags int, from *syscall.SockaddrInet6) (n, oobn int, recvflags int, err error) {
+ return 0, 0, 0, syscall.ENOSYS
+}
diff --git a/src/internal/syscall/unix/net_wasip1.go b/src/internal/syscall/unix/net_wasip1.go
new file mode 100644
index 0000000..8a60e8f
--- /dev/null
+++ b/src/internal/syscall/unix/net_wasip1.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build wasip1
+
+package unix
+
+import (
+ "syscall"
+ _ "unsafe"
+)
+
+func RecvfromInet4(fd int, p []byte, flags int, from *syscall.SockaddrInet4) (int, error) {
+ return 0, syscall.ENOSYS
+}
+
+func RecvfromInet6(fd int, p []byte, flags int, from *syscall.SockaddrInet6) (n int, err error) {
+ return 0, syscall.ENOSYS
+}
+
+func SendtoInet4(fd int, p []byte, flags int, to *syscall.SockaddrInet4) (err error) {
+ return syscall.ENOSYS
+}
+
+func SendtoInet6(fd int, p []byte, flags int, to *syscall.SockaddrInet6) (err error) {
+ return syscall.ENOSYS
+}
+
+func SendmsgNInet4(fd int, p, oob []byte, to *syscall.SockaddrInet4, flags int) (n int, err error) {
+ return 0, syscall.ENOSYS
+}
+
+func SendmsgNInet6(fd int, p, oob []byte, to *syscall.SockaddrInet6, flags int) (n int, err error) {
+ return 0, syscall.ENOSYS
+}
+
+func RecvmsgInet4(fd int, p, oob []byte, flags int, from *syscall.SockaddrInet4) (n, oobn int, recvflags int, err error) {
+ return 0, 0, 0, syscall.ENOSYS
+}
+
+func RecvmsgInet6(fd int, p, oob []byte, flags int, from *syscall.SockaddrInet6) (n, oobn int, recvflags int, err error) {
+ return 0, 0, 0, syscall.ENOSYS
+}
diff --git a/src/internal/syscall/unix/nonblocking_js.go b/src/internal/syscall/unix/nonblocking_js.go
new file mode 100644
index 0000000..cfe78c5
--- /dev/null
+++ b/src/internal/syscall/unix/nonblocking_js.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js && wasm
+
+package unix
+
+func IsNonblock(fd int) (nonblocking bool, err error) {
+ return false, nil
+}
+
+func HasNonblockFlag(flag int) bool {
+ return false
+}
diff --git a/src/internal/syscall/unix/nonblocking_unix.go b/src/internal/syscall/unix/nonblocking_unix.go
new file mode 100644
index 0000000..fc0bc27
--- /dev/null
+++ b/src/internal/syscall/unix/nonblocking_unix.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package unix
+
+import "syscall"
+
+func IsNonblock(fd int) (nonblocking bool, err error) {
+ flag, e1 := Fcntl(fd, syscall.F_GETFL, 0)
+ if e1 != nil {
+ return false, e1
+ }
+ return flag&syscall.O_NONBLOCK != 0, nil
+}
+
+func HasNonblockFlag(flag int) bool {
+ return flag&syscall.O_NONBLOCK != 0
+}
diff --git a/src/internal/syscall/unix/nonblocking_wasip1.go b/src/internal/syscall/unix/nonblocking_wasip1.go
new file mode 100644
index 0000000..5b2b53b
--- /dev/null
+++ b/src/internal/syscall/unix/nonblocking_wasip1.go
@@ -0,0 +1,31 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build wasip1
+
+package unix
+
+import (
+ "syscall"
+ _ "unsafe" // for go:linkname
+)
+
+func IsNonblock(fd int) (nonblocking bool, err error) {
+ flags, e1 := fd_fdstat_get_flags(fd)
+ if e1 != nil {
+ return false, e1
+ }
+ return flags&syscall.FDFLAG_NONBLOCK != 0, nil
+}
+
+func HasNonblockFlag(flag int) bool {
+ return flag&syscall.FDFLAG_NONBLOCK != 0
+}
+
+// This helper is implemented in the syscall package. It means we don't have
+// to redefine the fd_fdstat_get host import or the fdstat struct it
+// populates.
+//
+//go:linkname fd_fdstat_get_flags syscall.fd_fdstat_get_flags
+func fd_fdstat_get_flags(fd int) (uint32, error)
diff --git a/src/internal/syscall/unix/pty_darwin.go b/src/internal/syscall/unix/pty_darwin.go
new file mode 100644
index 0000000..b43321a
--- /dev/null
+++ b/src/internal/syscall/unix/pty_darwin.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+//go:cgo_import_dynamic libc_grantpt grantpt "/usr/lib/libSystem.B.dylib"
+func libc_grantpt_trampoline()
+
+func Grantpt(fd int) error {
+ _, _, errno := syscall_syscall6(abi.FuncPCABI0(libc_grantpt_trampoline), uintptr(fd), 0, 0, 0, 0, 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+//go:cgo_import_dynamic libc_unlockpt unlockpt "/usr/lib/libSystem.B.dylib"
+func libc_unlockpt_trampoline()
+
+func Unlockpt(fd int) error {
+ _, _, errno := syscall_syscall6(abi.FuncPCABI0(libc_unlockpt_trampoline), uintptr(fd), 0, 0, 0, 0, 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+//go:cgo_import_dynamic libc_ptsname_r ptsname_r "/usr/lib/libSystem.B.dylib"
+func libc_ptsname_r_trampoline()
+
+func Ptsname(fd int) (string, error) {
+ buf := make([]byte, 256)
+ _, _, errno := syscall_syscall6(abi.FuncPCABI0(libc_ptsname_r_trampoline),
+ uintptr(fd),
+ uintptr(unsafe.Pointer(&buf[0])),
+ uintptr(len(buf)-1),
+ 0, 0, 0)
+ if errno != 0 {
+ return "", errno
+ }
+ for i, c := range buf {
+ if c == 0 {
+ buf = buf[:i]
+ break
+ }
+ }
+ return string(buf), nil
+}
+
+//go:cgo_import_dynamic libc_posix_openpt posix_openpt "/usr/lib/libSystem.B.dylib"
+func libc_posix_openpt_trampoline()
+
+func PosixOpenpt(flag int) (fd int, err error) {
+ ufd, _, errno := syscall_syscall6(abi.FuncPCABI0(libc_posix_openpt_trampoline), uintptr(flag), 0, 0, 0, 0, 0)
+ if errno != 0 {
+ return -1, errno
+ }
+ return int(ufd), nil
+}
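
These four helpers follow the usual POSIX pseudo-terminal handshake: open the controller, grant and unlock the replica, then ask for its path. A compact sketch of that sequence (the wrapper name and package are made up; no claim is made about how the internal callers actually use it):

package example // hypothetical wrapper around the internal helpers

import (
	"syscall"

	"internal/syscall/unix"
)

// openPTY opens a pseudo-terminal pair and returns the controller fd
// together with the path of the replica device (e.g. /dev/ttys003).
func openPTY() (controllerFD int, replicaPath string, err error) {
	fd, err := unix.PosixOpenpt(syscall.O_RDWR)
	if err != nil {
		return -1, "", err
	}
	// Both grantpt and unlockpt must succeed before the replica is usable.
	if err := unix.Grantpt(fd); err != nil {
		syscall.Close(fd)
		return -1, "", err
	}
	if err := unix.Unlockpt(fd); err != nil {
		syscall.Close(fd)
		return -1, "", err
	}
	name, err := unix.Ptsname(fd)
	if err != nil {
		syscall.Close(fd)
		return -1, "", err
	}
	return fd, name, nil
}
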
diff --git a/src/internal/syscall/unix/sysnum_linux_386.go b/src/internal/syscall/unix/sysnum_linux_386.go
new file mode 100644
index 0000000..2bda08c
--- /dev/null
+++ b/src/internal/syscall/unix/sysnum_linux_386.go
@@ -0,0 +1,10 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const (
+ getrandomTrap uintptr = 355
+ copyFileRangeTrap uintptr = 377
+)
diff --git a/src/internal/syscall/unix/sysnum_linux_amd64.go b/src/internal/syscall/unix/sysnum_linux_amd64.go
new file mode 100644
index 0000000..ae5239e
--- /dev/null
+++ b/src/internal/syscall/unix/sysnum_linux_amd64.go
@@ -0,0 +1,10 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const (
+ getrandomTrap uintptr = 318
+ copyFileRangeTrap uintptr = 326
+)
diff --git a/src/internal/syscall/unix/sysnum_linux_arm.go b/src/internal/syscall/unix/sysnum_linux_arm.go
new file mode 100644
index 0000000..acaec05
--- /dev/null
+++ b/src/internal/syscall/unix/sysnum_linux_arm.go
@@ -0,0 +1,10 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const (
+ getrandomTrap uintptr = 384
+ copyFileRangeTrap uintptr = 391
+)
diff --git a/src/internal/syscall/unix/sysnum_linux_generic.go b/src/internal/syscall/unix/sysnum_linux_generic.go
new file mode 100644
index 0000000..8c132c6
--- /dev/null
+++ b/src/internal/syscall/unix/sysnum_linux_generic.go
@@ -0,0 +1,16 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (arm64 || loong64 || riscv64)
+
+package unix
+
+// This file is named "generic" because at a certain point Linux started
+// standardizing on system call numbers across architectures. So far this
+// means only arm64, loong64, and riscv64 use the standard numbers.
+
+const (
+ getrandomTrap uintptr = 278
+ copyFileRangeTrap uintptr = 285
+)
diff --git a/src/internal/syscall/unix/sysnum_linux_mips64x.go b/src/internal/syscall/unix/sysnum_linux_mips64x.go
new file mode 100644
index 0000000..bca526d
--- /dev/null
+++ b/src/internal/syscall/unix/sysnum_linux_mips64x.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips64 || mips64le
+
+package unix
+
+const (
+ getrandomTrap uintptr = 5313
+ copyFileRangeTrap uintptr = 5320
+)
diff --git a/src/internal/syscall/unix/sysnum_linux_mipsx.go b/src/internal/syscall/unix/sysnum_linux_mipsx.go
new file mode 100644
index 0000000..c86195e
--- /dev/null
+++ b/src/internal/syscall/unix/sysnum_linux_mipsx.go
@@ -0,0 +1,12 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build mips || mipsle
+
+package unix
+
+const (
+ getrandomTrap uintptr = 4353
+ copyFileRangeTrap uintptr = 4360
+)
diff --git a/src/internal/syscall/unix/sysnum_linux_ppc64x.go b/src/internal/syscall/unix/sysnum_linux_ppc64x.go
new file mode 100644
index 0000000..a4dcf2b
--- /dev/null
+++ b/src/internal/syscall/unix/sysnum_linux_ppc64x.go
@@ -0,0 +1,12 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+package unix
+
+const (
+ getrandomTrap uintptr = 359
+ copyFileRangeTrap uintptr = 379
+)
diff --git a/src/internal/syscall/unix/sysnum_linux_s390x.go b/src/internal/syscall/unix/sysnum_linux_s390x.go
new file mode 100644
index 0000000..bf2c01e
--- /dev/null
+++ b/src/internal/syscall/unix/sysnum_linux_s390x.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const (
+ getrandomTrap uintptr = 349
+ copyFileRangeTrap uintptr = 375
+)
diff --git a/src/internal/syscall/unix/user_darwin.go b/src/internal/syscall/unix/user_darwin.go
new file mode 100644
index 0000000..d05acda
--- /dev/null
+++ b/src/internal/syscall/unix/user_darwin.go
@@ -0,0 +1,121 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "internal/abi"
+ "syscall"
+ "unsafe"
+)
+
+//go:cgo_import_dynamic libc_getgrouplist getgrouplist "/usr/lib/libSystem.B.dylib"
+func libc_getgrouplist_trampoline()
+
+func Getgrouplist(name *byte, gid uint32, gids *uint32, n *int32) error {
+ _, _, errno := syscall_syscall6(abi.FuncPCABI0(libc_getgrouplist_trampoline),
+ uintptr(unsafe.Pointer(name)), uintptr(gid), uintptr(unsafe.Pointer(gids)),
+ uintptr(unsafe.Pointer(n)), 0, 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+const (
+ SC_GETGR_R_SIZE_MAX = 0x46
+ SC_GETPW_R_SIZE_MAX = 0x47
+)
+
+type Passwd struct {
+ Name *byte
+ Passwd *byte
+ Uid uint32 // uid_t
+ Gid uint32 // gid_t
+ Change int64 // time_t
+ Class *byte
+ Gecos *byte
+ Dir *byte
+ Shell *byte
+ Expire int64 // time_t
+}
+
+type Group struct {
+ Name *byte
+ Passwd *byte
+ Gid uint32 // gid_t
+ Mem **byte
+}
+
+//go:cgo_import_dynamic libc_getpwnam_r getpwnam_r "/usr/lib/libSystem.B.dylib"
+func libc_getpwnam_r_trampoline()
+
+func Getpwnam(name *byte, pwd *Passwd, buf *byte, size uintptr, result **Passwd) syscall.Errno {
+ // Note: Returns an errno as its actual result, not in global errno.
+ errno, _, _ := syscall_syscall6(abi.FuncPCABI0(libc_getpwnam_r_trampoline),
+ uintptr(unsafe.Pointer(name)),
+ uintptr(unsafe.Pointer(pwd)),
+ uintptr(unsafe.Pointer(buf)),
+ size,
+ uintptr(unsafe.Pointer(result)),
+ 0)
+ return syscall.Errno(errno)
+}
+
+//go:cgo_import_dynamic libc_getpwuid_r getpwuid_r "/usr/lib/libSystem.B.dylib"
+func libc_getpwuid_r_trampoline()
+
+func Getpwuid(uid uint32, pwd *Passwd, buf *byte, size uintptr, result **Passwd) syscall.Errno {
+ // Note: Returns an errno as its actual result, not in global errno.
+ errno, _, _ := syscall_syscall6(abi.FuncPCABI0(libc_getpwuid_r_trampoline),
+ uintptr(uid),
+ uintptr(unsafe.Pointer(pwd)),
+ uintptr(unsafe.Pointer(buf)),
+ size,
+ uintptr(unsafe.Pointer(result)),
+ 0)
+ return syscall.Errno(errno)
+}
+
+//go:cgo_import_dynamic libc_getgrnam_r getgrnam_r "/usr/lib/libSystem.B.dylib"
+func libc_getgrnam_r_trampoline()
+
+func Getgrnam(name *byte, grp *Group, buf *byte, size uintptr, result **Group) syscall.Errno {
+ // Note: Returns an errno as its actual result, not in global errno.
+ errno, _, _ := syscall_syscall6(abi.FuncPCABI0(libc_getgrnam_r_trampoline),
+ uintptr(unsafe.Pointer(name)),
+ uintptr(unsafe.Pointer(grp)),
+ uintptr(unsafe.Pointer(buf)),
+ size,
+ uintptr(unsafe.Pointer(result)),
+ 0)
+ return syscall.Errno(errno)
+}
+
+//go:cgo_import_dynamic libc_getgrgid_r getgrgid_r "/usr/lib/libSystem.B.dylib"
+func libc_getgrgid_r_trampoline()
+
+func Getgrgid(gid uint32, grp *Group, buf *byte, size uintptr, result **Group) syscall.Errno {
+ // Note: Returns an errno as its actual result, not in global errno.
+ errno, _, _ := syscall_syscall6(abi.FuncPCABI0(libc_getgrgid_r_trampoline),
+ uintptr(gid),
+ uintptr(unsafe.Pointer(grp)),
+ uintptr(unsafe.Pointer(buf)),
+ size,
+ uintptr(unsafe.Pointer(result)),
+ 0)
+ return syscall.Errno(errno)
+}
+
+//go:cgo_import_dynamic libc_sysconf sysconf "/usr/lib/libSystem.B.dylib"
+func libc_sysconf_trampoline()
+
+func Sysconf(key int32) int64 {
+ val, _, errno := syscall_syscall6X(abi.FuncPCABI0(libc_sysconf_trampoline),
+ uintptr(key), 0, 0, 0, 0, 0)
+ if errno != 0 {
+ return -1
+ }
+ return int64(val)
+}
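
The getpwnam_r family expects the caller to supply the scratch buffer, so the usual pattern is: size it from Sysconf, retry on ERANGE, and treat a nil result with a zero errno as "not found". A hedged sketch of that pattern (os/user is the real consumer; the helper name is made up):

package example // hypothetical; os/user is the real caller

import (
	"fmt"
	"syscall"

	"internal/syscall/unix"
)

// lookupUID returns the numeric uid for a login name.
func lookupUID(username string) (uint32, error) {
	namep, err := syscall.BytePtrFromString(username)
	if err != nil {
		return 0, err
	}
	size := unix.Sysconf(unix.SC_GETPW_R_SIZE_MAX)
	if size <= 0 {
		size = 1024 // arbitrary fallback when sysconf gives no answer
	}
	for {
		buf := make([]byte, size)
		var pwd unix.Passwd
		var result *unix.Passwd
		errno := unix.Getpwnam(namep, &pwd, &buf[0], uintptr(len(buf)), &result)
		if errno == syscall.ERANGE {
			size *= 2 // buffer too small: grow and retry
			continue
		}
		if errno != 0 {
			return 0, errno
		}
		if result == nil {
			return 0, fmt.Errorf("user %q not found", username)
		}
		return pwd.Uid, nil
	}
}
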
diff --git a/src/internal/syscall/windows/exec_windows_test.go b/src/internal/syscall/windows/exec_windows_test.go
new file mode 100644
index 0000000..3311da5
--- /dev/null
+++ b/src/internal/syscall/windows/exec_windows_test.go
@@ -0,0 +1,139 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package windows_test
+
+import (
+ "fmt"
+ "internal/syscall/windows"
+ "os"
+ "os/exec"
+ "syscall"
+ "testing"
+ "unsafe"
+)
+
+func TestRunAtLowIntegrity(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ wil, err := getProcessIntegrityLevel()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error: %s\n", err.Error())
+ os.Exit(9)
+ return
+ }
+ fmt.Printf("%s", wil)
+ os.Exit(0)
+ return
+ }
+
+ cmd := exec.Command(os.Args[0], "-test.run=TestRunAtLowIntegrity", "--")
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+
+ token, err := getIntegrityLevelToken(sidWilLow)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer token.Close()
+
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Token: token,
+ }
+
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(out) != sidWilLow {
+ t.Fatalf("Child process did not run as low integrity level: %s", string(out))
+ }
+}
+
+const (
+ sidWilLow = `S-1-16-4096`
+)
+
+func getProcessIntegrityLevel() (string, error) {
+ procToken, err := syscall.OpenCurrentProcessToken()
+ if err != nil {
+ return "", err
+ }
+ defer procToken.Close()
+
+ p, err := tokenGetInfo(procToken, syscall.TokenIntegrityLevel, 64)
+ if err != nil {
+ return "", err
+ }
+
+ tml := (*windows.TOKEN_MANDATORY_LABEL)(p)
+
+ sid := (*syscall.SID)(unsafe.Pointer(tml.Label.Sid))
+
+ return sid.String()
+}
+
+func tokenGetInfo(t syscall.Token, class uint32, initSize int) (unsafe.Pointer, error) {
+ n := uint32(initSize)
+ for {
+ b := make([]byte, n)
+ e := syscall.GetTokenInformation(t, class, &b[0], uint32(len(b)), &n)
+ if e == nil {
+ return unsafe.Pointer(&b[0]), nil
+ }
+ if e != syscall.ERROR_INSUFFICIENT_BUFFER {
+ return nil, e
+ }
+ if n <= uint32(len(b)) {
+ return nil, e
+ }
+ }
+}
+
+func getIntegrityLevelToken(wns string) (syscall.Token, error) {
+ var procToken, token syscall.Token
+
+ proc, err := syscall.GetCurrentProcess()
+ if err != nil {
+ return 0, err
+ }
+ defer syscall.CloseHandle(proc)
+
+ err = syscall.OpenProcessToken(proc,
+ syscall.TOKEN_DUPLICATE|
+ syscall.TOKEN_ADJUST_DEFAULT|
+ syscall.TOKEN_QUERY|
+ syscall.TOKEN_ASSIGN_PRIMARY,
+ &procToken)
+ if err != nil {
+ return 0, err
+ }
+ defer procToken.Close()
+
+ sid, err := syscall.StringToSid(wns)
+ if err != nil {
+ return 0, err
+ }
+
+ tml := &windows.TOKEN_MANDATORY_LABEL{}
+ tml.Label.Attributes = windows.SE_GROUP_INTEGRITY
+ tml.Label.Sid = sid
+
+ err = windows.DuplicateTokenEx(procToken, 0, nil, windows.SecurityImpersonation,
+ windows.TokenPrimary, &token)
+ if err != nil {
+ return 0, err
+ }
+
+ err = windows.SetTokenInformation(token,
+ syscall.TokenIntegrityLevel,
+ uintptr(unsafe.Pointer(tml)),
+ tml.Size())
+ if err != nil {
+ token.Close()
+ return 0, err
+ }
+ return token, nil
+}
diff --git a/src/internal/syscall/windows/memory_windows.go b/src/internal/syscall/windows/memory_windows.go
new file mode 100644
index 0000000..8fb34cf
--- /dev/null
+++ b/src/internal/syscall/windows/memory_windows.go
@@ -0,0 +1,24 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type MemoryBasicInformation struct {
+ // A pointer to the base address of the region of pages.
+ BaseAddress uintptr
+ // A pointer to the base address of a range of pages allocated by the VirtualAlloc function.
+ // The page pointed to by the BaseAddress member is contained within this allocation range.
+ AllocationBase uintptr
+ // The memory protection option when the region was initially allocated
+ AllocationProtect uint32
+ PartitionId uint16
+ // The size of the region beginning at the base address in which all pages have identical attributes, in bytes.
+ RegionSize uintptr
+ // The state of the pages in the region.
+ State uint32
+ // The access protection of the pages in the region.
+ Protect uint32
+ // The type of pages in the region.
+ Type uint32
+}
diff --git a/src/internal/syscall/windows/mksyscall.go b/src/internal/syscall/windows/mksyscall.go
new file mode 100644
index 0000000..81f08c6
--- /dev/null
+++ b/src/internal/syscall/windows/mksyscall.go
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build generate
+
+package windows
+
+//go:generate go run ../../../syscall/mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go security_windows.go psapi_windows.go symlink_windows.go
diff --git a/src/internal/syscall/windows/net_windows.go b/src/internal/syscall/windows/net_windows.go
new file mode 100644
index 0000000..42c600c
--- /dev/null
+++ b/src/internal/syscall/windows/net_windows.go
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import (
+ "sync"
+ "syscall"
+ _ "unsafe"
+)
+
+//go:linkname WSASendtoInet4 syscall.wsaSendtoInet4
+//go:noescape
+func WSASendtoInet4(s syscall.Handle, bufs *syscall.WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *syscall.SockaddrInet4, overlapped *syscall.Overlapped, croutine *byte) (err error)
+
+//go:linkname WSASendtoInet6 syscall.wsaSendtoInet6
+//go:noescape
+func WSASendtoInet6(s syscall.Handle, bufs *syscall.WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *syscall.SockaddrInet6, overlapped *syscall.Overlapped, croutine *byte) (err error)
+
+const (
+ SIO_TCP_INITIAL_RTO = syscall.IOC_IN | syscall.IOC_VENDOR | 17
+ TCP_INITIAL_RTO_UNSPECIFIED_RTT = ^uint16(0)
+ TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS = ^uint8(1)
+)
+
+type TCP_INITIAL_RTO_PARAMETERS struct {
+ Rtt uint16
+ MaxSynRetransmissions uint8
+}
+
+var Support_TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS = sync.OnceValue(func() bool {
+ var maj, min, build uint32
+ rtlGetNtVersionNumbers(&maj, &min, &build)
+ return maj >= 10 && build&0xffff >= 16299
+})
+
+//go:linkname rtlGetNtVersionNumbers syscall.rtlGetNtVersionNumbers
+//go:noescape
+func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32)
diff --git a/src/internal/syscall/windows/psapi_windows.go b/src/internal/syscall/windows/psapi_windows.go
new file mode 100644
index 0000000..b138e65
--- /dev/null
+++ b/src/internal/syscall/windows/psapi_windows.go
@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type PROCESS_MEMORY_COUNTERS struct {
+ CB uint32
+ PageFaultCount uint32
+ PeakWorkingSetSize uintptr
+ WorkingSetSize uintptr
+ QuotaPeakPagedPoolUsage uintptr
+ QuotaPagedPoolUsage uintptr
+ QuotaPeakNonPagedPoolUsage uintptr
+ QuotaNonPagedPoolUsage uintptr
+ PagefileUsage uintptr
+ PeakPagefileUsage uintptr
+}
+
+//sys GetProcessMemoryInfo(handle syscall.Handle, memCounters *PROCESS_MEMORY_COUNTERS, cb uint32) (err error) = psapi.GetProcessMemoryInfo
diff --git a/src/internal/syscall/windows/registry/export_test.go b/src/internal/syscall/windows/registry/export_test.go
new file mode 100644
index 0000000..7f1ac70
--- /dev/null
+++ b/src/internal/syscall/windows/registry/export_test.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+func (k Key) SetValue(name string, valtype uint32, data []byte) error {
+ return k.setValue(name, valtype, data)
+}
diff --git a/src/internal/syscall/windows/registry/key.go b/src/internal/syscall/windows/registry/key.go
new file mode 100644
index 0000000..ce6397f
--- /dev/null
+++ b/src/internal/syscall/windows/registry/key.go
@@ -0,0 +1,168 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+// Package registry provides access to the Windows registry.
+//
+// Here is a simple example, opening a registry key and reading a string value from it.
+//
+// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+// if err != nil {
+// log.Fatal(err)
+// }
+// defer k.Close()
+//
+// s, _, err := k.GetStringValue("SystemRoot")
+// if err != nil {
+// log.Fatal(err)
+// }
+// fmt.Printf("Windows system root is %q\n", s)
+//
+// NOTE: This package is a copy of golang.org/x/sys/windows/registry
+// with KeyInfo.ModTime removed to prevent dependency cycles.
+package registry
+
+import (
+ "runtime"
+ "syscall"
+)
+
+const (
+ // Registry key security and access rights.
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx
+ // for details.
+ ALL_ACCESS = 0xf003f
+ CREATE_LINK = 0x00020
+ CREATE_SUB_KEY = 0x00004
+ ENUMERATE_SUB_KEYS = 0x00008
+ EXECUTE = 0x20019
+ NOTIFY = 0x00010
+ QUERY_VALUE = 0x00001
+ READ = 0x20019
+ SET_VALUE = 0x00002
+ WOW64_32KEY = 0x00200
+ WOW64_64KEY = 0x00100
+ WRITE = 0x20006
+)
+
+// Key is a handle to an open Windows registry key.
+// Keys can be obtained by calling OpenKey; there are
+// also some predefined root keys such as CURRENT_USER.
+// Keys can be used directly in the Windows API.
+type Key syscall.Handle
+
+const (
+ // Windows defines some predefined root keys that are always open.
+ // An application can use these keys as entry points to the registry.
+ // Normally these keys are used in OpenKey to open new keys,
+ // but they can also be used anywhere a Key is required.
+ CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT)
+ CURRENT_USER = Key(syscall.HKEY_CURRENT_USER)
+ LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE)
+ USERS = Key(syscall.HKEY_USERS)
+ CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG)
+)
+
+// Close closes open key k.
+func (k Key) Close() error {
+ return syscall.RegCloseKey(syscall.Handle(k))
+}
+
+// OpenKey opens a new key with path name relative to key k.
+// It accepts any open key, including CURRENT_USER and others,
+// and returns the new key and an error.
+// The access parameter specifies desired access rights to the
+// key to be opened.
+func OpenKey(k Key, path string, access uint32) (Key, error) {
+ p, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return 0, err
+ }
+ var subkey syscall.Handle
+ err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey)
+ if err != nil {
+ return 0, err
+ }
+ return Key(subkey), nil
+}
+
+// ReadSubKeyNames returns the names of subkeys of key k.
+func (k Key) ReadSubKeyNames() ([]string, error) {
+ // RegEnumKeyEx must be called repeatedly and to completion.
+ // During this time, this goroutine cannot migrate away from
+ // its current thread. See #49320.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ names := make([]string, 0)
+ // The registry key name length limit is 255 characters, as described at:
+ // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
+ buf := make([]uint16, 256) // plus extra room for the terminating zero
+loopItems:
+ for i := uint32(0); ; i++ {
+ l := uint32(len(buf))
+ for {
+ err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+ if err == nil {
+ break
+ }
+ if err == syscall.ERROR_MORE_DATA {
+ // Double buffer size and try again.
+ l = uint32(2 * len(buf))
+ buf = make([]uint16, l)
+ continue
+ }
+ if err == _ERROR_NO_MORE_ITEMS {
+ break loopItems
+ }
+ return names, err
+ }
+ names = append(names, syscall.UTF16ToString(buf[:l]))
+ }
+ return names, nil
+}
+
+// CreateKey creates a key named path under open key k.
+// CreateKey returns the new key and a boolean flag that reports
+// whether the key already existed.
+// The access parameter specifies the access rights for the key
+// to be created.
+func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
+ var h syscall.Handle
+ var d uint32
+ err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path),
+ 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
+ if err != nil {
+ return 0, false, err
+ }
+ return Key(h), d == _REG_OPENED_EXISTING_KEY, nil
+}
+
+// DeleteKey deletes the subkey path of key k and its values.
+func DeleteKey(k Key, path string) error {
+ return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path))
+}
+
+// A KeyInfo describes the statistics of a key. It is returned by Stat.
+type KeyInfo struct {
+ SubKeyCount uint32
+ MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte
+ ValueCount uint32
+ MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte
+ MaxValueLen uint32 // longest data component among the key's values, in bytes
+ lastWriteTime syscall.Filetime
+}
+
+// Stat retrieves information about the open key k.
+func (k Key) Stat() (*KeyInfo, error) {
+ var ki KeyInfo
+ err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil,
+ &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount,
+ &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime)
+ if err != nil {
+ return nil, err
+ }
+ return &ki, nil
+}
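
The package comment above shows the read-only path; the write-side API composes the same way. A short illustrative sketch that creates, inspects, and removes a scratch key (the key path is invented for the example):

package example // hypothetical; real users are other internal packages

import (
	"fmt"
	"log"

	"internal/syscall/windows/registry"
)

func scratchKeyDemo() {
	// Create (or reopen) a throwaway key under HKCU\Software.
	k, existed, err := registry.CreateKey(registry.CURRENT_USER,
		`Software\GoScratchExample`, registry.ALL_ACCESS)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()
	fmt.Println("already existed:", existed)

	info, err := k.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("subkeys:", info.SubKeyCount, "values:", info.ValueCount)

	// Clean up the scratch key again.
	if err := registry.DeleteKey(registry.CURRENT_USER, `Software\GoScratchExample`); err != nil {
		log.Fatal(err)
	}
}
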
diff --git a/src/internal/syscall/windows/registry/mksyscall.go b/src/internal/syscall/windows/registry/mksyscall.go
new file mode 100644
index 0000000..0e0b421
--- /dev/null
+++ b/src/internal/syscall/windows/registry/mksyscall.go
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build generate
+
+package registry
+
+//go:generate go run ../../../../syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go
diff --git a/src/internal/syscall/windows/registry/registry_test.go b/src/internal/syscall/windows/registry/registry_test.go
new file mode 100644
index 0000000..278b0b4
--- /dev/null
+++ b/src/internal/syscall/windows/registry/registry_test.go
@@ -0,0 +1,672 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry_test
+
+import (
+ "bytes"
+ "crypto/rand"
+ "os"
+ "syscall"
+ "testing"
+ "unsafe"
+
+ "internal/syscall/windows/registry"
+)
+
+func randKeyName(prefix string) string {
+ const numbers = "0123456789"
+ buf := make([]byte, 10)
+ rand.Read(buf)
+ for i, b := range buf {
+ buf[i] = numbers[b%byte(len(numbers))]
+ }
+ return prefix + string(buf)
+}
+
+func TestReadSubKeyNames(t *testing.T) {
+ k, err := registry.OpenKey(registry.CLASSES_ROOT, "TypeLib", registry.ENUMERATE_SUB_KEYS)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer k.Close()
+
+ names, err := k.ReadSubKeyNames()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var foundStdOle bool
+ for _, name := range names {
+ // Every PC has the "stdole 2.0 OLE Automation" library installed.
+ if name == "{00020430-0000-0000-C000-000000000046}" {
+ foundStdOle = true
+ }
+ }
+ if !foundStdOle {
+ t.Fatal("could not find stdole 2.0 OLE Automation")
+ }
+}
+
+func TestCreateOpenDeleteKey(t *testing.T) {
+ k, err := registry.OpenKey(registry.CURRENT_USER, "Software", registry.QUERY_VALUE)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer k.Close()
+
+ testKName := randKeyName("TestCreateOpenDeleteKey_")
+
+ testK, exist, err := registry.CreateKey(k, testKName, registry.CREATE_SUB_KEY)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer testK.Close()
+
+ if exist {
+ t.Fatalf("key %q already exists", testKName)
+ }
+
+ testKAgain, exist, err := registry.CreateKey(k, testKName, registry.CREATE_SUB_KEY)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer testKAgain.Close()
+
+ if !exist {
+ t.Fatalf("key %q should already exist", testKName)
+ }
+
+ testKOpened, err := registry.OpenKey(k, testKName, registry.ENUMERATE_SUB_KEYS)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer testKOpened.Close()
+
+ err = registry.DeleteKey(k, testKName)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testKOpenedAgain, err := registry.OpenKey(k, testKName, registry.ENUMERATE_SUB_KEYS)
+ if err == nil {
+ defer testKOpenedAgain.Close()
+ t.Fatalf("key %q should already been deleted", testKName)
+ }
+ if err != registry.ErrNotExist {
+ t.Fatalf(`unexpected error ("not exist" expected): %v`, err)
+ }
+}
+
+func equalStringSlice(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ if a == nil {
+ return true
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+type ValueTest struct {
+ Type uint32
+ Name string
+ Value any
+ WillFail bool
+}
+
+var ValueTests = []ValueTest{
+ {Type: registry.SZ, Name: "String1", Value: ""},
+ {Type: registry.SZ, Name: "String2", Value: "\000", WillFail: true},
+ {Type: registry.SZ, Name: "String3", Value: "Hello World"},
+ {Type: registry.SZ, Name: "String4", Value: "Hello World\000", WillFail: true},
+ {Type: registry.EXPAND_SZ, Name: "ExpString1", Value: ""},
+ {Type: registry.EXPAND_SZ, Name: "ExpString2", Value: "\000", WillFail: true},
+ {Type: registry.EXPAND_SZ, Name: "ExpString3", Value: "Hello World"},
+ {Type: registry.EXPAND_SZ, Name: "ExpString4", Value: "Hello\000World", WillFail: true},
+ {Type: registry.EXPAND_SZ, Name: "ExpString5", Value: "%PATH%"},
+ {Type: registry.EXPAND_SZ, Name: "ExpString6", Value: "%NO_SUCH_VARIABLE%"},
+ {Type: registry.EXPAND_SZ, Name: "ExpString7", Value: "%PATH%;."},
+ {Type: registry.BINARY, Name: "Binary1", Value: []byte{}},
+ {Type: registry.BINARY, Name: "Binary2", Value: []byte{1, 2, 3}},
+ {Type: registry.BINARY, Name: "Binary3", Value: []byte{3, 2, 1, 0, 1, 2, 3}},
+ {Type: registry.DWORD, Name: "Dword1", Value: uint64(0)},
+ {Type: registry.DWORD, Name: "Dword2", Value: uint64(1)},
+ {Type: registry.DWORD, Name: "Dword3", Value: uint64(0xff)},
+ {Type: registry.DWORD, Name: "Dword4", Value: uint64(0xffff)},
+ {Type: registry.QWORD, Name: "Qword1", Value: uint64(0)},
+ {Type: registry.QWORD, Name: "Qword2", Value: uint64(1)},
+ {Type: registry.QWORD, Name: "Qword3", Value: uint64(0xff)},
+ {Type: registry.QWORD, Name: "Qword4", Value: uint64(0xffff)},
+ {Type: registry.QWORD, Name: "Qword5", Value: uint64(0xffffff)},
+ {Type: registry.QWORD, Name: "Qword6", Value: uint64(0xffffffff)},
+ {Type: registry.MULTI_SZ, Name: "MultiString1", Value: []string{"a", "b", "c"}},
+ {Type: registry.MULTI_SZ, Name: "MultiString2", Value: []string{"abc", "", "cba"}},
+ {Type: registry.MULTI_SZ, Name: "MultiString3", Value: []string{""}},
+ {Type: registry.MULTI_SZ, Name: "MultiString4", Value: []string{"abcdef"}},
+ {Type: registry.MULTI_SZ, Name: "MultiString5", Value: []string{"\000"}, WillFail: true},
+ {Type: registry.MULTI_SZ, Name: "MultiString6", Value: []string{"a\000b"}, WillFail: true},
+ {Type: registry.MULTI_SZ, Name: "MultiString7", Value: []string{"ab", "\000", "cd"}, WillFail: true},
+ {Type: registry.MULTI_SZ, Name: "MultiString8", Value: []string{"\000", "cd"}, WillFail: true},
+ {Type: registry.MULTI_SZ, Name: "MultiString9", Value: []string{"ab", "\000"}, WillFail: true},
+}
+
+func setValues(t *testing.T, k registry.Key) {
+ for _, test := range ValueTests {
+ var err error
+ switch test.Type {
+ case registry.SZ:
+ err = k.SetStringValue(test.Name, test.Value.(string))
+ case registry.EXPAND_SZ:
+ err = k.SetExpandStringValue(test.Name, test.Value.(string))
+ case registry.MULTI_SZ:
+ err = k.SetStringsValue(test.Name, test.Value.([]string))
+ case registry.BINARY:
+ err = k.SetBinaryValue(test.Name, test.Value.([]byte))
+ case registry.DWORD:
+ err = k.SetDWordValue(test.Name, uint32(test.Value.(uint64)))
+ case registry.QWORD:
+ err = k.SetQWordValue(test.Name, test.Value.(uint64))
+ default:
+ t.Fatalf("unsupported type %d for %s value", test.Type, test.Name)
+ }
+ if test.WillFail {
+ if err == nil {
+ t.Fatalf("setting %s value %q should fail, but succeeded", test.Name, test.Value)
+ }
+ } else {
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+}
+
+func enumerateValues(t *testing.T, k registry.Key) {
+ names, err := k.ReadValueNames()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ haveNames := make(map[string]bool)
+ for _, n := range names {
+ haveNames[n] = false
+ }
+ for _, test := range ValueTests {
+ wantFound := !test.WillFail
+ _, haveFound := haveNames[test.Name]
+ if wantFound && !haveFound {
+ t.Errorf("value %s is not found while enumerating", test.Name)
+ }
+ if haveFound && !wantFound {
+ t.Errorf("value %s is found while enumerating, but expected to fail", test.Name)
+ }
+ if haveFound {
+ delete(haveNames, test.Name)
+ }
+ }
+ for n, v := range haveNames {
+ t.Errorf("value %s (%v) is found while enumerating, but has not been created", n, v)
+ }
+}
+
+func testErrNotExist(t *testing.T, name string, err error) {
+ if err == nil {
+ t.Errorf("%s value should not exist", name)
+ return
+ }
+ if err != registry.ErrNotExist {
+ t.Errorf("reading %s value should return 'not exist' error, but got: %s", name, err)
+ return
+ }
+}
+
+func testErrUnexpectedType(t *testing.T, test ValueTest, gottype uint32, err error) {
+ if err == nil {
+ t.Errorf("GetXValue(%q) should not succeed", test.Name)
+ return
+ }
+ if err != registry.ErrUnexpectedType {
+ t.Errorf("reading %s value should return 'unexpected key value type' error, but got: %s", test.Name, err)
+ return
+ }
+ if gottype != test.Type {
+ t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype)
+ return
+ }
+}
+
+func testGetStringValue(t *testing.T, k registry.Key, test ValueTest) {
+ got, gottype, err := k.GetStringValue(test.Name)
+ if err != nil {
+ t.Errorf("GetStringValue(%s) failed: %v", test.Name, err)
+ return
+ }
+ if got != test.Value {
+ t.Errorf("want %s value %q, got %q", test.Name, test.Value, got)
+ return
+ }
+ if gottype != test.Type {
+ t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype)
+ return
+ }
+ if gottype == registry.EXPAND_SZ {
+ _, err = registry.ExpandString(got)
+ if err != nil {
+ t.Errorf("ExpandString(%s) failed: %v", got, err)
+ return
+ }
+ }
+}
+
+func testGetIntegerValue(t *testing.T, k registry.Key, test ValueTest) {
+ got, gottype, err := k.GetIntegerValue(test.Name)
+ if err != nil {
+ t.Errorf("GetIntegerValue(%s) failed: %v", test.Name, err)
+ return
+ }
+ if got != test.Value.(uint64) {
+ t.Errorf("want %s value %v, got %v", test.Name, test.Value, got)
+ return
+ }
+ if gottype != test.Type {
+ t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype)
+ return
+ }
+}
+
+func testGetBinaryValue(t *testing.T, k registry.Key, test ValueTest) {
+ got, gottype, err := k.GetBinaryValue(test.Name)
+ if err != nil {
+ t.Errorf("GetBinaryValue(%s) failed: %v", test.Name, err)
+ return
+ }
+ if !bytes.Equal(got, test.Value.([]byte)) {
+ t.Errorf("want %s value %v, got %v", test.Name, test.Value, got)
+ return
+ }
+ if gottype != test.Type {
+ t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype)
+ return
+ }
+}
+
+func testGetStringsValue(t *testing.T, k registry.Key, test ValueTest) {
+ got, gottype, err := k.GetStringsValue(test.Name)
+ if err != nil {
+ t.Errorf("GetStringsValue(%s) failed: %v", test.Name, err)
+ return
+ }
+ if !equalStringSlice(got, test.Value.([]string)) {
+ t.Errorf("want %s value %#v, got %#v", test.Name, test.Value, got)
+ return
+ }
+ if gottype != test.Type {
+ t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype)
+ return
+ }
+}
+
+func testGetValue(t *testing.T, k registry.Key, test ValueTest, size int) {
+ if size <= 0 {
+ return
+ }
+ // read data with no buffer
+ gotsize, gottype, err := k.GetValue(test.Name, nil)
+ if err != nil {
+ t.Errorf("GetValue(%s, [%d]byte) failed: %v", test.Name, size, err)
+ return
+ }
+ if gotsize != size {
+ t.Errorf("want %s value size of %d, got %v", test.Name, size, gotsize)
+ return
+ }
+ if gottype != test.Type {
+ t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype)
+ return
+ }
+ // read data with short buffer
+ gotsize, gottype, err = k.GetValue(test.Name, make([]byte, size-1))
+ if err == nil {
+ t.Errorf("GetValue(%s, [%d]byte) should fail, but succeeded", test.Name, size-1)
+ return
+ }
+ if err != registry.ErrShortBuffer {
+ t.Errorf("reading %s value should return 'short buffer' error, but got: %s", test.Name, err)
+ return
+ }
+ if gotsize != size {
+ t.Errorf("want %s value size of %d, got %v", test.Name, size, gotsize)
+ return
+ }
+ if gottype != test.Type {
+ t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype)
+ return
+ }
+ // read full data
+ gotsize, gottype, err = k.GetValue(test.Name, make([]byte, size))
+ if err != nil {
+ t.Errorf("GetValue(%s, [%d]byte) failed: %v", test.Name, size, err)
+ return
+ }
+ if gotsize != size {
+ t.Errorf("want %s value size of %d, got %v", test.Name, size, gotsize)
+ return
+ }
+ if gottype != test.Type {
+ t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype)
+ return
+ }
+ // check GetValue returns ErrNotExist as required
+ _, _, err = k.GetValue(test.Name+"_not_there", make([]byte, size))
+ if err == nil {
+ t.Errorf("GetValue(%q) should not succeed", test.Name)
+ return
+ }
+ if err != registry.ErrNotExist {
+ t.Errorf("GetValue(%q) should return 'not exist' error, but got: %s", test.Name, err)
+ return
+ }
+}
+
+func testValues(t *testing.T, k registry.Key) {
+ for _, test := range ValueTests {
+ switch test.Type {
+ case registry.SZ, registry.EXPAND_SZ:
+ if test.WillFail {
+ _, _, err := k.GetStringValue(test.Name)
+ testErrNotExist(t, test.Name, err)
+ } else {
+ testGetStringValue(t, k, test)
+ _, gottype, err := k.GetIntegerValue(test.Name)
+ testErrUnexpectedType(t, test, gottype, err)
+ // The byte size computed below for the UTF-16 string is only an
+ // approximation, but it is correct for the current test values.
+ // The size also includes the terminating NUL.
+ testGetValue(t, k, test, (len(test.Value.(string))+1)*2)
+ }
+ _, _, err := k.GetStringValue(test.Name + "_string_not_created")
+ testErrNotExist(t, test.Name+"_string_not_created", err)
+ case registry.DWORD, registry.QWORD:
+ testGetIntegerValue(t, k, test)
+ _, gottype, err := k.GetBinaryValue(test.Name)
+ testErrUnexpectedType(t, test, gottype, err)
+ _, _, err = k.GetIntegerValue(test.Name + "_int_not_created")
+ testErrNotExist(t, test.Name+"_int_not_created", err)
+ size := 8
+ if test.Type == registry.DWORD {
+ size = 4
+ }
+ testGetValue(t, k, test, size)
+ case registry.BINARY:
+ testGetBinaryValue(t, k, test)
+ _, gottype, err := k.GetStringsValue(test.Name)
+ testErrUnexpectedType(t, test, gottype, err)
+ _, _, err = k.GetBinaryValue(test.Name + "_byte_not_created")
+ testErrNotExist(t, test.Name+"_byte_not_created", err)
+ testGetValue(t, k, test, len(test.Value.([]byte)))
+ case registry.MULTI_SZ:
+ if test.WillFail {
+ _, _, err := k.GetStringsValue(test.Name)
+ testErrNotExist(t, test.Name, err)
+ } else {
+ testGetStringsValue(t, k, test)
+ _, gottype, err := k.GetStringValue(test.Name)
+ testErrUnexpectedType(t, test, gottype, err)
+ size := 0
+ for _, s := range test.Value.([]string) {
+ size += len(s) + 1 // NUL terminated
+ }
+ size += 1 // extra NUL at the end
+ size *= 2 // count bytes, not uint16
+ testGetValue(t, k, test, size)
+ }
+ _, _, err := k.GetStringsValue(test.Name + "_strings_not_created")
+ testErrNotExist(t, test.Name+"_strings_not_created", err)
+ default:
+ t.Errorf("unsupported type %d for %s value", test.Type, test.Name)
+ continue
+ }
+ }
+}
+
+func testStat(t *testing.T, k registry.Key) {
+ subk, _, err := registry.CreateKey(k, "subkey", registry.CREATE_SUB_KEY)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer subk.Close()
+
+ defer registry.DeleteKey(k, "subkey")
+
+ ki, err := k.Stat()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if ki.SubKeyCount != 1 {
+ t.Error("key must have 1 subkey")
+ }
+ if ki.MaxSubKeyLen != 6 {
+ t.Error("key max subkey name length must be 6")
+ }
+ if ki.ValueCount != 24 {
+ t.Errorf("key must have 24 values, but is %d", ki.ValueCount)
+ }
+ if ki.MaxValueNameLen != 12 {
+ t.Errorf("key max value name length must be 10, but is %d", ki.MaxValueNameLen)
+ }
+ if ki.MaxValueLen != 38 {
+ t.Errorf("key max value length must be 38, but is %d", ki.MaxValueLen)
+ }
+}
+
+func deleteValues(t *testing.T, k registry.Key) {
+ for _, test := range ValueTests {
+ if test.WillFail {
+ continue
+ }
+ err := k.DeleteValue(test.Name)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ }
+ names, err := k.ReadValueNames()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if len(names) != 0 {
+ t.Errorf("some values remain after deletion: %v", names)
+ }
+}
+
+func TestValues(t *testing.T) {
+ softwareK, err := registry.OpenKey(registry.CURRENT_USER, "Software", registry.QUERY_VALUE)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer softwareK.Close()
+
+ testKName := randKeyName("TestValues_")
+
+ k, exist, err := registry.CreateKey(softwareK, testKName, registry.CREATE_SUB_KEY|registry.QUERY_VALUE|registry.SET_VALUE)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer k.Close()
+
+ if exist {
+ t.Fatalf("key %q already exists", testKName)
+ }
+
+ defer registry.DeleteKey(softwareK, testKName)
+
+ setValues(t, k)
+
+ enumerateValues(t, k)
+
+ testValues(t, k)
+
+ testStat(t, k)
+
+ deleteValues(t, k)
+}
+
+func TestExpandString(t *testing.T) {
+ got, err := registry.ExpandString("%PATH%")
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := os.Getenv("PATH")
+ if got != want {
+ t.Errorf("want %q string expanded, got %q", want, got)
+ }
+}
+
+func TestInvalidValues(t *testing.T) {
+ softwareK, err := registry.OpenKey(registry.CURRENT_USER, "Software", registry.QUERY_VALUE)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer softwareK.Close()
+
+ testKName := randKeyName("TestInvalidValues_")
+
+ k, exist, err := registry.CreateKey(softwareK, testKName, registry.CREATE_SUB_KEY|registry.QUERY_VALUE|registry.SET_VALUE)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer k.Close()
+
+ if exist {
+ t.Fatalf("key %q already exists", testKName)
+ }
+
+ defer registry.DeleteKey(softwareK, testKName)
+
+ var tests = []struct {
+ Type uint32
+ Name string
+ Data []byte
+ }{
+ {registry.DWORD, "Dword1", nil},
+ {registry.DWORD, "Dword2", []byte{1, 2, 3}},
+ {registry.QWORD, "Qword1", nil},
+ {registry.QWORD, "Qword2", []byte{1, 2, 3}},
+ {registry.QWORD, "Qword3", []byte{1, 2, 3, 4, 5, 6, 7}},
+ {registry.MULTI_SZ, "MultiString1", nil},
+ {registry.MULTI_SZ, "MultiString2", []byte{0}},
+ {registry.MULTI_SZ, "MultiString3", []byte{'a', 'b', 0}},
+ {registry.MULTI_SZ, "MultiString4", []byte{'a', 0, 0, 'b', 0}},
+ {registry.MULTI_SZ, "MultiString5", []byte{'a', 0, 0}},
+ }
+
+ for _, test := range tests {
+ err := k.SetValue(test.Name, test.Type, test.Data)
+ if err != nil {
+ t.Fatalf("SetValue for %q failed: %v", test.Name, err)
+ }
+ }
+
+ for _, test := range tests {
+ switch test.Type {
+ case registry.DWORD, registry.QWORD:
+ value, valType, err := k.GetIntegerValue(test.Name)
+ if err == nil {
+ t.Errorf("GetIntegerValue(%q) succeeded. Returns type=%d value=%v", test.Name, valType, value)
+ }
+ case registry.MULTI_SZ:
+ value, valType, err := k.GetStringsValue(test.Name)
+ if err == nil {
+ if len(value) != 0 {
+ t.Errorf("GetStringsValue(%q) succeeded. Returns type=%d value=%v", test.Name, valType, value)
+ }
+ }
+ default:
+ t.Errorf("unsupported type %d for %s value", test.Type, test.Name)
+ }
+ }
+}
+
+func TestGetMUIStringValue(t *testing.T) {
+ if err := registry.LoadRegLoadMUIString(); err != nil {
+ t.Skip("regLoadMUIString not supported; skipping")
+ }
+ if err := procGetDynamicTimeZoneInformation.Find(); err != nil {
+ t.Skipf("%s not supported; skipping", procGetDynamicTimeZoneInformation.Name)
+ }
+ var dtzi DynamicTimezoneinformation
+ if _, err := GetDynamicTimeZoneInformation(&dtzi); err != nil {
+ t.Fatal(err)
+ }
+ tzKeyName := syscall.UTF16ToString(dtzi.TimeZoneKeyName[:])
+ timezoneK, err := registry.OpenKey(registry.LOCAL_MACHINE,
+ `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones\`+tzKeyName, registry.READ)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer timezoneK.Close()
+
+ type testType struct {
+ name string
+ want string
+ }
+ var tests = []testType{
+ {"MUI_Std", syscall.UTF16ToString(dtzi.StandardName[:])},
+ }
+ if dtzi.DynamicDaylightTimeDisabled == 0 {
+ tests = append(tests, testType{"MUI_Dlt", syscall.UTF16ToString(dtzi.DaylightName[:])})
+ }
+
+ for _, test := range tests {
+ got, err := timezoneK.GetMUIStringValue(test.name)
+ if err != nil {
+ t.Error("GetMUIStringValue:", err)
+ }
+
+ if got != test.want {
+ t.Errorf("GetMUIStringValue: %s: Got %q, want %q", test.name, got, test.want)
+ }
+ }
+}
+
+type DynamicTimezoneinformation struct {
+ Bias int32
+ StandardName [32]uint16
+ StandardDate syscall.Systemtime
+ StandardBias int32
+ DaylightName [32]uint16
+ DaylightDate syscall.Systemtime
+ DaylightBias int32
+ TimeZoneKeyName [128]uint16
+ DynamicDaylightTimeDisabled uint8
+}
+
+var (
+ kernel32DLL = syscall.NewLazyDLL("kernel32")
+
+ procGetDynamicTimeZoneInformation = kernel32DLL.NewProc("GetDynamicTimeZoneInformation")
+)
+
+func GetDynamicTimeZoneInformation(dtzi *DynamicTimezoneinformation) (rc uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetDynamicTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(dtzi)), 0, 0)
+ rc = uint32(r0)
+ if rc == 0xffffffff {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
diff --git a/src/internal/syscall/windows/registry/syscall.go b/src/internal/syscall/windows/registry/syscall.go
new file mode 100644
index 0000000..cb315ad
--- /dev/null
+++ b/src/internal/syscall/windows/registry/syscall.go
@@ -0,0 +1,31 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+import "syscall"
+
+const (
+ _REG_OPTION_NON_VOLATILE = 0
+
+ _REG_CREATED_NEW_KEY = 1
+ _REG_OPENED_EXISTING_KEY = 2
+
+ _ERROR_NO_MORE_ITEMS syscall.Errno = 259
+)
+
+func LoadRegLoadMUIString() error {
+ return procRegLoadMUIStringW.Find()
+}
+
+//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
+//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
+//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
+//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
+//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
+//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
+
+//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
diff --git a/src/internal/syscall/windows/registry/value.go b/src/internal/syscall/windows/registry/value.go
new file mode 100644
index 0000000..7dfee03
--- /dev/null
+++ b/src/internal/syscall/windows/registry/value.go
@@ -0,0 +1,372 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+import (
+ "errors"
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ // Registry value types.
+ NONE = 0
+ SZ = 1
+ EXPAND_SZ = 2
+ BINARY = 3
+ DWORD = 4
+ DWORD_BIG_ENDIAN = 5
+ LINK = 6
+ MULTI_SZ = 7
+ RESOURCE_LIST = 8
+ FULL_RESOURCE_DESCRIPTOR = 9
+ RESOURCE_REQUIREMENTS_LIST = 10
+ QWORD = 11
+)
+
+var (
+ // ErrShortBuffer is returned when the buffer was too short for the operation.
+ ErrShortBuffer = syscall.ERROR_MORE_DATA
+
+ // ErrNotExist is returned when a registry key or value does not exist.
+ ErrNotExist = syscall.ERROR_FILE_NOT_FOUND
+
+ // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
+ ErrUnexpectedType = errors.New("unexpected key value type")
+)
+
+// GetValue retrieves the type and data for the specified value associated
+// with an open key k. It fills buffer buf and returns the retrieved
+// byte count n. If buf is too small to fit the stored value it returns
+// ErrShortBuffer along with the required buffer size n.
+// If no buffer is provided, GetValue returns the value's type and the
+// required buffer size n without retrieving any data.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low level function. If value's type is known, use the appropriate
+// Get*Value function instead.
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
+ pname, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return 0, 0, err
+ }
+ var pbuf *byte
+ if len(buf) > 0 {
+ pbuf = (*byte)(unsafe.Pointer(&buf[0]))
+ }
+ l := uint32(len(buf))
+ err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
+ if err != nil {
+ return int(l), valtype, err
+ }
+ return int(l), valtype, nil
+}
+
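+// A minimal usage sketch of the two-step pattern GetValue supports: probe
+// with a nil buffer to learn the value's type and required size, then call
+// again with a buffer of that size. The helper name is hypothetical.
+//
+//    func readRaw(k Key, name string) ([]byte, uint32, error) {
+//        n, typ, err := k.GetValue(name, nil)
+//        if err != nil {
+//            return nil, typ, err
+//        }
+//        buf := make([]byte, n)
+//        if n, typ, err = k.GetValue(name, buf); err != nil {
+//            return nil, typ, err
+//        }
+//        return buf[:n], typ, nil
+//    }
+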
+func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
+ p, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return nil, 0, err
+ }
+ var t uint32
+ n := uint32(len(buf))
+ for {
+ err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
+ if err == nil {
+ return buf[:n], t, nil
+ }
+ if err != syscall.ERROR_MORE_DATA {
+ return nil, 0, err
+ }
+ if n <= uint32(len(buf)) {
+ return nil, 0, err
+ }
+ buf = make([]byte, n)
+ }
+}
+
+// GetStringValue retrieves the string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringValue returns ErrNotExist.
+// If value is not SZ or EXPAND_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return "", typ, err2
+ }
+ switch typ {
+ case SZ, EXPAND_SZ:
+ default:
+ return "", typ, ErrUnexpectedType
+ }
+ if len(data) == 0 {
+ return "", typ, nil
+ }
+ u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+ return syscall.UTF16ToString(u), typ, nil
+}
+
+// GetMUIStringValue retrieves the localized string value for
+// the specified value name associated with an open key k.
+// If the value name doesn't exist or the localized string value
+// can't be resolved, GetMUIStringValue returns ErrNotExist.
+// GetMUIStringValue panics if the system doesn't support
+// regLoadMUIString; use LoadRegLoadMUIString to check if
+// regLoadMUIString is supported before calling this function.
+func (k Key) GetMUIStringValue(name string) (string, error) {
+ pname, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return "", err
+ }
+
+ buf := make([]uint16, 1024)
+ var buflen uint32
+ var pdir *uint16
+
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path
+
+ // Try to resolve the string value using the system directory as
+ // a DLL search path; this assumes the string value is of the form
+ // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.
+
+ // This approach works with tzres.dll but may have to be revised
+ // in the future to allow callers to provide custom search paths.
+
+ var s string
+ s, err = ExpandString("%SystemRoot%\\system32\\")
+ if err != nil {
+ return "", err
+ }
+ pdir, err = syscall.UTF16PtrFromString(s)
+ if err != nil {
+ return "", err
+ }
+
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ }
+
+ for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed
+ if buflen <= uint32(len(buf)) {
+ break // buffer is not growing; assume a race and stop retrying
+ }
+ buf = make([]uint16, buflen)
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ }
+
+ if err != nil {
+ return "", err
+ }
+
+ return syscall.UTF16ToString(buf), nil
+}
+
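+// A short sketch of the guarded call pattern the tests above use: check
+// LoadRegLoadMUIString before relying on GetMUIStringValue. Here k is an
+// open key and "MUI_Std" mirrors the time-zone value used in the tests.
+//
+//    if err := LoadRegLoadMUIString(); err == nil {
+//        name, err := k.GetMUIStringValue("MUI_Std")
+//        _, _ = name, err
+//    }
+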
+// ExpandString expands environment-variable strings and replaces
+// them with the values defined for the current user.
+// Use ExpandString to expand EXPAND_SZ strings.
+func ExpandString(value string) (string, error) {
+ if value == "" {
+ return "", nil
+ }
+ p, err := syscall.UTF16PtrFromString(value)
+ if err != nil {
+ return "", err
+ }
+ r := make([]uint16, 100)
+ for {
+ n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r)))
+ if err != nil {
+ return "", err
+ }
+ if n <= uint32(len(r)) {
+ return syscall.UTF16ToString(r[:n]), nil
+ }
+ r = make([]uint16, n)
+ }
+}
+
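+// A small sketch combining GetStringValue with ExpandString on an open key k,
+// as the tests above do for EXPAND_SZ values; the value name is hypothetical.
+//
+//    s, typ, err := k.GetStringValue("InstallDir")
+//    if err == nil && typ == EXPAND_SZ {
+//        s, err = ExpandString(s)
+//    }
+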
+// GetStringsValue retrieves the []string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringsValue returns ErrNotExist.
+// If value is not MULTI_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return nil, typ, err2
+ }
+ if typ != MULTI_SZ {
+ return nil, typ, ErrUnexpectedType
+ }
+ if len(data) == 0 {
+ return nil, typ, nil
+ }
+ p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+ if len(p) == 0 {
+ return nil, typ, nil
+ }
+ if p[len(p)-1] == 0 {
+ p = p[:len(p)-1] // remove terminating null
+ }
+ val = make([]string, 0, 5)
+ from := 0
+ for i, c := range p {
+ if c == 0 {
+ val = append(val, syscall.UTF16ToString(p[from:i]))
+ from = i + 1
+ }
+ }
+ return val, typ, nil
+}
+
+// GetIntegerValue retrieves the integer value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetIntegerValue returns ErrNotExist.
+// If value is not DWORD or QWORD, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 8))
+ if err2 != nil {
+ return 0, typ, err2
+ }
+ switch typ {
+ case DWORD:
+ if len(data) != 4 {
+ return 0, typ, errors.New("DWORD value is not 4 bytes long")
+ }
+ return uint64(*(*uint32)(unsafe.Pointer(&data[0]))), DWORD, nil
+ case QWORD:
+ if len(data) != 8 {
+ return 0, typ, errors.New("QWORD value is not 8 bytes long")
+ }
+ return uint64(*(*uint64)(unsafe.Pointer(&data[0]))), QWORD, nil
+ default:
+ return 0, typ, ErrUnexpectedType
+ }
+}
+
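+// A one-line sketch on an open key k (hypothetical value name); the same
+// call handles both DWORD and QWORD values and reports which type was stored:
+//
+//    n, typ, err := k.GetIntegerValue("Timeout")
+//    _, _, _ = n, typ, err
+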
+// GetBinaryValue retrieves the binary value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetBinaryValue returns ErrNotExist.
+// If value is not BINARY, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return nil, typ, err2
+ }
+ if typ != BINARY {
+ return nil, typ, ErrUnexpectedType
+ }
+ return data, typ, nil
+}
+
+func (k Key) setValue(name string, valtype uint32, data []byte) error {
+ p, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return err
+ }
+ if len(data) == 0 {
+ return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0)
+ }
+ return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data)))
+}
+
+// SetDWordValue sets the data and type of a name value
+// under key k to value and DWORD.
+func (k Key) SetDWordValue(name string, value uint32) error {
+ return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:])
+}
+
+// SetQWordValue sets the data and type of a name value
+// under key k to value and QWORD.
+func (k Key) SetQWordValue(name string, value uint64) error {
+ return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:])
+}
+
+func (k Key) setStringValue(name string, valtype uint32, value string) error {
+ v, err := syscall.UTF16FromString(value)
+ if err != nil {
+ return err
+ }
+ buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+ return k.setValue(name, valtype, buf)
+}
+
+// SetStringValue sets the data and type of a name value
+// under key k to value and SZ. The value must not contain a zero byte.
+func (k Key) SetStringValue(name, value string) error {
+ return k.setStringValue(name, SZ, value)
+}
+
+// SetExpandStringValue sets the data and type of a name value
+// under key k to value and EXPAND_SZ. The value must not contain a zero byte.
+func (k Key) SetExpandStringValue(name, value string) error {
+ return k.setStringValue(name, EXPAND_SZ, value)
+}
+
+// SetStringsValue sets the data and type of a name value
+// under key k to value and MULTI_SZ. The value strings
+// must not contain a zero byte.
+func (k Key) SetStringsValue(name string, value []string) error {
+ ss := ""
+ for _, s := range value {
+ for i := 0; i < len(s); i++ {
+ if s[i] == 0 {
+ return errors.New("string cannot have 0 inside")
+ }
+ }
+ ss += s + "\x00"
+ }
+ v := utf16.Encode([]rune(ss + "\x00"))
+ buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+ return k.setValue(name, MULTI_SZ, buf)
+}
+
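+// A round-trip sketch for MULTI_SZ values on an open key k, matching what the
+// tests above exercise; the value name is hypothetical.
+//
+//    if err := k.SetStringsValue("SearchPaths", []string{"a", "b", "c"}); err == nil {
+//        got, typ, err := k.GetStringsValue("SearchPaths")
+//        _, _, _ = got, typ, err // got == []string{"a", "b", "c"}, typ == MULTI_SZ
+//    }
+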
+// SetBinaryValue sets the data and type of a name value
+// under key k to value and BINARY.
+func (k Key) SetBinaryValue(name string, value []byte) error {
+ return k.setValue(name, BINARY, value)
+}
+
+// DeleteValue removes a named value from the key k.
+func (k Key) DeleteValue(name string) error {
+ return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name))
+}
+
+// ReadValueNames returns the value names of key k.
+func (k Key) ReadValueNames() ([]string, error) {
+ ki, err := k.Stat()
+ if err != nil {
+ return nil, err
+ }
+ names := make([]string, 0, ki.ValueCount)
+ buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character
+loopItems:
+ for i := uint32(0); ; i++ {
+ l := uint32(len(buf))
+ for {
+ err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+ if err == nil {
+ break
+ }
+ if err == syscall.ERROR_MORE_DATA {
+ // Double buffer size and try again.
+ l = uint32(2 * len(buf))
+ buf = make([]uint16, l)
+ continue
+ }
+ if err == _ERROR_NO_MORE_ITEMS {
+ break loopItems
+ }
+ return names, err
+ }
+ names = append(names, syscall.UTF16ToString(buf[:l]))
+ }
+ return names, nil
+}
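+
+// Given an open key k, ReadValueNames pairs naturally with GetValue's
+// nil-buffer probe; a brief sketch:
+//
+//    names, err := k.ReadValueNames()
+//    if err == nil {
+//        for _, n := range names {
+//            _, typ, _ := k.GetValue(n, nil) // type and size probe only
+//            _ = typ
+//        }
+//    }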
diff --git a/src/internal/syscall/windows/registry/zsyscall_windows.go b/src/internal/syscall/windows/registry/zsyscall_windows.go
new file mode 100644
index 0000000..cab1319
--- /dev/null
+++ b/src/internal/syscall/windows/registry/zsyscall_windows.go
@@ -0,0 +1,107 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package registry
+
+import (
+ "internal/syscall/windows/sysdll"
+ "syscall"
+ "unsafe"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values seen on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
+
+var (
+ modadvapi32 = syscall.NewLazyDLL(sysdll.Add("advapi32.dll"))
+ modkernel32 = syscall.NewLazyDLL(sysdll.Add("kernel32.dll"))
+
+ procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW")
+ procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW")
+ procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW")
+ procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW")
+ procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW")
+ procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW")
+ procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW")
+)
+
+func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
+ r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
+ r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
+ r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
+ r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
+ r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
+ r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+ n = uint32(r0)
+ if n == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/src/internal/syscall/windows/reparse_windows.go b/src/internal/syscall/windows/reparse_windows.go
new file mode 100644
index 0000000..6caf47e
--- /dev/null
+++ b/src/internal/syscall/windows/reparse_windows.go
@@ -0,0 +1,91 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ FSCTL_SET_REPARSE_POINT = 0x000900A4
+ IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
+ IO_REPARSE_TAG_DEDUP = 0x80000013
+
+ SYMLINK_FLAG_RELATIVE = 1
+)
+
+// These structures are described
+// in https://msdn.microsoft.com/en-us/library/cc232007.aspx
+// and https://msdn.microsoft.com/en-us/library/cc232006.aspx.
+
+type REPARSE_DATA_BUFFER struct {
+ ReparseTag uint32
+ ReparseDataLength uint16
+ Reserved uint16
+ DUMMYUNIONNAME byte
+}
+
+// REPARSE_DATA_BUFFER_HEADER is a common part of REPARSE_DATA_BUFFER structure.
+type REPARSE_DATA_BUFFER_HEADER struct {
+ ReparseTag uint32
+ // The size, in bytes, of the reparse data that follows
+ // the common portion of the REPARSE_DATA_BUFFER element.
+ // This value is the length of the data starting at the
+ // SubstituteNameOffset field.
+ ReparseDataLength uint16
+ Reserved uint16
+}
+
+type SymbolicLinkReparseBuffer struct {
+ // The integer that contains the offset, in bytes,
+ // of the substitute name string in the PathBuffer array,
+ // computed as an offset from byte 0 of PathBuffer. Note that
+ // this offset must be divided by 2 to get the array index.
+ SubstituteNameOffset uint16
+ // The integer that contains the length, in bytes, of the
+ // substitute name string. If this string is null-terminated,
+ // SubstituteNameLength does not include the Unicode null character.
+ SubstituteNameLength uint16
+ // PrintNameOffset is similar to SubstituteNameOffset.
+ PrintNameOffset uint16
+ // PrintNameLength is similar to SubstituteNameLength.
+ PrintNameLength uint16
+ // Flags specifies whether the substitute name is a full path name or
+ // a path name relative to the directory containing the symbolic link.
+ Flags uint32
+ PathBuffer [1]uint16
+}
+
+// Path returns path stored in rb.
+func (rb *SymbolicLinkReparseBuffer) Path() string {
+ n1 := rb.SubstituteNameOffset / 2
+ n2 := (rb.SubstituteNameOffset + rb.SubstituteNameLength) / 2
+ return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(&rb.PathBuffer[0]))[n1:n2:n2])
+}
+
+type MountPointReparseBuffer struct {
+ // The integer that contains the offset, in bytes,
+ // of the substitute name string in the PathBuffer array,
+ // computed as an offset from byte 0 of PathBuffer. Note that
+ // this offset must be divided by 2 to get the array index.
+ SubstituteNameOffset uint16
+ // The integer that contains the length, in bytes, of the
+ // substitute name string. If this string is null-terminated,
+ // SubstituteNameLength does not include the Unicode null character.
+ SubstituteNameLength uint16
+ // PrintNameOffset is similar to SubstituteNameOffset.
+ PrintNameOffset uint16
+ // PrintNameLength is similar to SubstituteNameLength.
+ PrintNameLength uint16
+ PathBuffer [1]uint16
+}
+
+// Path returns path stored in rb.
+func (rb *MountPointReparseBuffer) Path() string {
+ n1 := rb.SubstituteNameOffset / 2
+ n2 := (rb.SubstituteNameOffset + rb.SubstituteNameLength) / 2
+ return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(&rb.PathBuffer[0]))[n1:n2:n2])
+}
diff --git a/src/internal/syscall/windows/security_windows.go b/src/internal/syscall/windows/security_windows.go
new file mode 100644
index 0000000..4a2dfc0
--- /dev/null
+++ b/src/internal/syscall/windows/security_windows.go
@@ -0,0 +1,128 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ SecurityAnonymous = 0
+ SecurityIdentification = 1
+ SecurityImpersonation = 2
+ SecurityDelegation = 3
+)
+
+//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf
+//sys RevertToSelf() (err error) = advapi32.RevertToSelf
+
+const (
+ TOKEN_ADJUST_PRIVILEGES = 0x0020
+ SE_PRIVILEGE_ENABLED = 0x00000002
+)
+
+type LUID struct {
+ LowPart uint32
+ HighPart int32
+}
+
+type LUID_AND_ATTRIBUTES struct {
+ Luid LUID
+ Attributes uint32
+}
+
+type TOKEN_PRIVILEGES struct {
+ PrivilegeCount uint32
+ Privileges [1]LUID_AND_ATTRIBUTES
+}
+
+//sys OpenThreadToken(h syscall.Handle, access uint32, openasself bool, token *syscall.Token) (err error) = advapi32.OpenThreadToken
+//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW
+//sys adjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) (ret uint32, err error) [true] = advapi32.AdjustTokenPrivileges
+
+func AdjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) error {
+ ret, err := adjustTokenPrivileges(token, disableAllPrivileges, newstate, buflen, prevstate, returnlen)
+ if ret == 0 {
+ // AdjustTokenPrivileges call failed
+ return err
+ }
+ // AdjustTokenPrivileges call succeeded
+ if err == syscall.EINVAL {
+ // GetLastError returned ERROR_SUCCESS
+ return nil
+ }
+ return err
+}
+
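+// A hedged sketch of how these wrappers compose to enable one privilege on
+// the current thread token; the privilege name is illustrative and error
+// handling is abbreviated. ImpersonateSelf may be needed first so that the
+// thread has its own token.
+//
+//    thread, _ := GetCurrentThread()
+//    var t syscall.Token
+//    if err := OpenThreadToken(thread, syscall.TOKEN_QUERY|TOKEN_ADJUST_PRIVILEGES, false, &t); err != nil {
+//        return err
+//    }
+//    defer t.Close()
+//    var luid LUID
+//    if err := LookupPrivilegeValue(nil, syscall.StringToUTF16Ptr("SeBackupPrivilege"), &luid); err != nil {
+//        return err
+//    }
+//    tp := TOKEN_PRIVILEGES{PrivilegeCount: 1}
+//    tp.Privileges[0] = LUID_AND_ATTRIBUTES{Luid: luid, Attributes: SE_PRIVILEGE_ENABLED}
+//    return AdjustTokenPrivileges(t, false, &tp, uint32(unsafe.Sizeof(tp)), nil, nil)
+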
+//sys DuplicateTokenEx(hExistingToken syscall.Token, dwDesiredAccess uint32, lpTokenAttributes *syscall.SecurityAttributes, impersonationLevel uint32, tokenType TokenType, phNewToken *syscall.Token) (err error) = advapi32.DuplicateTokenEx
+//sys SetTokenInformation(tokenHandle syscall.Token, tokenInformationClass uint32, tokenInformation uintptr, tokenInformationLength uint32) (err error) = advapi32.SetTokenInformation
+
+type SID_AND_ATTRIBUTES struct {
+ Sid *syscall.SID
+ Attributes uint32
+}
+
+type TOKEN_MANDATORY_LABEL struct {
+ Label SID_AND_ATTRIBUTES
+}
+
+func (tml *TOKEN_MANDATORY_LABEL) Size() uint32 {
+ return uint32(unsafe.Sizeof(TOKEN_MANDATORY_LABEL{})) + syscall.GetLengthSid(tml.Label.Sid)
+}
+
+const SE_GROUP_INTEGRITY = 0x00000020
+
+type TokenType uint32
+
+const (
+ TokenPrimary TokenType = 1
+ TokenImpersonation TokenType = 2
+)
+
+//sys GetProfilesDirectory(dir *uint16, dirLen *uint32) (err error) = userenv.GetProfilesDirectoryW
+
+const (
+ LG_INCLUDE_INDIRECT = 0x1
+ MAX_PREFERRED_LENGTH = 0xFFFFFFFF
+)
+
+type LocalGroupUserInfo0 struct {
+ Name *uint16
+}
+
+type UserInfo4 struct {
+ Name *uint16
+ Password *uint16
+ PasswordAge uint32
+ Priv uint32
+ HomeDir *uint16
+ Comment *uint16
+ Flags uint32
+ ScriptPath *uint16
+ AuthFlags uint32
+ FullName *uint16
+ UsrComment *uint16
+ Parms *uint16
+ Workstations *uint16
+ LastLogon uint32
+ LastLogoff uint32
+ AcctExpires uint32
+ MaxStorage uint32
+ UnitsPerWeek uint32
+ LogonHours *byte
+ BadPwCount uint32
+ NumLogons uint32
+ LogonServer *uint16
+ CountryCode uint32
+ CodePage uint32
+ UserSid *syscall.SID
+ PrimaryGroupID uint32
+ Profile *uint16
+ HomeDirDrive *uint16
+ PasswordExpired uint32
+}
+
+//sys NetUserGetLocalGroups(serverName *uint16, userName *uint16, level uint32, flags uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32) (neterr error) = netapi32.NetUserGetLocalGroups
diff --git a/src/internal/syscall/windows/symlink_windows.go b/src/internal/syscall/windows/symlink_windows.go
new file mode 100644
index 0000000..b64d058
--- /dev/null
+++ b/src/internal/syscall/windows/symlink_windows.go
@@ -0,0 +1,39 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import "syscall"
+
+const (
+ ERROR_INVALID_PARAMETER syscall.Errno = 87
+
+ // symlink support for CreateSymbolicLink() starting with Windows 10 (1703, v10.0.14972)
+ SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE = 0x2
+
+ // FileInformationClass values
+ FileBasicInfo = 0 // FILE_BASIC_INFO
+ FileStandardInfo = 1 // FILE_STANDARD_INFO
+ FileNameInfo = 2 // FILE_NAME_INFO
+ FileStreamInfo = 7 // FILE_STREAM_INFO
+ FileCompressionInfo = 8 // FILE_COMPRESSION_INFO
+ FileAttributeTagInfo = 9 // FILE_ATTRIBUTE_TAG_INFO
+ FileIdBothDirectoryInfo = 0xa // FILE_ID_BOTH_DIR_INFO
+ FileIdBothDirectoryRestartInfo = 0xb // FILE_ID_BOTH_DIR_INFO
+ FileRemoteProtocolInfo = 0xd // FILE_REMOTE_PROTOCOL_INFO
+ FileFullDirectoryInfo = 0xe // FILE_FULL_DIR_INFO
+ FileFullDirectoryRestartInfo = 0xf // FILE_FULL_DIR_INFO
+ FileStorageInfo = 0x10 // FILE_STORAGE_INFO
+ FileAlignmentInfo = 0x11 // FILE_ALIGNMENT_INFO
+ FileIdInfo = 0x12 // FILE_ID_INFO
+ FileIdExtdDirectoryInfo = 0x13 // FILE_ID_EXTD_DIR_INFO
+ FileIdExtdDirectoryRestartInfo = 0x14 // FILE_ID_EXTD_DIR_INFO
+)
+
+type FILE_ATTRIBUTE_TAG_INFO struct {
+ FileAttributes uint32
+ ReparseTag uint32
+}
+
+//sys GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byte, bufsize uint32) (err error)
diff --git a/src/internal/syscall/windows/syscall_windows.go b/src/internal/syscall/windows/syscall_windows.go
new file mode 100644
index 0000000..ab2f9a1
--- /dev/null
+++ b/src/internal/syscall/windows/syscall_windows.go
@@ -0,0 +1,390 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import (
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// UTF16PtrToString is like UTF16ToString, but takes *uint16
+// as a parameter instead of []uint16.
+func UTF16PtrToString(p *uint16) string {
+ if p == nil {
+ return ""
+ }
+ end := unsafe.Pointer(p)
+ n := 0
+ for *(*uint16)(end) != 0 {
+ end = unsafe.Pointer(uintptr(end) + unsafe.Sizeof(*p))
+ n++
+ }
+ return syscall.UTF16ToString(unsafe.Slice(p, n))
+}
+
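+// UTF16PtrToString is handy for the *uint16 string fields in the structures
+// below; a brief sketch with aa as a hypothetical *IpAdapterAddresses:
+//
+//    desc := UTF16PtrToString(aa.Description)
+//    friendly := UTF16PtrToString(aa.FriendlyName)
+//    _, _ = desc, friendly
+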
+const (
+ ERROR_BAD_LENGTH syscall.Errno = 24
+ ERROR_SHARING_VIOLATION syscall.Errno = 32
+ ERROR_LOCK_VIOLATION syscall.Errno = 33
+ ERROR_NOT_SUPPORTED syscall.Errno = 50
+ ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120
+ ERROR_INVALID_NAME syscall.Errno = 123
+ ERROR_LOCK_FAILED syscall.Errno = 167
+ ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113
+)
+
+const GAA_FLAG_INCLUDE_PREFIX = 0x00000010
+
+const (
+ IF_TYPE_OTHER = 1
+ IF_TYPE_ETHERNET_CSMACD = 6
+ IF_TYPE_ISO88025_TOKENRING = 9
+ IF_TYPE_PPP = 23
+ IF_TYPE_SOFTWARE_LOOPBACK = 24
+ IF_TYPE_ATM = 37
+ IF_TYPE_IEEE80211 = 71
+ IF_TYPE_TUNNEL = 131
+ IF_TYPE_IEEE1394 = 144
+)
+
+type SocketAddress struct {
+ Sockaddr *syscall.RawSockaddrAny
+ SockaddrLength int32
+}
+
+type IpAdapterUnicastAddress struct {
+ Length uint32
+ Flags uint32
+ Next *IpAdapterUnicastAddress
+ Address SocketAddress
+ PrefixOrigin int32
+ SuffixOrigin int32
+ DadState int32
+ ValidLifetime uint32
+ PreferredLifetime uint32
+ LeaseLifetime uint32
+ OnLinkPrefixLength uint8
+}
+
+type IpAdapterAnycastAddress struct {
+ Length uint32
+ Flags uint32
+ Next *IpAdapterAnycastAddress
+ Address SocketAddress
+}
+
+type IpAdapterMulticastAddress struct {
+ Length uint32
+ Flags uint32
+ Next *IpAdapterMulticastAddress
+ Address SocketAddress
+}
+
+type IpAdapterDnsServerAdapter struct {
+ Length uint32
+ Reserved uint32
+ Next *IpAdapterDnsServerAdapter
+ Address SocketAddress
+}
+
+type IpAdapterPrefix struct {
+ Length uint32
+ Flags uint32
+ Next *IpAdapterPrefix
+ Address SocketAddress
+ PrefixLength uint32
+}
+
+type IpAdapterAddresses struct {
+ Length uint32
+ IfIndex uint32
+ Next *IpAdapterAddresses
+ AdapterName *byte
+ FirstUnicastAddress *IpAdapterUnicastAddress
+ FirstAnycastAddress *IpAdapterAnycastAddress
+ FirstMulticastAddress *IpAdapterMulticastAddress
+ FirstDnsServerAddress *IpAdapterDnsServerAdapter
+ DnsSuffix *uint16
+ Description *uint16
+ FriendlyName *uint16
+ PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte
+ PhysicalAddressLength uint32
+ Flags uint32
+ Mtu uint32
+ IfType uint32
+ OperStatus uint32
+ Ipv6IfIndex uint32
+ ZoneIndices [16]uint32
+ FirstPrefix *IpAdapterPrefix
+ /* more fields might be present here. */
+}
+
+type SecurityAttributes struct {
+ Length uint16
+ SecurityDescriptor uintptr
+ InheritHandle bool
+}
+
+type FILE_BASIC_INFO struct {
+ CreationTime int64
+ LastAccessTime int64
+ LastWriteTime int64
+ ChangedTime int64
+ FileAttributes uint32
+
+ // Pad out to 8-byte alignment.
+ //
+ // Without this padding, TestChmod fails due to an argument validation error
+ // in SetFileInformationByHandle on windows/386.
+ //
+ // https://learn.microsoft.com/en-us/cpp/build/reference/zp-struct-member-alignment?view=msvc-170
+ // says that “The C/C++ headers in the Windows SDK assume the platform's
+ // default alignment is used.” What we see here is padding rather than
+ // alignment, but maybe it is related.
+ _ uint32
+}
+
+const (
+ IfOperStatusUp = 1
+ IfOperStatusDown = 2
+ IfOperStatusTesting = 3
+ IfOperStatusUnknown = 4
+ IfOperStatusDormant = 5
+ IfOperStatusNotPresent = 6
+ IfOperStatusLowerLayerDown = 7
+)
+
+//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses
+//sys GetComputerNameEx(nameformat uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW
+//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW
+//sys GetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) = kernel32.GetModuleFileNameW
+//sys SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf unsafe.Pointer, bufsize uint32) (err error) = kernel32.SetFileInformationByHandle
+//sys VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQuery
+//sys GetTempPath2(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPath2W
+
+const (
+ // flags for CreateToolhelp32Snapshot
+ TH32CS_SNAPMODULE = 0x08
+ TH32CS_SNAPMODULE32 = 0x10
+)
+
+const MAX_MODULE_NAME32 = 255
+
+type ModuleEntry32 struct {
+ Size uint32
+ ModuleID uint32
+ ProcessID uint32
+ GlblcntUsage uint32
+ ProccntUsage uint32
+ ModBaseAddr uintptr
+ ModBaseSize uint32
+ ModuleHandle syscall.Handle
+ Module [MAX_MODULE_NAME32 + 1]uint16
+ ExePath [syscall.MAX_PATH]uint16
+}
+
+const SizeofModuleEntry32 = unsafe.Sizeof(ModuleEntry32{})
+
+//sys Module32First(snapshot syscall.Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW
+//sys Module32Next(snapshot syscall.Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW
+
+const (
+ WSA_FLAG_OVERLAPPED = 0x01
+ WSA_FLAG_NO_HANDLE_INHERIT = 0x80
+
+ WSAEMSGSIZE syscall.Errno = 10040
+
+ MSG_PEEK = 0x2
+ MSG_TRUNC = 0x0100
+ MSG_CTRUNC = 0x0200
+
+ socket_error = uintptr(^uint32(0))
+)
+
+var WSAID_WSASENDMSG = syscall.GUID{
+ Data1: 0xa441e712,
+ Data2: 0x754f,
+ Data3: 0x43ca,
+ Data4: [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d},
+}
+
+var WSAID_WSARECVMSG = syscall.GUID{
+ Data1: 0xf689d7c8,
+ Data2: 0x6f1f,
+ Data3: 0x436b,
+ Data4: [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22},
+}
+
+var sendRecvMsgFunc struct {
+ once sync.Once
+ sendAddr uintptr
+ recvAddr uintptr
+ err error
+}
+
+type WSAMsg struct {
+ Name syscall.Pointer
+ Namelen int32
+ Buffers *syscall.WSABuf
+ BufferCount uint32
+ Control syscall.WSABuf
+ Flags uint32
+}
+
+//sys WSASocket(af int32, typ int32, protocol int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = ws2_32.WSASocketW
+
+func loadWSASendRecvMsg() error {
+ sendRecvMsgFunc.once.Do(func() {
+ var s syscall.Handle
+ s, sendRecvMsgFunc.err = syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_UDP)
+ if sendRecvMsgFunc.err != nil {
+ return
+ }
+ defer syscall.CloseHandle(s)
+ var n uint32
+ sendRecvMsgFunc.err = syscall.WSAIoctl(s,
+ syscall.SIO_GET_EXTENSION_FUNCTION_POINTER,
+ (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)),
+ uint32(unsafe.Sizeof(WSAID_WSARECVMSG)),
+ (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)),
+ uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)),
+ &n, nil, 0)
+ if sendRecvMsgFunc.err != nil {
+ return
+ }
+ sendRecvMsgFunc.err = syscall.WSAIoctl(s,
+ syscall.SIO_GET_EXTENSION_FUNCTION_POINTER,
+ (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)),
+ uint32(unsafe.Sizeof(WSAID_WSASENDMSG)),
+ (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)),
+ uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)),
+ &n, nil, 0)
+ })
+ return sendRecvMsgFunc.err
+}
+
+func WSASendMsg(fd syscall.Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *syscall.Overlapped, croutine *byte) error {
+ err := loadWSASendRecvMsg()
+ if err != nil {
+ return err
+ }
+ r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+ if r1 == socket_error {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return err
+}
+
+func WSARecvMsg(fd syscall.Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *syscall.Overlapped, croutine *byte) error {
+ err := loadWSASendRecvMsg()
+ if err != nil {
+ return err
+ }
+ r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0)
+ if r1 == socket_error {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return err
+}
+
+const (
+ ComputerNameNetBIOS = 0
+ ComputerNameDnsHostname = 1
+ ComputerNameDnsDomain = 2
+ ComputerNameDnsFullyQualified = 3
+ ComputerNamePhysicalNetBIOS = 4
+ ComputerNamePhysicalDnsHostname = 5
+ ComputerNamePhysicalDnsDomain = 6
+ ComputerNamePhysicalDnsFullyQualified = 7
+ ComputerNameMax = 8
+
+ MOVEFILE_REPLACE_EXISTING = 0x1
+ MOVEFILE_COPY_ALLOWED = 0x2
+ MOVEFILE_DELAY_UNTIL_REBOOT = 0x4
+ MOVEFILE_WRITE_THROUGH = 0x8
+ MOVEFILE_CREATE_HARDLINK = 0x10
+ MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20
+)
+
+func Rename(oldpath, newpath string) error {
+ from, err := syscall.UTF16PtrFromString(oldpath)
+ if err != nil {
+ return err
+ }
+ to, err := syscall.UTF16PtrFromString(newpath)
+ if err != nil {
+ return err
+ }
+ return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING)
+}
+
+//sys LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) = kernel32.LockFileEx
+//sys UnlockFileEx(file syscall.Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) = kernel32.UnlockFileEx
+
+const (
+ LOCKFILE_FAIL_IMMEDIATELY = 0x00000001
+ LOCKFILE_EXCLUSIVE_LOCK = 0x00000002
+)
+
+const MB_ERR_INVALID_CHARS = 8
+
+//sys GetACP() (acp uint32) = kernel32.GetACP
+//sys GetConsoleCP() (ccp uint32) = kernel32.GetConsoleCP
+//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
+//sys GetCurrentThread() (pseudoHandle syscall.Handle, err error) = kernel32.GetCurrentThread
+
+const STYPE_DISKTREE = 0x00
+
+type SHARE_INFO_2 struct {
+ Netname *uint16
+ Type uint32
+ Remark *uint16
+ Permissions uint32
+ MaxUses uint32
+ CurrentUses uint32
+ Path *uint16
+ Passwd *uint16
+}
+
+//sys NetShareAdd(serverName *uint16, level uint32, buf *byte, parmErr *uint16) (neterr error) = netapi32.NetShareAdd
+//sys NetShareDel(serverName *uint16, netName *uint16, reserved uint32) (neterr error) = netapi32.NetShareDel
+
+const (
+ FILE_NAME_NORMALIZED = 0x0
+ FILE_NAME_OPENED = 0x8
+
+ VOLUME_NAME_DOS = 0x0
+ VOLUME_NAME_GUID = 0x1
+ VOLUME_NAME_NONE = 0x4
+ VOLUME_NAME_NT = 0x2
+)
+
+//sys GetFinalPathNameByHandle(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW
+
+func LoadGetFinalPathNameByHandle() error {
+ return procGetFinalPathNameByHandleW.Find()
+}
+
+func ErrorLoadingGetTempPath2() error {
+ return procGetTempPath2W.Find()
+}
+
+//sys CreateEnvironmentBlock(block **uint16, token syscall.Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock
+//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock
+//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle syscall.Handle, err error) = kernel32.CreateEventW
+
+//sys ProcessPrng(buf []byte) (err error) = bcryptprimitives.ProcessPrng
+
+//sys RtlLookupFunctionEntry(pc uintptr, baseAddress *uintptr, table *byte) (ret uintptr) = kernel32.RtlLookupFunctionEntry
+//sys RtlVirtualUnwind(handlerType uint32, baseAddress uintptr, pc uintptr, entry uintptr, ctxt uintptr, data *uintptr, frame *uintptr, ctxptrs *byte) (ret uintptr) = kernel32.RtlVirtualUnwind
diff --git a/src/internal/syscall/windows/sysdll/sysdll.go b/src/internal/syscall/windows/sysdll/sysdll.go
new file mode 100644
index 0000000..e79fd19
--- /dev/null
+++ b/src/internal/syscall/windows/sysdll/sysdll.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+// Package sysdll is an internal leaf package that records and reports
+// which Windows DLL names are used by Go itself. These DLLs are then
+// only loaded from the System32 directory. See Issue 14959.
+package sysdll
+
+// IsSystemDLL reports whether the named dll key (a base name, like
+// "foo.dll") is a system DLL which should only be loaded from the
+// Windows SYSTEM32 directory.
+//
+// Filenames are case sensitive, but that doesn't matter because
+// the case registered with Add is also the same case used with
+// LoadDLL later.
+//
+// It has no associated mutex and should only be mutated serially
+// (currently: during init), and not concurrent with DLL loading.
+var IsSystemDLL = map[string]bool{}
+
+// Add notes that dll is a system32 DLL which should only be loaded
+// from the Windows SYSTEM32 directory. It returns its argument back,
+// for ease of use in generated code.
+func Add(dll string) string {
+ IsSystemDLL[dll] = true
+ return dll
+}
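+
+// The generated wrappers elsewhere in this tree call Add inline while loading
+// a DLL lazily, for example:
+//
+//    var modkernel32 = syscall.NewLazyDLL(sysdll.Add("kernel32.dll"))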
diff --git a/src/internal/syscall/windows/zsyscall_windows.go b/src/internal/syscall/windows/zsyscall_windows.go
new file mode 100644
index 0000000..6be7aa4
--- /dev/null
+++ b/src/internal/syscall/windows/zsyscall_windows.go
@@ -0,0 +1,389 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package windows
+
+import (
+ "internal/syscall/windows/sysdll"
+ "syscall"
+ "unsafe"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values seen on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
+
+var (
+ modadvapi32 = syscall.NewLazyDLL(sysdll.Add("advapi32.dll"))
+ modbcryptprimitives = syscall.NewLazyDLL(sysdll.Add("bcryptprimitives.dll"))
+ modiphlpapi = syscall.NewLazyDLL(sysdll.Add("iphlpapi.dll"))
+ modkernel32 = syscall.NewLazyDLL(sysdll.Add("kernel32.dll"))
+ modnetapi32 = syscall.NewLazyDLL(sysdll.Add("netapi32.dll"))
+ modpsapi = syscall.NewLazyDLL(sysdll.Add("psapi.dll"))
+ moduserenv = syscall.NewLazyDLL(sysdll.Add("userenv.dll"))
+ modws2_32 = syscall.NewLazyDLL(sysdll.Add("ws2_32.dll"))
+
+ procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
+ procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx")
+ procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
+ procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
+ procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
+ procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
+ procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation")
+ procProcessPrng = modbcryptprimitives.NewProc("ProcessPrng")
+ procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
+ procCreateEventW = modkernel32.NewProc("CreateEventW")
+ procGetACP = modkernel32.NewProc("GetACP")
+ procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW")
+ procGetConsoleCP = modkernel32.NewProc("GetConsoleCP")
+ procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
+ procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
+ procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW")
+ procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW")
+ procGetTempPath2W = modkernel32.NewProc("GetTempPath2W")
+ procLockFileEx = modkernel32.NewProc("LockFileEx")
+ procModule32FirstW = modkernel32.NewProc("Module32FirstW")
+ procModule32NextW = modkernel32.NewProc("Module32NextW")
+ procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+ procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar")
+ procRtlLookupFunctionEntry = modkernel32.NewProc("RtlLookupFunctionEntry")
+ procRtlVirtualUnwind = modkernel32.NewProc("RtlVirtualUnwind")
+ procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
+ procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
+ procVirtualQuery = modkernel32.NewProc("VirtualQuery")
+ procNetShareAdd = modnetapi32.NewProc("NetShareAdd")
+ procNetShareDel = modnetapi32.NewProc("NetShareDel")
+ procNetUserGetLocalGroups = modnetapi32.NewProc("NetUserGetLocalGroups")
+ procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
+ procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock")
+ procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock")
+ procGetProfilesDirectoryW = moduserenv.NewProc("GetProfilesDirectoryW")
+ procWSASocketW = modws2_32.NewProc("WSASocketW")
+)
+
+func adjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) (ret uint32, err error) {
+ var _p0 uint32
+ if disableAllPrivileges {
+ _p0 = 1
+ }
+ r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+ ret = uint32(r0)
+ if true {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func DuplicateTokenEx(hExistingToken syscall.Token, dwDesiredAccess uint32, lpTokenAttributes *syscall.SecurityAttributes, impersonationLevel uint32, tokenType TokenType, phNewToken *syscall.Token) (err error) {
+ r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(hExistingToken), uintptr(dwDesiredAccess), uintptr(unsafe.Pointer(lpTokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(phNewToken)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func ImpersonateSelf(impersonationlevel uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) {
+ r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func OpenThreadToken(h syscall.Handle, access uint32, openasself bool, token *syscall.Token) (err error) {
+ var _p0 uint32
+ if openasself {
+ _p0 = 1
+ }
+ r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(h), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func RevertToSelf() (err error) {
+ r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func SetTokenInformation(tokenHandle syscall.Token, tokenInformationClass uint32, tokenInformation uintptr, tokenInformationLength uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(tokenHandle), uintptr(tokenInformationClass), uintptr(tokenInformation), uintptr(tokenInformationLength), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func ProcessPrng(buf []byte) (err error) {
+ var _p0 *byte
+ if len(buf) > 0 {
+ _p0 = &buf[0]
+ }
+ r1, _, e1 := syscall.Syscall(procProcessPrng.Addr(), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
+ r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0)
+ handle = syscall.Handle(r0)
+ if handle == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetACP() (acp uint32) {
+ r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0)
+ acp = uint32(r0)
+ return
+}
+
+func GetComputerNameEx(nameformat uint32, buf *uint16, n *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nameformat), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetConsoleCP() (ccp uint32) {
+ r0, _, _ := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0)
+ ccp = uint32(r0)
+ return
+}
+
+func GetCurrentThread() (pseudoHandle syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
+ pseudoHandle = syscall.Handle(r0)
+ if pseudoHandle == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byte, bufsize uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(info)), uintptr(bufsize), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetFinalPathNameByHandle(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) {
+ r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0)
+ n = uint32(r0)
+ if n == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(fn)), uintptr(len))
+ n = uint32(r0)
+ if n == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetTempPath2(buflen uint32, buf *uint16) (n uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetTempPath2W.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+ n = uint32(r0)
+ if n == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) {
+ r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func Module32First(snapshot syscall.Handle, moduleEntry *ModuleEntry32) (err error) {
+ r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func Module32Next(snapshot syscall.Handle, moduleEntry *ModuleEntry32) (err error) {
+ r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) {
+ r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
+ nwrite = int32(r0)
+ if nwrite == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func RtlLookupFunctionEntry(pc uintptr, baseAddress *uintptr, table *byte) (ret uintptr) {
+ r0, _, _ := syscall.Syscall(procRtlLookupFunctionEntry.Addr(), 3, uintptr(pc), uintptr(unsafe.Pointer(baseAddress)), uintptr(unsafe.Pointer(table)))
+ ret = uintptr(r0)
+ return
+}
+
+func RtlVirtualUnwind(handlerType uint32, baseAddress uintptr, pc uintptr, entry uintptr, ctxt uintptr, data *uintptr, frame *uintptr, ctxptrs *byte) (ret uintptr) {
+ r0, _, _ := syscall.Syscall9(procRtlVirtualUnwind.Addr(), 8, uintptr(handlerType), uintptr(baseAddress), uintptr(pc), uintptr(entry), uintptr(ctxt), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(frame)), uintptr(unsafe.Pointer(ctxptrs)), 0)
+ ret = uintptr(r0)
+ return
+}
+
+func SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf unsafe.Pointer, bufsize uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(fileInformationClass), uintptr(buf), uintptr(bufsize), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func UnlockFileEx(file syscall.Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) {
+ r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
+ r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func NetShareAdd(serverName *uint16, level uint32, buf *byte, parmErr *uint16) (neterr error) {
+ r0, _, _ := syscall.Syscall6(procNetShareAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parmErr)), 0, 0)
+ if r0 != 0 {
+ neterr = syscall.Errno(r0)
+ }
+ return
+}
+
+func NetShareDel(serverName *uint16, netName *uint16, reserved uint32) (neterr error) {
+ r0, _, _ := syscall.Syscall(procNetShareDel.Addr(), 3, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(netName)), uintptr(reserved))
+ if r0 != 0 {
+ neterr = syscall.Errno(r0)
+ }
+ return
+}
+
+func NetUserGetLocalGroups(serverName *uint16, userName *uint16, level uint32, flags uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32) (neterr error) {
+ r0, _, _ := syscall.Syscall9(procNetUserGetLocalGroups.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(flags), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), 0)
+ if r0 != 0 {
+ neterr = syscall.Errno(r0)
+ }
+ return
+}
+
+func GetProcessMemoryInfo(handle syscall.Handle, memCounters *PROCESS_MEMORY_COUNTERS, cb uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(memCounters)), uintptr(cb))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func CreateEnvironmentBlock(block **uint16, token syscall.Token, inheritExisting bool) (err error) {
+ var _p0 uint32
+ if inheritExisting {
+ _p0 = 1
+ }
+ r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func DestroyEnvironmentBlock(block *uint16) (err error) {
+ r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetProfilesDirectory(dir *uint16, dirLen *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procGetProfilesDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)), 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func WSASocket(af int32, typ int32, protocol int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (handle syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protinfo)), uintptr(group), uintptr(flags))
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ err = errnoErr(e1)
+ }
+ return
+}
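
All of the generated wrappers above share one shape: resolve the procedure lazily, call it through syscall.Syscall or a sized variant, and translate the raw error with errnoErr. A hedged sketch of a caller (the caller is hypothetical; it assumes an import of internal/syscall/windows):

	// GetACP and GetConsoleCP are the simplest wrappers above: no arguments,
	// no error translation, just a uint32 result from the lazily loaded proc.
	func codePagesMatch() bool {
		return windows.GetACP() == windows.GetConsoleCP()
	}
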
diff --git a/src/internal/sysinfo/sysinfo.go b/src/internal/sysinfo/sysinfo.go
new file mode 100644
index 0000000..961be7a
--- /dev/null
+++ b/src/internal/sysinfo/sysinfo.go
@@ -0,0 +1,31 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sysinfo implements high level hardware information gathering
+// that can be used for debugging or information purposes.
+package sysinfo
+
+import (
+ internalcpu "internal/cpu"
+ "sync"
+)
+
+type cpuInfo struct {
+ once sync.Once
+ name string
+}
+
+var CPU cpuInfo
+
+func (cpu *cpuInfo) Name() string {
+ cpu.once.Do(func() {
+ // Try to get the information from internal/cpu.
+ if name := internalcpu.Name(); name != "" {
+ cpu.name = name
+ return
+ }
+ // TODO(martisch): use /proc/cpuinfo and /sys/devices/system/cpu/ on Linux as fallback.
+ })
+ return cpu.name
+}
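
A hedged usage sketch (the caller is hypothetical; internal/sysinfo is importable only from inside the standard library):

	import "internal/sysinfo"

	// Name is computed once, guarded by sync.Once, and cached for later calls.
	func cpuModel() string {
		if name := sysinfo.CPU.Name(); name != "" {
			return name
		}
		return "unknown"
	}
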
diff --git a/src/internal/testenv/exec.go b/src/internal/testenv/exec.go
new file mode 100644
index 0000000..50d3b0d
--- /dev/null
+++ b/src/internal/testenv/exec.go
@@ -0,0 +1,219 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testenv
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+// MustHaveExec checks that the current system can start new processes
+// using os.StartProcess or (more commonly) exec.Command.
+// If not, MustHaveExec calls t.Skip with an explanation.
+//
+// On some platforms MustHaveExec checks for exec support by re-executing the
+// current executable, which must be a binary built by 'go test'.
+// We intentionally do not provide a HasExec function because of the risk of
+// inappropriate recursion in TestMain functions.
+//
+// To check for exec support outside of a test, just try to exec the command.
+// If exec is not supported, testenv.SyscallIsNotSupported will return true
+// for the resulting error.
+func MustHaveExec(t testing.TB) {
+ tryExecOnce.Do(func() {
+ tryExecErr = tryExec()
+ })
+ if tryExecErr != nil {
+ t.Skipf("skipping test: cannot exec subprocess on %s/%s: %v", runtime.GOOS, runtime.GOARCH, tryExecErr)
+ }
+}
+
+var (
+ tryExecOnce sync.Once
+ tryExecErr error
+)
+
+func tryExec() error {
+ switch runtime.GOOS {
+ case "wasip1", "js", "ios":
+ default:
+ // Assume that exec always works on non-mobile platforms and Android.
+ return nil
+ }
+
+ // ios has an exec syscall but on real iOS devices it might return a
+ // permission error. In an emulated environment (such as a Corellium host)
+ // it might succeed, so if we need to exec we'll just have to try it and
+ // find out.
+ //
+ // As of 2023-04-19 wasip1 and js don't have exec syscalls at all, but we
+ // may as well use the same path so that this branch can be tested without
+ // an ios environment.
+
+ if !testing.Testing() {
+ // This isn't a standard 'go test' binary, so we don't know how to
+ // self-exec in a way that should succeed without side effects.
+ // Just forget it.
+ return errors.New("can't probe for exec support with a non-test executable")
+ }
+
+ // We know that this is a test executable. We should be able to run it with a
+ // no-op flag to check for overall exec support.
+ exe, err := os.Executable()
+ if err != nil {
+ return fmt.Errorf("can't probe for exec support: %w", err)
+ }
+ cmd := exec.Command(exe, "-test.list=^$")
+ cmd.Env = origEnv
+ return cmd.Run()
+}
+
+var execPaths sync.Map // path -> error
+
+// MustHaveExecPath checks that the current system can start the named executable
+// using os.StartProcess or (more commonly) exec.Command.
+// If not, MustHaveExecPath calls t.Skip with an explanation.
+func MustHaveExecPath(t testing.TB, path string) {
+ MustHaveExec(t)
+
+ err, found := execPaths.Load(path)
+ if !found {
+ _, err = exec.LookPath(path)
+ err, _ = execPaths.LoadOrStore(path, err)
+ }
+ if err != nil {
+ t.Skipf("skipping test: %s: %s", path, err)
+ }
+}
+
+// CleanCmdEnv will fill cmd.Env with the environment, excluding certain
+// variables that could modify the behavior of the Go tools such as
+// GODEBUG and GOTRACEBACK.
+func CleanCmdEnv(cmd *exec.Cmd) *exec.Cmd {
+ if cmd.Env != nil {
+ panic("environment already set")
+ }
+ for _, env := range os.Environ() {
+ // Exclude GODEBUG from the environment to prevent its output
+ // from breaking tests that are trying to parse other command output.
+ if strings.HasPrefix(env, "GODEBUG=") {
+ continue
+ }
+ // Exclude GOTRACEBACK for the same reason.
+ if strings.HasPrefix(env, "GOTRACEBACK=") {
+ continue
+ }
+ cmd.Env = append(cmd.Env, env)
+ }
+ return cmd
+}
+
+// CommandContext is like exec.CommandContext, but:
+// - skips t if the platform does not support os/exec,
+// - sends SIGQUIT (if supported by the platform) instead of SIGKILL
+// in its Cancel function
+// - if the test has a deadline, adds a Context timeout and WaitDelay
+// for an arbitrary grace period before the test's deadline expires,
+// - fails the test if the command does not complete before the test's deadline, and
+// - sets a Cleanup function that verifies that the test did not leak a subprocess.
+func CommandContext(t testing.TB, ctx context.Context, name string, args ...string) *exec.Cmd {
+ t.Helper()
+ MustHaveExec(t)
+
+ var (
+ cancelCtx context.CancelFunc
+ gracePeriod time.Duration // unlimited unless the test has a deadline (to allow for interactive debugging)
+ )
+
+ if t, ok := t.(interface {
+ testing.TB
+ Deadline() (time.Time, bool)
+ }); ok {
+ if td, ok := t.Deadline(); ok {
+ // Start with a minimum grace period, just long enough to consume the
+ // output of a reasonable program after it terminates.
+ gracePeriod = 100 * time.Millisecond
+ if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
+ scale, err := strconv.Atoi(s)
+ if err != nil {
+ t.Fatalf("invalid GO_TEST_TIMEOUT_SCALE: %v", err)
+ }
+ gracePeriod *= time.Duration(scale)
+ }
+
+ // If time allows, increase the termination grace period to 5% of the
+ // test's remaining time.
+ testTimeout := time.Until(td)
+ if gp := testTimeout / 20; gp > gracePeriod {
+ gracePeriod = gp
+ }
+
+ // When we run commands that execute subprocesses, we want to reserve two
+ // grace periods to clean up: one for the delay between the first
+ // termination signal being sent (via the Cancel callback when the Context
+ // expires) and the process being forcibly terminated (via the WaitDelay
+ // field), and a second one for the delay between the process being
+ // terminated and the test logging its output for debugging.
+ //
+ // (We want to ensure that the test process itself has enough time to
+ // log the output before it is also terminated.)
+ cmdTimeout := testTimeout - 2*gracePeriod
+
+ if cd, ok := ctx.Deadline(); !ok || time.Until(cd) > cmdTimeout {
+ // Either ctx doesn't have a deadline, or its deadline would expire
+ // after (or too close before) the test has already timed out.
+ // Add a shorter timeout so that the test will produce useful output.
+ ctx, cancelCtx = context.WithTimeout(ctx, cmdTimeout)
+ }
+ }
+ }
+
+ cmd := exec.CommandContext(ctx, name, args...)
+ cmd.Cancel = func() error {
+ if cancelCtx != nil && ctx.Err() == context.DeadlineExceeded {
+ // The command timed out due to running too close to the test's deadline.
+ // There is no way the test did that intentionally — it's too close to the
+ // wire! — so mark it as a test failure. That way, if the test expects the
+ // command to fail for some other reason, it doesn't have to distinguish
+ // between that reason and a timeout.
+ t.Errorf("test timed out while running command: %v", cmd)
+ } else {
+ // The command is being terminated due to ctx being canceled, but
+ // apparently not due to an explicit test deadline that we added.
+ // Log that information in case it is useful for diagnosing a failure,
+ // but don't actually fail the test because of it.
+ t.Logf("%v: terminating command: %v", ctx.Err(), cmd)
+ }
+ return cmd.Process.Signal(Sigquit)
+ }
+ cmd.WaitDelay = gracePeriod
+
+ t.Cleanup(func() {
+ if cancelCtx != nil {
+ cancelCtx()
+ }
+ if cmd.Process != nil && cmd.ProcessState == nil {
+ t.Errorf("command was started, but test did not wait for it to complete: %v", cmd)
+ }
+ })
+
+ return cmd
+}
+
+// Command is like exec.Command, but applies the same changes as
+// testenv.CommandContext (with a default Context).
+func Command(t testing.TB, name string, args ...string) *exec.Cmd {
+ t.Helper()
+ return CommandContext(t, context.Background(), name, args...)
+}
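
A hedged sketch of how a test typically uses these helpers (the test body and command are illustrative, not part of the patch):

	func TestRunsSubprocess(t *testing.T) {
		// Command calls MustHaveExec itself and wires Cancel, WaitDelay and a
		// leaked-subprocess check to the test's deadline.
		cmd := testenv.Command(t, os.Args[0], "-test.run=^$")
		out, err := cmd.CombinedOutput()
		if err != nil {
			t.Fatalf("%v: %v\n%s", cmd, err, out)
		}
	}
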
diff --git a/src/internal/testenv/noopt.go b/src/internal/testenv/noopt.go
new file mode 100644
index 0000000..ae2a3d0
--- /dev/null
+++ b/src/internal/testenv/noopt.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build noopt
+
+package testenv
+
+// OptimizationOff reports whether optimization is disabled.
+func OptimizationOff() bool {
+ return true
+}
diff --git a/src/internal/testenv/opt.go b/src/internal/testenv/opt.go
new file mode 100644
index 0000000..1bb96f7
--- /dev/null
+++ b/src/internal/testenv/opt.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !noopt
+
+package testenv
+
+// OptimizationOff reports whether optimization is disabled.
+func OptimizationOff() bool {
+ return false
+}
diff --git a/src/internal/testenv/testenv.go b/src/internal/testenv/testenv.go
new file mode 100644
index 0000000..31b58dd
--- /dev/null
+++ b/src/internal/testenv/testenv.go
@@ -0,0 +1,506 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testenv provides information about what functionality
+// is available in different testing environments run by the Go team.
+//
+// It is an internal package because these details are specific
+// to the Go team's test setup (on build.golang.org) and not
+// fundamental to tests in general.
+package testenv
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "internal/cfg"
+ "internal/platform"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+)
+
+// Save the original environment during init for use in checks. A test
+// binary may modify its environment before calling MustHaveExec to change its
+// behavior (such as mimicking a command-line tool), and that modified
+// environment might cause environment checks to behave erratically.
+var origEnv = os.Environ()
+
+// Builder reports the name of the builder running this test
+// (for example, "linux-amd64" or "windows-386-gce").
+// If the test is not running on the build infrastructure,
+// Builder returns the empty string.
+func Builder() string {
+ return os.Getenv("GO_BUILDER_NAME")
+}
+
+// HasGoBuild reports whether the current system can build programs with “go build”
+// and then run them with os.StartProcess or exec.Command.
+func HasGoBuild() bool {
+ if os.Getenv("GO_GCFLAGS") != "" {
+ // It's too much work to require every caller of the go command
+ // to pass along "-gcflags="+os.Getenv("GO_GCFLAGS").
+ // For now, if $GO_GCFLAGS is set, report that we simply can't
+ // run go build.
+ return false
+ }
+
+ goBuildOnce.Do(func() {
+ // To run 'go build', we need to be able to exec a 'go' command.
+ // We somewhat arbitrarily choose to exec 'go tool -n compile' because that
+ // also confirms that cmd/go can find the compiler. (Before CL 472096,
+ // we sometimes ended up with cmd/go installed in the test environment
+ // without a cmd/compile it could use to actually build things.)
+ cmd := exec.Command("go", "tool", "-n", "compile")
+ cmd.Env = origEnv
+ out, err := cmd.Output()
+ if err != nil {
+ goBuildErr = fmt.Errorf("%v: %w", cmd, err)
+ return
+ }
+ out = bytes.TrimSpace(out)
+ if len(out) == 0 {
+ goBuildErr = fmt.Errorf("%v: no tool reported", cmd)
+ return
+ }
+ if _, err := exec.LookPath(string(out)); err != nil {
+ goBuildErr = err
+ return
+ }
+
+ if platform.MustLinkExternal(runtime.GOOS, runtime.GOARCH, false) {
+ // We can assume that we always have a complete Go toolchain available.
+ // However, this platform requires a C linker to build even pure Go
+ // programs, including tests. Do we have one in the test environment?
+ // (On Android, for example, the device running the test might not have a
+ // C toolchain installed.)
+ //
+ // If CC is set explicitly, assume that we do. Otherwise, use 'go env CC'
+ // to determine which toolchain it would use by default.
+ if os.Getenv("CC") == "" {
+ cmd := exec.Command("go", "env", "CC")
+ cmd.Env = origEnv
+ out, err := cmd.Output()
+ if err != nil {
+ goBuildErr = fmt.Errorf("%v: %w", cmd, err)
+ return
+ }
+ out = bytes.TrimSpace(out)
+ if len(out) == 0 {
+ goBuildErr = fmt.Errorf("%v: no CC reported", cmd)
+ return
+ }
+ _, goBuildErr = exec.LookPath(string(out))
+ }
+ }
+ })
+
+ return goBuildErr == nil
+}
+
+var (
+ goBuildOnce sync.Once
+ goBuildErr error
+)
+
+// MustHaveGoBuild checks that the current system can build programs with “go build”
+// and then run them with os.StartProcess or exec.Command.
+// If not, MustHaveGoBuild calls t.Skip with an explanation.
+func MustHaveGoBuild(t testing.TB) {
+ if os.Getenv("GO_GCFLAGS") != "" {
+ t.Helper()
+ t.Skipf("skipping test: 'go build' not compatible with setting $GO_GCFLAGS")
+ }
+ if !HasGoBuild() {
+ t.Helper()
+ t.Skipf("skipping test: 'go build' unavailable: %v", goBuildErr)
+ }
+}
+
+// HasGoRun reports whether the current system can run programs with “go run.”
+func HasGoRun() bool {
+ // For now, having go run and having go build are the same.
+ return HasGoBuild()
+}
+
+// MustHaveGoRun checks that the current system can run programs with “go run.”
+// If not, MustHaveGoRun calls t.Skip with an explanation.
+func MustHaveGoRun(t testing.TB) {
+ if !HasGoRun() {
+ t.Skipf("skipping test: 'go run' not available on %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+}
+
+// HasParallelism reports whether the current system can execute multiple
+// threads in parallel.
+// There is a copy of this function in cmd/dist/test.go.
+func HasParallelism() bool {
+ switch runtime.GOOS {
+ case "js", "wasip1":
+ return false
+ }
+ return true
+}
+
+// MustHaveParallelism checks that the current system can execute multiple
+// threads in parallel. If not, MustHaveParallelism calls t.Skip with an explanation.
+func MustHaveParallelism(t testing.TB) {
+ if !HasParallelism() {
+ t.Skipf("skipping test: no parallelism available on %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+}
+
+// GoToolPath reports the path to the Go tool.
+// It is a convenience wrapper around GoTool.
+// If the tool is unavailable GoToolPath calls t.Skip.
+// If the tool should be available and isn't, GoToolPath calls t.Fatal.
+func GoToolPath(t testing.TB) string {
+ MustHaveGoBuild(t)
+ path, err := GoTool()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Add all environment variables that affect the Go command to test metadata.
+ // Cached test results will be invalidated when these variables change.
+ // See golang.org/issue/32285.
+ for _, envVar := range strings.Fields(cfg.KnownEnv) {
+ os.Getenv(envVar)
+ }
+ return path
+}
+
+var (
+ gorootOnce sync.Once
+ gorootPath string
+ gorootErr error
+)
+
+func findGOROOT() (string, error) {
+ gorootOnce.Do(func() {
+ gorootPath = runtime.GOROOT()
+ if gorootPath != "" {
+ // If runtime.GOROOT() is non-empty, assume that it is valid.
+ //
+ // (It might not be: for example, the user may have explicitly set GOROOT
+ // to the wrong directory, or explicitly set GOROOT_FINAL but not GOROOT
+ // and hasn't moved the tree to GOROOT_FINAL yet. But those cases are
+ // rare, and if that happens the user can fix what they broke.)
+ return
+ }
+
+ // runtime.GOROOT doesn't know where GOROOT is (perhaps because the test
+ // binary was built with -trimpath, or perhaps because GOROOT_FINAL was set
+ // without GOROOT and the tree hasn't been moved there yet).
+ //
+ // Since this is internal/testenv, we can cheat and assume that the caller
+ // is a test of some package in a subdirectory of GOROOT/src. ('go test'
+ // runs the test in the directory containing the package under test.) That
+ // means that if we start walking up the tree, we should eventually find
+ // GOROOT/src/go.mod, and we can report the parent directory of that.
+ //
+ // Notably, this works even if we can't run 'go env GOROOT' as a
+ // subprocess.
+
+ cwd, err := os.Getwd()
+ if err != nil {
+ gorootErr = fmt.Errorf("finding GOROOT: %w", err)
+ return
+ }
+
+ dir := cwd
+ for {
+ parent := filepath.Dir(dir)
+ if parent == dir {
+ // dir is either "." or only a volume name.
+ gorootErr = fmt.Errorf("failed to locate GOROOT/src in any parent directory")
+ return
+ }
+
+ if base := filepath.Base(dir); base != "src" {
+ dir = parent
+ continue // dir cannot be GOROOT/src if it doesn't end in "src".
+ }
+
+ b, err := os.ReadFile(filepath.Join(dir, "go.mod"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ dir = parent
+ continue
+ }
+ gorootErr = fmt.Errorf("finding GOROOT: %w", err)
+ return
+ }
+ goMod := string(b)
+
+ for goMod != "" {
+ var line string
+ line, goMod, _ = strings.Cut(goMod, "\n")
+ fields := strings.Fields(line)
+ if len(fields) >= 2 && fields[0] == "module" && fields[1] == "std" {
+ // Found "module std", which is the module declaration in GOROOT/src!
+ gorootPath = parent
+ return
+ }
+ }
+ }
+ })
+
+ return gorootPath, gorootErr
+}
+
+// GOROOT reports the path to the directory containing the root of the Go
+// project source tree. This is normally equivalent to runtime.GOROOT, but
+// works even if the test binary was built with -trimpath and cannot exec
+// 'go env GOROOT'.
+//
+// If GOROOT cannot be found, GOROOT skips t if t is non-nil,
+// or panics otherwise.
+func GOROOT(t testing.TB) string {
+ path, err := findGOROOT()
+ if err != nil {
+ if t == nil {
+ panic(err)
+ }
+ t.Helper()
+ t.Skip(err)
+ }
+ return path
+}
+
+// GoTool reports the path to the Go tool.
+func GoTool() (string, error) {
+ if !HasGoBuild() {
+ return "", errors.New("platform cannot run go tool")
+ }
+ goToolOnce.Do(func() {
+ goToolPath, goToolErr = exec.LookPath("go")
+ })
+ return goToolPath, goToolErr
+}
+
+var (
+ goToolOnce sync.Once
+ goToolPath string
+ goToolErr error
+)
+
+// HasSrc reports whether the entire source tree is available under GOROOT.
+func HasSrc() bool {
+ switch runtime.GOOS {
+ case "ios":
+ return false
+ }
+ return true
+}
+
+// HasExternalNetwork reports whether the current system can use
+// external (non-localhost) networks.
+func HasExternalNetwork() bool {
+ return !testing.Short() && runtime.GOOS != "js" && runtime.GOOS != "wasip1"
+}
+
+// MustHaveExternalNetwork checks that the current system can use
+// external (non-localhost) networks.
+// If not, MustHaveExternalNetwork calls t.Skip with an explanation.
+func MustHaveExternalNetwork(t testing.TB) {
+ if runtime.GOOS == "js" || runtime.GOOS == "wasip1" {
+ t.Helper()
+ t.Skipf("skipping test: no external network on %s", runtime.GOOS)
+ }
+ if testing.Short() {
+ t.Helper()
+ t.Skipf("skipping test: no external network in -short mode")
+ }
+}
+
+// HasCGO reports whether the current system can use cgo.
+func HasCGO() bool {
+ hasCgoOnce.Do(func() {
+ goTool, err := GoTool()
+ if err != nil {
+ return
+ }
+ cmd := exec.Command(goTool, "env", "CGO_ENABLED")
+ cmd.Env = origEnv
+ out, err := cmd.Output()
+ if err != nil {
+ panic(fmt.Sprintf("%v: %v", cmd, out))
+ }
+ hasCgo, err = strconv.ParseBool(string(bytes.TrimSpace(out)))
+ if err != nil {
+ panic(fmt.Sprintf("%v: non-boolean output %q", cmd, out))
+ }
+ })
+ return hasCgo
+}
+
+var (
+ hasCgoOnce sync.Once
+ hasCgo bool
+)
+
+// MustHaveCGO calls t.Skip if cgo is not available.
+func MustHaveCGO(t testing.TB) {
+ if !HasCGO() {
+ t.Skipf("skipping test: no cgo")
+ }
+}
+
+// CanInternalLink reports whether the current system can link programs with
+// internal linking.
+func CanInternalLink(withCgo bool) bool {
+ return !platform.MustLinkExternal(runtime.GOOS, runtime.GOARCH, withCgo)
+}
+
+// MustInternalLink checks that the current system can link programs with internal
+// linking.
+// If not, MustInternalLink calls t.Skip with an explanation.
+func MustInternalLink(t testing.TB, withCgo bool) {
+ if !CanInternalLink(withCgo) {
+ if withCgo && CanInternalLink(false) {
+ t.Skipf("skipping test: internal linking on %s/%s is not supported with cgo", runtime.GOOS, runtime.GOARCH)
+ }
+ t.Skipf("skipping test: internal linking on %s/%s is not supported", runtime.GOOS, runtime.GOARCH)
+ }
+}
+
+// MustHaveBuildMode reports whether the current system can build programs in
+// the given build mode.
+// If not, MustHaveBuildMode calls t.Skip with an explanation.
+func MustHaveBuildMode(t testing.TB, buildmode string) {
+ if !platform.BuildModeSupported(runtime.Compiler, buildmode, runtime.GOOS, runtime.GOARCH) {
+ t.Skipf("skipping test: build mode %s on %s/%s is not supported by the %s compiler", buildmode, runtime.GOOS, runtime.GOARCH, runtime.Compiler)
+ }
+}
+
+// HasSymlink reports whether the current system can use os.Symlink.
+func HasSymlink() bool {
+ ok, _ := hasSymlink()
+ return ok
+}
+
+// MustHaveSymlink checks that the current system can use os.Symlink.
+// If not, MustHaveSymlink calls t.Skip with an explanation.
+func MustHaveSymlink(t testing.TB) {
+ ok, reason := hasSymlink()
+ if !ok {
+ t.Skipf("skipping test: cannot make symlinks on %s/%s: %s", runtime.GOOS, runtime.GOARCH, reason)
+ }
+}
+
+// HasLink reports whether the current system can use os.Link.
+func HasLink() bool {
+ // From Android release M (Marshmallow), hard linking files is blocked
+ // and an attempt to call link() on a file will return EACCES.
+ // - https://code.google.com/p/android-developer-preview/issues/detail?id=3150
+ return runtime.GOOS != "plan9" && runtime.GOOS != "android"
+}
+
+// MustHaveLink checks that the current system can use os.Link.
+// If not, MustHaveLink calls t.Skip with an explanation.
+func MustHaveLink(t testing.TB) {
+ if !HasLink() {
+ t.Skipf("skipping test: hardlinks are not supported on %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+}
+
+var flaky = flag.Bool("flaky", false, "run known-flaky tests too")
+
+func SkipFlaky(t testing.TB, issue int) {
+ t.Helper()
+ if !*flaky {
+ t.Skipf("skipping known flaky test without the -flaky flag; see golang.org/issue/%d", issue)
+ }
+}
+
+func SkipFlakyNet(t testing.TB) {
+ t.Helper()
+ if v, _ := strconv.ParseBool(os.Getenv("GO_BUILDER_FLAKY_NET")); v {
+ t.Skip("skipping test on builder known to have frequent network failures")
+ }
+}
+
+// CPUIsSlow reports whether the CPU running the test is suspected to be slow.
+func CPUIsSlow() bool {
+ switch runtime.GOARCH {
+ case "arm", "mips", "mipsle", "mips64", "mips64le", "wasm":
+ return true
+ }
+ return false
+}
+
+// SkipIfShortAndSlow skips t if -short is set and the CPU running the test is
+// suspected to be slow.
+//
+// (This is useful for CPU-intensive tests that otherwise complete quickly.)
+func SkipIfShortAndSlow(t testing.TB) {
+ if testing.Short() && CPUIsSlow() {
+ t.Helper()
+ t.Skipf("skipping test in -short mode on %s", runtime.GOARCH)
+ }
+}
+
+// SkipIfOptimizationOff skips t if optimization is disabled.
+func SkipIfOptimizationOff(t testing.TB) {
+ if OptimizationOff() {
+ t.Helper()
+ t.Skip("skipping test with optimization disabled")
+ }
+}
+
+// WriteImportcfg writes an importcfg file used by the compiler or linker to
+// dstPath containing entries for the file mappings in packageFiles, as well
+// as for the packages transitively imported by the package(s) in pkgs.
+//
+// pkgs may include any package pattern that is valid to pass to 'go list',
+// so it may also be a list of Go source files all in the same directory.
+func WriteImportcfg(t testing.TB, dstPath string, packageFiles map[string]string, pkgs ...string) {
+ t.Helper()
+
+ icfg := new(bytes.Buffer)
+ icfg.WriteString("# import config\n")
+ for k, v := range packageFiles {
+ fmt.Fprintf(icfg, "packagefile %s=%s\n", k, v)
+ }
+
+ if len(pkgs) > 0 {
+ // Use 'go list' to resolve any missing packages and rewrite the import map.
+ cmd := Command(t, GoToolPath(t), "list", "-export", "-deps", "-f", `{{if ne .ImportPath "command-line-arguments"}}{{if .Export}}{{.ImportPath}}={{.Export}}{{end}}{{end}}`)
+ cmd.Args = append(cmd.Args, pkgs...)
+ cmd.Stderr = new(strings.Builder)
+ out, err := cmd.Output()
+ if err != nil {
+ t.Fatalf("%v: %v\n%s", cmd, err, cmd.Stderr)
+ }
+
+ for _, line := range strings.Split(string(out), "\n") {
+ if line == "" {
+ continue
+ }
+ importPath, export, ok := strings.Cut(line, "=")
+ if !ok {
+ t.Fatalf("invalid line in output from %v:\n%s", cmd, line)
+ }
+ if packageFiles[importPath] == "" {
+ fmt.Fprintf(icfg, "packagefile %s=%s\n", importPath, export)
+ }
+ }
+ }
+
+ if err := os.WriteFile(dstPath, icfg.Bytes(), 0666); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// SyscallIsNotSupported reports whether err may indicate that a system call is
+// not supported by the current platform or execution environment.
+func SyscallIsNotSupported(err error) bool {
+ return syscallIsNotSupported(err)
+}
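
A hedged sketch of the common pattern for tests that invoke the go command (illustrative only; assumes imports of internal/testenv, os, and testing):

	func TestGoVet(t *testing.T) {
		testenv.MustHaveGoBuild(t) // GoToolPath below would also skip, but the intent is clearer here

		cmd := testenv.Command(t, testenv.GoToolPath(t), "vet", ".")
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("%v: %v\n%s", cmd, err, out)
		}
	}
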
diff --git a/src/internal/testenv/testenv_notunix.go b/src/internal/testenv/testenv_notunix.go
new file mode 100644
index 0000000..a7df5f5
--- /dev/null
+++ b/src/internal/testenv/testenv_notunix.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows || plan9 || (js && wasm) || wasip1
+
+package testenv
+
+import (
+ "errors"
+ "io/fs"
+ "os"
+)
+
+// Sigquit is the signal to send to kill a hanging subprocess.
+// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
+var Sigquit = os.Kill
+
+func syscallIsNotSupported(err error) bool {
+ return errors.Is(err, fs.ErrPermission) || errors.Is(err, errors.ErrUnsupported)
+}
diff --git a/src/internal/testenv/testenv_notwin.go b/src/internal/testenv/testenv_notwin.go
new file mode 100644
index 0000000..30e159a
--- /dev/null
+++ b/src/internal/testenv/testenv_notwin.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package testenv
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+func hasSymlink() (ok bool, reason string) {
+ switch runtime.GOOS {
+ case "plan9":
+ return false, ""
+ case "android", "wasip1":
+ // For wasip1, some runtimes forbid absolute symlinks,
+ // or symlinks that escape the current working directory.
+ // Perform a simple test to see whether the runtime
+ // supports symlinks or not. If we get a permission
+ // error, the runtime does not support symlinks.
+ dir, err := os.MkdirTemp("", "")
+ if err != nil {
+ return false, ""
+ }
+ defer func() {
+ _ = os.RemoveAll(dir)
+ }()
+ fpath := filepath.Join(dir, "testfile.txt")
+ if err := os.WriteFile(fpath, nil, 0644); err != nil {
+ return false, ""
+ }
+ if err := os.Symlink(fpath, filepath.Join(dir, "testlink")); err != nil {
+ if SyscallIsNotSupported(err) {
+ return false, fmt.Sprintf("symlinks unsupported: %s", err.Error())
+ }
+ return false, ""
+ }
+ }
+
+ return true, ""
+}
diff --git a/src/internal/testenv/testenv_test.go b/src/internal/testenv/testenv_test.go
new file mode 100644
index 0000000..e4ef3bc
--- /dev/null
+++ b/src/internal/testenv/testenv_test.go
@@ -0,0 +1,185 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testenv_test
+
+import (
+ "internal/platform"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestGoToolLocation(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ var exeSuffix string
+ if runtime.GOOS == "windows" {
+ exeSuffix = ".exe"
+ }
+
+ // Tests are defined to run within their package source directory,
+ // and this package's source directory is $GOROOT/src/internal/testenv.
+ // The 'go' command is installed at $GOROOT/bin/go, so if the environment
+ // is correct then testenv.GoTool() should be identical to ../../../bin/go.
+
+ relWant := "../../../bin/go" + exeSuffix
+ absWant, err := filepath.Abs(relWant)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wantInfo, err := os.Stat(absWant)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("found go tool at %q (%q)", relWant, absWant)
+
+ goTool, err := testenv.GoTool()
+ if err != nil {
+ t.Fatalf("testenv.GoTool(): %v", err)
+ }
+ t.Logf("testenv.GoTool() = %q", goTool)
+
+ gotInfo, err := os.Stat(goTool)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !os.SameFile(wantInfo, gotInfo) {
+ t.Fatalf("%q is not the same file as %q", absWant, goTool)
+ }
+}
+
+func TestHasGoBuild(t *testing.T) {
+ if !testenv.HasGoBuild() {
+ switch runtime.GOOS {
+ case "js", "wasip1":
+ // No exec syscall, so these shouldn't be able to 'go build'.
+ t.Logf("HasGoBuild is false on %s", runtime.GOOS)
+ return
+ }
+
+ b := testenv.Builder()
+ if b == "" {
+ // We shouldn't make assumptions about what kind of sandbox or build
+ // environment external Go users may be running in.
+ t.Skipf("skipping: 'go build' unavailable")
+ }
+
+ // Since we control the Go builders, we know which ones ought
+ // to be able to run 'go build'. Check that they can.
+ //
+ // (Note that we don't verify that any builders *can't* run 'go build'.
+ // If a builder starts running 'go build' tests when it shouldn't,
+ // we will presumably find out about it when those tests fail.)
+ switch runtime.GOOS {
+ case "ios":
+ if isCorelliumBuilder(b) {
+ // The corellium environment is self-hosting, so it should be able
+ // to build even though real "ios" devices can't exec.
+ } else {
+ // The usual iOS sandbox does not allow the app to start another
+ // process. If we add builders on stock iOS devices, they presumably
+ // will not be able to exec, so we may as well allow that now.
+ t.Logf("HasGoBuild is false on %s", b)
+ return
+ }
+ case "android":
+ if isEmulatedBuilder(b) && platform.MustLinkExternal(runtime.GOOS, runtime.GOARCH, false) {
+ // As of 2023-05-02, the test environment on the emulated builders is
+ // missing a C linker.
+ t.Logf("HasGoBuild is false on %s", b)
+ return
+ }
+ }
+
+ if strings.Contains(b, "-noopt") {
+ // The -noopt builder sets GO_GCFLAGS, which causes tests of 'go build' to
+ // be skipped.
+ t.Logf("HasGoBuild is false on %s", b)
+ return
+ }
+
+ t.Fatalf("HasGoBuild unexpectedly false on %s", b)
+ }
+
+ t.Logf("HasGoBuild is true; checking consistency with other functions")
+
+ hasExec := false
+ hasExecGo := false
+ t.Run("MustHaveExec", func(t *testing.T) {
+ testenv.MustHaveExec(t)
+ hasExec = true
+ })
+ t.Run("MustHaveExecPath", func(t *testing.T) {
+ testenv.MustHaveExecPath(t, "go")
+ hasExecGo = true
+ })
+ if !hasExec {
+ t.Errorf(`MustHaveExec(t) skipped unexpectedly`)
+ }
+ if !hasExecGo {
+ t.Errorf(`MustHaveExecPath(t, "go") skipped unexpectedly`)
+ }
+
+ dir := t.TempDir()
+ mainGo := filepath.Join(dir, "main.go")
+ if err := os.WriteFile(mainGo, []byte("package main\nfunc main() {}\n"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ cmd := testenv.Command(t, "go", "build", "-o", os.DevNull, mainGo)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("%v: %v\n%s", cmd, err, out)
+ }
+}
+
+func TestMustHaveExec(t *testing.T) {
+ hasExec := false
+ t.Run("MustHaveExec", func(t *testing.T) {
+ testenv.MustHaveExec(t)
+ t.Logf("MustHaveExec did not skip")
+ hasExec = true
+ })
+
+ switch runtime.GOOS {
+ case "js", "wasip1":
+ if hasExec {
+ // js and wasip1 lack an “exec” syscall.
+ t.Errorf("expected MustHaveExec to skip on %v", runtime.GOOS)
+ }
+ case "ios":
+ if b := testenv.Builder(); isCorelliumBuilder(b) && !hasExec {
+ // Most ios environments can't exec, but the corellium builder can.
+ t.Errorf("expected MustHaveExec not to skip on %v", b)
+ }
+ default:
+ if b := testenv.Builder(); b != "" && !hasExec {
+ t.Errorf("expected MustHaveExec not to skip on %v", b)
+ }
+ }
+}
+
+func isCorelliumBuilder(builderName string) bool {
+ // Support both the old infra's builder names and the LUCI builder names.
+ // The former's names are ad-hoc so we could maintain this invariant on
+ // the builder side. The latter's names are structured, and "corellium" will
+// appear as a "host" suffix after the GOOS and GOARCH, which always begins
+ // with an underscore.
+ return strings.HasSuffix(builderName, "-corellium") || strings.Contains(builderName, "_corellium")
+}
+
+func isEmulatedBuilder(builderName string) bool {
+ // Support both the old infra's builder names and the LUCI builder names.
+ // The former's names are ad-hoc so we could maintain this invariant on
+ // the builder side. The latter's names are structured, and the signifier
+ // of emulation "emu" will appear as a "host" suffix after the GOOS and
+ // GOARCH because it modifies the run environment in such a way that it
+ // the target GOOS and GOARCH may not match the host. This suffix always
+ // begins with an underscore.
+ return strings.HasSuffix(builderName, "-emu") || strings.Contains(builderName, "_emu")
+}
\ No newline at end of file
diff --git a/src/internal/testenv/testenv_unix.go b/src/internal/testenv/testenv_unix.go
new file mode 100644
index 0000000..a629078
--- /dev/null
+++ b/src/internal/testenv/testenv_unix.go
@@ -0,0 +1,43 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package testenv
+
+import (
+ "errors"
+ "io/fs"
+ "syscall"
+)
+
+// Sigquit is the signal to send to kill a hanging subprocess.
+// Send SIGQUIT to get a stack trace.
+var Sigquit = syscall.SIGQUIT
+
+func syscallIsNotSupported(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ var errno syscall.Errno
+ if errors.As(err, &errno) {
+ switch errno {
+ case syscall.EPERM, syscall.EROFS:
+ // User lacks permission: either the call requires root permission and the
+ // user is not root, or the call is denied by a container security policy.
+ return true
+ case syscall.EINVAL:
+ // Some containers return EINVAL instead of EPERM if a system call is
+ // denied by security policy.
+ return true
+ }
+ }
+
+ if errors.Is(err, fs.ErrPermission) || errors.Is(err, errors.ErrUnsupported) {
+ return true
+ }
+
+ return false
+}
diff --git a/src/internal/testenv/testenv_windows.go b/src/internal/testenv/testenv_windows.go
new file mode 100644
index 0000000..4802b13
--- /dev/null
+++ b/src/internal/testenv/testenv_windows.go
@@ -0,0 +1,47 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testenv
+
+import (
+ "os"
+ "path/filepath"
+ "sync"
+ "syscall"
+)
+
+var symlinkOnce sync.Once
+var winSymlinkErr error
+
+func initWinHasSymlink() {
+ tmpdir, err := os.MkdirTemp("", "symtest")
+ if err != nil {
+ panic("failed to create temp directory: " + err.Error())
+ }
+ defer os.RemoveAll(tmpdir)
+
+ err = os.Symlink("target", filepath.Join(tmpdir, "symlink"))
+ if err != nil {
+ err = err.(*os.LinkError).Err
+ switch err {
+ case syscall.EWINDOWS, syscall.ERROR_PRIVILEGE_NOT_HELD:
+ winSymlinkErr = err
+ }
+ }
+}
+
+func hasSymlink() (ok bool, reason string) {
+ symlinkOnce.Do(initWinHasSymlink)
+
+ switch winSymlinkErr {
+ case nil:
+ return true, ""
+ case syscall.EWINDOWS:
+ return false, "symlinks are not supported on your version of Windows"
+ case syscall.ERROR_PRIVILEGE_NOT_HELD:
+ return false, "you don't have enough privileges to create symlinks"
+ }
+
+ return false, ""
+}
diff --git a/src/internal/testlog/exit.go b/src/internal/testlog/exit.go
new file mode 100644
index 0000000..e15defd
--- /dev/null
+++ b/src/internal/testlog/exit.go
@@ -0,0 +1,33 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testlog
+
+import "sync"
+
+// PanicOnExit0 reports whether to panic on a call to os.Exit(0).
+// This is in the testlog package because, like other definitions in
+// package testlog, it is a hook between the testing package and the
+// os package. This is used to ensure that an early call to os.Exit(0)
+// does not cause a test to pass.
+func PanicOnExit0() bool {
+ panicOnExit0.mu.Lock()
+ defer panicOnExit0.mu.Unlock()
+ return panicOnExit0.val
+}
+
+// panicOnExit0 is the flag used for PanicOnExit0. This uses a lock
+// because the value can be cleared via a timer call that may race
+// with calls to os.Exit.
+var panicOnExit0 struct {
+ mu sync.Mutex
+ val bool
+}
+
+// SetPanicOnExit0 sets panicOnExit0 to v.
+func SetPanicOnExit0(v bool) {
+ panicOnExit0.mu.Lock()
+ defer panicOnExit0.mu.Unlock()
+ panicOnExit0.val = v
+}
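
Roughly how package os is expected to consult this hook (a simplified sketch under that assumption, not the actual os.Exit source):

	func Exit(code int) {
		if code == 0 && testlog.PanicOnExit0() {
			// Exiting early with status 0 during a test would otherwise let
			// the test appear to pass without running its remaining checks.
			panic("unexpected call to os.Exit(0) during test")
		}
		syscall.Exit(code)
	}
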
diff --git a/src/internal/testlog/log.go b/src/internal/testlog/log.go
new file mode 100644
index 0000000..3c5f780
--- /dev/null
+++ b/src/internal/testlog/log.go
@@ -0,0 +1,69 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testlog provides a back-channel communication path
+// between tests and package os, so that cmd/go can see which
+// environment variables and files a test consults.
+package testlog
+
+import "sync/atomic"
+
+// Interface is the interface required of test loggers.
+// The os package will invoke the interface's methods to indicate that
+// it is inspecting the given environment variables or files.
+// Multiple goroutines may call these methods simultaneously.
+type Interface interface {
+ Getenv(key string)
+ Stat(file string)
+ Open(file string)
+ Chdir(dir string)
+}
+
+// logger is the current logger Interface.
+// We use an atomic.Value in case test startup
+// is racing with goroutines started during init.
+// That must not cause a race detector failure,
+// although it will still result in limited visibility
+// into exactly what those goroutines do.
+var logger atomic.Value
+
+// SetLogger sets the test logger implementation for the current process.
+// It must be called only once, at process startup.
+func SetLogger(impl Interface) {
+ if logger.Load() != nil {
+ panic("testlog: SetLogger must be called only once")
+ }
+ logger.Store(&impl)
+}
+
+// Logger returns the current test logger implementation.
+// It returns nil if there is no logger.
+func Logger() Interface {
+ impl := logger.Load()
+ if impl == nil {
+ return nil
+ }
+ return *impl.(*Interface)
+}
+
+// Getenv calls Logger().Getenv, if a logger has been set.
+func Getenv(name string) {
+ if log := Logger(); log != nil {
+ log.Getenv(name)
+ }
+}
+
+// Open calls Logger().Open, if a logger has been set.
+func Open(name string) {
+ if log := Logger(); log != nil {
+ log.Open(name)
+ }
+}
+
+// Stat calls Logger().Stat, if a logger has been set.
+func Stat(name string) {
+ if log := Logger(); log != nil {
+ log.Stat(name)
+ }
+}
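
A hedged sketch of implementing and registering the interface (the type and output are illustrative; the real logger lives in the testing package):

	type printLogger struct{}

	func (printLogger) Getenv(key string) { println("getenv:", key) }
	func (printLogger) Stat(file string)  { println("stat:", file) }
	func (printLogger) Open(file string)  { println("open:", file) }
	func (printLogger) Chdir(dir string)  { println("chdir:", dir) }

	func init() {
		// SetLogger must run exactly once, before goroutines begin touching
		// the environment or the file system.
		testlog.SetLogger(printLogger{})
	}
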
diff --git a/src/internal/testpty/pty.go b/src/internal/testpty/pty.go
new file mode 100644
index 0000000..f0b2a33
--- /dev/null
+++ b/src/internal/testpty/pty.go
@@ -0,0 +1,38 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testpty is a simple pseudo-terminal package for Unix systems,
+// implemented by calling C functions via cgo.
+package testpty
+
+import (
+ "errors"
+ "fmt"
+ "os"
+)
+
+type PtyError struct {
+ FuncName string
+ ErrorString string
+ Errno error
+}
+
+func ptyError(name string, err error) *PtyError {
+ return &PtyError{name, err.Error(), err}
+}
+
+func (e *PtyError) Error() string {
+ return fmt.Sprintf("%s: %s", e.FuncName, e.ErrorString)
+}
+
+func (e *PtyError) Unwrap() error { return e.Errno }
+
+var ErrNotSupported = errors.New("testpty.Open not implemented on this platform")
+
+// Open returns a control pty and the name of the linked process tty.
+//
+// If Open is not implemented on this platform, it returns ErrNotSupported.
+func Open() (pty *os.File, processTTY string, err error) {
+ return open()
+}
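
A hedged usage sketch from inside a test (the helper is illustrative; on unsupported platforms Open reports ErrNotSupported):

	func openPTY(t *testing.T) (*os.File, string) {
		pty, processTTY, err := testpty.Open()
		if errors.Is(err, testpty.ErrNotSupported) {
			t.Skip(err)
		}
		if err != nil {
			t.Fatal(err)
		}
		t.Cleanup(func() { pty.Close() })
		return pty, processTTY
	}
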
diff --git a/src/internal/testpty/pty_cgo.go b/src/internal/testpty/pty_cgo.go
new file mode 100644
index 0000000..442fbcf
--- /dev/null
+++ b/src/internal/testpty/pty_cgo.go
@@ -0,0 +1,34 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo && (aix || dragonfly || freebsd || (linux && !android) || netbsd || openbsd)
+
+package testpty
+
+/*
+#define _XOPEN_SOURCE 600
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+*/
+import "C"
+
+import "os"
+
+func open() (pty *os.File, processTTY string, err error) {
+ m, err := C.posix_openpt(C.O_RDWR)
+ if m < 0 {
+ return nil, "", ptyError("posix_openpt", err)
+ }
+ if res, err := C.grantpt(m); res < 0 {
+ C.close(m)
+ return nil, "", ptyError("grantpt", err)
+ }
+ if res, err := C.unlockpt(m); res < 0 {
+ C.close(m)
+ return nil, "", ptyError("unlockpt", err)
+ }
+ processTTY = C.GoString(C.ptsname(m))
+ return os.NewFile(uintptr(m), "pty"), processTTY, nil
+}
diff --git a/src/internal/testpty/pty_darwin.go b/src/internal/testpty/pty_darwin.go
new file mode 100644
index 0000000..f29517c
--- /dev/null
+++ b/src/internal/testpty/pty_darwin.go
@@ -0,0 +1,32 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testpty
+
+import (
+ "internal/syscall/unix"
+ "os"
+ "syscall"
+)
+
+func open() (pty *os.File, processTTY string, err error) {
+ m, err := unix.PosixOpenpt(syscall.O_RDWR)
+ if err != nil {
+ return nil, "", ptyError("posix_openpt", err)
+ }
+ if err := unix.Grantpt(m); err != nil {
+ syscall.Close(m)
+ return nil, "", ptyError("grantpt", err)
+ }
+ if err := unix.Unlockpt(m); err != nil {
+ syscall.Close(m)
+ return nil, "", ptyError("unlockpt", err)
+ }
+ processTTY, err = unix.Ptsname(m)
+ if err != nil {
+ syscall.Close(m)
+ return nil, "", ptyError("ptsname", err)
+ }
+ return os.NewFile(uintptr(m), "pty"), processTTY, nil
+}
diff --git a/src/internal/testpty/pty_none.go b/src/internal/testpty/pty_none.go
new file mode 100644
index 0000000..4f9e2b7
--- /dev/null
+++ b/src/internal/testpty/pty_none.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(cgo && (aix || dragonfly || freebsd || (linux && !android) || netbsd || openbsd)) && !darwin
+
+package testpty
+
+import "os"
+
+func open() (pty *os.File, processTTY string, err error) {
+ return nil, "", ErrNotSupported
+}
diff --git a/src/internal/trace/gc.go b/src/internal/trace/gc.go
new file mode 100644
index 0000000..3bd284e
--- /dev/null
+++ b/src/internal/trace/gc.go
@@ -0,0 +1,826 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "container/heap"
+ "math"
+ "sort"
+ "strings"
+ "time"
+)
+
+// MutatorUtil is a change in mutator utilization at a particular
+// time. Mutator utilization functions are represented as a
+// time-ordered []MutatorUtil.
+type MutatorUtil struct {
+ Time int64
+ // Util is the mean mutator utilization starting at Time. This
+ // is in the range [0, 1].
+ Util float64
+}
+
+// UtilFlags controls the behavior of MutatorUtilization.
+type UtilFlags int
+
+const (
+ // UtilSTW means utilization should account for STW events.
+ // This includes non-GC STW events, which are typically user-requested.
+ UtilSTW UtilFlags = 1 << iota
+ // UtilBackground means utilization should account for
+ // background mark workers.
+ UtilBackground
+ // UtilAssist means utilization should account for mark
+ // assists.
+ UtilAssist
+ // UtilSweep means utilization should account for sweeping.
+ UtilSweep
+
+ // UtilPerProc means each P should be given a separate
+ // utilization function. Otherwise, there is a single function
+ // and each P is given a fraction of the utilization.
+ UtilPerProc
+)
+
+// MutatorUtilization returns a set of mutator utilization functions
+// for the given trace. Each function will always end with 0
+// utilization. The bounds of each function are implicit in the first
+// and last event; outside of these bounds each function is undefined.
+//
+// If the UtilPerProc flag is not given, this always returns a single
+// utilization function. Otherwise, it returns one function per P.
+func MutatorUtilization(events []*Event, flags UtilFlags) [][]MutatorUtil {
+ if len(events) == 0 {
+ return nil
+ }
+
+ type perP struct {
+ // gc > 0 indicates that GC is active on this P.
+ gc int
+ // series is the logical series number for this P. This
+ // is necessary because Ps may be removed and then
+ // re-added, and then the new P needs a new series.
+ series int
+ }
+ ps := []perP{}
+ stw := 0
+
+ out := [][]MutatorUtil{}
+ assists := map[uint64]bool{}
+ block := map[uint64]*Event{}
+ bgMark := map[uint64]bool{}
+
+ for _, ev := range events {
+ switch ev.Type {
+ case EvGomaxprocs:
+ gomaxprocs := int(ev.Args[0])
+ if len(ps) > gomaxprocs {
+ if flags&UtilPerProc != 0 {
+ // End each P's series.
+ for _, p := range ps[gomaxprocs:] {
+ out[p.series] = addUtil(out[p.series], MutatorUtil{ev.Ts, 0})
+ }
+ }
+ ps = ps[:gomaxprocs]
+ }
+ for len(ps) < gomaxprocs {
+ // Start new P's series.
+ series := 0
+ if flags&UtilPerProc != 0 || len(out) == 0 {
+ series = len(out)
+ out = append(out, []MutatorUtil{{ev.Ts, 1}})
+ }
+ ps = append(ps, perP{series: series})
+ }
+ case EvSTWStart:
+ if flags&UtilSTW != 0 {
+ stw++
+ }
+ case EvSTWDone:
+ if flags&UtilSTW != 0 {
+ stw--
+ }
+ case EvGCMarkAssistStart:
+ if flags&UtilAssist != 0 {
+ ps[ev.P].gc++
+ assists[ev.G] = true
+ }
+ case EvGCMarkAssistDone:
+ if flags&UtilAssist != 0 {
+ ps[ev.P].gc--
+ delete(assists, ev.G)
+ }
+ case EvGCSweepStart:
+ if flags&UtilSweep != 0 {
+ ps[ev.P].gc++
+ }
+ case EvGCSweepDone:
+ if flags&UtilSweep != 0 {
+ ps[ev.P].gc--
+ }
+ case EvGoStartLabel:
+ if flags&UtilBackground != 0 && strings.HasPrefix(ev.SArgs[0], "GC ") && ev.SArgs[0] != "GC (idle)" {
+ // Background mark worker.
+ //
+ // If we're in per-proc mode, we don't
+ // count dedicated workers because
+ // they kick all of the goroutines off
+ // that P, so they don't directly
+ // contribute to goroutine latency.
+ if !(flags&UtilPerProc != 0 && ev.SArgs[0] == "GC (dedicated)") {
+ bgMark[ev.G] = true
+ ps[ev.P].gc++
+ }
+ }
+ fallthrough
+ case EvGoStart:
+ if assists[ev.G] {
+ // Unblocked during assist.
+ ps[ev.P].gc++
+ }
+ block[ev.G] = ev.Link
+ default:
+ if ev != block[ev.G] {
+ continue
+ }
+
+ if assists[ev.G] {
+ // Blocked during assist.
+ ps[ev.P].gc--
+ }
+ if bgMark[ev.G] {
+ // Background mark worker done.
+ ps[ev.P].gc--
+ delete(bgMark, ev.G)
+ }
+ delete(block, ev.G)
+ }
+
+ if flags&UtilPerProc == 0 {
+ // Compute the current average utilization.
+ if len(ps) == 0 {
+ continue
+ }
+ gcPs := 0
+ if stw > 0 {
+ gcPs = len(ps)
+ } else {
+ for i := range ps {
+ if ps[i].gc > 0 {
+ gcPs++
+ }
+ }
+ }
+ mu := MutatorUtil{ev.Ts, 1 - float64(gcPs)/float64(len(ps))}
+
+ // Record the utilization change. (Since
+ // len(ps) == len(out), we know len(out) > 0.)
+ out[0] = addUtil(out[0], mu)
+ } else {
+ // Check for per-P utilization changes.
+ for i := range ps {
+ p := &ps[i]
+ util := 1.0
+ if stw > 0 || p.gc > 0 {
+ util = 0.0
+ }
+ out[p.series] = addUtil(out[p.series], MutatorUtil{ev.Ts, util})
+ }
+ }
+ }
+
+ // Add final 0 utilization event to any remaining series. This
+ // is important to mark the end of the trace. The exact value
+ // shouldn't matter since no window should extend beyond this,
+ // but using 0 is symmetric with the start of the trace.
+ mu := MutatorUtil{events[len(events)-1].Ts, 0}
+ for i := range ps {
+ out[ps[i].series] = addUtil(out[ps[i].series], mu)
+ }
+ return out
+}
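As a rough usage sketch (assuming a ParseResult res obtained from this package's Parse, defined in parser.go later in this patch, and written as it would appear inside this package or its tests), the flags simply OR together:

	// One merged utilization series accounting for STW, background
	// mark workers, assists, and sweeping:
	mu := MutatorUtilization(res.Events, UtilSTW|UtilBackground|UtilAssist|UtilSweep)
	// One series per P instead of a single averaged series:
	perProc := MutatorUtilization(res.Events, UtilSTW|UtilPerProc)
	_, _ = mu, perProc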
+
+func addUtil(util []MutatorUtil, mu MutatorUtil) []MutatorUtil {
+ if len(util) > 0 {
+ if mu.Util == util[len(util)-1].Util {
+ // No change.
+ return util
+ }
+ if mu.Time == util[len(util)-1].Time {
+ // Take the lowest utilization at a time stamp.
+ if mu.Util < util[len(util)-1].Util {
+ util[len(util)-1] = mu
+ }
+ return util
+ }
+ }
+ return append(util, mu)
+}
+
+// totalUtil is total utilization, measured in nanoseconds. This is a
+// separate type primarily to distinguish it from mean utilization,
+// which is also a float64.
+type totalUtil float64
+
+func totalUtilOf(meanUtil float64, dur int64) totalUtil {
+ return totalUtil(meanUtil * float64(dur))
+}
+
+// mean returns the mean utilization over dur.
+func (u totalUtil) mean(dur time.Duration) float64 {
+ return float64(u) / float64(dur)
+}
+
+// An MMUCurve is the minimum mutator utilization curve across
+// multiple window sizes.
+type MMUCurve struct {
+ series []mmuSeries
+}
+
+type mmuSeries struct {
+ util []MutatorUtil
+ // sums[j] is the cumulative sum of util[:j].
+ sums []totalUtil
+ // bands summarizes util in non-overlapping bands of duration
+ // bandDur.
+ bands []mmuBand
+ // bandDur is the duration of each band.
+ bandDur int64
+}
+
+type mmuBand struct {
+ // minUtil is the minimum instantaneous mutator utilization in
+ // this band.
+ minUtil float64
+ // cumUtil is the cumulative total mutator utilization between
+ // time 0 and the left edge of this band.
+ cumUtil totalUtil
+
+ // integrator is the integrator for the left edge of this
+ // band.
+ integrator integrator
+}
+
+// NewMMUCurve returns an MMU curve for the given mutator utilization
+// function.
+func NewMMUCurve(utils [][]MutatorUtil) *MMUCurve {
+ series := make([]mmuSeries, len(utils))
+ for i, util := range utils {
+ series[i] = newMMUSeries(util)
+ }
+ return &MMUCurve{series}
+}
+
+// bandsPerSeries is the number of bands to divide each series into.
+// This is only changed by tests.
+var bandsPerSeries = 1000
+
+func newMMUSeries(util []MutatorUtil) mmuSeries {
+ // Compute cumulative sum.
+ sums := make([]totalUtil, len(util))
+ var prev MutatorUtil
+ var sum totalUtil
+ for j, u := range util {
+ sum += totalUtilOf(prev.Util, u.Time-prev.Time)
+ sums[j] = sum
+ prev = u
+ }
+
+ // Divide the utilization curve up into equal size
+ // non-overlapping "bands" and compute a summary for each of
+ // these bands.
+ //
+ // Compute the duration of each band.
+ numBands := bandsPerSeries
+ if numBands > len(util) {
+ // There's no point in having lots of bands if there
+ // aren't many events.
+ numBands = len(util)
+ }
+ dur := util[len(util)-1].Time - util[0].Time
+ bandDur := (dur + int64(numBands) - 1) / int64(numBands)
+ if bandDur < 1 {
+ bandDur = 1
+ }
+ // Compute the bands. There are numBands+1 bands in order to
+ // record the final cumulative sum.
+ bands := make([]mmuBand, numBands+1)
+ s := mmuSeries{util, sums, bands, bandDur}
+ leftSum := integrator{&s, 0}
+ for i := range bands {
+ startTime, endTime := s.bandTime(i)
+ cumUtil := leftSum.advance(startTime)
+ predIdx := leftSum.pos
+ minUtil := 1.0
+ for i := predIdx; i < len(util) && util[i].Time < endTime; i++ {
+ minUtil = math.Min(minUtil, util[i].Util)
+ }
+ bands[i] = mmuBand{minUtil, cumUtil, leftSum}
+ }
+
+ return s
+}
+
+func (s *mmuSeries) bandTime(i int) (start, end int64) {
+ start = int64(i)*s.bandDur + s.util[0].Time
+ end = start + s.bandDur
+ return
+}
+
+type bandUtil struct {
+ // Utilization series index
+ series int
+ // Band index
+ i int
+ // Lower bound of mutator utilization for all windows
+ // with a left edge in this band.
+ utilBound float64
+}
+
+type bandUtilHeap []bandUtil
+
+func (h bandUtilHeap) Len() int {
+ return len(h)
+}
+
+func (h bandUtilHeap) Less(i, j int) bool {
+ return h[i].utilBound < h[j].utilBound
+}
+
+func (h bandUtilHeap) Swap(i, j int) {
+ h[i], h[j] = h[j], h[i]
+}
+
+func (h *bandUtilHeap) Push(x any) {
+ *h = append(*h, x.(bandUtil))
+}
+
+func (h *bandUtilHeap) Pop() any {
+ x := (*h)[len(*h)-1]
+ *h = (*h)[:len(*h)-1]
+ return x
+}
+
+// UtilWindow is a specific window at Time.
+type UtilWindow struct {
+ Time int64
+ // MutatorUtil is the mean mutator utilization in this window.
+ MutatorUtil float64
+}
+
+type utilHeap []UtilWindow
+
+func (h utilHeap) Len() int {
+ return len(h)
+}
+
+func (h utilHeap) Less(i, j int) bool {
+ if h[i].MutatorUtil != h[j].MutatorUtil {
+ return h[i].MutatorUtil > h[j].MutatorUtil
+ }
+ return h[i].Time > h[j].Time
+}
+
+func (h utilHeap) Swap(i, j int) {
+ h[i], h[j] = h[j], h[i]
+}
+
+func (h *utilHeap) Push(x any) {
+ *h = append(*h, x.(UtilWindow))
+}
+
+func (h *utilHeap) Pop() any {
+ x := (*h)[len(*h)-1]
+ *h = (*h)[:len(*h)-1]
+ return x
+}
+
+// An accumulator takes a windowed mutator utilization function and
+// tracks various statistics for that function.
+type accumulator struct {
+ mmu float64
+
+ // bound is the mutator utilization bound where adding any
+ // mutator utilization above this bound cannot affect the
+ // accumulated statistics.
+ bound float64
+
+ // Worst N window tracking
+ nWorst int
+ wHeap utilHeap
+
+ // Mutator utilization distribution tracking
+ mud *mud
+ // preciseMass is the distribution mass that must be precise
+ // before accumulation is stopped.
+ preciseMass float64
+ // lastTime and lastMU are the previous point added to the
+ // windowed mutator utilization function.
+ lastTime int64
+ lastMU float64
+}
+
+// resetTime declares a discontinuity in the windowed mutator
+// utilization function by resetting the current time.
+func (acc *accumulator) resetTime() {
+ // This only matters for distribution collection, since that's
+ // the only thing that depends on the progression of the
+ // windowed mutator utilization function.
+ acc.lastTime = math.MaxInt64
+}
+
+// addMU adds a point to the windowed mutator utilization function at
+// (time, mu). This must be called for monotonically increasing values
+// of time.
+//
+// It returns true if further calls to addMU would be pointless.
+func (acc *accumulator) addMU(time int64, mu float64, window time.Duration) bool {
+ if mu < acc.mmu {
+ acc.mmu = mu
+ }
+ acc.bound = acc.mmu
+
+ if acc.nWorst == 0 {
+ // If the minimum has reached zero, it can't go any
+ // lower, so we can stop early.
+ return mu == 0
+ }
+
+ // Consider adding this window to the n worst.
+ if len(acc.wHeap) < acc.nWorst || mu < acc.wHeap[0].MutatorUtil {
+ // This window is lower than the N'th worst window.
+ //
+ // Check if there's any overlapping window
+ // already in the heap and keep whichever is
+ // worse.
+ for i, ui := range acc.wHeap {
+ if time+int64(window) > ui.Time && ui.Time+int64(window) > time {
+ if ui.MutatorUtil <= mu {
+ // Keep the first window.
+ goto keep
+ } else {
+ // Replace it with this window.
+ heap.Remove(&acc.wHeap, i)
+ break
+ }
+ }
+ }
+
+ heap.Push(&acc.wHeap, UtilWindow{time, mu})
+ if len(acc.wHeap) > acc.nWorst {
+ heap.Pop(&acc.wHeap)
+ }
+ keep:
+ }
+
+ if len(acc.wHeap) < acc.nWorst {
+ // We don't have N windows yet, so keep accumulating.
+ acc.bound = 1.0
+ } else {
+ // Anything above the least worst window has no effect.
+ acc.bound = math.Max(acc.bound, acc.wHeap[0].MutatorUtil)
+ }
+
+ if acc.mud != nil {
+ if acc.lastTime != math.MaxInt64 {
+ // Update distribution.
+ acc.mud.add(acc.lastMU, mu, float64(time-acc.lastTime))
+ }
+ acc.lastTime, acc.lastMU = time, mu
+ if _, mudBound, ok := acc.mud.approxInvCumulativeSum(); ok {
+ acc.bound = math.Max(acc.bound, mudBound)
+ } else {
+ // We haven't accumulated enough total precise
+ // mass yet to even reach our goal, so keep
+ // accumulating.
+ acc.bound = 1
+ }
+ // It's not worth checking percentiles every time, so
+ // just keep accumulating this band.
+ return false
+ }
+
+ // If we've found enough 0 utilizations, we can stop immediately.
+ return len(acc.wHeap) == acc.nWorst && acc.wHeap[0].MutatorUtil == 0
+}
+
+// MMU returns the minimum mutator utilization for the given time
+// window. This is the minimum utilization for all windows of this
+// duration across the execution. The returned value is in the range
+// [0, 1].
+func (c *MMUCurve) MMU(window time.Duration) (mmu float64) {
+ acc := accumulator{mmu: 1.0, bound: 1.0}
+ c.mmu(window, &acc)
+ return acc.mmu
+}
+
+// Examples returns n specific examples of the lowest mutator
+// utilization for the given window size. The returned windows will be
+// disjoint (otherwise there would be a huge number of
+// mostly-overlapping windows at the single lowest point). There are
+// no guarantees on which set of disjoint windows this returns.
+func (c *MMUCurve) Examples(window time.Duration, n int) (worst []UtilWindow) {
+ acc := accumulator{mmu: 1.0, bound: 1.0, nWorst: n}
+ c.mmu(window, &acc)
+ sort.Sort(sort.Reverse(acc.wHeap))
+ return ([]UtilWindow)(acc.wHeap)
+}
+
+// MUD returns mutator utilization distribution quantiles for the
+// given window size.
+//
+// The mutator utilization distribution is the distribution of mean
+// mutator utilization across all windows of the given window size in
+// the trace.
+//
+// The minimum mutator utilization is the minimum (0th percentile) of
+// this distribution. (However, if only the minimum is desired, it's
+// more efficient to use the MMU method.)
+func (c *MMUCurve) MUD(window time.Duration, quantiles []float64) []float64 {
+ if len(quantiles) == 0 {
+ return []float64{}
+ }
+
+ // Each unrefined band contributes a known total mass to the
+ // distribution (bandDur except at the end), but in an unknown
+ // way. However, we know that all the mass it contributes must
+ // be at or above its worst-case mean mutator utilization.
+ //
+ // Hence, we refine bands until the highest desired
+ // distribution quantile is less than the next worst-case mean
+ // mutator utilization. At this point, all further
+ // contributions to the distribution must be beyond the
+ // desired quantile and hence cannot affect it.
+ //
+ // First, find the highest desired distribution quantile.
+ maxQ := quantiles[0]
+ for _, q := range quantiles {
+ if q > maxQ {
+ maxQ = q
+ }
+ }
+ // The distribution's mass is in units of time (it's not
+ // normalized because this would make it more annoying to
+ // account for future contributions of unrefined bands). The
+ // total final mass will be the duration of the trace itself
+ // minus the window size. Using this, we can compute the mass
+ // corresponding to quantile maxQ.
+ var duration int64
+ for _, s := range c.series {
+ duration1 := s.util[len(s.util)-1].Time - s.util[0].Time
+ if duration1 >= int64(window) {
+ duration += duration1 - int64(window)
+ }
+ }
+ qMass := float64(duration) * maxQ
+
+ // Accumulate the MUD until we have precise information for
+ // everything to the left of qMass.
+ acc := accumulator{mmu: 1.0, bound: 1.0, preciseMass: qMass, mud: new(mud)}
+ acc.mud.setTrackMass(qMass)
+ c.mmu(window, &acc)
+
+ // Evaluate the quantiles on the accumulated MUD.
+ out := make([]float64, len(quantiles))
+ for i := range out {
+ mu, _ := acc.mud.invCumulativeSum(float64(duration) * quantiles[i])
+ if math.IsNaN(mu) {
+ // There are a few legitimate ways this can
+ // happen:
+ //
+ // 1. If the window is the full trace
+ // duration, then the windowed MU function is
+ // only defined at a single point, so the MU
+ // distribution is not well-defined.
+ //
+ // 2. If there are no events, then the MU
+ // distribution has no mass.
+ //
+ // Either way, all of the quantiles will have
+ // converged toward the MMU at this point.
+ mu = acc.mmu
+ }
+ out[i] = mu
+ }
+ return out
+}
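Putting the three exported queries together, a caller such as cmd/trace might use the curve roughly like this (a sketch, reusing the mu series from MutatorUtilization above and assuming fmt and time are imported):

	c := NewMMUCurve(mu)
	window := 10 * time.Millisecond
	fmt.Printf("MMU(%v) = %.3f\n", window, c.MMU(window))
	for _, w := range c.Examples(window, 3) {
		fmt.Printf("  worst window at t=%dns, mean MU %.3f\n", w.Time, w.MutatorUtil)
	}
	q := c.MUD(window, []float64{0, 0.5, 0.99})
	fmt.Printf("  MUD p0=%.3f p50=%.3f p99=%.3f\n", q[0], q[1], q[2])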
+
+func (c *MMUCurve) mmu(window time.Duration, acc *accumulator) {
+ if window <= 0 {
+ acc.mmu = 0
+ return
+ }
+
+ var bandU bandUtilHeap
+ windows := make([]time.Duration, len(c.series))
+ for i, s := range c.series {
+ windows[i] = window
+ if max := time.Duration(s.util[len(s.util)-1].Time - s.util[0].Time); window > max {
+ windows[i] = max
+ }
+
+ bandU1 := bandUtilHeap(s.mkBandUtil(i, windows[i]))
+ if bandU == nil {
+ bandU = bandU1
+ } else {
+ bandU = append(bandU, bandU1...)
+ }
+ }
+
+ // Process bands from lowest utilization bound to highest.
+ heap.Init(&bandU)
+
+ // Refine each band into a precise window and MMU until
+ // refining the next lowest band can no longer affect the MMU
+ // or windows.
+ for len(bandU) > 0 && bandU[0].utilBound < acc.bound {
+ i := bandU[0].series
+ c.series[i].bandMMU(bandU[0].i, windows[i], acc)
+ heap.Pop(&bandU)
+ }
+}
+
+func (c *mmuSeries) mkBandUtil(series int, window time.Duration) []bandUtil {
+ // For each band, compute the worst-possible total mutator
+ // utilization for all windows that start in that band.
+
+ // minBands is the minimum number of bands a window can span
+ // and maxBands is the maximum number of bands a window can
+ // span in any alignment.
+ minBands := int((int64(window) + c.bandDur - 1) / c.bandDur)
+ maxBands := int((int64(window) + 2*(c.bandDur-1)) / c.bandDur)
+ if window > 1 && maxBands < 2 {
+ panic("maxBands < 2")
+ }
+ tailDur := int64(window) % c.bandDur
+ nUtil := len(c.bands) - maxBands + 1
+ if nUtil < 0 {
+ nUtil = 0
+ }
+ bandU := make([]bandUtil, nUtil)
+ for i := range bandU {
+ // To compute the worst-case MU, we assume the minimum
+ // for any bands that are only partially overlapped by
+ // some window and the mean for any bands that are
+ // completely covered by all windows.
+ var util totalUtil
+
+ // Find the lowest and second lowest of the partial
+ // bands.
+ l := c.bands[i].minUtil
+ r1 := c.bands[i+minBands-1].minUtil
+ r2 := c.bands[i+maxBands-1].minUtil
+ minBand := math.Min(l, math.Min(r1, r2))
+ // Assume the worst window maximally overlaps the
+ // worst minimum and then the rest overlaps the second
+ // worst minimum.
+ if minBands == 1 {
+ util += totalUtilOf(minBand, int64(window))
+ } else {
+ util += totalUtilOf(minBand, c.bandDur)
+ midBand := 0.0
+ switch {
+ case minBand == l:
+ midBand = math.Min(r1, r2)
+ case minBand == r1:
+ midBand = math.Min(l, r2)
+ case minBand == r2:
+ midBand = math.Min(l, r1)
+ }
+ util += totalUtilOf(midBand, tailDur)
+ }
+
+ // Add the total mean MU of bands that are completely
+ // overlapped by all windows.
+ if minBands > 2 {
+ util += c.bands[i+minBands-1].cumUtil - c.bands[i+1].cumUtil
+ }
+
+ bandU[i] = bandUtil{series, i, util.mean(window)}
+ }
+
+ return bandU
+}
+
+// bandMMU computes the precise minimum mutator utilization for
+// windows with a left edge in band bandIdx.
+func (c *mmuSeries) bandMMU(bandIdx int, window time.Duration, acc *accumulator) {
+ util := c.util
+
+ // We think of the mutator utilization over time as the
+ // box-filtered utilization function, which we call the
+ // "windowed mutator utilization function". The resulting
+ // function is continuous and piecewise linear (unless
+ // window==0, which we handle elsewhere), where the boundaries
+ // between segments occur when either edge of the window
+ // encounters a change in the instantaneous mutator
+ // utilization function. Hence, the minimum of this function
+ // will always occur when one of the edges of the window
+ // aligns with a utilization change, so these are the only
+ // points we need to consider.
+ //
+ // We compute the mutator utilization function incrementally
+ // by tracking the integral from t=0 to the left edge of the
+ // window and to the right edge of the window.
+ left := c.bands[bandIdx].integrator
+ right := left
+ time, endTime := c.bandTime(bandIdx)
+ if utilEnd := util[len(util)-1].Time - int64(window); utilEnd < endTime {
+ endTime = utilEnd
+ }
+ acc.resetTime()
+ for {
+ // Advance edges to time and time+window.
+ mu := (right.advance(time+int64(window)) - left.advance(time)).mean(window)
+ if acc.addMU(time, mu, window) {
+ break
+ }
+ if time == endTime {
+ break
+ }
+
+ // The maximum slope of the windowed mutator
+ // utilization function is 1/window, so we can always
+ // advance the time by at least (mu - mmu) * window
+ // without dropping below mmu.
+ minTime := time + int64((mu-acc.bound)*float64(window))
+
+ // Advance the window to the next time where either
+ // the left or right edge of the window encounters a
+ // change in the utilization curve.
+ if t1, t2 := left.next(time), right.next(time+int64(window))-int64(window); t1 < t2 {
+ time = t1
+ } else {
+ time = t2
+ }
+ if time < minTime {
+ time = minTime
+ }
+ if time >= endTime {
+ // For MMUs we could stop here, but for MUDs
+ // it's important that we span the entire
+ // band.
+ time = endTime
+ }
+ }
+}
+
+// An integrator tracks a position in a utilization function and
+// integrates it.
+type integrator struct {
+ u *mmuSeries
+ // pos is the index in u.util of the current time's non-strict
+ // predecessor.
+ pos int
+}
+
+// advance returns the integral of the utilization function from 0 to
+ // time. advance must be called on monotonically increasing values of
+ // time.
+func (in *integrator) advance(time int64) totalUtil {
+ util, pos := in.u.util, in.pos
+ // Advance pos until pos+1 is time's strict successor (making
+ // pos time's non-strict predecessor).
+ //
+ // Very often, this will be nearby, so we optimize that case,
+ // but it may be arbitrarily far away, so we handle that
+ // efficiently, too.
+ const maxSeq = 8
+ if pos+maxSeq < len(util) && util[pos+maxSeq].Time > time {
+ // Nearby. Use a linear scan.
+ for pos+1 < len(util) && util[pos+1].Time <= time {
+ pos++
+ }
+ } else {
+ // Far. Binary search for time's strict successor.
+ l, r := pos, len(util)
+ for l < r {
+ h := int(uint(l+r) >> 1)
+ if util[h].Time <= time {
+ l = h + 1
+ } else {
+ r = h
+ }
+ }
+ pos = l - 1 // Non-strict predecessor.
+ }
+ in.pos = pos
+ var partial totalUtil
+ if time != util[pos].Time {
+ partial = totalUtilOf(util[pos].Util, time-util[pos].Time)
+ }
+ return in.u.sums[pos] + partial
+}
+
+// next returns the smallest time t' > time of a change in the
+// utilization function.
+func (in *integrator) next(time int64) int64 {
+ for _, u := range in.u.util[in.pos:] {
+ if u.Time > time {
+ return u.Time
+ }
+ }
+ return 1<<63 - 1
+}
diff --git a/src/internal/trace/gc_test.go b/src/internal/trace/gc_test.go
new file mode 100644
index 0000000..9b9771e
--- /dev/null
+++ b/src/internal/trace/gc_test.go
@@ -0,0 +1,202 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "bytes"
+ "math"
+ "os"
+ "testing"
+ "time"
+)
+
+// aeq returns true if x and y are equal up to 8 digits (1 part in 100
+// million).
+func aeq(x, y float64) bool {
+ if x < 0 && y < 0 {
+ x, y = -x, -y
+ }
+ const digits = 8
+ factor := 1 - math.Pow(10, -digits+1)
+ return x*factor <= y && y*factor <= x
+}
+
+func TestMMU(t *testing.T) {
+ t.Parallel()
+
+ // MU
+ // 1.0 ***** ***** *****
+ // 0.5 * * * *
+ // 0.0 ***** *****
+ // 0 1 2 3 4 5
+ util := [][]MutatorUtil{{
+ {0e9, 1},
+ {1e9, 0},
+ {2e9, 1},
+ {3e9, 0},
+ {4e9, 1},
+ {5e9, 0},
+ }}
+ mmuCurve := NewMMUCurve(util)
+
+ for _, test := range []struct {
+ window time.Duration
+ want float64
+ worst []float64
+ }{
+ {0, 0, []float64{}},
+ {time.Millisecond, 0, []float64{0, 0}},
+ {time.Second, 0, []float64{0, 0}},
+ {2 * time.Second, 0.5, []float64{0.5, 0.5}},
+ {3 * time.Second, 1 / 3.0, []float64{1 / 3.0}},
+ {4 * time.Second, 0.5, []float64{0.5}},
+ {5 * time.Second, 3 / 5.0, []float64{3 / 5.0}},
+ {6 * time.Second, 3 / 5.0, []float64{3 / 5.0}},
+ } {
+ if got := mmuCurve.MMU(test.window); !aeq(test.want, got) {
+ t.Errorf("for %s window, want mu = %f, got %f", test.window, test.want, got)
+ }
+ worst := mmuCurve.Examples(test.window, 2)
+ // Which exact windows are returned is unspecified
+ // (and depends on the exact banding), so we just
+ // check that we got the right number with the right
+ // utilizations.
+ if len(worst) != len(test.worst) {
+ t.Errorf("for %s window, want worst %v, got %v", test.window, test.worst, worst)
+ } else {
+ for i := range worst {
+ if worst[i].MutatorUtil != test.worst[i] {
+ t.Errorf("for %s window, want worst %v, got %v", test.window, test.worst, worst)
+ break
+ }
+ }
+ }
+ }
+}
+
+func TestMMUTrace(t *testing.T) {
+ // Can't be t.Parallel() because it modifies the
+ // bandsPerSeries package variable.
+ if testing.Short() {
+ // test input too big for all.bash
+ t.Skip("skipping in -short mode")
+ }
+
+ data, err := os.ReadFile("testdata/stress_1_10_good")
+ if err != nil {
+ t.Fatalf("failed to read input file: %v", err)
+ }
+ _, events, err := parse(bytes.NewReader(data), "")
+ if err != nil {
+ t.Fatalf("failed to parse trace: %s", err)
+ }
+ mu := MutatorUtilization(events.Events, UtilSTW|UtilBackground|UtilAssist)
+ mmuCurve := NewMMUCurve(mu)
+
+ // Test the optimized implementation against the "obviously
+ // correct" implementation.
+ for window := time.Nanosecond; window < 10*time.Second; window *= 10 {
+ want := mmuSlow(mu[0], window)
+ got := mmuCurve.MMU(window)
+ if !aeq(want, got) {
+ t.Errorf("want %f, got %f mutator utilization in window %s", want, got, window)
+ }
+ }
+
+ // Test MUD with band optimization against MUD without band
+ // optimization. We don't have a simple testing implementation
+ // of MUDs (the simplest implementation is still quite
+ // complex), but this is still a pretty good test.
+ defer func(old int) { bandsPerSeries = old }(bandsPerSeries)
+ bandsPerSeries = 1
+ mmuCurve2 := NewMMUCurve(mu)
+ quantiles := []float64{0, 1 - .999, 1 - .99}
+ for window := time.Microsecond; window < time.Second; window *= 10 {
+ mud1 := mmuCurve.MUD(window, quantiles)
+ mud2 := mmuCurve2.MUD(window, quantiles)
+ for i := range mud1 {
+ if !aeq(mud1[i], mud2[i]) {
+ t.Errorf("for quantiles %v at window %v, want %v, got %v", quantiles, window, mud2, mud1)
+ break
+ }
+ }
+ }
+}
+
+func BenchmarkMMU(b *testing.B) {
+ data, err := os.ReadFile("testdata/stress_1_10_good")
+ if err != nil {
+ b.Fatalf("failed to read input file: %v", err)
+ }
+ _, events, err := parse(bytes.NewReader(data), "")
+ if err != nil {
+ b.Fatalf("failed to parse trace: %s", err)
+ }
+ mu := MutatorUtilization(events.Events, UtilSTW|UtilBackground|UtilAssist|UtilSweep)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ mmuCurve := NewMMUCurve(mu)
+ xMin, xMax := time.Microsecond, time.Second
+ logMin, logMax := math.Log(float64(xMin)), math.Log(float64(xMax))
+ const samples = 100
+ for i := 0; i < samples; i++ {
+ window := time.Duration(math.Exp(float64(i)/(samples-1)*(logMax-logMin) + logMin))
+ mmuCurve.MMU(window)
+ }
+ }
+}
+
+func mmuSlow(util []MutatorUtil, window time.Duration) (mmu float64) {
+ if max := time.Duration(util[len(util)-1].Time - util[0].Time); window > max {
+ window = max
+ }
+
+ mmu = 1.0
+
+ // muInWindow returns the mean mutator utilization between
+ // util[0].Time and end.
+ muInWindow := func(util []MutatorUtil, end int64) float64 {
+ total := 0.0
+ var prevU MutatorUtil
+ for _, u := range util {
+ if u.Time > end {
+ total += prevU.Util * float64(end-prevU.Time)
+ break
+ }
+ total += prevU.Util * float64(u.Time-prevU.Time)
+ prevU = u
+ }
+ return total / float64(end-util[0].Time)
+ }
+ update := func() {
+ for i, u := range util {
+ if u.Time+int64(window) > util[len(util)-1].Time {
+ break
+ }
+ mmu = math.Min(mmu, muInWindow(util[i:], u.Time+int64(window)))
+ }
+ }
+
+ // Consider all left-aligned windows.
+ update()
+ // Reverse the trace. Slightly subtle because each MutatorUtil
+ // is a *change*.
+ rutil := make([]MutatorUtil, len(util))
+ if util[len(util)-1].Util != 0 {
+ panic("irreversible trace")
+ }
+ for i, u := range util {
+ util1 := 0.0
+ if i != 0 {
+ util1 = util[i-1].Util
+ }
+ rutil[len(rutil)-i-1] = MutatorUtil{Time: -u.Time, Util: util1}
+ }
+ util = rutil
+ // Consider all right-aligned windows.
+ update()
+ return
+}
diff --git a/src/internal/trace/goroutines.go b/src/internal/trace/goroutines.go
new file mode 100644
index 0000000..4b4f13d
--- /dev/null
+++ b/src/internal/trace/goroutines.go
@@ -0,0 +1,358 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "sort"
+ "strings"
+)
+
+// GDesc contains statistics and execution details of a single goroutine.
+type GDesc struct {
+ ID uint64
+ Name string
+ PC uint64
+ CreationTime int64
+ StartTime int64
+ EndTime int64
+
+ // List of regions in the goroutine, sorted based on the start time.
+ Regions []*UserRegionDesc
+
+ // Statistics of execution time during the goroutine execution.
+ GExecutionStat
+
+ *gdesc // private part.
+}
+
+// UserRegionDesc represents a region and goroutine execution stats
+// while the region was active.
+type UserRegionDesc struct {
+ TaskID uint64
+ Name string
+
+ // Region start event. Normally EvUserRegion start event or nil,
+ // but can be EvGoCreate event if the region is a synthetic
+ // region representing task inheritance from the parent goroutine.
+ Start *Event
+
+ // Region end event. Normally EvUserRegion end event or nil,
+ // but can be EvGoStop or EvGoEnd event if the goroutine
+ // terminated without explicitly ending the region.
+ End *Event
+
+ GExecutionStat
+}
+
+// GExecutionStat contains statistics about a goroutine's execution
+// during a period of time.
+type GExecutionStat struct {
+ ExecTime int64
+ SchedWaitTime int64
+ IOTime int64
+ BlockTime int64
+ SyscallTime int64
+ GCTime int64
+ SweepTime int64
+ TotalTime int64
+}
+
+// sub returns the stats v-s.
+func (s GExecutionStat) sub(v GExecutionStat) (r GExecutionStat) {
+ r = s
+ r.ExecTime -= v.ExecTime
+ r.SchedWaitTime -= v.SchedWaitTime
+ r.IOTime -= v.IOTime
+ r.BlockTime -= v.BlockTime
+ r.SyscallTime -= v.SyscallTime
+ r.GCTime -= v.GCTime
+ r.SweepTime -= v.SweepTime
+ r.TotalTime -= v.TotalTime
+ return r
+}
+
+// snapshotStat returns the snapshot of the goroutine execution statistics.
+// This is called as we process the ordered trace event stream. lastTs and
+// activeGCStartTime are used to process pending statistics if this is called
+// before any goroutine end event.
+func (g *GDesc) snapshotStat(lastTs, activeGCStartTime int64) (ret GExecutionStat) {
+ ret = g.GExecutionStat
+
+ if g.gdesc == nil {
+ return ret // finalized GDesc. No pending state.
+ }
+
+ if activeGCStartTime != 0 { // terminating while GC is active
+ if g.CreationTime < activeGCStartTime {
+ ret.GCTime += lastTs - activeGCStartTime
+ } else {
+ // The goroutine's lifetime completely overlaps
+ // with a GC.
+ ret.GCTime += lastTs - g.CreationTime
+ }
+ }
+
+ if g.TotalTime == 0 {
+ ret.TotalTime = lastTs - g.CreationTime
+ }
+
+ if g.lastStartTime != 0 {
+ ret.ExecTime += lastTs - g.lastStartTime
+ }
+ if g.blockNetTime != 0 {
+ ret.IOTime += lastTs - g.blockNetTime
+ }
+ if g.blockSyncTime != 0 {
+ ret.BlockTime += lastTs - g.blockSyncTime
+ }
+ if g.blockSyscallTime != 0 {
+ ret.SyscallTime += lastTs - g.blockSyscallTime
+ }
+ if g.blockSchedTime != 0 {
+ ret.SchedWaitTime += lastTs - g.blockSchedTime
+ }
+ if g.blockSweepTime != 0 {
+ ret.SweepTime += lastTs - g.blockSweepTime
+ }
+ return ret
+}
+
+ // finalize is called when processing a goroutine end event or at
+ // the end of trace processing (in which case trigger is nil). This
+ // finalizes the execution stat and any active regions in the goroutine.
+func (g *GDesc) finalize(lastTs, activeGCStartTime int64, trigger *Event) {
+ if trigger != nil {
+ g.EndTime = trigger.Ts
+ }
+ finalStat := g.snapshotStat(lastTs, activeGCStartTime)
+
+ g.GExecutionStat = finalStat
+
+ // System goroutines are never part of regions, even though they
+ // "inherit" a task due to creation (EvGoCreate) from within a region.
+ // This may happen e.g. if the first GC is triggered within a region,
+ // starting the GC worker goroutines.
+ if !IsSystemGoroutine(g.Name) {
+ for _, s := range g.activeRegions {
+ s.End = trigger
+ s.GExecutionStat = finalStat.sub(s.GExecutionStat)
+ g.Regions = append(g.Regions, s)
+ }
+ }
+ *(g.gdesc) = gdesc{}
+}
+
+// gdesc is a private part of GDesc that is required only during analysis.
+type gdesc struct {
+ lastStartTime int64
+ blockNetTime int64
+ blockSyncTime int64
+ blockSyscallTime int64
+ blockSweepTime int64
+ blockGCTime int64
+ blockSchedTime int64
+
+ activeRegions []*UserRegionDesc // stack of active regions
+}
+
+// GoroutineStats generates statistics for all goroutines in the trace.
+func GoroutineStats(events []*Event) map[uint64]*GDesc {
+ gs := make(map[uint64]*GDesc)
+ var lastTs int64
+ var gcStartTime int64 // gcStartTime == 0 indicates gc is inactive.
+ for _, ev := range events {
+ lastTs = ev.Ts
+ switch ev.Type {
+ case EvGoCreate:
+ g := &GDesc{ID: ev.Args[0], CreationTime: ev.Ts, gdesc: new(gdesc)}
+ g.blockSchedTime = ev.Ts
+ // When a goroutine is newly created, inherit the task
+ // of the active region. For ease of handling this
+ // case, we create a fake region description with the
+ // task id. This isn't strictly necessary as this
+ // goroutine may not be associated with the task, but
+ // it can be convenient to see all children created
+ // during a region.
+ if creatorG := gs[ev.G]; creatorG != nil && len(creatorG.gdesc.activeRegions) > 0 {
+ regions := creatorG.gdesc.activeRegions
+ s := regions[len(regions)-1]
+ if s.TaskID != 0 {
+ g.gdesc.activeRegions = []*UserRegionDesc{
+ {TaskID: s.TaskID, Start: ev},
+ }
+ }
+ }
+ gs[g.ID] = g
+ case EvGoStart, EvGoStartLabel:
+ g := gs[ev.G]
+ if g.PC == 0 && len(ev.Stk) > 0 {
+ g.PC = ev.Stk[0].PC
+ g.Name = ev.Stk[0].Fn
+ }
+ g.lastStartTime = ev.Ts
+ if g.StartTime == 0 {
+ g.StartTime = ev.Ts
+ }
+ if g.blockSchedTime != 0 {
+ g.SchedWaitTime += ev.Ts - g.blockSchedTime
+ g.blockSchedTime = 0
+ }
+ case EvGoEnd, EvGoStop:
+ g := gs[ev.G]
+ g.finalize(ev.Ts, gcStartTime, ev)
+ case EvGoBlockSend, EvGoBlockRecv, EvGoBlockSelect,
+ EvGoBlockSync, EvGoBlockCond:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ g.blockSyncTime = ev.Ts
+ case EvGoSched, EvGoPreempt:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ g.blockSchedTime = ev.Ts
+ case EvGoSleep, EvGoBlock:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ case EvGoBlockNet:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ g.blockNetTime = ev.Ts
+ case EvGoBlockGC:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ g.blockGCTime = ev.Ts
+ case EvGoUnblock:
+ g := gs[ev.Args[0]]
+ if g.blockNetTime != 0 {
+ g.IOTime += ev.Ts - g.blockNetTime
+ g.blockNetTime = 0
+ }
+ if g.blockSyncTime != 0 {
+ g.BlockTime += ev.Ts - g.blockSyncTime
+ g.blockSyncTime = 0
+ }
+ g.blockSchedTime = ev.Ts
+ case EvGoSysBlock:
+ g := gs[ev.G]
+ g.ExecTime += ev.Ts - g.lastStartTime
+ g.lastStartTime = 0
+ g.blockSyscallTime = ev.Ts
+ case EvGoSysExit:
+ g := gs[ev.G]
+ if g.blockSyscallTime != 0 {
+ g.SyscallTime += ev.Ts - g.blockSyscallTime
+ g.blockSyscallTime = 0
+ }
+ g.blockSchedTime = ev.Ts
+ case EvGCSweepStart:
+ g := gs[ev.G]
+ if g != nil {
+ // Sweep can happen during GC on system goroutine.
+ g.blockSweepTime = ev.Ts
+ }
+ case EvGCSweepDone:
+ g := gs[ev.G]
+ if g != nil && g.blockSweepTime != 0 {
+ g.SweepTime += ev.Ts - g.blockSweepTime
+ g.blockSweepTime = 0
+ }
+ case EvGCStart:
+ gcStartTime = ev.Ts
+ case EvGCDone:
+ for _, g := range gs {
+ if g.EndTime != 0 {
+ continue
+ }
+ if gcStartTime < g.CreationTime {
+ g.GCTime += ev.Ts - g.CreationTime
+ } else {
+ g.GCTime += ev.Ts - gcStartTime
+ }
+ }
+ gcStartTime = 0 // indicates gc is inactive.
+ case EvUserRegion:
+ g := gs[ev.G]
+ switch mode := ev.Args[1]; mode {
+ case 0: // region start
+ g.activeRegions = append(g.activeRegions, &UserRegionDesc{
+ Name: ev.SArgs[0],
+ TaskID: ev.Args[0],
+ Start: ev,
+ GExecutionStat: g.snapshotStat(lastTs, gcStartTime),
+ })
+ case 1: // region end
+ var sd *UserRegionDesc
+ if regionStk := g.activeRegions; len(regionStk) > 0 {
+ n := len(regionStk)
+ sd = regionStk[n-1]
+ regionStk = regionStk[:n-1] // pop
+ g.activeRegions = regionStk
+ } else {
+ sd = &UserRegionDesc{
+ Name: ev.SArgs[0],
+ TaskID: ev.Args[0],
+ }
+ }
+ sd.GExecutionStat = g.snapshotStat(lastTs, gcStartTime).sub(sd.GExecutionStat)
+ sd.End = ev
+ g.Regions = append(g.Regions, sd)
+ }
+ }
+ }
+
+ for _, g := range gs {
+ g.finalize(lastTs, gcStartTime, nil)
+
+ // sort based on region start time
+ sort.Slice(g.Regions, func(i, j int) bool {
+ x := g.Regions[i].Start
+ y := g.Regions[j].Start
+ if x == nil {
+ return true
+ }
+ if y == nil {
+ return false
+ }
+ return x.Ts < y.Ts
+ })
+
+ g.gdesc = nil
+ }
+
+ return gs
+}
+
+// RelatedGoroutines finds a set of goroutines related to goroutine goid.
+func RelatedGoroutines(events []*Event, goid uint64) map[uint64]bool {
+ // BFS of depth 2 over "unblock" edges
+ // (what goroutines unblock goroutine goid?).
+ gmap := make(map[uint64]bool)
+ gmap[goid] = true
+ for i := 0; i < 2; i++ {
+ gmap1 := make(map[uint64]bool)
+ for g := range gmap {
+ gmap1[g] = true
+ }
+ for _, ev := range events {
+ if ev.Type == EvGoUnblock && gmap[ev.Args[0]] {
+ gmap1[ev.G] = true
+ }
+ }
+ gmap = gmap1
+ }
+ gmap[0] = true // for GC events
+ return gmap
+}
+
+func IsSystemGoroutine(entryFn string) bool {
+ // This mimics runtime.isSystemGoroutine as closely as
+ // possible.
+ // Also, a locked g in an extra M (with an empty entryFn) is a system goroutine.
+ return entryFn == "" || entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.")
+}
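A brief sketch of how these helpers combine (again assuming a ParseResult res from Parse and an imported fmt, from code inside this package or with a trace. prefix from a caller such as cmd/trace):

	gs := GoroutineStats(res.Events)
	for id, g := range gs {
		fmt.Printf("g%d %s: exec=%dns block=%dns sched=%dns gc=%dns\n",
			id, g.Name, g.ExecTime, g.BlockTime, g.SchedWaitTime, g.GCTime)
	}
	// Goroutines that (within two unblock hops) unblocked goroutine 1,
	// plus goroutine 0 for GC events.
	related := RelatedGoroutines(res.Events, 1)
	_ = related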
diff --git a/src/internal/trace/mkcanned.bash b/src/internal/trace/mkcanned.bash
new file mode 100755
index 0000000..879cf1c
--- /dev/null
+++ b/src/internal/trace/mkcanned.bash
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Copyright 2016 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# mkcanned.bash creates canned traces for the trace test suite using
+# the current Go version.
+
+set -e
+
+if [ $# != 1 ]; then
+ echo "usage: $0 <label>" >&2
+ exit 1
+fi
+
+go test -run '^$' -bench ClientServerParallel4 -benchtime 10x -trace "testdata/http_$1_good" net/http
+go test -run 'TraceStress$|TraceStressStartStop$|TestUserTaskRegion$' runtime/trace -savetraces
+mv ../../runtime/trace/TestTraceStress.trace "testdata/stress_$1_good"
+mv ../../runtime/trace/TestTraceStressStartStop.trace "testdata/stress_start_stop_$1_good"
+mv ../../runtime/trace/TestUserTaskRegion.trace "testdata/user_task_region_$1_good"
diff --git a/src/internal/trace/mud.go b/src/internal/trace/mud.go
new file mode 100644
index 0000000..8826306
--- /dev/null
+++ b/src/internal/trace/mud.go
@@ -0,0 +1,223 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "math"
+ "sort"
+)
+
+// mud is an updatable mutator utilization distribution.
+//
+// This is a continuous distribution of duration over mutator
+// utilization. For example, the integral from mutator utilization a
+// to b is the total duration during which the mutator utilization was
+// in the range [a, b].
+//
+// This distribution is *not* normalized (it is not a probability
+// distribution). This makes it easier to work with as it's being
+// updated.
+//
+// It is represented as the sum of scaled uniform distribution
+// functions and Dirac delta functions (which are treated as
+// degenerate uniform distributions).
+type mud struct {
+ sorted, unsorted []edge
+
+ // trackMass is the inverse cumulative sum to track as the
+ // distribution is updated.
+ trackMass float64
+ // trackBucket is the bucket in which trackMass falls. If the
+ // total mass of the distribution is < trackMass, this is
+ // len(hist).
+ trackBucket int
+ // trackSum is the cumulative sum of hist[:trackBucket]. Once
+ // trackSum >= trackMass, trackBucket must be recomputed.
+ trackSum float64
+
+ // hist is a hierarchical histogram of distribution mass.
+ hist [mudDegree]float64
+}
+
+const (
+ // mudDegree is the number of buckets in the MUD summary
+ // histogram.
+ mudDegree = 1024
+)
+
+type edge struct {
+ // At x, the function increases by y.
+ x, delta float64
+ // Additionally at x is a Dirac delta function with area dirac.
+ dirac float64
+}
+
+// add adds a uniform function over [l, r] scaled so the total weight
+// of the uniform is area. If l==r, this adds a Dirac delta function.
+func (d *mud) add(l, r, area float64) {
+ if area == 0 {
+ return
+ }
+
+ if r < l {
+ l, r = r, l
+ }
+
+ // Add the edges.
+ if l == r {
+ d.unsorted = append(d.unsorted, edge{l, 0, area})
+ } else {
+ delta := area / (r - l)
+ d.unsorted = append(d.unsorted, edge{l, delta, 0}, edge{r, -delta, 0})
+ }
+
+ // Update the histogram.
+ h := &d.hist
+ lbFloat, lf := math.Modf(l * mudDegree)
+ lb := int(lbFloat)
+ if lb >= mudDegree {
+ lb, lf = mudDegree-1, 1
+ }
+ if l == r {
+ h[lb] += area
+ } else {
+ rbFloat, rf := math.Modf(r * mudDegree)
+ rb := int(rbFloat)
+ if rb >= mudDegree {
+ rb, rf = mudDegree-1, 1
+ }
+ if lb == rb {
+ h[lb] += area
+ } else {
+ perBucket := area / (r - l) / mudDegree
+ h[lb] += perBucket * (1 - lf)
+ h[rb] += perBucket * rf
+ for i := lb + 1; i < rb; i++ {
+ h[i] += perBucket
+ }
+ }
+ }
+
+ // Update mass tracking.
+ if thresh := float64(d.trackBucket) / mudDegree; l < thresh {
+ if r < thresh {
+ d.trackSum += area
+ } else {
+ d.trackSum += area * (thresh - l) / (r - l)
+ }
+ if d.trackSum >= d.trackMass {
+ // The tracked mass now falls in a different
+ // bucket. Recompute the inverse cumulative sum.
+ d.setTrackMass(d.trackMass)
+ }
+ }
+}
+
+// setTrackMass sets the mass to track the inverse cumulative sum for.
+//
+// Specifically, mass is a cumulative duration, and the mutator
+// utilization bounds for this duration can be queried using
+// approxInvCumulativeSum.
+func (d *mud) setTrackMass(mass float64) {
+ d.trackMass = mass
+
+ // Find the bucket currently containing trackMass by computing
+ // the cumulative sum.
+ sum := 0.0
+ for i, val := range d.hist[:] {
+ newSum := sum + val
+ if newSum > mass {
+ // mass falls in bucket i.
+ d.trackBucket = i
+ d.trackSum = sum
+ return
+ }
+ sum = newSum
+ }
+ d.trackBucket = len(d.hist)
+ d.trackSum = sum
+}
+
+// approxInvCumulativeSum is like invCumulativeSum, but specifically
+// operates on the tracked mass and returns an upper and lower bound
+// approximation of the inverse cumulative sum.
+//
+// The true inverse cumulative sum will be in the range [lower, upper).
+func (d *mud) approxInvCumulativeSum() (float64, float64, bool) {
+ if d.trackBucket == len(d.hist) {
+ return math.NaN(), math.NaN(), false
+ }
+ return float64(d.trackBucket) / mudDegree, float64(d.trackBucket+1) / mudDegree, true
+}
+
+// invCumulativeSum returns x such that the integral of d from -∞ to x
+// is y. If the total weight of d is less than y, it returns the
+// maximum of the distribution and false.
+//
+// Specifically, y is a cumulative duration, and invCumulativeSum
+// returns the mutator utilization x such that at least y time has
+// been spent with mutator utilization <= x.
+func (d *mud) invCumulativeSum(y float64) (float64, bool) {
+ if len(d.sorted) == 0 && len(d.unsorted) == 0 {
+ return math.NaN(), false
+ }
+
+ // Sort edges.
+ edges := d.unsorted
+ sort.Slice(edges, func(i, j int) bool {
+ return edges[i].x < edges[j].x
+ })
+ // Merge with sorted edges.
+ d.unsorted = nil
+ if d.sorted == nil {
+ d.sorted = edges
+ } else {
+ oldSorted := d.sorted
+ newSorted := make([]edge, len(oldSorted)+len(edges))
+ i, j := 0, 0
+ for o := range newSorted {
+ if i >= len(oldSorted) {
+ copy(newSorted[o:], edges[j:])
+ break
+ } else if j >= len(edges) {
+ copy(newSorted[o:], oldSorted[i:])
+ break
+ } else if oldSorted[i].x < edges[j].x {
+ newSorted[o] = oldSorted[i]
+ i++
+ } else {
+ newSorted[o] = edges[j]
+ j++
+ }
+ }
+ d.sorted = newSorted
+ }
+
+ // Traverse edges in order computing a cumulative sum.
+ csum, rate, prevX := 0.0, 0.0, 0.0
+ for _, e := range d.sorted {
+ newCsum := csum + (e.x-prevX)*rate
+ if newCsum >= y {
+ // y was exceeded between the previous edge
+ // and this one.
+ if rate == 0 {
+ // Anywhere between prevX and
+ // e.x will do. We return e.x
+ // because that takes care of
+ // the y==0 case naturally.
+ return e.x, true
+ }
+ return (y-csum)/rate + prevX, true
+ }
+ newCsum += e.dirac
+ if newCsum >= y {
+ // y was exceeded by the Dirac delta at e.x.
+ return e.x, true
+ }
+ csum, prevX = newCsum, e.x
+ rate += e.delta
+ }
+ return prevX, false
+}
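A small worked illustration of these semantics, in the spirit of mud_test.go below (mud is unexported, so this would live in a test within this package; fmt assumed imported):

	var d mud
	d.add(0.0, 0.5, 1.0)   // uniform: mass 1.0 spread over [0, 0.5]
	d.add(0.75, 0.75, 0.5) // Dirac delta: mass 0.5 at 0.75
	x, ok := d.invCumulativeSum(1.2)
	// The first 1.0 of mass is exhausted by x=0.5 and the next 0.5
	// arrives as the delta at 0.75, so x == 0.75 and ok == true.
	fmt.Println(x, ok)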
diff --git a/src/internal/trace/mud_test.go b/src/internal/trace/mud_test.go
new file mode 100644
index 0000000..b3d74dc
--- /dev/null
+++ b/src/internal/trace/mud_test.go
@@ -0,0 +1,87 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "math/rand"
+ "testing"
+)
+
+func TestMUD(t *testing.T) {
+ // Insert random uniforms and check histogram mass and
+ // cumulative sum approximations.
+ rnd := rand.New(rand.NewSource(42))
+ mass := 0.0
+ var mud mud
+ for i := 0; i < 100; i++ {
+ area, l, r := rnd.Float64(), rnd.Float64(), rnd.Float64()
+ if rnd.Intn(10) == 0 {
+ r = l
+ }
+ t.Log(l, r, area)
+ mud.add(l, r, area)
+ mass += area
+
+ // Check total histogram weight.
+ hmass := 0.0
+ for _, val := range mud.hist {
+ hmass += val
+ }
+ if !aeq(mass, hmass) {
+ t.Fatalf("want mass %g, got %g", mass, hmass)
+ }
+
+ // Check inverse cumulative sum approximations.
+ for j := 0.0; j < mass; j += mass * 0.099 {
+ mud.setTrackMass(j)
+ l, u, ok := mud.approxInvCumulativeSum()
+ inv, ok2 := mud.invCumulativeSum(j)
+ if !ok || !ok2 {
+ t.Fatalf("inverse cumulative sum failed: approx %v, exact %v", ok, ok2)
+ }
+ if !(l <= inv && inv < u) {
+ t.Fatalf("inverse(%g) = %g, not ∈ [%g, %g)", j, inv, l, u)
+ }
+ }
+ }
+}
+
+func TestMUDTracking(t *testing.T) {
+ // Test that the tracked mass is tracked correctly across
+ // updates.
+ rnd := rand.New(rand.NewSource(42))
+ const uniforms = 100
+ for trackMass := 0.0; trackMass < uniforms; trackMass += uniforms / 50 {
+ var mud mud
+ mass := 0.0
+ mud.setTrackMass(trackMass)
+ for i := 0; i < uniforms; i++ {
+ area, l, r := rnd.Float64(), rnd.Float64(), rnd.Float64()
+ mud.add(l, r, area)
+ mass += area
+ l, u, ok := mud.approxInvCumulativeSum()
+ inv, ok2 := mud.invCumulativeSum(trackMass)
+
+ if mass < trackMass {
+ if ok {
+ t.Errorf("approx(%g) = [%g, %g), but mass = %g", trackMass, l, u, mass)
+ }
+ if ok2 {
+ t.Errorf("exact(%g) = %g, but mass = %g", trackMass, inv, mass)
+ }
+ } else {
+ if !ok {
+ t.Errorf("approx(%g) failed, but mass = %g", trackMass, mass)
+ }
+ if !ok2 {
+ t.Errorf("exact(%g) failed, but mass = %g", trackMass, mass)
+ }
+ if ok && ok2 && !(l <= inv && inv < u) {
+ t.Errorf("inverse(%g) = %g, not ∈ [%g, %g)", trackMass, inv, l, u)
+ }
+ }
+ }
+ }
+}
diff --git a/src/internal/trace/order.go b/src/internal/trace/order.go
new file mode 100644
index 0000000..07a6e13
--- /dev/null
+++ b/src/internal/trace/order.go
@@ -0,0 +1,285 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "fmt"
+ "sort"
+)
+
+type eventBatch struct {
+ events []*Event
+ selected bool
+}
+
+type orderEvent struct {
+ ev *Event
+ batch int
+ g uint64
+ init gState
+ next gState
+}
+
+type gStatus int
+
+type gState struct {
+ seq uint64
+ status gStatus
+}
+
+const (
+ gDead gStatus = iota
+ gRunnable
+ gRunning
+ gWaiting
+
+ unordered = ^uint64(0)
+ garbage = ^uint64(0) - 1
+ noseq = ^uint64(0)
+ seqinc = ^uint64(0) - 1
+)
+
+// order1007 merges a set of per-P event batches into a single, consistent stream.
+// The high level idea is as follows. Events within an individual batch are in
+// correct order, because they are emitted by a single P. So we need to produce
+ // a correct interleaving of the batches. To do this we take the first unmerged
+ // event from each batch (the frontier). Then we choose the subset that is "ready"
+ // to be merged, that is, events for which all dependencies are already merged.
+ // Then we choose the event with the lowest timestamp from the subset, merge it,
+ // and repeat. This approach ensures that we form a consistent stream even if the
+ // timestamps are incorrect (a condition observed on some machines).
+func order1007(m map[int][]*Event) (events []*Event, err error) {
+ pending := 0
+ // The ordering of CPU profile sample events in the data stream is based on
+ // when each run of the signal handler was able to acquire the spinlock,
+ // with original timestamps corresponding to when ReadTrace pulled the data
+ // off of the profBuf queue. Re-sort them by the timestamp we captured
+ // inside the signal handler.
+ sort.Stable(eventList(m[ProfileP]))
+ var batches []*eventBatch
+ for _, v := range m {
+ pending += len(v)
+ batches = append(batches, &eventBatch{v, false})
+ }
+ gs := make(map[uint64]gState)
+ var frontier []orderEvent
+ for ; pending != 0; pending-- {
+ for i, b := range batches {
+ if b.selected || len(b.events) == 0 {
+ continue
+ }
+ ev := b.events[0]
+ g, init, next := stateTransition(ev)
+ if !transitionReady(g, gs[g], init) {
+ continue
+ }
+ frontier = append(frontier, orderEvent{ev, i, g, init, next})
+ b.events = b.events[1:]
+ b.selected = true
+ // Get rid of "Local" events, they are intended merely for ordering.
+ switch ev.Type {
+ case EvGoStartLocal:
+ ev.Type = EvGoStart
+ case EvGoUnblockLocal:
+ ev.Type = EvGoUnblock
+ case EvGoSysExitLocal:
+ ev.Type = EvGoSysExit
+ }
+ }
+ if len(frontier) == 0 {
+ return nil, fmt.Errorf("no consistent ordering of events possible")
+ }
+ sort.Sort(orderEventList(frontier))
+ f := frontier[0]
+ frontier[0] = frontier[len(frontier)-1]
+ frontier = frontier[:len(frontier)-1]
+ events = append(events, f.ev)
+ transition(gs, f.g, f.init, f.next)
+ if !batches[f.batch].selected {
+ panic("frontier batch is not selected")
+ }
+ batches[f.batch].selected = false
+ }
+
+ // At this point we have a consistent stream of events.
+ // Make sure time stamps respect the ordering.
+ // The tests will skip (not fail) the test case if they see this error.
+ if !sort.IsSorted(eventList(events)) {
+ return nil, ErrTimeOrder
+ }
+
+ // The last part is giving correct timestamps to EvGoSysExit events.
+ // The problem with EvGoSysExit is that the actual syscall exit timestamp
+ // (ev.Args[2]) is potentially acquired long before the event is emitted.
+ // So far we've used the timestamp of event emission (ev.Ts).
+ // We could not set ev.Ts = ev.Args[2] earlier, because it would produce
+ // seemingly broken timestamps (a misplaced event).
+ // We also can't simply update the timestamp and re-sort the events, because
+ // if the timestamps are broken we will misplace the event and later report
+ // a logically broken trace (instead of reporting the broken timestamps).
+ lastSysBlock := make(map[uint64]int64)
+ for _, ev := range events {
+ switch ev.Type {
+ case EvGoSysBlock, EvGoInSyscall:
+ lastSysBlock[ev.G] = ev.Ts
+ case EvGoSysExit:
+ ts := int64(ev.Args[2])
+ if ts == 0 {
+ continue
+ }
+ block := lastSysBlock[ev.G]
+ if block == 0 {
+ return nil, fmt.Errorf("stray syscall exit")
+ }
+ if ts < block {
+ return nil, ErrTimeOrder
+ }
+ ev.Ts = ts
+ }
+ }
+ sort.Stable(eventList(events))
+
+ return
+}
+
+// stateTransition returns goroutine state (sequence and status) when the event
+// becomes ready for merging (init) and the goroutine state after the event (next).
+func stateTransition(ev *Event) (g uint64, init, next gState) {
+ switch ev.Type {
+ case EvGoCreate:
+ g = ev.Args[0]
+ init = gState{0, gDead}
+ next = gState{1, gRunnable}
+ case EvGoWaiting, EvGoInSyscall:
+ g = ev.G
+ init = gState{1, gRunnable}
+ next = gState{2, gWaiting}
+ case EvGoStart, EvGoStartLabel:
+ g = ev.G
+ init = gState{ev.Args[1], gRunnable}
+ next = gState{ev.Args[1] + 1, gRunning}
+ case EvGoStartLocal:
+ // noseq means that this event is ready for merging as soon as
+ // frontier reaches it (EvGoStartLocal is emitted on the same P
+ // as the corresponding EvGoCreate/EvGoUnblock, and thus the latter
+ // is already merged).
+ // seqinc is a stub for cases when the event increments the g's sequence,
+ // but since we don't know the current seq we also don't know the next seq.
+ g = ev.G
+ init = gState{noseq, gRunnable}
+ next = gState{seqinc, gRunning}
+ case EvGoBlock, EvGoBlockSend, EvGoBlockRecv, EvGoBlockSelect,
+ EvGoBlockSync, EvGoBlockCond, EvGoBlockNet, EvGoSleep,
+ EvGoSysBlock, EvGoBlockGC:
+ g = ev.G
+ init = gState{noseq, gRunning}
+ next = gState{noseq, gWaiting}
+ case EvGoSched, EvGoPreempt:
+ g = ev.G
+ init = gState{noseq, gRunning}
+ next = gState{noseq, gRunnable}
+ case EvGoUnblock, EvGoSysExit:
+ g = ev.Args[0]
+ init = gState{ev.Args[1], gWaiting}
+ next = gState{ev.Args[1] + 1, gRunnable}
+ case EvGoUnblockLocal, EvGoSysExitLocal:
+ g = ev.Args[0]
+ init = gState{noseq, gWaiting}
+ next = gState{seqinc, gRunnable}
+ case EvGCStart:
+ g = garbage
+ init = gState{ev.Args[0], gDead}
+ next = gState{ev.Args[0] + 1, gDead}
+ default:
+ // no ordering requirements
+ g = unordered
+ }
+ return
+}
+
+func transitionReady(g uint64, curr, init gState) bool {
+ return g == unordered || (init.seq == noseq || init.seq == curr.seq) && init.status == curr.status
+}
+
+func transition(gs map[uint64]gState, g uint64, init, next gState) {
+ if g == unordered {
+ return
+ }
+ curr := gs[g]
+ if !transitionReady(g, curr, init) {
+ panic("event sequences are broken")
+ }
+ switch next.seq {
+ case noseq:
+ next.seq = curr.seq
+ case seqinc:
+ next.seq = curr.seq + 1
+ }
+ gs[g] = next
+}
+
+// order1005 merges a set of per-P event batches into a single, consistent stream.
+func order1005(m map[int][]*Event) (events []*Event, err error) {
+ for _, batch := range m {
+ events = append(events, batch...)
+ }
+ for _, ev := range events {
+ if ev.Type == EvGoSysExit {
+ // EvGoSysExit emission is delayed until the thread has a P.
+ // Give it the real sequence number and time stamp.
+ ev.seq = int64(ev.Args[1])
+ if ev.Args[2] != 0 {
+ ev.Ts = int64(ev.Args[2])
+ }
+ }
+ }
+ sort.Sort(eventSeqList(events))
+ if !sort.IsSorted(eventList(events)) {
+ return nil, ErrTimeOrder
+ }
+ return
+}
+
+type orderEventList []orderEvent
+
+func (l orderEventList) Len() int {
+ return len(l)
+}
+
+func (l orderEventList) Less(i, j int) bool {
+ return l[i].ev.Ts < l[j].ev.Ts
+}
+
+func (l orderEventList) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+type eventList []*Event
+
+func (l eventList) Len() int {
+ return len(l)
+}
+
+func (l eventList) Less(i, j int) bool {
+ return l[i].Ts < l[j].Ts
+}
+
+func (l eventList) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+type eventSeqList []*Event
+
+func (l eventSeqList) Len() int {
+ return len(l)
+}
+
+func (l eventSeqList) Less(i, j int) bool {
+ return l[i].seq < l[j].seq
+}
+
+func (l eventSeqList) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
diff --git a/src/internal/trace/parser.go b/src/internal/trace/parser.go
new file mode 100644
index 0000000..67fa60b
--- /dev/null
+++ b/src/internal/trace/parser.go
@@ -0,0 +1,1174 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ _ "unsafe"
+)
+
+func goCmd() string {
+ var exeSuffix string
+ if runtime.GOOS == "windows" {
+ exeSuffix = ".exe"
+ }
+ path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix)
+ if _, err := os.Stat(path); err == nil {
+ return path
+ }
+ return "go"
+}
+
+// Event describes one event in the trace.
+type Event struct {
+ Off int // offset in input file (for debugging and error reporting)
+ Type byte // one of Ev*
+ seq int64 // sequence number
+ Ts int64 // timestamp in nanoseconds
+ P int // P on which the event happened (can be one of TimerP, NetpollP, SyscallP)
+ G uint64 // G on which the event happened
+ StkID uint64 // unique stack ID
+ Stk []*Frame // stack trace (can be empty)
+ Args [3]uint64 // event-type-specific arguments
+ SArgs []string // event-type-specific string args
+ // linked event (can be nil), depends on event type:
+ // for GCStart: the GCStop
+ // for GCSTWStart: the GCSTWDone
+ // for GCSweepStart: the GCSweepDone
+ // for GoCreate: first GoStart of the created goroutine
+ // for GoStart/GoStartLabel: the associated GoEnd, GoBlock or other blocking event
+ // for GoSched/GoPreempt: the next GoStart
+ // for GoBlock and other blocking events: the unblock event
+ // for GoUnblock: the associated GoStart
+ // for blocking GoSysCall: the associated GoSysExit
+ // for GoSysExit: the next GoStart
+ // for GCMarkAssistStart: the associated GCMarkAssistDone
+ // for UserTaskCreate: the UserTaskEnd
+ // for UserRegion: if the start region, the corresponding UserRegion end event
+ Link *Event
+}
+
+// Frame is a frame in stack traces.
+type Frame struct {
+ PC uint64
+ Fn string
+ File string
+ Line int
+}
+
+const (
+ // Special P identifiers:
+ FakeP = 1000000 + iota
+ TimerP // depicts timer unblocks
+ NetpollP // depicts network unblocks
+ SyscallP // depicts returns from syscalls
+ GCP // depicts GC state
+ ProfileP // depicts recording of CPU profile samples
+)
+
+// ParseResult is the result of Parse.
+type ParseResult struct {
+ // Events is the sorted list of Events in the trace.
+ Events []*Event
+ // Stacks is the stack traces keyed by stack IDs from the trace.
+ Stacks map[uint64][]*Frame
+}
+
+// Parse parses, post-processes and verifies the trace.
+func Parse(r io.Reader, bin string) (ParseResult, error) {
+ ver, res, err := parse(r, bin)
+ if err != nil {
+ return ParseResult{}, err
+ }
+ if ver < 1007 && bin == "" {
+ return ParseResult{}, fmt.Errorf("for traces produced by go 1.6 or below, the binary argument must be provided")
+ }
+ return res, nil
+}
+
+// parse parses, post-processes and verifies the trace. It returns the
+// trace version and the list of events.
+func parse(r io.Reader, bin string) (int, ParseResult, error) {
+ ver, rawEvents, strings, err := readTrace(r)
+ if err != nil {
+ return 0, ParseResult{}, err
+ }
+ events, stacks, err := parseEvents(ver, rawEvents, strings)
+ if err != nil {
+ return 0, ParseResult{}, err
+ }
+ events = removeFutile(events)
+ err = postProcessTrace(ver, events)
+ if err != nil {
+ return 0, ParseResult{}, err
+ }
+ // Attach stack traces.
+ for _, ev := range events {
+ if ev.StkID != 0 {
+ ev.Stk = stacks[ev.StkID]
+ }
+ }
+ if ver < 1007 && bin != "" {
+ if err := symbolize(events, bin); err != nil {
+ return 0, ParseResult{}, err
+ }
+ }
+ return ver, ParseResult{Events: events, Stacks: stacks}, nil
+}
+
+// rawEvent is a helper type used during parsing.
+type rawEvent struct {
+ off int
+ typ byte
+ args []uint64
+ sargs []string
+}
+
+// readTrace does wire-format parsing and verification.
+// It does not care about specific event types and argument meaning.
+func readTrace(r io.Reader) (ver int, events []rawEvent, strings map[uint64]string, err error) {
+ // Read and validate trace header.
+ var buf [16]byte
+ off, err := io.ReadFull(r, buf[:])
+ if err != nil {
+ err = fmt.Errorf("failed to read header: read %v, err %v", off, err)
+ return
+ }
+ ver, err = parseHeader(buf[:])
+ if err != nil {
+ return
+ }
+ switch ver {
+ case 1005, 1007, 1008, 1009, 1010, 1011, 1019, 1021:
+ // Note: When adding a new version, confirm that canned traces from the
+ // old version are part of the test suite. Add them using mkcanned.bash.
+ break
+ default:
+ err = fmt.Errorf("unsupported trace file version %v.%v (update Go toolchain) %v", ver/1000, ver%1000, ver)
+ return
+ }
+
+ // Read events.
+ strings = make(map[uint64]string)
+ for {
+ // Read event type and number of arguments (1 byte).
+ off0 := off
+ var n int
+ n, err = r.Read(buf[:1])
+ if err == io.EOF {
+ err = nil
+ break
+ }
+ if err != nil || n != 1 {
+ err = fmt.Errorf("failed to read trace at offset 0x%x: n=%v err=%v", off0, n, err)
+ return
+ }
+ off += n
+ typ := buf[0] << 2 >> 2
+ narg := buf[0]>>6 + 1
+ inlineArgs := byte(4)
+ if ver < 1007 {
+ narg++
+ inlineArgs++
+ }
+ if typ == EvNone || typ >= EvCount || EventDescriptions[typ].minVersion > ver {
+ err = fmt.Errorf("unknown event type %v at offset 0x%x", typ, off0)
+ return
+ }
+ if typ == EvString {
+ // String dictionary entry [ID, length, string].
+ var id uint64
+ id, off, err = readVal(r, off)
+ if err != nil {
+ return
+ }
+ if id == 0 {
+ err = fmt.Errorf("string at offset %d has invalid id 0", off)
+ return
+ }
+ if strings[id] != "" {
+ err = fmt.Errorf("string at offset %d has duplicate id %v", off, id)
+ return
+ }
+ var ln uint64
+ ln, off, err = readVal(r, off)
+ if err != nil {
+ return
+ }
+ if ln == 0 {
+ err = fmt.Errorf("string at offset %d has invalid length 0", off)
+ return
+ }
+ if ln > 1e6 {
+ err = fmt.Errorf("string at offset %d has too large length %v", off, ln)
+ return
+ }
+ buf := make([]byte, ln)
+ var n int
+ n, err = io.ReadFull(r, buf)
+ if err != nil {
+ err = fmt.Errorf("failed to read trace at offset %d: read %v, want %v, error %v", off, n, ln, err)
+ return
+ }
+ off += n
+ strings[id] = string(buf)
+ continue
+ }
+ ev := rawEvent{typ: typ, off: off0}
+ if narg < inlineArgs {
+ for i := 0; i < int(narg); i++ {
+ var v uint64
+ v, off, err = readVal(r, off)
+ if err != nil {
+ err = fmt.Errorf("failed to read event %v argument at offset %v (%v)", typ, off, err)
+ return
+ }
+ ev.args = append(ev.args, v)
+ }
+ } else {
+ // More than inlineArgs args, the first value is length of the event in bytes.
+ var v uint64
+ v, off, err = readVal(r, off)
+ if err != nil {
+ err = fmt.Errorf("failed to read event %v argument at offset %v (%v)", typ, off, err)
+ return
+ }
+ evLen := v
+ off1 := off
+ for evLen > uint64(off-off1) {
+ v, off, err = readVal(r, off)
+ if err != nil {
+ err = fmt.Errorf("failed to read event %v argument at offset %v (%v)", typ, off, err)
+ return
+ }
+ ev.args = append(ev.args, v)
+ }
+ if evLen != uint64(off-off1) {
+ err = fmt.Errorf("event has wrong length at offset 0x%x: want %v, got %v", off0, evLen, off-off1)
+ return
+ }
+ }
+ switch ev.typ {
+ case EvUserLog: // EvUserLog records are followed by a value string of length ev.args[len(ev.args)-1]
+ var s string
+ s, off, err = readStr(r, off)
+ ev.sargs = append(ev.sargs, s)
+ }
+ events = append(events, ev)
+ }
+ return
+}
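As a rough illustration of the wire format readTrace decodes: in 1.7+ traces the first byte of every non-string event packs the event type into the low six bits and the inline argument count minus one (capped at three) into the top two bits. The standalone sketch below is not part of the patch; the header byte 0x41 is simply what an EvBatch record with its two arguments would carry.

package main

import "fmt"

func main() {
	b := byte(0x41)        // 0b01_000001, an EvBatch header byte
	typ := b << 2 >> 2     // low 6 bits: event type 1 (EvBatch)
	narg := b>>6 + 1       // top 2 bits + 1: 2 inline arguments follow
	fmt.Println(typ, narg) // prints: 1 2
}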
+
+func readStr(r io.Reader, off0 int) (s string, off int, err error) {
+ var sz uint64
+ sz, off, err = readVal(r, off0)
+ if err != nil || sz == 0 {
+ return "", off, err
+ }
+ if sz > 1e6 {
+ return "", off, fmt.Errorf("string at offset %d is too large (len=%d)", off, sz)
+ }
+ buf := make([]byte, sz)
+ n, err := io.ReadFull(r, buf)
+ if err != nil || sz != uint64(n) {
+ return "", off + n, fmt.Errorf("failed to read trace at offset %d: read %v, want %v, error %v", off, n, sz, err)
+ }
+ return string(buf), off + n, nil
+}
+
+// parseHeader parses trace header of the form "go 1.7 trace\x00\x00\x00\x00"
+// and returns parsed version as 1007.
+func parseHeader(buf []byte) (int, error) {
+ if len(buf) != 16 {
+ return 0, fmt.Errorf("bad header length")
+ }
+ if buf[0] != 'g' || buf[1] != 'o' || buf[2] != ' ' ||
+ buf[3] < '1' || buf[3] > '9' ||
+ buf[4] != '.' ||
+ buf[5] < '1' || buf[5] > '9' {
+ return 0, fmt.Errorf("not a trace file")
+ }
+ ver := int(buf[5] - '0')
+ i := 0
+ for ; buf[6+i] >= '0' && buf[6+i] <= '9' && i < 2; i++ {
+ ver = ver*10 + int(buf[6+i]-'0')
+ }
+ ver += int(buf[3]-'0') * 1000
+ if !bytes.Equal(buf[6+i:], []byte(" trace\x00\x00\x00\x00")[:10-i]) {
+ return 0, fmt.Errorf("not a trace file")
+ }
+ return ver, nil
+}
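A quick worked example of the version arithmetic in parseHeader, using a hypothetical Go 1.21 header: the minor digits after "1." accumulate into 21 and the major digit contributes 1000, giving 1021. The sketch below inlines that arithmetic so it runs standalone.

package main

import "fmt"

func main() {
	hdr := []byte("go 1.21 trace\x00\x00\x00") // 16-byte header
	ver := int(hdr[5] - '0')                   // 2
	ver = ver*10 + int(hdr[6]-'0')             // 21 (hdr[7] is ' ', so the digit loop stops)
	ver += int(hdr[3]-'0') * 1000              // 1021
	fmt.Println(ver)                           // 1021
}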
+
+// parseEvents transforms raw events into events.
+// It analyzes and verifies per-event-type arguments.
+func parseEvents(ver int, rawEvents []rawEvent, strings map[uint64]string) (events []*Event, stacks map[uint64][]*Frame, err error) {
+ var ticksPerSec, lastSeq, lastTs int64
+ var lastG uint64
+ var lastP int
+ timerGoids := make(map[uint64]bool)
+ lastGs := make(map[int]uint64) // last goroutine running on P
+ stacks = make(map[uint64][]*Frame)
+ batches := make(map[int][]*Event) // events by P
+ for _, raw := range rawEvents {
+ desc := EventDescriptions[raw.typ]
+ if desc.Name == "" {
+ err = fmt.Errorf("missing description for event type %v", raw.typ)
+ return
+ }
+ narg := argNum(raw, ver)
+ if len(raw.args) != narg {
+ err = fmt.Errorf("%v has wrong number of arguments at offset 0x%x: want %v, got %v",
+ desc.Name, raw.off, narg, len(raw.args))
+ return
+ }
+ switch raw.typ {
+ case EvBatch:
+ lastGs[lastP] = lastG
+ lastP = int(raw.args[0])
+ lastG = lastGs[lastP]
+ if ver < 1007 {
+ lastSeq = int64(raw.args[1])
+ lastTs = int64(raw.args[2])
+ } else {
+ lastTs = int64(raw.args[1])
+ }
+ case EvFrequency:
+ ticksPerSec = int64(raw.args[0])
+ if ticksPerSec <= 0 {
+ // The most likely cause for this is tick skew on different CPUs.
+ // For example, solaris/amd64 seems to have wildly different
+ // ticks on different CPUs.
+ err = ErrTimeOrder
+ return
+ }
+ case EvTimerGoroutine:
+ timerGoids[raw.args[0]] = true
+ case EvStack:
+ if len(raw.args) < 2 {
+ err = fmt.Errorf("EvStack has wrong number of arguments at offset 0x%x: want at least 2, got %v",
+ raw.off, len(raw.args))
+ return
+ }
+ size := raw.args[1]
+ if size > 1000 {
+ err = fmt.Errorf("EvStack has bad number of frames at offset 0x%x: %v",
+ raw.off, size)
+ return
+ }
+ want := 2 + 4*size
+ if ver < 1007 {
+ want = 2 + size
+ }
+ if uint64(len(raw.args)) != want {
+ err = fmt.Errorf("EvStack has wrong number of arguments at offset 0x%x: want %v, got %v",
+ raw.off, want, len(raw.args))
+ return
+ }
+ id := raw.args[0]
+ if id != 0 && size > 0 {
+ stk := make([]*Frame, size)
+ for i := 0; i < int(size); i++ {
+ if ver < 1007 {
+ stk[i] = &Frame{PC: raw.args[2+i]}
+ } else {
+ pc := raw.args[2+i*4+0]
+ fn := raw.args[2+i*4+1]
+ file := raw.args[2+i*4+2]
+ line := raw.args[2+i*4+3]
+ stk[i] = &Frame{PC: pc, Fn: strings[fn], File: strings[file], Line: int(line)}
+ }
+ }
+ stacks[id] = stk
+ }
+ default:
+ e := &Event{Off: raw.off, Type: raw.typ, P: lastP, G: lastG}
+ var argOffset int
+ if ver < 1007 {
+ e.seq = lastSeq + int64(raw.args[0])
+ e.Ts = lastTs + int64(raw.args[1])
+ lastSeq = e.seq
+ argOffset = 2
+ } else {
+ e.Ts = lastTs + int64(raw.args[0])
+ argOffset = 1
+ }
+ lastTs = e.Ts
+ for i := argOffset; i < narg; i++ {
+ if i == narg-1 && desc.Stack {
+ e.StkID = raw.args[i]
+ } else {
+ e.Args[i-argOffset] = raw.args[i]
+ }
+ }
+ switch raw.typ {
+ case EvGoStart, EvGoStartLocal, EvGoStartLabel:
+ lastG = e.Args[0]
+ e.G = lastG
+ if raw.typ == EvGoStartLabel {
+ e.SArgs = []string{strings[e.Args[2]]}
+ }
+ case EvSTWStart:
+ e.G = 0
+ if ver < 1021 {
+ switch e.Args[0] {
+ case 0:
+ e.SArgs = []string{"mark termination"}
+ case 1:
+ e.SArgs = []string{"sweep termination"}
+ default:
+ err = fmt.Errorf("unknown STW kind %d", e.Args[0])
+ return
+ }
+ } else if ver == 1021 {
+ if kind := e.Args[0]; kind < uint64(len(stwReasonStringsGo121)) {
+ e.SArgs = []string{stwReasonStringsGo121[kind]}
+ } else {
+ e.SArgs = []string{"unknown"}
+ }
+ } else {
+ // Can't make any assumptions.
+ e.SArgs = []string{"unknown"}
+ }
+ case EvGCStart, EvGCDone, EvSTWDone:
+ e.G = 0
+ case EvGoEnd, EvGoStop, EvGoSched, EvGoPreempt,
+ EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
+ EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet,
+ EvGoSysBlock, EvGoBlockGC:
+ lastG = 0
+ case EvGoSysExit, EvGoWaiting, EvGoInSyscall:
+ e.G = e.Args[0]
+ case EvUserTaskCreate:
+ // e.Args 0: taskID, 1:parentID, 2:nameID
+ e.SArgs = []string{strings[e.Args[2]]}
+ case EvUserRegion:
+ // e.Args 0: taskID, 1: mode, 2:nameID
+ e.SArgs = []string{strings[e.Args[2]]}
+ case EvUserLog:
+ // e.Args 0: taskID, 1:keyID, 2: stackID
+ e.SArgs = []string{strings[e.Args[1]], raw.sargs[0]}
+ case EvCPUSample:
+ e.Ts = int64(e.Args[0])
+ e.P = int(e.Args[1])
+ e.G = e.Args[2]
+ e.Args[0] = 0
+ }
+ switch raw.typ {
+ default:
+ batches[lastP] = append(batches[lastP], e)
+ case EvCPUSample:
+ // Most events are written out by the active P at the exact
+ // moment they describe. CPU profile samples are different
+ // because they're written to the tracing log after some delay,
+ // by a separate worker goroutine, into a separate buffer.
+ //
+ // We keep these in their own batch until all of the batches are
+ // merged in timestamp order. We also (right before the merge)
+ // re-sort these events by the timestamp captured in the
+ // profiling signal handler.
+ batches[ProfileP] = append(batches[ProfileP], e)
+ }
+ }
+ }
+ if len(batches) == 0 {
+ err = fmt.Errorf("trace is empty")
+ return
+ }
+ if ticksPerSec == 0 {
+ err = fmt.Errorf("no EvFrequency event")
+ return
+ }
+ if BreakTimestampsForTesting {
+ var batchArr [][]*Event
+ for _, batch := range batches {
+ batchArr = append(batchArr, batch)
+ }
+ for i := 0; i < 5; i++ {
+ batch := batchArr[rand.Intn(len(batchArr))]
+ batch[rand.Intn(len(batch))].Ts += int64(rand.Intn(2000) - 1000)
+ }
+ }
+ if ver < 1007 {
+ events, err = order1005(batches)
+ } else {
+ events, err = order1007(batches)
+ }
+ if err != nil {
+ return
+ }
+
+ // Translate cpu ticks to real time.
+ minTs := events[0].Ts
+ // Use floating point to avoid integer overflows.
+ freq := 1e9 / float64(ticksPerSec)
+ for _, ev := range events {
+ ev.Ts = int64(float64(ev.Ts-minTs) * freq)
+ // Move timers and syscalls to separate fake Ps.
+ if timerGoids[ev.G] && ev.Type == EvGoUnblock {
+ ev.P = TimerP
+ }
+ if ev.Type == EvGoSysExit {
+ ev.P = SyscallP
+ }
+ }
+
+ return
+}
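To make the tick-to-nanosecond conversion at the end of parseEvents concrete, here is a small sketch with a made-up 2.5 GHz tick rate: freq becomes 0.4 ns per tick, so an event observed 250 ticks after minTs lands at Ts = 100 ns.

package main

import "fmt"

func main() {
	ticksPerSec := int64(2_500_000_000) // hypothetical 2.5 GHz CPU tick rate
	freq := 1e9 / float64(ticksPerSec)  // 0.4 ns per tick
	deltaTicks := int64(250)            // event observed 250 ticks after minTs
	fmt.Println(int64(float64(deltaTicks) * freq)) // 100
}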
+
+// removeFutile removes all constituents of futile wakeups (block, unblock, start).
+// For example, a goroutine was unblocked on a mutex, but another goroutine got
+// ahead and acquired the mutex before the first goroutine is scheduled,
+// so the first goroutine has to block again. Such wakeups happen on buffered
+// channels and sync.Mutex, but are generally not interesting for the end user.
+func removeFutile(events []*Event) []*Event {
+ // Two non-trivial aspects:
+ // 1. A goroutine can be preempted during a futile wakeup and migrate to another P.
+ // We want to remove all of that.
+ // 2. Tracing can start in the middle of a futile wakeup.
+ // That is, we can see a futile wakeup event w/o the actual wakeup before it.
+ // postProcessTrace runs after us and ensures that we leave the trace in a consistent state.
+
+ // Phase 1: determine futile wakeup sequences.
+ type G struct {
+ futile bool
+ wakeup []*Event // wakeup sequence (subject for removal)
+ }
+ gs := make(map[uint64]G)
+ futile := make(map[*Event]bool)
+ for _, ev := range events {
+ switch ev.Type {
+ case EvGoUnblock:
+ g := gs[ev.Args[0]]
+ g.wakeup = []*Event{ev}
+ gs[ev.Args[0]] = g
+ case EvGoStart, EvGoPreempt, EvFutileWakeup:
+ g := gs[ev.G]
+ g.wakeup = append(g.wakeup, ev)
+ if ev.Type == EvFutileWakeup {
+ g.futile = true
+ }
+ gs[ev.G] = g
+ case EvGoBlock, EvGoBlockSend, EvGoBlockRecv, EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond:
+ g := gs[ev.G]
+ if g.futile {
+ futile[ev] = true
+ for _, ev1 := range g.wakeup {
+ futile[ev1] = true
+ }
+ }
+ delete(gs, ev.G)
+ }
+ }
+
+ // Phase 2: remove futile wakeup sequences.
+ newEvents := events[:0] // overwrite the original slice
+ for _, ev := range events {
+ if !futile[ev] {
+ newEvents = append(newEvents, ev)
+ }
+ }
+ return newEvents
+}
+
+// ErrTimeOrder is returned by Parse when the trace contains
+// time stamps that do not respect actual event ordering.
+var ErrTimeOrder = fmt.Errorf("time stamps out of order")
+
+// postProcessTrace does inter-event verification and information restoration.
+// The resulting trace is guaranteed to be consistent
+// (for example, a P does not run two Gs at the same time, or a G is indeed
+// blocked before an unblock event).
+func postProcessTrace(ver int, events []*Event) error {
+ const (
+ gDead = iota
+ gRunnable
+ gRunning
+ gWaiting
+ )
+ type gdesc struct {
+ state int
+ ev *Event
+ evStart *Event
+ evCreate *Event
+ evMarkAssist *Event
+ }
+ type pdesc struct {
+ running bool
+ g uint64
+ evSTW *Event
+ evSweep *Event
+ }
+
+ gs := make(map[uint64]gdesc)
+ ps := make(map[int]pdesc)
+ tasks := make(map[uint64]*Event) // task id to task creation events
+ activeRegions := make(map[uint64][]*Event) // goroutine id to stack of regions
+ gs[0] = gdesc{state: gRunning}
+ var evGC, evSTW *Event
+
+ checkRunning := func(p pdesc, g gdesc, ev *Event, allowG0 bool) error {
+ name := EventDescriptions[ev.Type].Name
+ if g.state != gRunning {
+ return fmt.Errorf("g %v is not running while %v (offset %v, time %v)", ev.G, name, ev.Off, ev.Ts)
+ }
+ if p.g != ev.G {
+ return fmt.Errorf("p %v is not running g %v while %v (offset %v, time %v)", ev.P, ev.G, name, ev.Off, ev.Ts)
+ }
+ if !allowG0 && ev.G == 0 {
+ return fmt.Errorf("g 0 did %v (offset %v, time %v)", EventDescriptions[ev.Type].Name, ev.Off, ev.Ts)
+ }
+ return nil
+ }
+
+ for _, ev := range events {
+ g := gs[ev.G]
+ p := ps[ev.P]
+
+ switch ev.Type {
+ case EvProcStart:
+ if p.running {
+ return fmt.Errorf("p %v is running before start (offset %v, time %v)", ev.P, ev.Off, ev.Ts)
+ }
+ p.running = true
+ case EvProcStop:
+ if !p.running {
+ return fmt.Errorf("p %v is not running before stop (offset %v, time %v)", ev.P, ev.Off, ev.Ts)
+ }
+ if p.g != 0 {
+ return fmt.Errorf("p %v is running a goroutine %v during stop (offset %v, time %v)", ev.P, p.g, ev.Off, ev.Ts)
+ }
+ p.running = false
+ case EvGCStart:
+ if evGC != nil {
+ return fmt.Errorf("previous GC is not ended before a new one (offset %v, time %v)", ev.Off, ev.Ts)
+ }
+ evGC = ev
+ // Attribute this to the global GC state.
+ ev.P = GCP
+ case EvGCDone:
+ if evGC == nil {
+ return fmt.Errorf("bogus GC end (offset %v, time %v)", ev.Off, ev.Ts)
+ }
+ evGC.Link = ev
+ evGC = nil
+ case EvSTWStart:
+ evp := &evSTW
+ if ver < 1010 {
+ // Before 1.10, EvSTWStart was per-P.
+ evp = &p.evSTW
+ }
+ if *evp != nil {
+ return fmt.Errorf("previous STW is not ended before a new one (offset %v, time %v)", ev.Off, ev.Ts)
+ }
+ *evp = ev
+ case EvSTWDone:
+ evp := &evSTW
+ if ver < 1010 {
+ // Before 1.10, EvSTWDone was per-P.
+ evp = &p.evSTW
+ }
+ if *evp == nil {
+ return fmt.Errorf("bogus STW end (offset %v, time %v)", ev.Off, ev.Ts)
+ }
+ (*evp).Link = ev
+ *evp = nil
+ case EvGCSweepStart:
+ if p.evSweep != nil {
+ return fmt.Errorf("previous sweeping is not ended before a new one (offset %v, time %v)", ev.Off, ev.Ts)
+ }
+ p.evSweep = ev
+ case EvGCMarkAssistStart:
+ if g.evMarkAssist != nil {
+ return fmt.Errorf("previous mark assist is not ended before a new one (offset %v, time %v)", ev.Off, ev.Ts)
+ }
+ g.evMarkAssist = ev
+ case EvGCMarkAssistDone:
+ // Unlike most events, mark assists can be in progress when a
+ // goroutine starts tracing, so we can't report an error here.
+ if g.evMarkAssist != nil {
+ g.evMarkAssist.Link = ev
+ g.evMarkAssist = nil
+ }
+ case EvGCSweepDone:
+ if p.evSweep == nil {
+ return fmt.Errorf("bogus sweeping end (offset %v, time %v)", ev.Off, ev.Ts)
+ }
+ p.evSweep.Link = ev
+ p.evSweep = nil
+ case EvGoWaiting:
+ if g.state != gRunnable {
+ return fmt.Errorf("g %v is not runnable before EvGoWaiting (offset %v, time %v)", ev.G, ev.Off, ev.Ts)
+ }
+ g.state = gWaiting
+ g.ev = ev
+ case EvGoInSyscall:
+ if g.state != gRunnable {
+ return fmt.Errorf("g %v is not runnable before EvGoInSyscall (offset %v, time %v)", ev.G, ev.Off, ev.Ts)
+ }
+ g.state = gWaiting
+ g.ev = ev
+ case EvGoCreate:
+ if err := checkRunning(p, g, ev, true); err != nil {
+ return err
+ }
+ if _, ok := gs[ev.Args[0]]; ok {
+ return fmt.Errorf("g %v already exists (offset %v, time %v)", ev.Args[0], ev.Off, ev.Ts)
+ }
+ gs[ev.Args[0]] = gdesc{state: gRunnable, ev: ev, evCreate: ev}
+ case EvGoStart, EvGoStartLabel:
+ if g.state != gRunnable {
+ return fmt.Errorf("g %v is not runnable before start (offset %v, time %v)", ev.G, ev.Off, ev.Ts)
+ }
+ if p.g != 0 {
+ return fmt.Errorf("p %v is already running g %v while start g %v (offset %v, time %v)", ev.P, p.g, ev.G, ev.Off, ev.Ts)
+ }
+ g.state = gRunning
+ g.evStart = ev
+ p.g = ev.G
+ if g.evCreate != nil {
+ if ver < 1007 {
+ // +1 because symbolizer expects return pc.
+ ev.Stk = []*Frame{{PC: g.evCreate.Args[1] + 1}}
+ } else {
+ ev.StkID = g.evCreate.Args[1]
+ }
+ g.evCreate = nil
+ }
+
+ if g.ev != nil {
+ g.ev.Link = ev
+ g.ev = nil
+ }
+ case EvGoEnd, EvGoStop:
+ if err := checkRunning(p, g, ev, false); err != nil {
+ return err
+ }
+ g.evStart.Link = ev
+ g.evStart = nil
+ g.state = gDead
+ p.g = 0
+
+ if ev.Type == EvGoEnd { // flush all active regions
+ regions := activeRegions[ev.G]
+ for _, s := range regions {
+ s.Link = ev
+ }
+ delete(activeRegions, ev.G)
+ }
+
+ case EvGoSched, EvGoPreempt:
+ if err := checkRunning(p, g, ev, false); err != nil {
+ return err
+ }
+ g.state = gRunnable
+ g.evStart.Link = ev
+ g.evStart = nil
+ p.g = 0
+ g.ev = ev
+ case EvGoUnblock:
+ if g.state != gRunning {
+ return fmt.Errorf("g %v is not running while unpark (offset %v, time %v)", ev.G, ev.Off, ev.Ts)
+ }
+ if ev.P != TimerP && p.g != ev.G {
+ return fmt.Errorf("p %v is not running g %v while unpark (offset %v, time %v)", ev.P, ev.G, ev.Off, ev.Ts)
+ }
+ g1 := gs[ev.Args[0]]
+ if g1.state != gWaiting {
+ return fmt.Errorf("g %v is not waiting before unpark (offset %v, time %v)", ev.Args[0], ev.Off, ev.Ts)
+ }
+ if g1.ev != nil && g1.ev.Type == EvGoBlockNet && ev.P != TimerP {
+ ev.P = NetpollP
+ }
+ if g1.ev != nil {
+ g1.ev.Link = ev
+ }
+ g1.state = gRunnable
+ g1.ev = ev
+ gs[ev.Args[0]] = g1
+ case EvGoSysCall:
+ if err := checkRunning(p, g, ev, false); err != nil {
+ return err
+ }
+ g.ev = ev
+ case EvGoSysBlock:
+ if err := checkRunning(p, g, ev, false); err != nil {
+ return err
+ }
+ g.state = gWaiting
+ g.evStart.Link = ev
+ g.evStart = nil
+ p.g = 0
+ case EvGoSysExit:
+ if g.state != gWaiting {
+ return fmt.Errorf("g %v is not waiting during syscall exit (offset %v, time %v)", ev.G, ev.Off, ev.Ts)
+ }
+ if g.ev != nil && g.ev.Type == EvGoSysCall {
+ g.ev.Link = ev
+ }
+ g.state = gRunnable
+ g.ev = ev
+ case EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
+ EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet, EvGoBlockGC:
+ if err := checkRunning(p, g, ev, false); err != nil {
+ return err
+ }
+ g.state = gWaiting
+ g.ev = ev
+ g.evStart.Link = ev
+ g.evStart = nil
+ p.g = 0
+ case EvUserTaskCreate:
+ taskid := ev.Args[0]
+ if prevEv, ok := tasks[taskid]; ok {
+ return fmt.Errorf("task id conflicts (id:%d), %q vs %q", taskid, ev, prevEv)
+ }
+ tasks[ev.Args[0]] = ev
+ case EvUserTaskEnd:
+ taskid := ev.Args[0]
+ if taskCreateEv, ok := tasks[taskid]; ok {
+ taskCreateEv.Link = ev
+ delete(tasks, taskid)
+ }
+ case EvUserRegion:
+ mode := ev.Args[1]
+ regions := activeRegions[ev.G]
+ if mode == 0 { // region start
+ activeRegions[ev.G] = append(regions, ev) // push
+ } else if mode == 1 { // region end
+ n := len(regions)
+ if n > 0 { // matching region start event is in the trace.
+ s := regions[n-1]
+ if s.Args[0] != ev.Args[0] || s.SArgs[0] != ev.SArgs[0] { // task id, region name mismatch
+ return fmt.Errorf("misuse of region in goroutine %d: span end %q when the inner-most active span start event is %q", ev.G, ev, s)
+ }
+ // Link region start event with span end event
+ s.Link = ev
+
+ if n > 1 {
+ activeRegions[ev.G] = regions[:n-1]
+ } else {
+ delete(activeRegions, ev.G)
+ }
+ }
+ } else {
+ return fmt.Errorf("invalid user region mode: %q", ev)
+ }
+ }
+
+ gs[ev.G] = g
+ ps[ev.P] = p
+ }
+
+ // TODO(dvyukov): restore stacks for EvGoStart events.
+	// TODO(dvyukov): test that all EvGoStart events have a non-nil Link.
+
+ return nil
+}
+
+// symbolize attaches func/file/line info to stack traces.
+func symbolize(events []*Event, bin string) error {
+ // First, collect and dedup all pcs.
+ pcs := make(map[uint64]*Frame)
+ for _, ev := range events {
+ for _, f := range ev.Stk {
+ pcs[f.PC] = nil
+ }
+ }
+
+ // Start addr2line.
+ cmd := exec.Command(goCmd(), "tool", "addr2line", bin)
+ in, err := cmd.StdinPipe()
+ if err != nil {
+ return fmt.Errorf("failed to pipe addr2line stdin: %v", err)
+ }
+ cmd.Stderr = os.Stderr
+ out, err := cmd.StdoutPipe()
+ if err != nil {
+ return fmt.Errorf("failed to pipe addr2line stdout: %v", err)
+ }
+ err = cmd.Start()
+ if err != nil {
+ return fmt.Errorf("failed to start addr2line: %v", err)
+ }
+ outb := bufio.NewReader(out)
+
+ // Write all pcs to addr2line.
+	// Copy pcs to a slice to remember the order in which they are written to addr2line,
+	// because map iteration order is non-deterministic.
+ var pcArray []uint64
+ for pc := range pcs {
+ pcArray = append(pcArray, pc)
+ _, err := fmt.Fprintf(in, "0x%x\n", pc-1)
+ if err != nil {
+ return fmt.Errorf("failed to write to addr2line: %v", err)
+ }
+ }
+ in.Close()
+
+ // Read in answers.
+ for _, pc := range pcArray {
+ fn, err := outb.ReadString('\n')
+ if err != nil {
+ return fmt.Errorf("failed to read from addr2line: %v", err)
+ }
+ file, err := outb.ReadString('\n')
+ if err != nil {
+ return fmt.Errorf("failed to read from addr2line: %v", err)
+ }
+ f := &Frame{PC: pc}
+ f.Fn = fn[:len(fn)-1]
+ f.File = file[:len(file)-1]
+ if colon := strings.LastIndex(f.File, ":"); colon != -1 {
+ ln, err := strconv.Atoi(f.File[colon+1:])
+ if err == nil {
+ f.File = f.File[:colon]
+ f.Line = ln
+ }
+ }
+ pcs[pc] = f
+ }
+ cmd.Wait()
+
+ // Replace frames in events array.
+ for _, ev := range events {
+ for i, f := range ev.Stk {
+ ev.Stk[i] = pcs[f.PC]
+ }
+ }
+
+ return nil
+}
+
+// readVal reads unsigned base-128 value from r.
+func readVal(r io.Reader, off0 int) (v uint64, off int, err error) {
+ off = off0
+ for i := 0; i < 10; i++ {
+ var buf [1]byte
+ var n int
+ n, err = r.Read(buf[:])
+ if err != nil || n != 1 {
+ return 0, 0, fmt.Errorf("failed to read trace at offset %d: read %v, error %v", off0, n, err)
+ }
+ off++
+ v |= uint64(buf[0]&0x7f) << (uint(i) * 7)
+ if buf[0]&0x80 == 0 {
+ return
+ }
+ }
+ return 0, 0, fmt.Errorf("bad value at offset 0x%x", off0)
+}
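readVal decodes the usual unsigned base-128 varint (LEB128) encoding: each byte carries seven payload bits, low bits first, and the high bit marks continuation. For example, the value 300 is encoded as the two bytes 0xAC 0x02; the sketch below repeats the decode loop on that pair.

package main

import "fmt"

func main() {
	enc := []byte{0xAC, 0x02} // 300: low 7 bits 0x2C with the continuation bit, then 0x02
	var v uint64
	for i, b := range enc {
		v |= uint64(b&0x7f) << (uint(i) * 7)
		if b&0x80 == 0 {
			break
		}
	}
	fmt.Println(v) // 300
}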
+
+// Print dumps events to stdout. For debugging.
+func Print(events []*Event) {
+ for _, ev := range events {
+ PrintEvent(ev)
+ }
+}
+
+// PrintEvent dumps the event to stdout. For debugging.
+func PrintEvent(ev *Event) {
+ fmt.Printf("%s\n", ev)
+}
+
+func (ev *Event) String() string {
+ desc := EventDescriptions[ev.Type]
+ w := new(strings.Builder)
+ fmt.Fprintf(w, "%v %v p=%v g=%v off=%v", ev.Ts, desc.Name, ev.P, ev.G, ev.Off)
+ for i, a := range desc.Args {
+ fmt.Fprintf(w, " %v=%v", a, ev.Args[i])
+ }
+ for i, a := range desc.SArgs {
+ fmt.Fprintf(w, " %v=%v", a, ev.SArgs[i])
+ }
+ return w.String()
+}
+
+// argNum returns total number of args for the event accounting for timestamps,
+// sequence numbers and differences between trace format versions.
+func argNum(raw rawEvent, ver int) int {
+ desc := EventDescriptions[raw.typ]
+ if raw.typ == EvStack {
+ return len(raw.args)
+ }
+ narg := len(desc.Args)
+ if desc.Stack {
+ narg++
+ }
+ switch raw.typ {
+ case EvBatch, EvFrequency, EvTimerGoroutine:
+ if ver < 1007 {
+ narg++ // there was an unused arg before 1.7
+ }
+ return narg
+ }
+ narg++ // timestamp
+ if ver < 1007 {
+ narg++ // sequence
+ }
+ switch raw.typ {
+ case EvGCSweepDone:
+ if ver < 1009 {
+ narg -= 2 // 1.9 added two arguments
+ }
+ case EvGCStart, EvGoStart, EvGoUnblock:
+ if ver < 1007 {
+ narg-- // 1.7 added an additional seq arg
+ }
+ case EvSTWStart:
+ if ver < 1010 {
+ narg-- // 1.10 added an argument
+ }
+ }
+ return narg
+}
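As a worked example of this accounting (see the EventDescriptions table below): EvGoSched declares no typed arguments but carries a stack, so for a 1.11 trace argNum returns 2 (stack id plus timestamp), and for a 1.5 trace it returns 3 because of the extra sequence number. A hypothetical in-package test could check exactly that:

func TestArgNumSketch(t *testing.T) {
	if n := argNum(rawEvent{typ: EvGoSched}, 1011); n != 2 {
		t.Errorf("got %d, want 2", n) // stack id + timestamp
	}
	if n := argNum(rawEvent{typ: EvGoSched}, 1005); n != 3 {
		t.Errorf("got %d, want 3", n) // stack id + timestamp + seq
	}
}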
+
+// BreakTimestampsForTesting causes the parser to randomly alter timestamps (for testing of broken cputicks).
+var BreakTimestampsForTesting bool
+
+// Event types in the trace.
+// Verbatim copy from src/runtime/trace.go with the "trace" prefix removed.
+const (
+ EvNone = 0 // unused
+ EvBatch = 1 // start of per-P batch of events [pid, timestamp]
+ EvFrequency = 2 // contains tracer timer frequency [frequency (ticks per second)]
+ EvStack = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
+ EvGomaxprocs = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
+ EvProcStart = 5 // start of P [timestamp, thread id]
+ EvProcStop = 6 // stop of P [timestamp]
+ EvGCStart = 7 // GC start [timestamp, seq, stack id]
+ EvGCDone = 8 // GC done [timestamp]
+ EvSTWStart = 9 // GC mark termination start [timestamp, kind]
+ EvSTWDone = 10 // GC mark termination done [timestamp]
+ EvGCSweepStart = 11 // GC sweep start [timestamp, stack id]
+ EvGCSweepDone = 12 // GC sweep done [timestamp, swept, reclaimed]
+ EvGoCreate = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
+ EvGoStart = 14 // goroutine starts running [timestamp, goroutine id, seq]
+ EvGoEnd = 15 // goroutine ends [timestamp]
+ EvGoStop = 16 // goroutine stops (like in select{}) [timestamp, stack]
+ EvGoSched = 17 // goroutine calls Gosched [timestamp, stack]
+ EvGoPreempt = 18 // goroutine is preempted [timestamp, stack]
+ EvGoSleep = 19 // goroutine calls Sleep [timestamp, stack]
+ EvGoBlock = 20 // goroutine blocks [timestamp, stack]
+ EvGoUnblock = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
+ EvGoBlockSend = 22 // goroutine blocks on chan send [timestamp, stack]
+ EvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack]
+ EvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack]
+ EvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
+ EvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack]
+ EvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack]
+ EvGoSysCall = 28 // syscall enter [timestamp, stack]
+ EvGoSysExit = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
+ EvGoSysBlock = 30 // syscall blocks [timestamp]
+ EvGoWaiting = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
+ EvGoInSyscall = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
+ EvHeapAlloc = 33 // gcController.heapLive change [timestamp, heap live bytes]
+ EvHeapGoal = 34 // gcController.heapGoal change [timestamp, heap goal bytes]
+ EvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
+ EvFutileWakeup = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
+ EvString = 37 // string dictionary entry [ID, length, string]
+ EvGoStartLocal = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
+ EvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
+ EvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
+ EvGoStartLabel = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
+ EvGoBlockGC = 42 // goroutine blocks on GC assist [timestamp, stack]
+ EvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
+ EvGCMarkAssistDone = 44 // GC mark assist done [timestamp]
+ EvUserTaskCreate = 45 // trace.NewTask [timestamp, internal task id, internal parent id, name string, stack]
+ EvUserTaskEnd = 46 // end of task [timestamp, internal task id, stack]
+ EvUserRegion = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), name string, stack]
+ EvUserLog = 48 // trace.Log [timestamp, internal id, key string id, stack, value string]
+ EvCPUSample = 49 // CPU profiling sample [timestamp, real timestamp, real P id (-1 when absent), goroutine id, stack]
+ EvCount = 50
+)
+
+var EventDescriptions = [EvCount]struct {
+ Name string
+ minVersion int
+ Stack bool
+ Args []string
+ SArgs []string // string arguments
+}{
+ EvNone: {"None", 1005, false, []string{}, nil},
+ EvBatch: {"Batch", 1005, false, []string{"p", "ticks"}, nil}, // in 1.5 format it was {"p", "seq", "ticks"}
+ EvFrequency: {"Frequency", 1005, false, []string{"freq"}, nil}, // in 1.5 format it was {"freq", "unused"}
+ EvStack: {"Stack", 1005, false, []string{"id", "siz"}, nil},
+ EvGomaxprocs: {"Gomaxprocs", 1005, true, []string{"procs"}, nil},
+ EvProcStart: {"ProcStart", 1005, false, []string{"thread"}, nil},
+ EvProcStop: {"ProcStop", 1005, false, []string{}, nil},
+ EvGCStart: {"GCStart", 1005, true, []string{"seq"}, nil}, // in 1.5 format it was {}
+ EvGCDone: {"GCDone", 1005, false, []string{}, nil},
+ EvSTWStart: {"STWStart", 1005, false, []string{"kindid"}, []string{"kind"}}, // <= 1.9, args was {} (implicitly {0})
+ EvSTWDone: {"STWDone", 1005, false, []string{}, nil},
+ EvGCSweepStart: {"GCSweepStart", 1005, true, []string{}, nil},
+ EvGCSweepDone: {"GCSweepDone", 1005, false, []string{"swept", "reclaimed"}, nil}, // before 1.9, format was {}
+ EvGoCreate: {"GoCreate", 1005, true, []string{"g", "stack"}, nil},
+ EvGoStart: {"GoStart", 1005, false, []string{"g", "seq"}, nil}, // in 1.5 format it was {"g"}
+ EvGoEnd: {"GoEnd", 1005, false, []string{}, nil},
+ EvGoStop: {"GoStop", 1005, true, []string{}, nil},
+ EvGoSched: {"GoSched", 1005, true, []string{}, nil},
+ EvGoPreempt: {"GoPreempt", 1005, true, []string{}, nil},
+ EvGoSleep: {"GoSleep", 1005, true, []string{}, nil},
+ EvGoBlock: {"GoBlock", 1005, true, []string{}, nil},
+ EvGoUnblock: {"GoUnblock", 1005, true, []string{"g", "seq"}, nil}, // in 1.5 format it was {"g"}
+ EvGoBlockSend: {"GoBlockSend", 1005, true, []string{}, nil},
+ EvGoBlockRecv: {"GoBlockRecv", 1005, true, []string{}, nil},
+ EvGoBlockSelect: {"GoBlockSelect", 1005, true, []string{}, nil},
+ EvGoBlockSync: {"GoBlockSync", 1005, true, []string{}, nil},
+ EvGoBlockCond: {"GoBlockCond", 1005, true, []string{}, nil},
+ EvGoBlockNet: {"GoBlockNet", 1005, true, []string{}, nil},
+ EvGoSysCall: {"GoSysCall", 1005, true, []string{}, nil},
+ EvGoSysExit: {"GoSysExit", 1005, false, []string{"g", "seq", "ts"}, nil},
+ EvGoSysBlock: {"GoSysBlock", 1005, false, []string{}, nil},
+ EvGoWaiting: {"GoWaiting", 1005, false, []string{"g"}, nil},
+ EvGoInSyscall: {"GoInSyscall", 1005, false, []string{"g"}, nil},
+ EvHeapAlloc: {"HeapAlloc", 1005, false, []string{"mem"}, nil},
+ EvHeapGoal: {"HeapGoal", 1005, false, []string{"mem"}, nil},
+ EvTimerGoroutine: {"TimerGoroutine", 1005, false, []string{"g"}, nil}, // in 1.5 format it was {"g", "unused"}
+ EvFutileWakeup: {"FutileWakeup", 1005, false, []string{}, nil},
+ EvString: {"String", 1007, false, []string{}, nil},
+ EvGoStartLocal: {"GoStartLocal", 1007, false, []string{"g"}, nil},
+ EvGoUnblockLocal: {"GoUnblockLocal", 1007, true, []string{"g"}, nil},
+ EvGoSysExitLocal: {"GoSysExitLocal", 1007, false, []string{"g", "ts"}, nil},
+ EvGoStartLabel: {"GoStartLabel", 1008, false, []string{"g", "seq", "labelid"}, []string{"label"}},
+ EvGoBlockGC: {"GoBlockGC", 1008, true, []string{}, nil},
+ EvGCMarkAssistStart: {"GCMarkAssistStart", 1009, true, []string{}, nil},
+ EvGCMarkAssistDone: {"GCMarkAssistDone", 1009, false, []string{}, nil},
+ EvUserTaskCreate: {"UserTaskCreate", 1011, true, []string{"taskid", "pid", "typeid"}, []string{"name"}},
+ EvUserTaskEnd: {"UserTaskEnd", 1011, true, []string{"taskid"}, nil},
+ EvUserRegion: {"UserRegion", 1011, true, []string{"taskid", "mode", "typeid"}, []string{"name"}},
+ EvUserLog: {"UserLog", 1011, true, []string{"id", "keyid"}, []string{"category", "message"}},
+ EvCPUSample: {"CPUSample", 1019, true, []string{"ts", "p", "g"}, nil},
+}
+
+// Copied from src/runtime/proc.go:stwReasonStrings in Go 1.21.
+var stwReasonStringsGo121 = [...]string{
+ "unknown",
+ "GC mark termination",
+ "GC sweep termination",
+ "write heap dump",
+ "goroutine profile",
+ "goroutine profile cleanup",
+ "all goroutines stack trace",
+ "read mem stats",
+ "AllThreadsSyscall",
+ "GOMAXPROCS",
+ "start trace",
+ "stop trace",
+ "CountPagesInUse (test)",
+ "ReadMetricsSlow (test)",
+ "ReadMemStatsSlow (test)",
+ "PageCachePagesLeaked (test)",
+ "ResetDebugLog (test)",
+}
diff --git a/src/internal/trace/parser_test.go b/src/internal/trace/parser_test.go
new file mode 100644
index 0000000..fce660c
--- /dev/null
+++ b/src/internal/trace/parser_test.go
@@ -0,0 +1,123 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+func TestCorruptedInputs(t *testing.T) {
+ // These inputs crashed parser previously.
+ tests := []string{
+ "gotrace\x00\x020",
+ "gotrace\x00Q00\x020",
+ "gotrace\x00T00\x020",
+ "gotrace\x00\xc3\x0200",
+ "go 1.5 trace\x00\x00\x00\x00\x020",
+ "go 1.5 trace\x00\x00\x00\x00Q00\x020",
+ "go 1.5 trace\x00\x00\x00\x00T00\x020",
+ "go 1.5 trace\x00\x00\x00\x00\xc3\x0200",
+ }
+ for _, data := range tests {
+ res, err := Parse(strings.NewReader(data), "")
+ if err == nil || res.Events != nil || res.Stacks != nil {
+ t.Fatalf("no error on input: %q", data)
+ }
+ }
+}
+
+func TestParseCanned(t *testing.T) {
+ files, err := os.ReadDir("./testdata")
+ if err != nil {
+ t.Fatalf("failed to read ./testdata: %v", err)
+ }
+ for _, f := range files {
+ info, err := f.Info()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if testing.Short() && info.Size() > 10000 {
+ continue
+ }
+ name := filepath.Join("./testdata", f.Name())
+ data, err := os.ReadFile(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+		// Instead of Parse, which requires a proper binary name for old traces,
+		// we use parse, which omits symbol lookup if an empty string is given.
+ ver, res, err := parse(bytes.NewReader(data), "")
+ switch {
+ case strings.HasSuffix(f.Name(), "_good"):
+ if err != nil {
+ t.Errorf("failed to parse good trace %v: %v", f.Name(), err)
+ }
+ checkTrace(t, ver, res)
+ case strings.HasSuffix(f.Name(), "_unordered"):
+ if err != ErrTimeOrder {
+ t.Errorf("unordered trace is not detected %v: %v", f.Name(), err)
+ }
+ default:
+ t.Errorf("unknown input file suffix: %v", f.Name())
+ }
+ }
+}
+
+// checkTrace walks over a good trace and makes a bunch of additional checks
+// that may not cause the parser to outright fail.
+func checkTrace(t *testing.T, ver int, res ParseResult) {
+ for _, ev := range res.Events {
+ if ver >= 1021 {
+ if ev.Type == EvSTWStart && ev.SArgs[0] == "unknown" {
+ t.Errorf("found unknown STW event; update stwReasonStrings?")
+ }
+ }
+ }
+}
+
+func TestParseVersion(t *testing.T) {
+ tests := map[string]int{
+ "go 1.5 trace\x00\x00\x00\x00": 1005,
+ "go 1.7 trace\x00\x00\x00\x00": 1007,
+ "go 1.10 trace\x00\x00\x00": 1010,
+ "go 1.25 trace\x00\x00\x00": 1025,
+ "go 1.234 trace\x00\x00": 1234,
+ "go 1.2345 trace\x00": -1,
+ "go 0.0 trace\x00\x00\x00\x00": -1,
+ "go a.b trace\x00\x00\x00\x00": -1,
+ }
+ for header, ver := range tests {
+ ver1, err := parseHeader([]byte(header))
+ if ver == -1 {
+ if err == nil {
+ t.Fatalf("no error on input: %q, version %v", header, ver1)
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("failed to parse: %q (%v)", header, err)
+ }
+ if ver != ver1 {
+ t.Fatalf("wrong version: %v, want %v, input: %q", ver1, ver, header)
+ }
+ }
+ }
+}
+
+func TestTimestampOverflow(t *testing.T) {
+ // Test that parser correctly handles large timestamps (long tracing).
+ w := NewWriter()
+ w.Emit(EvBatch, 0, 0)
+ w.Emit(EvFrequency, 1e9)
+ for ts := uint64(1); ts < 1e16; ts *= 2 {
+ w.Emit(EvGoCreate, ts, ts, 0, 0)
+ }
+ if _, err := Parse(w, ""); err != nil {
+ t.Fatalf("failed to parse: %v", err)
+ }
+}
diff --git a/src/internal/trace/testdata/http_1_10_good b/src/internal/trace/testdata/http_1_10_good
new file mode 100644
index 0000000..a4f2ed8
--- /dev/null
+++ b/src/internal/trace/testdata/http_1_10_good
Binary files differ
diff --git a/src/internal/trace/testdata/http_1_11_good b/src/internal/trace/testdata/http_1_11_good
new file mode 100644
index 0000000..0efcc6f
--- /dev/null
+++ b/src/internal/trace/testdata/http_1_11_good
Binary files differ
diff --git a/src/internal/trace/testdata/http_1_19_good b/src/internal/trace/testdata/http_1_19_good
new file mode 100644
index 0000000..c1d519e
--- /dev/null
+++ b/src/internal/trace/testdata/http_1_19_good
Binary files differ
diff --git a/src/internal/trace/testdata/http_1_21_good b/src/internal/trace/testdata/http_1_21_good
new file mode 100644
index 0000000..b3295f9
--- /dev/null
+++ b/src/internal/trace/testdata/http_1_21_good
Binary files differ
diff --git a/src/internal/trace/testdata/http_1_5_good b/src/internal/trace/testdata/http_1_5_good
new file mode 100644
index 0000000..0736cae
--- /dev/null
+++ b/src/internal/trace/testdata/http_1_5_good
Binary files differ
diff --git a/src/internal/trace/testdata/http_1_7_good b/src/internal/trace/testdata/http_1_7_good
new file mode 100644
index 0000000..b0e318e
--- /dev/null
+++ b/src/internal/trace/testdata/http_1_7_good
Binary files differ
diff --git a/src/internal/trace/testdata/http_1_9_good b/src/internal/trace/testdata/http_1_9_good
new file mode 100644
index 0000000..ca89278
--- /dev/null
+++ b/src/internal/trace/testdata/http_1_9_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_1_10_good b/src/internal/trace/testdata/stress_1_10_good
new file mode 100644
index 0000000..19778b0
--- /dev/null
+++ b/src/internal/trace/testdata/stress_1_10_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_1_11_good b/src/internal/trace/testdata/stress_1_11_good
new file mode 100644
index 0000000..6468d89
--- /dev/null
+++ b/src/internal/trace/testdata/stress_1_11_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_1_19_good b/src/internal/trace/testdata/stress_1_19_good
new file mode 100644
index 0000000..13f5926
--- /dev/null
+++ b/src/internal/trace/testdata/stress_1_19_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_1_21_good b/src/internal/trace/testdata/stress_1_21_good
new file mode 100644
index 0000000..1ade5e0
--- /dev/null
+++ b/src/internal/trace/testdata/stress_1_21_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_1_5_good b/src/internal/trace/testdata/stress_1_5_good
new file mode 100644
index 0000000..c5055eb
--- /dev/null
+++ b/src/internal/trace/testdata/stress_1_5_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_1_5_unordered b/src/internal/trace/testdata/stress_1_5_unordered
new file mode 100644
index 0000000..11f7d74
--- /dev/null
+++ b/src/internal/trace/testdata/stress_1_5_unordered
Binary files differ
diff --git a/src/internal/trace/testdata/stress_1_7_good b/src/internal/trace/testdata/stress_1_7_good
new file mode 100644
index 0000000..b4d927d
--- /dev/null
+++ b/src/internal/trace/testdata/stress_1_7_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_1_9_good b/src/internal/trace/testdata/stress_1_9_good
new file mode 100644
index 0000000..dcf17f1
--- /dev/null
+++ b/src/internal/trace/testdata/stress_1_9_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_start_stop_1_10_good b/src/internal/trace/testdata/stress_start_stop_1_10_good
new file mode 100644
index 0000000..b908e10
--- /dev/null
+++ b/src/internal/trace/testdata/stress_start_stop_1_10_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_start_stop_1_11_good b/src/internal/trace/testdata/stress_start_stop_1_11_good
new file mode 100644
index 0000000..457f01a
--- /dev/null
+++ b/src/internal/trace/testdata/stress_start_stop_1_11_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_start_stop_1_19_good b/src/internal/trace/testdata/stress_start_stop_1_19_good
new file mode 100644
index 0000000..92d9278
--- /dev/null
+++ b/src/internal/trace/testdata/stress_start_stop_1_19_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_start_stop_1_21_good b/src/internal/trace/testdata/stress_start_stop_1_21_good
new file mode 100644
index 0000000..fff46a9
--- /dev/null
+++ b/src/internal/trace/testdata/stress_start_stop_1_21_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_start_stop_1_5_good b/src/internal/trace/testdata/stress_start_stop_1_5_good
new file mode 100644
index 0000000..72a887b
--- /dev/null
+++ b/src/internal/trace/testdata/stress_start_stop_1_5_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_start_stop_1_7_good b/src/internal/trace/testdata/stress_start_stop_1_7_good
new file mode 100644
index 0000000..c23ed7d
--- /dev/null
+++ b/src/internal/trace/testdata/stress_start_stop_1_7_good
Binary files differ
diff --git a/src/internal/trace/testdata/stress_start_stop_1_9_good b/src/internal/trace/testdata/stress_start_stop_1_9_good
new file mode 100644
index 0000000..f00f190
--- /dev/null
+++ b/src/internal/trace/testdata/stress_start_stop_1_9_good
Binary files differ
diff --git a/src/internal/trace/testdata/user_task_region_1_11_good b/src/internal/trace/testdata/user_task_region_1_11_good
new file mode 100644
index 0000000..f4edb67
--- /dev/null
+++ b/src/internal/trace/testdata/user_task_region_1_11_good
Binary files differ
diff --git a/src/internal/trace/testdata/user_task_region_1_19_good b/src/internal/trace/testdata/user_task_region_1_19_good
new file mode 100644
index 0000000..1daa3b2
--- /dev/null
+++ b/src/internal/trace/testdata/user_task_region_1_19_good
Binary files differ
diff --git a/src/internal/trace/testdata/user_task_region_1_21_good b/src/internal/trace/testdata/user_task_region_1_21_good
new file mode 100644
index 0000000..5c01a64
--- /dev/null
+++ b/src/internal/trace/testdata/user_task_region_1_21_good
Binary files differ
diff --git a/src/internal/trace/writer.go b/src/internal/trace/writer.go
new file mode 100644
index 0000000..dd0b9f1
--- /dev/null
+++ b/src/internal/trace/writer.go
@@ -0,0 +1,49 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import "bytes"
+
+// Writer is a test trace writer.
+type Writer struct {
+ bytes.Buffer
+}
+
+func NewWriter() *Writer {
+ w := new(Writer)
+ w.Write([]byte("go 1.9 trace\x00\x00\x00\x00"))
+ return w
+}
+
+// Emit writes an event record to the trace.
+// See Event types for valid types and required arguments.
+func (w *Writer) Emit(typ byte, args ...uint64) {
+ nargs := byte(len(args)) - 1
+ if nargs > 3 {
+ nargs = 3
+ }
+ buf := []byte{typ | nargs<<6}
+ if nargs == 3 {
+ buf = append(buf, 0)
+ }
+ for _, a := range args {
+ buf = appendVarint(buf, a)
+ }
+ if nargs == 3 {
+ buf[1] = byte(len(buf) - 2)
+ }
+ n, err := w.Write(buf)
+ if n != len(buf) || err != nil {
+ panic("failed to write")
+ }
+}
+
+func appendVarint(buf []byte, v uint64) []byte {
+ for ; v >= 0x80; v >>= 7 {
+ buf = append(buf, 0x80|byte(v))
+ }
+ buf = append(buf, byte(v))
+ return buf
+}
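For example, Emit(EvBatch, 0, 0) writes the single header byte 0x41 (type 1, two arguments) followed by two zero-valued varints, which is exactly the layout readTrace in parser.go expects. A hypothetical test-style round trip through the parser (TestTimestampOverflow in parser_test.go does essentially this at larger scale):

func TestWriterRoundTripSketch(t *testing.T) {
	w := NewWriter()                // emits the "go 1.9 trace" header
	w.Emit(EvBatch, 0, 0)           // header byte 0x41, then varints 0x00 0x00
	w.Emit(EvFrequency, 1e9)        // ticks per second
	w.Emit(EvGoCreate, 1, 42, 0, 0) // ts, new goroutine id, new stack id, stack id
	if _, err := Parse(w, ""); err != nil {
		t.Fatalf("failed to parse: %v", err)
	}
}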
diff --git a/src/internal/txtar/archive.go b/src/internal/txtar/archive.go
new file mode 100644
index 0000000..fd95f1e
--- /dev/null
+++ b/src/internal/txtar/archive.go
@@ -0,0 +1,140 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package txtar implements a trivial text-based file archive format.
+//
+// The goals for the format are:
+//
+// - be trivial enough to create and edit by hand.
+// - be able to store trees of text files describing go command test cases.
+// - diff nicely in git history and code reviews.
+//
+// Non-goals include being a completely general archive format,
+// storing binary data, storing file modes, storing special files like
+// symbolic links, and so on.
+//
+// # Txtar format
+//
+// A txtar archive is zero or more comment lines and then a sequence of file entries.
+// Each file entry begins with a file marker line of the form "-- FILENAME --"
+// and is followed by zero or more file content lines making up the file data.
+// The comment or file content ends at the next file marker line.
+// The file marker line must begin with the three-byte sequence "-- "
+// and end with the three-byte sequence " --", but the enclosed
+// file name can be surrounded by additional white space,
+// all of which is stripped.
+//
+// If the txtar file is missing a trailing newline on the final line,
+// parsers should consider a final newline to be present anyway.
+//
+// There are no possible syntax errors in a txtar archive.
+package txtar
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+)
+
+// An Archive is a collection of files.
+type Archive struct {
+ Comment []byte
+ Files []File
+}
+
+// A File is a single file in an archive.
+type File struct {
+ Name string // name of file ("foo/bar.txt")
+ Data []byte // text content of file
+}
+
+// Format returns the serialized form of an Archive.
+// It is assumed that the Archive data structure is well-formed:
+// a.Comment and all a.Files[i].Data contain no file marker lines,
+// and every a.Files[i].Name is non-empty.
+func Format(a *Archive) []byte {
+ var buf bytes.Buffer
+ buf.Write(fixNL(a.Comment))
+ for _, f := range a.Files {
+ fmt.Fprintf(&buf, "-- %s --\n", f.Name)
+ buf.Write(fixNL(f.Data))
+ }
+ return buf.Bytes()
+}
+
+// ParseFile parses the named file as an archive.
+func ParseFile(file string) (*Archive, error) {
+ data, err := os.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+ return Parse(data), nil
+}
+
+// Parse parses the serialized form of an Archive.
+// The returned Archive holds slices of data.
+func Parse(data []byte) *Archive {
+ a := new(Archive)
+ var name string
+ a.Comment, name, data = findFileMarker(data)
+ for name != "" {
+ f := File{name, nil}
+ f.Data, name, data = findFileMarker(data)
+ a.Files = append(a.Files, f)
+ }
+ return a
+}
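A small usage sketch of Parse on the format described in the package comment; the archive contents here are made up, and internal/txtar is only importable from within the Go tree (the equivalent exported package is golang.org/x/tools/txtar):

package main

import (
	"fmt"

	"internal/txtar" // only available inside the standard library; use golang.org/x/tools/txtar elsewhere
)

func main() {
	a := txtar.Parse([]byte("example archive\n-- hello.txt --\nHello, txtar!\n"))
	fmt.Printf("%q\n", a.Comment)       // "example archive\n"
	fmt.Println(a.Files[0].Name)        // hello.txt
	fmt.Printf("%q\n", a.Files[0].Data) // "Hello, txtar!\n"
}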
+
+var (
+ newlineMarker = []byte("\n-- ")
+ marker = []byte("-- ")
+ markerEnd = []byte(" --")
+)
+
+// findFileMarker finds the next file marker in data,
+// extracts the file name, and returns the data before the marker,
+// the file name, and the data after the marker.
+// If there is no next marker, findFileMarker returns before = fixNL(data), name = "", after = nil.
+func findFileMarker(data []byte) (before []byte, name string, after []byte) {
+ var i int
+ for {
+ if name, after = isMarker(data[i:]); name != "" {
+ return data[:i], name, after
+ }
+ j := bytes.Index(data[i:], newlineMarker)
+ if j < 0 {
+ return fixNL(data), "", nil
+ }
+ i += j + 1 // positioned at start of new possible marker
+ }
+}
+
+// isMarker checks whether data begins with a file marker line.
+// If so, it returns the name from the line and the data after the line.
+// Otherwise it returns name == "" with an unspecified after.
+func isMarker(data []byte) (name string, after []byte) {
+ if !bytes.HasPrefix(data, marker) {
+ return "", nil
+ }
+ if i := bytes.IndexByte(data, '\n'); i >= 0 {
+ data, after = data[:i], data[i+1:]
+ }
+ if !(bytes.HasSuffix(data, markerEnd) && len(data) >= len(marker)+len(markerEnd)) {
+ return "", nil
+ }
+ return strings.TrimSpace(string(data[len(marker) : len(data)-len(markerEnd)])), after
+}
+
+// If data is empty or ends in \n, fixNL returns data.
+// Otherwise fixNL returns a new slice consisting of data with a final \n added.
+func fixNL(data []byte) []byte {
+ if len(data) == 0 || data[len(data)-1] == '\n' {
+ return data
+ }
+ d := make([]byte, len(data)+1)
+ copy(d, data)
+ d[len(data)] = '\n'
+ return d
+}
diff --git a/src/internal/types/errors/code_string.go b/src/internal/types/errors/code_string.go
new file mode 100644
index 0000000..719fc73
--- /dev/null
+++ b/src/internal/types/errors/code_string.go
@@ -0,0 +1,199 @@
+// Code generated by "stringer -type Code codes.go"; DO NOT EDIT.
+
+package errors
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidSyntaxTree - -1]
+ _ = x[Test-1]
+ _ = x[BlankPkgName-2]
+ _ = x[MismatchedPkgName-3]
+ _ = x[InvalidPkgUse-4]
+ _ = x[BadImportPath-5]
+ _ = x[BrokenImport-6]
+ _ = x[ImportCRenamed-7]
+ _ = x[UnusedImport-8]
+ _ = x[InvalidInitCycle-9]
+ _ = x[DuplicateDecl-10]
+ _ = x[InvalidDeclCycle-11]
+ _ = x[InvalidTypeCycle-12]
+ _ = x[InvalidConstInit-13]
+ _ = x[InvalidConstVal-14]
+ _ = x[InvalidConstType-15]
+ _ = x[UntypedNilUse-16]
+ _ = x[WrongAssignCount-17]
+ _ = x[UnassignableOperand-18]
+ _ = x[NoNewVar-19]
+ _ = x[MultiValAssignOp-20]
+ _ = x[InvalidIfaceAssign-21]
+ _ = x[InvalidChanAssign-22]
+ _ = x[IncompatibleAssign-23]
+ _ = x[UnaddressableFieldAssign-24]
+ _ = x[NotAType-25]
+ _ = x[InvalidArrayLen-26]
+ _ = x[BlankIfaceMethod-27]
+ _ = x[IncomparableMapKey-28]
+ _ = x[InvalidPtrEmbed-30]
+ _ = x[BadRecv-31]
+ _ = x[InvalidRecv-32]
+ _ = x[DuplicateFieldAndMethod-33]
+ _ = x[DuplicateMethod-34]
+ _ = x[InvalidBlank-35]
+ _ = x[InvalidIota-36]
+ _ = x[MissingInitBody-37]
+ _ = x[InvalidInitSig-38]
+ _ = x[InvalidInitDecl-39]
+ _ = x[InvalidMainDecl-40]
+ _ = x[TooManyValues-41]
+ _ = x[NotAnExpr-42]
+ _ = x[TruncatedFloat-43]
+ _ = x[NumericOverflow-44]
+ _ = x[UndefinedOp-45]
+ _ = x[MismatchedTypes-46]
+ _ = x[DivByZero-47]
+ _ = x[NonNumericIncDec-48]
+ _ = x[UnaddressableOperand-49]
+ _ = x[InvalidIndirection-50]
+ _ = x[NonIndexableOperand-51]
+ _ = x[InvalidIndex-52]
+ _ = x[SwappedSliceIndices-53]
+ _ = x[NonSliceableOperand-54]
+ _ = x[InvalidSliceExpr-55]
+ _ = x[InvalidShiftCount-56]
+ _ = x[InvalidShiftOperand-57]
+ _ = x[InvalidReceive-58]
+ _ = x[InvalidSend-59]
+ _ = x[DuplicateLitKey-60]
+ _ = x[MissingLitKey-61]
+ _ = x[InvalidLitIndex-62]
+ _ = x[OversizeArrayLit-63]
+ _ = x[MixedStructLit-64]
+ _ = x[InvalidStructLit-65]
+ _ = x[MissingLitField-66]
+ _ = x[DuplicateLitField-67]
+ _ = x[UnexportedLitField-68]
+ _ = x[InvalidLitField-69]
+ _ = x[UntypedLit-70]
+ _ = x[InvalidLit-71]
+ _ = x[AmbiguousSelector-72]
+ _ = x[UndeclaredImportedName-73]
+ _ = x[UnexportedName-74]
+ _ = x[UndeclaredName-75]
+ _ = x[MissingFieldOrMethod-76]
+ _ = x[BadDotDotDotSyntax-77]
+ _ = x[NonVariadicDotDotDot-78]
+ _ = x[MisplacedDotDotDot-79]
+ _ = x[InvalidDotDotDot-81]
+ _ = x[UncalledBuiltin-82]
+ _ = x[InvalidAppend-83]
+ _ = x[InvalidCap-84]
+ _ = x[InvalidClose-85]
+ _ = x[InvalidCopy-86]
+ _ = x[InvalidComplex-87]
+ _ = x[InvalidDelete-88]
+ _ = x[InvalidImag-89]
+ _ = x[InvalidLen-90]
+ _ = x[SwappedMakeArgs-91]
+ _ = x[InvalidMake-92]
+ _ = x[InvalidReal-93]
+ _ = x[InvalidAssert-94]
+ _ = x[ImpossibleAssert-95]
+ _ = x[InvalidConversion-96]
+ _ = x[InvalidUntypedConversion-97]
+ _ = x[BadOffsetofSyntax-98]
+ _ = x[InvalidOffsetof-99]
+ _ = x[UnusedExpr-100]
+ _ = x[UnusedVar-101]
+ _ = x[MissingReturn-102]
+ _ = x[WrongResultCount-103]
+ _ = x[OutOfScopeResult-104]
+ _ = x[InvalidCond-105]
+ _ = x[InvalidPostDecl-106]
+ _ = x[InvalidIterVar-108]
+ _ = x[InvalidRangeExpr-109]
+ _ = x[MisplacedBreak-110]
+ _ = x[MisplacedContinue-111]
+ _ = x[MisplacedFallthrough-112]
+ _ = x[DuplicateCase-113]
+ _ = x[DuplicateDefault-114]
+ _ = x[BadTypeKeyword-115]
+ _ = x[InvalidTypeSwitch-116]
+ _ = x[InvalidExprSwitch-117]
+ _ = x[InvalidSelectCase-118]
+ _ = x[UndeclaredLabel-119]
+ _ = x[DuplicateLabel-120]
+ _ = x[MisplacedLabel-121]
+ _ = x[UnusedLabel-122]
+ _ = x[JumpOverDecl-123]
+ _ = x[JumpIntoBlock-124]
+ _ = x[InvalidMethodExpr-125]
+ _ = x[WrongArgCount-126]
+ _ = x[InvalidCall-127]
+ _ = x[UnusedResults-128]
+ _ = x[InvalidDefer-129]
+ _ = x[InvalidGo-130]
+ _ = x[BadDecl-131]
+ _ = x[RepeatedDecl-132]
+ _ = x[InvalidUnsafeAdd-133]
+ _ = x[InvalidUnsafeSlice-134]
+ _ = x[UnsupportedFeature-135]
+ _ = x[NotAGenericType-136]
+ _ = x[WrongTypeArgCount-137]
+ _ = x[CannotInferTypeArgs-138]
+ _ = x[InvalidTypeArg-139]
+ _ = x[InvalidInstanceCycle-140]
+ _ = x[InvalidUnion-141]
+ _ = x[MisplacedConstraintIface-142]
+ _ = x[InvalidMethodTypeParams-143]
+ _ = x[MisplacedTypeParam-144]
+ _ = x[InvalidUnsafeSliceData-145]
+ _ = x[InvalidUnsafeString-146]
+ _ = x[InvalidClear-148]
+ _ = x[TypeTooLarge-149]
+ _ = x[InvalidMinMaxOperand-150]
+}
+
+const (
+ _Code_name_0 = "InvalidSyntaxTree"
+ _Code_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKey"
+ _Code_name_2 = "InvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDot"
+ _Code_name_3 = "InvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDecl"
+ _Code_name_4 = "InvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString"
+ _Code_name_5 = "InvalidClearTypeTooLargeInvalidMinMaxOperand"
+)
+
+var (
+ _Code_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411}
+ _Code_index_2 = [...]uint16{0, 15, 22, 33, 56, 71, 83, 94, 109, 123, 138, 153, 166, 175, 189, 204, 215, 230, 239, 255, 275, 293, 312, 324, 343, 362, 378, 395, 414, 428, 439, 454, 467, 482, 498, 512, 528, 543, 560, 578, 593, 603, 613, 630, 652, 666, 680, 700, 718, 738, 756}
+ _Code_index_3 = [...]uint16{0, 16, 31, 44, 54, 66, 77, 91, 104, 115, 125, 140, 151, 162, 175, 191, 208, 232, 249, 264, 274, 283, 296, 312, 328, 339, 354}
+ _Code_index_4 = [...]uint16{0, 14, 30, 44, 61, 81, 94, 110, 124, 141, 158, 175, 190, 204, 218, 229, 241, 254, 271, 284, 295, 308, 320, 329, 336, 348, 364, 382, 400, 415, 432, 451, 465, 485, 497, 521, 544, 562, 584, 603}
+ _Code_index_5 = [...]uint8{0, 12, 24, 44}
+)
+
+func (i Code) String() string {
+ switch {
+ case i == -1:
+ return _Code_name_0
+ case 1 <= i && i <= 28:
+ i -= 1
+ return _Code_name_1[_Code_index_1[i]:_Code_index_1[i+1]]
+ case 30 <= i && i <= 79:
+ i -= 30
+ return _Code_name_2[_Code_index_2[i]:_Code_index_2[i+1]]
+ case 81 <= i && i <= 106:
+ i -= 81
+ return _Code_name_3[_Code_index_3[i]:_Code_index_3[i+1]]
+ case 108 <= i && i <= 146:
+ i -= 108
+ return _Code_name_4[_Code_index_4[i]:_Code_index_4[i+1]]
+ case 148 <= i && i <= 150:
+ i -= 148
+ return _Code_name_5[_Code_index_5[i]:_Code_index_5[i+1]]
+ default:
+ return "Code(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/src/internal/types/errors/codes.go b/src/internal/types/errors/codes.go
new file mode 100644
index 0000000..62358c7
--- /dev/null
+++ b/src/internal/types/errors/codes.go
@@ -0,0 +1,1477 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package errors
+
+//go:generate stringer -type Code codes.go
+
+type Code int
+
+// This file defines the error codes that can be produced during type-checking.
+// Collectively, these codes provide an identifier that may be used to
+// implement special handling for certain types of errors.
+//
+// Error code values should not be changed: add new codes at the end.
+//
+// Error codes should be fine-grained enough that the exact nature of the error
+// can be easily determined, but coarse enough that they are not an
+// implementation detail of the type checking algorithm. As a rule-of-thumb,
+// errors should be considered equivalent if there is a theoretical refactoring
+// of the type checker in which they are emitted in exactly one place. For
+// example, the type checker emits different error messages for "too many
+// arguments" and "too few arguments", but one can imagine an alternative type
+// checker where this check instead just emits a single "wrong number of
+// arguments", so these errors should have the same code.
+//
+// Error code names should be as brief as possible while retaining accuracy and
+// distinctiveness. In most cases names should start with an adjective
+// describing the nature of the error (e.g. "invalid", "unused", "misplaced"),
+// and end with a noun identifying the relevant language object. For example,
+// "_DuplicateDecl" or "_InvalidSliceExpr". For brevity, naming follows the
+// convention that "bad" implies a problem with syntax, and "invalid" implies a
+// problem with types.
+
+const (
+ // InvalidSyntaxTree occurs if an invalid syntax tree is provided
+ // to the type checker. It should never happen.
+ InvalidSyntaxTree Code = -1
+)
+
+const (
+ // The zero Code value indicates an unset (invalid) error code.
+ _ Code = iota
+
+ // Test is reserved for errors that only apply while in self-test mode.
+ Test
+
+ // BlankPkgName occurs when a package name is the blank identifier "_".
+ //
+ // Per the spec:
+ // "The PackageName must not be the blank identifier."
+ //
+ // Example:
+ // package _
+ BlankPkgName
+
+ // MismatchedPkgName occurs when a file's package name doesn't match the
+ // package name already established by other files.
+ MismatchedPkgName
+
+ // InvalidPkgUse occurs when a package identifier is used outside of a
+ // selector expression.
+ //
+ // Example:
+ // import "fmt"
+ //
+ // var _ = fmt
+ InvalidPkgUse
+
+ // BadImportPath occurs when an import path is not valid.
+ BadImportPath
+
+ // BrokenImport occurs when importing a package fails.
+ //
+ // Example:
+ // import "amissingpackage"
+ BrokenImport
+
+ // ImportCRenamed occurs when the special import "C" is renamed. "C" is a
+ // pseudo-package, and must not be renamed.
+ //
+ // Example:
+ // import _ "C"
+ ImportCRenamed
+
+ // UnusedImport occurs when an import is unused.
+ //
+ // Example:
+ // import "fmt"
+ //
+ // func main() {}
+ UnusedImport
+
+ // InvalidInitCycle occurs when an invalid cycle is detected within the
+ // initialization graph.
+ //
+ // Example:
+ // var x int = f()
+ //
+ // func f() int { return x }
+ InvalidInitCycle
+
+ // DuplicateDecl occurs when an identifier is declared multiple times.
+ //
+ // Example:
+ // var x = 1
+ // var x = 2
+ DuplicateDecl
+
+ // InvalidDeclCycle occurs when a declaration cycle is not valid.
+ //
+ // Example:
+ // type S struct {
+ // S
+ // }
+ //
+ InvalidDeclCycle
+
+ // InvalidTypeCycle occurs when a cycle in type definitions results in a
+ // type that is not well-defined.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T [unsafe.Sizeof(T{})]int
+ InvalidTypeCycle
+
+ // InvalidConstInit occurs when a const declaration has a non-constant
+ // initializer.
+ //
+ // Example:
+ // var x int
+ // const _ = x
+ InvalidConstInit
+
+ // InvalidConstVal occurs when a const value cannot be converted to its
+ // target type.
+ //
+ // TODO(findleyr): this error code and example are not very clear. Consider
+ // removing it.
+ //
+ // Example:
+ // const _ = 1 << "hello"
+ InvalidConstVal
+
+ // InvalidConstType occurs when the underlying type in a const declaration
+ // is not a valid constant type.
+ //
+ // Example:
+ // const c *int = 4
+ InvalidConstType
+
+ // UntypedNilUse occurs when the predeclared (untyped) value nil is used to
+ // initialize a variable declared without an explicit type.
+ //
+ // Example:
+ // var x = nil
+ UntypedNilUse
+
+ // WrongAssignCount occurs when the number of values on the right-hand side
+ // of an assignment or initialization expression does not match the number
+ // of variables on the left-hand side.
+ //
+ // Example:
+ // var x = 1, 2
+ WrongAssignCount
+
+ // UnassignableOperand occurs when the left-hand side of an assignment is
+ // not assignable.
+ //
+ // Example:
+ // func f() {
+ // const c = 1
+ // c = 2
+ // }
+ UnassignableOperand
+
+ // NoNewVar occurs when a short variable declaration (':=') does not declare
+ // new variables.
+ //
+ // Example:
+ // func f() {
+ // x := 1
+ // x := 2
+ // }
+ NoNewVar
+
+ // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does
+ // not have single-valued left-hand or right-hand side.
+ //
+ // Per the spec:
+ // "In assignment operations, both the left- and right-hand expression lists
+ // must contain exactly one single-valued expression"
+ //
+ // Example:
+ // func f() int {
+ // x, y := 1, 2
+ // x, y += 1
+ // return x + y
+ // }
+ MultiValAssignOp
+
+ // InvalidIfaceAssign occurs when a value of type T is used as an
+ // interface, but T does not implement a method of the expected interface.
+ //
+ // Example:
+ // type I interface {
+ // f()
+ // }
+ //
+ // type T int
+ //
+ // var x I = T(1)
+ InvalidIfaceAssign
+
+ // InvalidChanAssign occurs when a chan assignment is invalid.
+ //
+ // Per the spec, a value x is assignable to a channel type T if:
+ // "x is a bidirectional channel value, T is a channel type, x's type V and
+ // T have identical element types, and at least one of V or T is not a
+ // defined type."
+ //
+ // Example:
+ // type T1 chan int
+ // type T2 chan int
+ //
+ // var x T1
+ // // Invalid assignment because both types are named
+ // var _ T2 = x
+ InvalidChanAssign
+
+ // IncompatibleAssign occurs when the type of the right-hand side expression
+ // in an assignment cannot be assigned to the type of the variable being
+ // assigned.
+ //
+ // Example:
+ // var x []int
+ // var _ int = x
+ IncompatibleAssign
+
+ // UnaddressableFieldAssign occurs when trying to assign to a struct field
+ // in a map value.
+ //
+ // Example:
+ // func f() {
+ // m := make(map[string]struct{i int})
+ // m["foo"].i = 42
+ // }
+ UnaddressableFieldAssign
+
+ // NotAType occurs when the identifier used as the underlying type in a type
+ // declaration or the right-hand side of a type alias does not denote a type.
+ //
+ // Example:
+ // var S = 2
+ //
+ // type T S
+ NotAType
+
+ // InvalidArrayLen occurs when an array length is not a constant value.
+ //
+ // Example:
+ // var n = 3
+ // var _ = [n]int{}
+ InvalidArrayLen
+
+ // BlankIfaceMethod occurs when a method name is '_'.
+ //
+ // Per the spec:
+ // "The name of each explicitly specified method must be unique and not
+ // blank."
+ //
+ // Example:
+ // type T interface {
+ // _(int)
+ // }
+ BlankIfaceMethod
+
+ // IncomparableMapKey occurs when a map key type does not support the == and
+ // != operators.
+ //
+ // Per the spec:
+ // "The comparison operators == and != must be fully defined for operands of
+ // the key type; thus the key type must not be a function, map, or slice."
+ //
+ // Example:
+ // var x map[T]int
+ //
+ // type T []int
+ IncomparableMapKey
+
+ // InvalidIfaceEmbed occurs when a non-interface type is embedded in an
+ // interface (for go 1.17 or earlier).
+ _ // not used anymore
+
+ // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
+ // and T itself is a pointer, an unsafe.Pointer, or an interface.
+ //
+ // Per the spec:
+ // "An embedded field must be specified as a type name T or as a pointer to
+ // a non-interface type name *T, and T itself may not be a pointer type."
+ //
+ // Example:
+ // type T *int
+ //
+ // type S struct {
+ // *T
+ // }
+ InvalidPtrEmbed
+
+ // BadRecv occurs when a method declaration does not have exactly one
+ // receiver parameter.
+ //
+ // Example:
+ // func () _() {}
+ BadRecv
+
+ // InvalidRecv occurs when a receiver type expression is not of the form T
+ // or *T, or T is a pointer type.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (**T) m() {}
+ InvalidRecv
+
+ // DuplicateFieldAndMethod occurs when an identifier appears as both a field
+ // and method name.
+ //
+ // Example:
+ // type T struct {
+ // m int
+ // }
+ //
+ // func (T) m() {}
+ DuplicateFieldAndMethod
+
+ // DuplicateMethod occurs when two methods on the same receiver type have
+ // the same name.
+ //
+ // Example:
+ // type T struct {}
+ // func (T) m() {}
+ // func (T) m(i int) int { return i }
+ DuplicateMethod
+
+ // InvalidBlank occurs when a blank identifier is used as a value or type.
+ //
+ // Per the spec:
+ // "The blank identifier may appear as an operand only on the left-hand side
+ // of an assignment."
+ //
+ // Example:
+ // var x = _
+ InvalidBlank
+
+ // InvalidIota occurs when the predeclared identifier iota is used outside
+ // of a constant declaration.
+ //
+ // Example:
+ // var x = iota
+ InvalidIota
+
+ // MissingInitBody occurs when an init function is missing its body.
+ //
+ // Example:
+ // func init()
+ MissingInitBody
+
+ // InvalidInitSig occurs when an init function declares parameters or
+ // results.
+ //
+ // Deprecated: no longer emitted by the type checker. _InvalidInitDecl is
+ // used instead.
+ InvalidInitSig
+
+ // InvalidInitDecl occurs when init is declared as anything other than a
+ // function.
+ //
+ // Example:
+ // var init = 1
+ //
+ // Example:
+ // func init() int { return 1 }
+ InvalidInitDecl
+
+ // InvalidMainDecl occurs when main is declared as anything other than a
+ // function, in a main package.
+ InvalidMainDecl
+
+ // TooManyValues occurs when a function returns too many values for the
+ // expression context in which it is used.
+ //
+ // Example:
+ // func ReturnTwo() (int, int) {
+ // return 1, 2
+ // }
+ //
+ // var x = ReturnTwo()
+ TooManyValues
+
+ // NotAnExpr occurs when a type expression is used where a value expression
+ // is expected.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func f() {
+ // T
+ // }
+ NotAnExpr
+
+ // TruncatedFloat occurs when a float constant is truncated to an integer
+ // value.
+ //
+ // Example:
+ // var _ int = 98.6
+ TruncatedFloat
+
+ // NumericOverflow occurs when a numeric constant overflows its target type.
+ //
+ // Example:
+ // var x int8 = 1000
+ NumericOverflow
+
+ // UndefinedOp occurs when an operator is not defined for the type(s) used
+ // in an operation.
+ //
+ // Example:
+ // var c = "a" - "b"
+ UndefinedOp
+
+ // MismatchedTypes occurs when operand types are incompatible in a binary
+ // operation.
+ //
+ // Example:
+ // var a = "hello"
+ // var b = 1
+ // var c = a - b
+ MismatchedTypes
+
+ // DivByZero occurs when a division operation is provable at compile
+ // time to be a division by zero.
+ //
+ // Example:
+ // const divisor = 0
+ // var x int = 1/divisor
+ DivByZero
+
+ // NonNumericIncDec occurs when an increment or decrement operator is
+ // applied to a non-numeric value.
+ //
+ // Example:
+ // func f() {
+ // var c = "c"
+ // c++
+ // }
+ NonNumericIncDec
+
+ // UnaddressableOperand occurs when the & operator is applied to an
+ // unaddressable expression.
+ //
+ // Example:
+ // var x = &1
+ UnaddressableOperand
+
+ // InvalidIndirection occurs when a non-pointer value is indirected via the
+ // '*' operator.
+ //
+ // Example:
+ // var x int
+ // var y = *x
+ InvalidIndirection
+
+ // NonIndexableOperand occurs when an index operation is applied to a value
+ // that cannot be indexed.
+ //
+ // Example:
+ // var x = 1
+ // var y = x[1]
+ NonIndexableOperand
+
+ // InvalidIndex occurs when an index argument is not of integer type,
+ // negative, or out-of-bounds.
+ //
+ // Example:
+ // var s = [...]int{1,2,3}
+ // var x = s[5]
+ //
+ // Example:
+ // var s = []int{1,2,3}
+ // var _ = s[-1]
+ //
+ // Example:
+ // var s = []int{1,2,3}
+ // var i string
+ // var _ = s[i]
+ InvalidIndex
+
+ // SwappedSliceIndices occurs when constant indices in a slice expression
+ // are decreasing in value.
+ //
+ // Example:
+ // var _ = []int{1,2,3}[2:1]
+ SwappedSliceIndices
+
+ // NonSliceableOperand occurs when a slice operation is applied to a value
+ // whose type is not sliceable, or is unaddressable.
+ //
+ // Example:
+ // var x = [...]int{1, 2, 3}[:1]
+ //
+ // Example:
+ // var x = 1
+ // var y = 1[:1]
+ NonSliceableOperand
+
+ // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is
+ // applied to a string.
+ //
+ // Example:
+ // var s = "hello"
+ // var x = s[1:2:3]
+ InvalidSliceExpr
+
+ // InvalidShiftCount occurs when the right-hand side of a shift operation is
+ // either non-integer, negative, or too large.
+ //
+ // Example:
+ // var (
+ // x string
+ // y int = 1 << x
+ // )
+ InvalidShiftCount
+
+ // InvalidShiftOperand occurs when the shifted operand is not an integer.
+ //
+ // Example:
+ // var s = "hello"
+ // var x = s << 2
+ InvalidShiftOperand
+
+ // InvalidReceive occurs when there is a channel receive from a value that
+ // is either not a channel, or is a send-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x = 1
+ // <-x
+ // }
+ InvalidReceive
+
+ // InvalidSend occurs when there is a channel send to a value that is not a
+ // channel, or is a receive-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x = 1
+ // x <- "hello!"
+ // }
+ InvalidSend
+
+ // DuplicateLitKey occurs when an index is duplicated in a slice, array, or
+ // map literal.
+ //
+ // Example:
+ // var _ = []int{0:1, 0:2}
+ //
+ // Example:
+ // var _ = map[string]int{"a": 1, "a": 2}
+ DuplicateLitKey
+
+ // MissingLitKey occurs when a map literal is missing a key expression.
+ //
+ // Example:
+ // var _ = map[string]int{1}
+ MissingLitKey
+
+ // InvalidLitIndex occurs when the key in a key-value element of a slice or
+ // array literal is not an integer constant.
+ //
+ // Example:
+ // var i = 0
+ // var x = []string{i: "world"}
+ InvalidLitIndex
+
+ // OversizeArrayLit occurs when an array literal exceeds its length.
+ //
+ // Example:
+ // var _ = [2]int{1,2,3}
+ OversizeArrayLit
+
+ // MixedStructLit occurs when a struct literal contains a mix of positional
+ // and named elements.
+ //
+ // Example:
+ // var _ = struct{i, j int}{i: 1, 2}
+ MixedStructLit
+
+ // InvalidStructLit occurs when a positional struct literal has an incorrect
+ // number of values.
+ //
+ // Example:
+ // var _ = struct{i, j int}{1,2,3}
+ InvalidStructLit
+
+ // MissingLitField occurs when a struct literal refers to a field that does
+ // not exist on the struct type.
+ //
+ // Example:
+ // var _ = struct{i int}{j: 2}
+ MissingLitField
+
+ // DuplicateLitField occurs when a struct literal contains duplicated
+ // fields.
+ //
+ // Example:
+ // var _ = struct{i int}{i: 1, i: 2}
+ DuplicateLitField
+
+ // UnexportedLitField occurs when a positional struct literal implicitly
+ // assigns an unexported field of an imported type.
+ UnexportedLitField
+
+ // InvalidLitField occurs when a field name is not a valid identifier.
+ //
+ // Example:
+ // var _ = struct{i int}{1: 1}
+ InvalidLitField
+
+ // UntypedLit occurs when a composite literal omits a required type
+ // identifier.
+ //
+ // Example:
+ // type outer struct{
+ // inner struct { i int }
+ // }
+ //
+ // var _ = outer{inner: {1}}
+ UntypedLit
+
+ // InvalidLit occurs when a composite literal expression does not match its
+ // type.
+ //
+ // Example:
+ // type P *struct{
+ // x int
+ // }
+ // var _ = P {}
+ InvalidLit
+
+ // AmbiguousSelector occurs when a selector is ambiguous.
+ //
+ // Example:
+ // type E1 struct { i int }
+ // type E2 struct { i int }
+ // type T struct { E1; E2 }
+ //
+ // var x T
+ // var _ = x.i
+ AmbiguousSelector
+
+ // UndeclaredImportedName occurs when a package-qualified identifier is
+ // undeclared by the imported package.
+ //
+ // Example:
+ // import "go/types"
+ //
+ // var _ = types.NotAnActualIdentifier
+ UndeclaredImportedName
+
+ // UnexportedName occurs when a selector refers to an unexported identifier
+ // of an imported package.
+ //
+ // Example:
+ // import "reflect"
+ //
+ // type _ reflect.flag
+ UnexportedName
+
+ // UndeclaredName occurs when an identifier is not declared in the current
+ // scope.
+ //
+ // Example:
+ // var x T
+ UndeclaredName
+
+ // MissingFieldOrMethod occurs when a selector references a field or method
+ // that does not exist.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // var x = T{}.f
+ MissingFieldOrMethod
+
+ // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is
+ // not valid.
+ //
+ // Example:
+ // var _ = map[int][...]int{0: {}}
+ BadDotDotDotSyntax
+
+ // NonVariadicDotDotDot occurs when a "..." is used on the final argument to
+ // a non-variadic function.
+ //
+ // Example:
+ // func printArgs(s []string) {
+ // for _, a := range s {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // s := []string{"a", "b", "c"}
+ // printArgs(s...)
+ // }
+ NonVariadicDotDotDot
+
+ // MisplacedDotDotDot occurs when a "..." is used somewhere other than the
+ // final argument in a function declaration.
+ //
+ // Example:
+ // func f(...int, int)
+ MisplacedDotDotDot
+
+ _ // InvalidDotDotDotOperand was removed.
+
+ // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in
+ // function.
+ //
+ // Example:
+ // var s = []int{1, 2, 3}
+ // var l = len(s...)
+ InvalidDotDotDot
+
+ // UncalledBuiltin occurs when a built-in function is used as a
+ // function-valued expression, instead of being called.
+ //
+ // Per the spec:
+ // "The built-in functions do not have standard Go types, so they can only
+ // appear in call expressions; they cannot be used as function values."
+ //
+ // Example:
+ // var _ = copy
+ UncalledBuiltin
+
+ // InvalidAppend occurs when append is called with a first argument that is
+ // not a slice.
+ //
+ // Example:
+ // var _ = append(1, 2)
+ InvalidAppend
+
+ // InvalidCap occurs when an argument to the cap built-in function is not of
+ // supported type.
+ //
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
+ // which underlying types are supported as arguments to cap and len.
+ //
+ // Example:
+ // var s = 2
+ // var x = cap(s)
+ InvalidCap
+
+ // InvalidClose occurs when close(...) is called with an argument that is
+ // not of channel type, or that is a receive-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x int
+ // close(x)
+ // }
+ InvalidClose
+
+ // InvalidCopy occurs when the arguments are not of slice type or do not
+ // have compatible type.
+ //
+ // See https://golang.org/ref/spec#Appending_and_copying_slices for more
+ // information on the type requirements for the copy built-in.
+ //
+ // Example:
+ // func f() {
+ // var x []int
+ // y := []int64{1,2,3}
+ // copy(x, y)
+ // }
+ InvalidCopy
+
+ // InvalidComplex occurs when the complex built-in function is called with
+ // arguments with incompatible types.
+ //
+ // Example:
+ // var _ = complex(float32(1), float64(2))
+ InvalidComplex
+
+ // InvalidDelete occurs when the delete built-in function is called with a
+ // first argument that is not a map.
+ //
+ // Example:
+ // func f() {
+ // m := "hello"
+ // delete(m, "e")
+ // }
+ InvalidDelete
+
+ // InvalidImag occurs when the imag built-in function is called with an
+ // argument that does not have complex type.
+ //
+ // Example:
+ // var _ = imag(int(1))
+ InvalidImag
+
+ // InvalidLen occurs when an argument to the len built-in function is not of
+ // supported type.
+ //
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
+ // which underlying types are supported as arguments to cap and len.
+ //
+ // Example:
+ // var s = 2
+ // var x = len(s)
+ InvalidLen
+
+ // SwappedMakeArgs occurs when make is called with three arguments, and its
+ // length argument is larger than its capacity argument.
+ //
+ // Example:
+ // var x = make([]int, 3, 2)
+ SwappedMakeArgs
+
+ // InvalidMake occurs when make is called with an unsupported type argument.
+ //
+ // See https://golang.org/ref/spec#Making_slices_maps_and_channels for
+ // information on the types that may be created using make.
+ //
+ // Example:
+ // var x = make(int)
+ InvalidMake
+
+ // InvalidReal occurs when the real built-in function is called with an
+ // argument that does not have complex type.
+ //
+ // Example:
+ // var _ = real(int(1))
+ InvalidReal
+
+ // InvalidAssert occurs when a type assertion is applied to a
+ // value that is not of interface type.
+ //
+ // Example:
+ // var x = 1
+ // var _ = x.(float64)
+ InvalidAssert
+
+ // ImpossibleAssert occurs for a type assertion x.(T) when the value x of
+ // interface type cannot have dynamic type T, due to a missing or mismatching
+ // method on T.
+ //
+ // Example:
+ // type T int
+ //
+ // func (t *T) m() int { return int(*t) }
+ //
+ // type I interface { m() int }
+ //
+ // var x I
+ // var _ = x.(T)
+ ImpossibleAssert
+
+ // InvalidConversion occurs when the argument type cannot be converted to the
+ // target.
+ //
+ // See https://golang.org/ref/spec#Conversions for the rules of
+ // convertibility.
+ //
+ // Example:
+ // var x float64
+ // var _ = string(x)
+ InvalidConversion
+
+ // InvalidUntypedConversion occurs when there is no valid implicit
+ // conversion from an untyped value satisfying the type constraints of the
+ // context in which it is used.
+ //
+ // Example:
+ // var _ = 1 + []int{}
+ InvalidUntypedConversion
+
+ // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
+ // that is not a selector expression.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Offsetof(x)
+ BadOffsetofSyntax
+
+ // InvalidOffsetof occurs when unsafe.Offsetof is called with a method
+ // selector, rather than a field selector, or when the field is embedded via
+ // a pointer.
+ //
+ // Per the spec:
+ //
+ // "If f is an embedded field, it must be reachable without pointer
+ // indirections through fields of the struct. "
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T struct { f int }
+ // type S struct { *T }
+ // var s S
+ // var _ = unsafe.Offsetof(s.f)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type S struct{}
+ //
+ // func (S) m() {}
+ //
+ // var s S
+ // var _ = unsafe.Offsetof(s.m)
+ InvalidOffsetof
+
+ // UnusedExpr occurs when a side-effect free expression is used as a
+ // statement. Such a statement has no effect.
+ //
+ // Example:
+ // func f(i int) {
+ // i*i
+ // }
+ UnusedExpr
+
+ // UnusedVar occurs when a variable is declared but unused.
+ //
+ // Example:
+ // func f() {
+ // x := 1
+ // }
+ UnusedVar
+
+ // MissingReturn occurs when a function with results is missing a return
+ // statement.
+ //
+ // Example:
+ // func f() int {}
+ MissingReturn
+
+ // WrongResultCount occurs when a return statement returns an incorrect
+ // number of values.
+ //
+ // Example:
+ // func ReturnOne() int {
+ // return 1, 2
+ // }
+ WrongResultCount
+
+ // OutOfScopeResult occurs when the name of a value implicitly returned by
+ // an empty return statement is shadowed in a nested scope.
+ //
+ // Example:
+ // func factor(n int) (i int) {
+ // for i := 2; i < n; i++ {
+ // if n%i == 0 {
+ // return
+ // }
+ // }
+ // return 0
+ // }
+ OutOfScopeResult
+
+ // InvalidCond occurs when an if condition is not a boolean expression.
+ //
+ // Example:
+ // func checkReturn(i int) {
+ // if i {
+ // panic("non-zero return")
+ // }
+ // }
+ InvalidCond
+
+ // InvalidPostDecl occurs when there is a declaration in a for-loop post
+ // statement.
+ //
+ // Example:
+ // func f() {
+ // for i := 0; i < 10; j := 0 {}
+ // }
+ InvalidPostDecl
+
+ _ // InvalidChanRange was removed.
+
+ // InvalidIterVar occurs when two iteration variables are used while ranging
+ // over a channel.
+ //
+ // Example:
+ // func f(c chan int) {
+ // for k, v := range c {
+ // println(k, v)
+ // }
+ // }
+ InvalidIterVar
+
+ // InvalidRangeExpr occurs when the type of a range expression is not array,
+ // slice, string, map, or channel.
+ //
+ // Example:
+ // func f(i int) {
+ // for j := range i {
+ // println(j)
+ // }
+ // }
+ InvalidRangeExpr
+
+ // MisplacedBreak occurs when a break statement is not within a for, switch,
+ // or select statement of the innermost function definition.
+ //
+ // Example:
+ // func f() {
+ // break
+ // }
+ MisplacedBreak
+
+ // MisplacedContinue occurs when a continue statement is not within a for
+ // loop of the innermost function definition.
+ //
+ // Example:
+ // func sumeven(n int) int {
+ // proceed := func() {
+ // continue
+ // }
+ // sum := 0
+ // for i := 1; i <= n; i++ {
+ // if i % 2 != 0 {
+ // proceed()
+ // }
+ // sum += i
+ // }
+ // return sum
+ // }
+ MisplacedContinue
+
+ // MisplacedFallthrough occurs when a fallthrough statement is not within an
+ // expression switch.
+ //
+ // Example:
+ // func typename(i interface{}) string {
+ // switch i.(type) {
+ // case int64:
+ // fallthrough
+ // case int:
+ // return "int"
+ // }
+ // return "unsupported"
+ // }
+ MisplacedFallthrough
+
+ // DuplicateCase occurs when a type or expression switch has duplicate
+ // cases.
+ //
+ // Example:
+ // func printInt(i int) {
+ // switch i {
+ // case 1:
+ // println("one")
+ // case 1:
+ // println("One")
+ // }
+ // }
+ DuplicateCase
+
+ // DuplicateDefault occurs when a type or expression switch has multiple
+ // default clauses.
+ //
+ // Example:
+ // func printInt(i int) {
+ // switch i {
+ // case 1:
+ // println("one")
+ // default:
+ // println("One")
+ // default:
+ // println("1")
+ // }
+ // }
+ DuplicateDefault
+
+ // BadTypeKeyword occurs when a .(type) expression is used anywhere other
+ // than a type switch.
+ //
+ // Example:
+ // type I interface {
+ // m()
+ // }
+ // var t I
+ // var _ = t.(type)
+ BadTypeKeyword
+
+ // InvalidTypeSwitch occurs when .(type) is used on an expression that is
+ // not of interface type.
+ //
+ // Example:
+ // func f(i int) {
+ // switch x := i.(type) {}
+ // }
+ InvalidTypeSwitch
+
+ // InvalidExprSwitch occurs when a switch expression is not comparable.
+ //
+ // Example:
+ // func _() {
+ // var a struct{ _ func() }
+ // switch a /* ERROR cannot switch on a */ {
+ // }
+ // }
+ InvalidExprSwitch
+
+ // InvalidSelectCase occurs when a select case is not a channel send or
+ // receive.
+ //
+ // Example:
+ // func checkChan(c <-chan int) bool {
+ // select {
+ // case c:
+ // return true
+ // default:
+ // return false
+ // }
+ // }
+ InvalidSelectCase
+
+ // UndeclaredLabel occurs when an undeclared label is jumped to.
+ //
+ // Example:
+ // func f() {
+ // goto L
+ // }
+ UndeclaredLabel
+
+ // DuplicateLabel occurs when a label is declared more than once.
+ //
+ // Example:
+ // func f() int {
+ // L:
+ // L:
+ // return 1
+ // }
+ DuplicateLabel
+
+ // MisplacedLabel occurs when a break or continue label is not on a for,
+ // switch, or select statement.
+ //
+ // Example:
+ // func f() {
+ // L:
+ // a := []int{1,2,3}
+ // for _, e := range a {
+ // if e > 10 {
+ // break L
+ // }
+ // println(a)
+ // }
+ // }
+ MisplacedLabel
+
+ // UnusedLabel occurs when a label is declared and not used.
+ //
+ // Example:
+ // func f() {
+ // L:
+ // }
+ UnusedLabel
+
+ // JumpOverDecl occurs when a label jumps over a variable declaration.
+ //
+ // Example:
+ // func f() int {
+ // goto L
+ // x := 2
+ // L:
+ // x++
+ // return x
+ // }
+ JumpOverDecl
+
+ // JumpIntoBlock occurs when a forward jump goes to a label inside a nested
+ // block.
+ //
+ // Example:
+ // func f(x int) {
+ // goto L
+ // if x > 0 {
+ // L:
+ // print("inside block")
+ // }
+ // }
+ JumpIntoBlock
+
+ // InvalidMethodExpr occurs when a pointer method is called but the argument
+ // is not addressable.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (*T) m() int { return 1 }
+ //
+ // var _ = T.m(T{})
+ InvalidMethodExpr
+
+ // WrongArgCount occurs when too few or too many arguments are passed by a
+ // function call.
+ //
+ // Example:
+ // func f(i int) {}
+ // var x = f()
+ WrongArgCount
+
+ // InvalidCall occurs when an expression is called that is not of function
+ // type.
+ //
+ // Example:
+ // var x = "x"
+ // var y = x()
+ InvalidCall
+
+ // UnusedResults occurs when a restricted expression-only built-in function
+ // is suspended via go or defer. Such a suspension discards the results of
+ // these side-effect free built-in functions, and therefore is ineffectual.
+ //
+ // Example:
+ // func f(a []int) int {
+ // defer len(a)
+ // return i
+ // }
+ UnusedResults
+
+ // InvalidDefer occurs when a deferred expression is not a function call,
+ // for example if the expression is a type conversion.
+ //
+ // Example:
+ // func f(i int) int {
+ // defer int32(i)
+ // return i
+ // }
+ InvalidDefer
+
+ // InvalidGo occurs when a go expression is not a function call, for example
+ // if the expression is a type conversion.
+ //
+ // Example:
+ // func f(i int) int {
+ // go int32(i)
+ // return i
+ // }
+ InvalidGo
+
+ // All codes below were added in Go 1.17.
+
+ // BadDecl occurs when a declaration has invalid syntax.
+ BadDecl
+
+ // RepeatedDecl occurs when an identifier occurs more than once on the left
+ // hand side of a short variable declaration.
+ //
+ // Example:
+ // func _() {
+ // x, y, y := 1, 2, 3
+ // }
+ RepeatedDecl
+
+ // InvalidUnsafeAdd occurs when unsafe.Add is called with a
+ // length argument that is not of integer type.
+ // It also occurs if it is used in a package compiled for a
+ // language version before go1.17.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var p unsafe.Pointer
+ // var _ = unsafe.Add(p, float64(1))
+ InvalidUnsafeAdd
+
+ // InvalidUnsafeSlice occurs when unsafe.Slice is called with a
+ // pointer argument that is not of pointer type or a length argument
+ // that is not of integer type, negative, or out of bounds.
+ // It also occurs if it is used in a package compiled for a language
+ // version before go1.17.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(x, 1)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, float64(1))
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, -1)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, uint64(1) << 63)
+ InvalidUnsafeSlice
+
+ // All codes below were added in Go 1.18.
+
+ // UnsupportedFeature occurs when a language feature is used that is not
+ // supported at this Go version.
+ UnsupportedFeature
+
+ // NotAGenericType occurs when a non-generic type is used where a generic
+ // type is expected: in type or function instantiation.
+ //
+ // Example:
+ // type T int
+ //
+ // var _ T[int]
+ NotAGenericType
+
+ // WrongTypeArgCount occurs when a type or function is instantiated with an
+ // incorrect number of type arguments, including when a generic type or
+ // function is used without instantiation.
+ //
+ // Errors involving failed type inference are assigned other error codes.
+ //
+ // Example:
+ // type T[p any] int
+ //
+ // var _ T[int, string]
+ //
+ // Example:
+ // func f[T any]() {}
+ //
+ // var x = f
+ WrongTypeArgCount
+
+ // CannotInferTypeArgs occurs when type or function type argument inference
+ // fails to infer all type arguments.
+ //
+ // Example:
+ // func f[T any]() {}
+ //
+ // func _() {
+ // f()
+ // }
+ CannotInferTypeArgs
+
+ // InvalidTypeArg occurs when a type argument does not satisfy its
+ // corresponding type parameter constraints.
+ //
+ // Example:
+ // type T[P ~int] struct{}
+ //
+ // var _ T[string]
+ InvalidTypeArg // arguments? InferenceFailed
+
+ // InvalidInstanceCycle occurs when an invalid cycle is detected
+ // within the instantiation graph.
+ //
+ // Example:
+ // func f[T any]() { f[*T]() }
+ InvalidInstanceCycle
+
+ // InvalidUnion occurs when an embedded union or approximation element is
+ // not valid.
+ //
+ // Example:
+ // type _ interface {
+ // ~int | interface{ m() }
+ // }
+ InvalidUnion
+
+ // MisplacedConstraintIface occurs when a constraint-type interface is used
+ // outside of constraint position.
+ //
+ // Example:
+ // type I interface { ~int }
+ //
+ // var _ I
+ MisplacedConstraintIface
+
+ // InvalidMethodTypeParams occurs when methods have type parameters.
+ //
+ // It cannot be encountered with an AST parsed using go/parser.
+ InvalidMethodTypeParams
+
+ // MisplacedTypeParam occurs when a type parameter is used in a place where
+ // it is not permitted.
+ //
+ // Example:
+ // type T[P any] P
+ //
+ // Example:
+ // type T[P any] struct{ *P }
+ MisplacedTypeParam
+
+ // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with
+ // an argument that is not of slice type. It also occurs if it is used
+ // in a package compiled for a language version before go1.20.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.SliceData(x)
+ InvalidUnsafeSliceData
+
+ // InvalidUnsafeString occurs when unsafe.String is called with
+ // a length argument that is not of integer type, negative, or
+ // out of bounds. It also occurs if it is used in a package
+ // compiled for a language version before go1.20.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var b [10]byte
+ // var _ = unsafe.String(&b[0], -1)
+ InvalidUnsafeString
+
+ // InvalidUnsafeStringData occurs if it is used in a package
+ // compiled for a language version before go1.20.
+ _ // not used anymore
+
+ // InvalidClear occurs when clear is called with an argument
+ // that is not of map or slice type.
+ //
+ // Example:
+ // func _(x int) {
+ // clear(x)
+ // }
+ InvalidClear
+
+ // TypeTooLarge occurs if unsafe.Sizeof or unsafe.Offsetof is
+ // called with an expression whose type is too large.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type E [1 << 31 - 1]int
+ // var a [1 << 31]E
+ // var _ = unsafe.Sizeof(a)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type E [1 << 31 - 1]int
+ // var s struct {
+ // _ [1 << 31]E
+ // x int
+ // }
+ // var _ = unsafe.Offsetof(s.x)
+ TypeTooLarge
+
+ // InvalidMinMaxOperand occurs if min or max is called
+ // with an operand that cannot be ordered because it
+ // does not support the < operator.
+ //
+ // Example:
+ // const _ = min(true)
+ //
+ // Example:
+ // var s, t []byte
+ // var _ = max(s, t)
+ InvalidMinMaxOperand
+)
diff --git a/src/internal/types/errors/codes_test.go b/src/internal/types/errors/codes_test.go
new file mode 100644
index 0000000..2490ade
--- /dev/null
+++ b/src/internal/types/errors/codes_test.go
@@ -0,0 +1,197 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package errors_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "internal/testenv"
+ "reflect"
+ "strings"
+ "testing"
+
+ . "go/types"
+)
+
+func TestErrorCodeExamples(t *testing.T) {
+ testenv.MustHaveGoBuild(t) // go command needed to resolve std .a files for importer.Default().
+
+ walkCodes(t, func(name string, value int, spec *ast.ValueSpec) {
+ t.Run(name, func(t *testing.T) {
+ doc := spec.Doc.Text()
+ examples := strings.Split(doc, "Example:")
+ for i := 1; i < len(examples); i++ {
+ example := strings.TrimSpace(examples[i])
+ err := checkExample(t, example)
+ if err == nil {
+ t.Fatalf("no error in example #%d", i)
+ }
+ typerr, ok := err.(Error)
+ if !ok {
+ t.Fatalf("not a types.Error: %v", err)
+ }
+ if got := readCode(typerr); got != value {
+ t.Errorf("%s: example #%d returned code %d (%s), want %d", name, i, got, err, value)
+ }
+ }
+ })
+ })
+}
+
+func walkCodes(t *testing.T, f func(string, int, *ast.ValueSpec)) {
+ t.Helper()
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "codes.go", nil, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf := Config{Importer: importer.Default()}
+ info := &Info{
+ Types: make(map[ast.Expr]TypeAndValue),
+ Defs: make(map[*ast.Ident]Object),
+ Uses: make(map[*ast.Ident]Object),
+ }
+ _, err = conf.Check("types", fset, []*ast.File{file}, info)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, decl := range file.Decls {
+ decl, ok := decl.(*ast.GenDecl)
+ if !ok || decl.Tok != token.CONST {
+ continue
+ }
+ for _, spec := range decl.Specs {
+ spec, ok := spec.(*ast.ValueSpec)
+ if !ok || len(spec.Names) == 0 {
+ continue
+ }
+ obj := info.ObjectOf(spec.Names[0])
+ if named, ok := obj.Type().(*Named); ok && named.Obj().Name() == "Code" {
+ if len(spec.Names) != 1 {
+ t.Fatalf("bad Code declaration for %q: got %d names, want exactly 1", spec.Names[0].Name, len(spec.Names))
+ }
+ codename := spec.Names[0].Name
+ value := int(constant.Val(obj.(*Const).Val()).(int64))
+ f(codename, value, spec)
+ }
+ }
+ }
+}
+
+func readCode(err Error) int {
+ v := reflect.ValueOf(err)
+ return int(v.FieldByName("go116code").Int())
+}
+
+func checkExample(t *testing.T, example string) error {
+ t.Helper()
+ fset := token.NewFileSet()
+ if !strings.HasPrefix(example, "package") {
+ example = "package p\n\n" + example
+ }
+ file, err := parser.ParseFile(fset, "example.go", example, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf := Config{
+ FakeImportC: true,
+ Importer: importer.Default(),
+ }
+ _, err = conf.Check("example", fset, []*ast.File{file}, nil)
+ return err
+}
+
+func TestErrorCodeStyle(t *testing.T) {
+ // The set of error codes is large and intended to be self-documenting, so
+ // this test enforces some style conventions.
+ forbiddenInIdent := []string{
+ // use invalid instead
+ "illegal",
+ // words with a common short-form
+ "argument",
+ "assertion",
+ "assignment",
+ "boolean",
+ "channel",
+ "condition",
+ "declaration",
+ "expression",
+ "function",
+ "initial", // use init for initializer, initialization, etc.
+ "integer",
+ "interface",
+ "iterat", // use iter for iterator, iteration, etc.
+ "literal",
+ "operation",
+ "package",
+ "pointer",
+ "receiver",
+ "signature",
+ "statement",
+ "variable",
+ }
+ forbiddenInComment := []string{
+ // lhs and rhs should be spelled-out.
+ "lhs", "rhs",
+ // builtin should be hyphenated.
+ "builtin",
+ // Use dot-dot-dot.
+ "ellipsis",
+ }
+ nameHist := make(map[int]int)
+ longestName := ""
+ maxValue := 0
+
+ walkCodes(t, func(name string, value int, spec *ast.ValueSpec) {
+ if name == "_" {
+ return
+ }
+ nameHist[len(name)]++
+ if value > maxValue {
+ maxValue = value
+ }
+ if len(name) > len(longestName) {
+ longestName = name
+ }
+ if !token.IsExported(name) {
+ t.Errorf("%q is not exported", name)
+ }
+ lower := strings.ToLower(name)
+ for _, bad := range forbiddenInIdent {
+ if strings.Contains(lower, bad) {
+ t.Errorf("%q contains forbidden word %q", name, bad)
+ }
+ }
+ doc := spec.Doc.Text()
+ if doc == "" {
+ t.Errorf("%q is undocumented", name)
+ } else if !strings.HasPrefix(doc, name) {
+ t.Errorf("doc for %q does not start with the error code name", name)
+ }
+ lowerComment := strings.ToLower(strings.TrimPrefix(doc, name))
+ for _, bad := range forbiddenInComment {
+ if strings.Contains(lowerComment, bad) {
+ t.Errorf("doc for %q contains forbidden word %q", name, bad)
+ }
+ }
+ })
+
+ if testing.Verbose() {
+ var totChars, totCount int
+ for chars, count := range nameHist {
+ totChars += chars * count
+ totCount += count
+ }
+ avg := float64(totChars) / float64(totCount)
+ fmt.Println()
+ fmt.Printf("%d error codes\n", totCount)
+ fmt.Printf("average length: %.2f chars\n", avg)
+ fmt.Printf("max length: %d (%s)\n", len(longestName), longestName)
+ }
+}
diff --git a/src/internal/types/errors/generrordocs.go b/src/internal/types/errors/generrordocs.go
new file mode 100644
index 0000000..46343be
--- /dev/null
+++ b/src/internal/types/errors/generrordocs.go
@@ -0,0 +1,117 @@
+//go:build ignore
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// generrordocs creates a Markdown file for each (compiler) error code
+// and its associated documentation.
+// Note: this program must be run in this directory.
+// go run generrordocs.go <dir>
+
+//go:generate go run generrordocs.go errors_markdown
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "log"
+ "os"
+ "path"
+ "strings"
+ "text/template"
+
+ . "go/types"
+)
+
+func main() {
+ if len(os.Args) != 2 {
+ log.Fatal("missing argument: generrordocs <dir>")
+ }
+ outDir := os.Args[1]
+ if err := os.MkdirAll(outDir, 0755); err != nil {
+ log.Fatal("unable to create output directory: %s", err)
+ }
+ walkCodes(func(name string, vs *ast.ValueSpec) {
+ // ignore unused errors
+ if name == "_" {
+ return
+ }
+ // Ensure that < is represented correctly when it is included in code
+ // blocks. The goldmark Markdown parser converts it to &amp;lt;
+ // when not escaped. It is the only known string with this issue.
+ desc := strings.ReplaceAll(vs.Doc.Text(), "<", `{{raw "<"}}`)
+ e := struct {
+ Name string
+ Description string
+ }{
+ Name: name,
+ Description: fmt.Sprintf("```\n%s```\n", desc),
+ }
+ var buf bytes.Buffer
+ err := template.Must(template.New("eachError").Parse(markdownTemplate)).Execute(&buf, e)
+ if err != nil {
+ log.Fatalf("template.Must: %s", err)
+ }
+ if err := os.WriteFile(path.Join(outDir, name+".md"), buf.Bytes(), 0660); err != nil {
+ log.Fatalf("os.WriteFile: %s\n", err)
+ }
+ })
+ log.Printf("output directory: %s\n", outDir)
+}
+
+func walkCodes(f func(string, *ast.ValueSpec)) {
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "codes.go", nil, parser.ParseComments)
+ if err != nil {
+ log.Fatalf("ParseFile failed: %s", err)
+ }
+ conf := Config{Importer: importer.Default()}
+ info := &Info{
+ Types: make(map[ast.Expr]TypeAndValue),
+ Defs: make(map[*ast.Ident]Object),
+ Uses: make(map[*ast.Ident]Object),
+ }
+ _, err = conf.Check("types", fset, []*ast.File{file}, info)
+ if err != nil {
+ log.Fatalf("Check failed: %s", err)
+ }
+ for _, decl := range file.Decls {
+ decl, ok := decl.(*ast.GenDecl)
+ if !ok || decl.Tok != token.CONST {
+ continue
+ }
+ for _, spec := range decl.Specs {
+ spec, ok := spec.(*ast.ValueSpec)
+ if !ok || len(spec.Names) == 0 {
+ continue
+ }
+ obj := info.ObjectOf(spec.Names[0])
+ if named, ok := obj.Type().(*Named); ok && named.Obj().Name() == "Code" {
+ if len(spec.Names) != 1 {
+ log.Fatalf("bad Code declaration for %q: got %d names, want exactly 1", spec.Names[0].Name, len(spec.Names))
+ }
+ codename := spec.Names[0].Name
+ f(codename, spec)
+ }
+ }
+ }
+}
+
+const markdownTemplate = `---
+title: {{.Name}}
+layout: article
+---
+<!-- Copyright 2023 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. -->
+
+<!-- Code generated by generrordocs.go; DO NOT EDIT. -->
+
+{{.Description}}
+`
diff --git a/src/internal/types/testdata/check/blank.go b/src/internal/types/testdata/check/blank.go
new file mode 100644
index 0000000..2bea11f
--- /dev/null
+++ b/src/internal/types/testdata/check/blank.go
@@ -0,0 +1,5 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package _ /* ERROR "invalid package name" */
diff --git a/src/internal/types/testdata/check/builtins0.go b/src/internal/types/testdata/check/builtins0.go
new file mode 100644
index 0000000..ed4769e
--- /dev/null
+++ b/src/internal/types/testdata/check/builtins0.go
@@ -0,0 +1,1075 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// builtin calls
+
+package builtins
+
+import "unsafe"
+
+func f0() {}
+
+func append1() {
+ var b byte
+ var x int
+ var s []byte
+ _ = append() // ERROR "not enough arguments"
+ _ = append("foo" /* ERROR "must be a slice" */ )
+ _ = append(nil /* ERROR "must be a slice" */ , s)
+ _ = append(x /* ERROR "must be a slice" */ , s)
+ _ = append(s)
+ _ = append(s, nil...)
+ append /* ERROR "not used" */ (s)
+
+ _ = append(s, b)
+ _ = append(s, x /* ERROR "cannot use x" */ )
+ _ = append(s, s /* ERROR "cannot use s" */ )
+ _ = append(s...) /* ERROR "not enough arguments" */
+ _ = append(s, b, s /* ERROR "too many arguments" */ ...)
+ _ = append(s, 1, 2, 3)
+ _ = append(s, 1, 2, 3, x /* ERROR "cannot use x" */ , 5, 6, 6)
+ _ = append(s, 1, 2 /* ERROR "too many arguments" */, s...)
+ _ = append([]interface{}(nil), 1, 2, "foo", x, 3.1425, false)
+
+ type S []byte
+ type T string
+ var t T
+ _ = append(s, "foo" /* ERRORx `cannot use .* in argument to append` */ )
+ _ = append(s, "foo"...)
+ _ = append(S(s), "foo" /* ERRORx `cannot use .* in argument to append` */ )
+ _ = append(S(s), "foo"...)
+ _ = append(s, t /* ERROR "cannot use t" */ )
+ _ = append(s, t...)
+ _ = append(s, T("foo")...)
+ _ = append(S(s), t /* ERROR "cannot use t" */ )
+ _ = append(S(s), t...)
+ _ = append(S(s), T("foo")...)
+ _ = append([]string{}, t /* ERROR "cannot use t" */ , "foo")
+ _ = append([]T{}, t, "foo")
+}
+
+// from the spec
+func append2() {
+ s0 := []int{0, 0}
+ s1 := append(s0, 2) // append a single element s1 == []int{0, 0, 2}
+ s2 := append(s1, 3, 5, 7) // append multiple elements s2 == []int{0, 0, 2, 3, 5, 7}
+ s3 := append(s2, s0...) // append a slice s3 == []int{0, 0, 2, 3, 5, 7, 0, 0}
+ s4 := append(s3[3:6], s3[2:]...) // append overlapping slice s4 == []int{3, 5, 7, 2, 3, 5, 7, 0, 0}
+
+ var t []interface{}
+ t = append(t, 42, 3.1415, "foo") // t == []interface{}{42, 3.1415, "foo"}
+
+ var b []byte
+ b = append(b, "bar"...) // append string contents b == []byte{'b', 'a', 'r' }
+
+ _ = s4
+}
+
+func append3() {
+ f1 := func() (s []int) { return }
+ f2 := func() (s []int, x int) { return }
+ f3 := func() (s []int, x, y int) { return }
+ f5 := func() (s []interface{}, x int, y float32, z string, b bool) { return }
+ ff := func() (int, float32) { return 0, 0 }
+ _ = append(f0 /* ERROR "used as value" */ ())
+ _ = append(f1())
+ _ = append(f2())
+ _ = append(f3())
+ _ = append(f5())
+ _ = append(ff /* ERROR "must be a slice" */ ()) // TODO(gri) better error message
+}
+
+func cap1() {
+ var a [10]bool
+ var p *[20]int
+ var c chan string
+ _ = cap() // ERROR "not enough arguments"
+ _ = cap(1, 2) // ERROR "too many arguments"
+ _ = cap(42 /* ERROR "invalid" */)
+ const _3 = cap(a)
+ assert(_3 == 10)
+ const _4 = cap(p)
+ assert(_4 == 20)
+ _ = cap(c)
+ cap /* ERROR "not used" */ (c)
+
+ // issue 4744
+ type T struct{ a [10]int }
+ const _ = cap(((*T)(nil)).a)
+
+ var s [][]byte
+ _ = cap(s)
+ _ = cap(s... /* ERROR "invalid use of ..." */ )
+}
+
+func cap2() {
+ f1a := func() (a [10]int) { return }
+ f1s := func() (s []int) { return }
+ f2 := func() (s []int, x int) { return }
+ _ = cap(f0 /* ERROR "used as value" */ ())
+ _ = cap(f1a())
+ _ = cap(f1s())
+ _ = cap(f2()) // ERROR "too many arguments"
+}
+
+// test cases for issue 7387
+func cap3() {
+ var f = func() int { return 0 }
+ var x = f()
+ const (
+ _ = cap([4]int{})
+ _ = cap([4]int{x})
+ _ = cap /* ERROR "not constant" */ ([4]int{f()})
+ _ = cap /* ERROR "not constant" */ ([4]int{cap([]int{})})
+ _ = cap([4]int{cap([4]int{})})
+ )
+ var y float64
+ var z complex128
+ const (
+ _ = cap([4]float64{})
+ _ = cap([4]float64{y})
+ _ = cap([4]float64{real(2i)})
+ _ = cap /* ERROR "not constant" */ ([4]float64{real(z)})
+ )
+ var ch chan [10]int
+ const (
+ _ = cap /* ERROR "not constant" */ (<-ch)
+ _ = cap /* ERROR "not constant" */ ([4]int{(<-ch)[0]})
+ )
+}
+
+func clear1() {
+ var a [10]int
+ var m map[float64]string
+ var s []byte
+ clear(a /* ERROR "cannot clear a" */)
+ clear(&/* ERROR "cannot clear &a" */a)
+ clear(m)
+ clear(s)
+ clear([]int{})
+}
+
+func close1() {
+ var c chan int
+ var r <-chan int
+ close() // ERROR "not enough arguments"
+ close(1, 2) // ERROR "too many arguments"
+ close(42 /* ERROR "cannot close non-channel" */)
+ close(r /* ERROR "receive-only channel" */)
+ close(c)
+ _ = close /* ERROR "used as value" */ (c)
+
+ var s []chan int
+ close(s... /* ERROR "invalid use of ..." */ )
+}
+
+func close2() {
+ f1 := func() (ch chan int) { return }
+ f2 := func() (ch chan int, x int) { return }
+ close(f0 /* ERROR "used as value" */ ())
+ close(f1())
+ close(f2()) // ERROR "too many arguments"
+}
+
+func complex1() {
+ var i32 int32
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ var c128 complex128
+ _ = complex() // ERROR "not enough arguments"
+ _ = complex(1) // ERROR "not enough arguments"
+ _ = complex(true /* ERROR "mismatched types" */ , 0)
+ _ = complex(i32 /* ERROR "expected floating-point" */ , 0)
+ _ = complex("foo" /* ERROR "mismatched types" */ , 0)
+ _ = complex(c64 /* ERROR "expected floating-point" */ , 0)
+ _ = complex(0 /* ERROR "mismatched types" */ , true)
+ _ = complex(0 /* ERROR "expected floating-point" */ , i32)
+ _ = complex(0 /* ERROR "mismatched types" */ , "foo")
+ _ = complex(0 /* ERROR "expected floating-point" */ , c64)
+ _ = complex(f32, f32)
+ _ = complex(f32, 1)
+ _ = complex(f32, 1.0)
+ _ = complex(f32, 'a')
+ _ = complex(f64, f64)
+ _ = complex(f64, 1)
+ _ = complex(f64, 1.0)
+ _ = complex(f64, 'a')
+ _ = complex(f32 /* ERROR "mismatched types" */ , f64)
+ _ = complex(f64 /* ERROR "mismatched types" */ , f32)
+ _ = complex(1, 1)
+ _ = complex(1, 1.1)
+ _ = complex(1, 'a')
+ complex /* ERROR "not used" */ (1, 2)
+
+ var _ complex64 = complex(f32, f32)
+ var _ complex64 = complex /* ERRORx `cannot use .* in variable declaration` */ (f64, f64)
+
+ var _ complex128 = complex /* ERRORx `cannot use .* in variable declaration` */ (f32, f32)
+ var _ complex128 = complex(f64, f64)
+
+ // untyped constants
+ const _ int = complex(1, 0)
+ const _ float32 = complex(1, 0)
+ const _ complex64 = complex(1, 0)
+ const _ complex128 = complex(1, 0)
+ const _ = complex(0i, 0i)
+ const _ = complex(0i, 0)
+ const _ int = 1.0 + complex(1, 0i)
+
+ const _ int = complex /* ERROR "int" */ (1.1, 0)
+ const _ float32 = complex /* ERROR "float32" */ (1, 2)
+
+ // untyped values
+ var s uint
+ _ = complex(1 /* ERROR "integer" */ <<s, 0)
+ const _ = complex /* ERROR "not constant" */ (1 /* ERROR "integer" */ <<s, 0)
+ var _ int = complex /* ERRORx `cannot use .* in variable declaration` */ (1 /* ERROR "integer" */ <<s, 0)
+
+ // floating-point argument types must be identical
+ type F32 float32
+ type F64 float64
+ var x32 F32
+ var x64 F64
+ c64 = complex(x32, x32)
+ _ = complex(x32 /* ERROR "mismatched types" */ , f32)
+ _ = complex(f32 /* ERROR "mismatched types" */ , x32)
+ c128 = complex(x64, x64)
+ _ = c128
+ _ = complex(x64 /* ERROR "mismatched types" */ , f64)
+ _ = complex(f64 /* ERROR "mismatched types" */ , x64)
+
+ var t []float32
+ _ = complex(t... /* ERROR "invalid use of ..." */ )
+}
+
+func complex2() {
+ f1 := func() (x float32) { return }
+ f2 := func() (x, y float32) { return }
+ f3 := func() (x, y, z float32) { return }
+ _ = complex(f0 /* ERROR "used as value" */ ())
+ _ = complex(f1()) // ERROR "not enough arguments"
+ _ = complex(f2())
+ _ = complex(f3()) // ERROR "too many arguments"
+}
+
+func copy1() {
+ copy() // ERROR "not enough arguments"
+ copy("foo") // ERROR "not enough arguments"
+ copy([ /* ERROR "copy expects slice arguments" */ ...]int{}, []int{})
+ copy([ /* ERROR "copy expects slice arguments" */ ]int{}, [...]int{})
+ copy([ /* ERROR "different element types" */ ]int8{}, "foo")
+
+ // spec examples
+ var a = [...]int{0, 1, 2, 3, 4, 5, 6, 7}
+ var s = make([]int, 6)
+ var b = make([]byte, 5)
+ n1 := copy(s, a[0:]) // n1 == 6, s == []int{0, 1, 2, 3, 4, 5}
+ n2 := copy(s, s[2:]) // n2 == 4, s == []int{2, 3, 4, 5, 4, 5}
+ n3 := copy(b, "Hello, World!") // n3 == 5, b == []byte("Hello")
+ _, _, _ = n1, n2, n3
+
+ var t [][]int
+ copy(t, t)
+ copy(t /* ERROR "copy expects slice arguments" */ , nil)
+ copy(nil /* ERROR "copy expects slice arguments" */ , t)
+ copy(nil /* ERROR "copy expects slice arguments" */ , nil)
+ copy(t... /* ERROR "invalid use of ..." */ )
+}
+
+func copy2() {
+ f1 := func() (a []int) { return }
+ f2 := func() (a, b []int) { return }
+ f3 := func() (a, b, c []int) { return }
+ copy(f0 /* ERROR "used as value" */ ())
+ copy(f1()) // ERROR "not enough arguments"
+ copy(f2())
+ copy(f3()) // ERROR "too many arguments"
+}
+
+func delete1() {
+ var m map[string]int
+ var s string
+ delete() // ERROR "not enough arguments"
+ delete(1) // ERROR "not enough arguments"
+ delete(1, 2, 3) // ERROR "too many arguments"
+ delete(m, 0 /* ERROR "cannot use" */)
+ delete(m, s)
+ _ = delete /* ERROR "used as value" */ (m, s)
+
+ var t []map[string]string
+ delete(t... /* ERROR "invalid use of ..." */ )
+}
+
+func delete2() {
+ f1 := func() (m map[string]int) { return }
+ f2 := func() (m map[string]int, k string) { return }
+ f3 := func() (m map[string]int, k string, x float32) { return }
+ delete(f0 /* ERROR "used as value" */ ())
+ delete(f1()) // ERROR "not enough arguments"
+ delete(f2())
+ delete(f3()) // ERROR "too many arguments"
+}
+
+func imag1() {
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ var c128 complex128
+ _ = imag() // ERROR "not enough arguments"
+ _ = imag(1, 2) // ERROR "too many arguments"
+ _ = imag(10)
+ _ = imag(2.7182818)
+ _ = imag("foo" /* ERROR "expected complex" */)
+ _ = imag('a')
+ const _5 = imag(1 + 2i)
+ assert(_5 == 2)
+ f32 = _5
+ f64 = _5
+ const _6 = imag(0i)
+ assert(_6 == 0)
+ f32 = imag(c64)
+ f64 = imag(c128)
+ f32 = imag /* ERRORx `cannot use .* in assignment` */ (c128)
+ f64 = imag /* ERRORx `cannot use .* in assignment` */ (c64)
+ imag /* ERROR "not used" */ (c64)
+ _, _ = f32, f64
+
+ // complex type may not be predeclared
+ type C64 complex64
+ type C128 complex128
+ var x64 C64
+ var x128 C128
+ f32 = imag(x64)
+ f64 = imag(x128)
+
+ var a []complex64
+ _ = imag(a... /* ERROR "invalid use of ..." */ )
+
+ // if argument is untyped, result is untyped
+ const _ byte = imag(1.2 + 3i)
+ const _ complex128 = imag(1.2 + 3i)
+
+ // lhs constant shift operands are typed as complex128
+ var s uint
+ _ = imag(1 /* ERROR "must be integer" */ << s)
+}
+
+func imag2() {
+ f1 := func() (x complex128) { return }
+ f2 := func() (x, y complex128) { return }
+ _ = imag(f0 /* ERROR "used as value" */ ())
+ _ = imag(f1())
+ _ = imag(f2()) // ERROR "too many arguments"
+}
+
+func len1() {
+ const c = "foobar"
+ var a [10]bool
+ var p *[20]int
+ var m map[string]complex128
+ _ = len() // ERROR "not enough arguments"
+ _ = len(1, 2) // ERROR "too many arguments"
+ _ = len(42 /* ERROR "invalid" */)
+ const _3 = len(c)
+ assert(_3 == 6)
+ const _4 = len(a)
+ assert(_4 == 10)
+ const _5 = len(p)
+ assert(_5 == 20)
+ _ = len(m)
+ len /* ERROR "not used" */ (c)
+
+ // esoteric case
+ var t string
+ var hash map[interface{}][]*[10]int
+ const n = len /* ERROR "not constant" */ (hash[recover()][len(t)])
+ assert(n == 10) // ok because n has unknown value and no error is reported
+ var ch <-chan int
+ const nn = len /* ERROR "not constant" */ (hash[<-ch][len(t)])
+
+ // issue 4744
+ type T struct{ a [10]int }
+ const _ = len(((*T)(nil)).a)
+
+ var s [][]byte
+ _ = len(s)
+ _ = len(s... /* ERROR "invalid use of ..." */ )
+}
+
+func len2() {
+ f1 := func() (x []int) { return }
+ f2 := func() (x, y []int) { return }
+ _ = len(f0 /* ERROR "used as value" */ ())
+ _ = len(f1())
+ _ = len(f2()) // ERROR "too many arguments"
+}
+
+// test cases for issue 7387
+func len3() {
+ var f = func() int { return 0 }
+ var x = f()
+ const (
+ _ = len([4]int{})
+ _ = len([4]int{x})
+ _ = len /* ERROR "not constant" */ ([4]int{f()})
+ _ = len /* ERROR "not constant" */ ([4]int{len([]int{})})
+ _ = len([4]int{len([4]int{})})
+ )
+ var y float64
+ var z complex128
+ const (
+ _ = len([4]float64{})
+ _ = len([4]float64{y})
+ _ = len([4]float64{real(2i)})
+ _ = len /* ERROR "not constant" */ ([4]float64{real(z)})
+ )
+ var ch chan [10]int
+ const (
+ _ = len /* ERROR "not constant" */ (<-ch)
+ _ = len /* ERROR "not constant" */ ([4]int{(<-ch)[0]})
+ )
+}
+
+func make1() {
+ var n int
+ var m float32
+ var s uint
+
+ _ = make() // ERROR "not enough arguments"
+ _ = make(1 /* ERROR "not a type" */)
+ _ = make(int /* ERROR "cannot make" */)
+
+ // slices
+ _ = make/* ERROR "arguments" */ ([]int)
+ _ = make/* ERROR "arguments" */ ([]int, 2, 3, 4)
+ _ = make([]int, int /* ERROR "not an expression" */)
+ _ = make([]int, 10, float32 /* ERROR "not an expression" */)
+ _ = make([]int, "foo" /* ERROR "cannot convert" */)
+ _ = make([]int, 10, 2.3 /* ERROR "truncated" */)
+ _ = make([]int, 5, 10.0)
+ _ = make([]int, 0i)
+ _ = make([]int, 1.0)
+ _ = make([]int, 1.0<<s)
+ _ = make([]int, 1.1 /* ERROR "int" */ <<s)
+ _ = make([]int, - /* ERROR "must not be negative" */ 1, 10)
+ _ = make([]int, 0, - /* ERROR "must not be negative" */ 1)
+ _ = make([]int, - /* ERROR "must not be negative" */ 1, - /* ERROR "must not be negative" */ 1)
+ _ = make([]int, 1 /* ERROR "overflows" */ <<100, 1 /* ERROR "overflows" */ <<100)
+ _ = make([]int, 10 /* ERROR "length and capacity swapped" */ , 9)
+ _ = make([]int, 1 /* ERROR "overflows" */ <<100, 12345)
+ _ = make([]int, m /* ERROR "must be integer" */ )
+ _ = &make /* ERROR "cannot take address" */ ([]int, 0)
+
+ // maps
+ _ = make /* ERROR "arguments" */ (map[int]string, 10, 20)
+ _ = make(map[int]float32, int /* ERROR "not an expression" */)
+ _ = make(map[int]float32, "foo" /* ERROR "cannot convert" */)
+ _ = make(map[int]float32, 10)
+ _ = make(map[int]float32, n)
+ _ = make(map[int]float32, int64(n))
+ _ = make(map[string]bool, 10.0)
+ _ = make(map[string]bool, 10.0<<s)
+ _ = &make /* ERROR "cannot take address" */ (map[string]bool)
+
+ // channels
+ _ = make /* ERROR "arguments" */ (chan int, 10, 20)
+ _ = make(chan int, int /* ERROR "not an expression" */)
+ _ = make(chan<- int, "foo" /* ERROR "cannot convert" */)
+ _ = make(chan int, - /* ERROR "must not be negative" */ 10)
+ _ = make(<-chan float64, 10)
+ _ = make(chan chan int, n)
+ _ = make(chan string, int64(n))
+ _ = make(chan bool, 10.0)
+ _ = make(chan bool, 10.0<<s)
+ _ = &make /* ERROR "cannot take address" */ (chan bool)
+
+ make /* ERROR "not used" */ ([]int, 10)
+
+ var t []int
+ _ = make([]int, t[0], t[1])
+ _ = make([]int, t... /* ERROR "invalid use of ..." */ )
+}
+
+func make2() {
+ f1 := func() (x []int) { return }
+ _ = make(f0 /* ERROR "not a type" */ ())
+ _ = make(f1 /* ERROR "not a type" */ ())
+}
+
+func max1() {
+ var b bool
+ var c complex128
+ var x int
+ var s string
+ type myint int
+ var m myint
+ _ = max() /* ERROR "not enough arguments" */
+ _ = max(b /* ERROR "cannot be ordered" */ )
+ _ = max(c /* ERROR "cannot be ordered" */ )
+ _ = max(x)
+ _ = max(s)
+ _ = max(x, x)
+ _ = max(x, x, x, x, x)
+ var _ int = max /* ERROR "cannot use max(m) (value of type myint) as int value" */ (m)
+ _ = max(x, m /* ERROR "invalid argument: mismatched types int (previous argument) and myint (type of m)" */ , x)
+
+ _ = max(1, x)
+ _ = max(1.0, x)
+ _ = max(1.2 /* ERROR "1.2 (untyped float constant) truncated to int" */ , x)
+ _ = max(-10, 1.0, c /* ERROR "cannot be ordered" */ )
+
+ const (
+ _ = max /* ERROR "max(x) (value of type int) is not constant" */ (x)
+ _ = max(true /* ERROR "invalid argument: true (untyped bool constant) cannot be ordered" */ )
+ _ = max(1)
+ _ = max(1, 2.3, 'a')
+ _ = max(1, "foo" /* ERROR "mismatched types" */ )
+ _ = max(1, 0i /* ERROR "cannot be ordered" */ )
+ _ = max(1, 2 /* ERROR "cannot be ordered" */ + 3i )
+ )
+}
+
+func max2() {
+ _ = assert(max(0) == 0)
+ _ = assert(max(0, 1) == 1)
+ _ = assert(max(0, -10, 123456789) == 123456789)
+ _ = assert(max(-12345678901234567890, 0) == 0)
+
+ _ = assert(max(1, 2.3) == 2.3)
+ _ = assert(max(1, 2.3, 'a') == 'a')
+
+ _ = assert(max("", "a") == "a")
+ _ = assert(max("abcde", "xyz", "foo", "bar") == "xyz")
+
+ const (
+ _ int = max(1.0)
+ _ float32 = max(1, 2)
+ _ int = max /* ERROR "cannot use max(1, 2.3) (untyped float constant 2.3) as int value" */ (1, 2.3)
+ _ int = max(1.2, 3) // ok!
+ _ byte = max(1, 'a')
+ )
+}
+
+func min1() {
+ var b bool
+ var c complex128
+ var x int
+ var s string
+ type myint int
+ var m myint
+ _ = min() /* ERROR "not enough arguments" */
+ _ = min(b /* ERROR "cannot be ordered" */ )
+ _ = min(c /* ERROR "cannot be ordered" */ )
+ _ = min(x)
+ _ = min(s)
+ _ = min(x, x)
+ _ = min(x, x, x, x, x)
+ var _ int = min /* ERROR "cannot use min(m) (value of type myint) as int value" */ (m)
+ _ = min(x, m /* ERROR "invalid argument: mismatched types int (previous argument) and myint (type of m)" */ , x)
+
+ _ = min(1, x)
+ _ = min(1.0, x)
+ _ = min(1.2 /* ERROR "1.2 (untyped float constant) truncated to int" */ , x)
+ _ = min(-10, 1.0, c /* ERROR "cannot be ordered" */ )
+
+ const (
+ _ = min /* ERROR "min(x) (value of type int) is not constant" */ (x)
+ _ = min(true /* ERROR "invalid argument: true (untyped bool constant) cannot be ordered" */ )
+ _ = min(1)
+ _ = min(1, 2.3, 'a')
+ _ = min(1, "foo" /* ERROR "mismatched types" */ )
+ _ = min(1, 0i /* ERROR "cannot be ordered" */ )
+ _ = min(1, 2 /* ERROR "cannot be ordered" */ + 3i )
+ )
+}
+
+func min2() {
+ _ = assert(min(0) == 0)
+ _ = assert(min(0, 1) == 0)
+ _ = assert(min(0, -10, 123456789) == -10)
+ _ = assert(min(-12345678901234567890, 0) == -12345678901234567890)
+
+ _ = assert(min(1, 2.3) == 1)
+ _ = assert(min(1, 2.3, 'a') == 1)
+
+ _ = assert(min("", "a") == "")
+ _ = assert(min("abcde", "xyz", "foo", "bar") == "abcde")
+
+ const (
+ _ int = min(1.0)
+ _ float32 = min(1, 2)
+ _ int = min(1, 2.3) // ok!
+ _ int = min /* ERROR "cannot use min(1.2, 3) (untyped float constant 1.2) as int value" */ (1.2, 3)
+ _ byte = min(1, 'a')
+ )
+}
+
+func new1() {
+ _ = new() // ERROR "not enough arguments"
+ _ = new(1, 2) // ERROR "too many arguments"
+ _ = new("foo" /* ERROR "not a type" */)
+ p := new(float64)
+ _ = new(struct{ x, y int })
+ q := new(*float64)
+ _ = *p == **q
+ new /* ERROR "not used" */ (int)
+ _ = &new /* ERROR "cannot take address" */ (int)
+
+ _ = new(int... /* ERROR "invalid use of ..." */ )
+}
+
+func new2() {
+ f1 := func() (x []int) { return }
+ _ = new(f0 /* ERROR "not a type" */ ())
+ _ = new(f1 /* ERROR "not a type" */ ())
+}
+
+func panic1() {
+ panic() // ERROR "not enough arguments"
+ panic(1, 2) // ERROR "too many arguments"
+ panic(0)
+ panic("foo")
+ panic(false)
+ panic(1<<10)
+ panic(1 << /* ERROR "constant shift overflow" */ 1000)
+ _ = panic /* ERROR "used as value" */ (0)
+
+ var s []byte
+ panic(s)
+ panic(s... /* ERROR "invalid use of ..." */ )
+}
+
+func panic2() {
+ f1 := func() (x int) { return }
+ f2 := func() (x, y int) { return }
+ panic(f0 /* ERROR "used as value" */ ())
+ panic(f1())
+ panic(f2()) // ERROR "too many arguments"
+}
+
+func print1() {
+ print()
+ print(1)
+ print(1, 2)
+ print("foo")
+ print(2.718281828)
+ print(false)
+ print(1<<10)
+ print(1 << /* ERROR "constant shift overflow" */ 1000)
+ println(nil /* ERROR "untyped nil" */ )
+
+ var s []int
+ print(s... /* ERROR "invalid use of ..." */ )
+ _ = print /* ERROR "used as value" */ ()
+}
+
+func print2() {
+ f1 := func() (x int) { return }
+ f2 := func() (x, y int) { return }
+ f3 := func() (x int, y float32, z string) { return }
+ print(f0 /* ERROR "used as value" */ ())
+ print(f1())
+ print(f2())
+ print(f3())
+}
+
+func println1() {
+ println()
+ println(1)
+ println(1, 2)
+ println("foo")
+ println(2.718281828)
+ println(false)
+ println(1<<10)
+ println(1 << /* ERROR "constant shift overflow" */ 1000)
+ println(nil /* ERROR "untyped nil" */ )
+
+ var s []int
+ println(s... /* ERROR "invalid use of ..." */ )
+ _ = println /* ERROR "used as value" */ ()
+}
+
+func println2() {
+ f1 := func() (x int) { return }
+ f2 := func() (x, y int) { return }
+ f3 := func() (x int, y float32, z string) { return }
+ println(f0 /* ERROR "used as value" */ ())
+ println(f1())
+ println(f2())
+ println(f3())
+}
+
+func real1() {
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ var c128 complex128
+ _ = real() // ERROR "not enough arguments"
+ _ = real(1, 2) // ERROR "too many arguments"
+ _ = real(10)
+ _ = real(2.7182818)
+ _ = real("foo" /* ERROR "expected complex" */)
+ const _5 = real(1 + 2i)
+ assert(_5 == 1)
+ f32 = _5
+ f64 = _5
+ const _6 = real(0i)
+ assert(_6 == 0)
+ f32 = real(c64)
+ f64 = real(c128)
+ f32 = real /* ERRORx `cannot use .* in assignment` */ (c128)
+ f64 = real /* ERRORx `cannot use .* in assignment` */ (c64)
+ real /* ERROR "not used" */ (c64)
+
+ // complex type may not be predeclared
+ type C64 complex64
+ type C128 complex128
+ var x64 C64
+ var x128 C128
+ f32 = imag(x64)
+ f64 = imag(x128)
+ _, _ = f32, f64
+
+ var a []complex64
+ _ = real(a... /* ERROR "invalid use of ..." */ )
+
+ // if argument is untyped, result is untyped
+ const _ byte = real(1 + 2.3i)
+ const _ complex128 = real(1 + 2.3i)
+
+ // lhs constant shift operands are typed as complex128
+ var s uint
+ _ = real(1 /* ERROR "must be integer" */ << s)
+}
+
+func real2() {
+ f1 := func() (x complex128) { return }
+ f2 := func() (x, y complex128) { return }
+ _ = real(f0 /* ERROR "used as value" */ ())
+ _ = real(f1())
+ _ = real(f2()) // ERROR "too many arguments"
+}
+
+func recover1() {
+ _ = recover()
+ _ = recover(10) // ERROR "too many arguments"
+ recover()
+
+ var s []int
+ recover(s... /* ERROR "invalid use of ..." */ )
+}
+
+func recover2() {
+ f1 := func() (x int) { return }
+ f2 := func() (x, y int) { return }
+ _ = recover(f0 /* ERROR "used as value" */ ())
+ _ = recover(f1()) // ERROR "too many arguments"
+ _ = recover(f2()) // ERROR "too many arguments"
+}
+
+// assuming types.DefaultPtrSize == 8
+type S0 struct{ // offset
+ a bool // 0
+ b rune // 4
+ c *int // 8
+ d bool // 16
+ e complex128 // 24
+} // 40
+
+type S1 struct{ // offset
+ x float32 // 0
+ y string // 8
+ z *S1 // 24
+ S0 // 32
+} // 72
+
+type S2 struct{ // offset
+ *S1 // 0
+} // 8
+
+type S3 struct { // offset
+ a int64 // 0
+ b int32 // 8
+} // 12
+
+type S4 struct { // offset
+ S3 // 0
+ int32 // 12
+} // 16
+
+type S5 struct { // offset
+ a [3]int32 // 0
+ b int32 // 12
+} // 16
+
+func (S2) m() {}
+
+func Alignof1() {
+ var x int
+ _ = unsafe.Alignof() // ERROR "not enough arguments"
+ _ = unsafe.Alignof(1, 2) // ERROR "too many arguments"
+ _ = unsafe.Alignof(int /* ERROR "not an expression" */)
+ _ = unsafe.Alignof(42)
+ _ = unsafe.Alignof(new(struct{}))
+ _ = unsafe.Alignof(1<<10)
+ _ = unsafe.Alignof(1 << /* ERROR "constant shift overflow" */ 1000)
+ _ = unsafe.Alignof(nil /* ERROR "untyped nil" */ )
+ unsafe /* ERROR "not used" */ .Alignof(x)
+
+ var y S0
+ assert(unsafe.Alignof(y.a) == 1)
+ assert(unsafe.Alignof(y.b) == 4)
+ assert(unsafe.Alignof(y.c) == 8)
+ assert(unsafe.Alignof(y.d) == 1)
+ assert(unsafe.Alignof(y.e) == 8)
+
+ var s []byte
+ _ = unsafe.Alignof(s)
+ _ = unsafe.Alignof(s... /* ERROR "invalid use of ..." */ )
+}
+
+func Alignof2() {
+ f1 := func() (x int32) { return }
+ f2 := func() (x, y int32) { return }
+ _ = unsafe.Alignof(f0 /* ERROR "used as value" */ ())
+ assert(unsafe.Alignof(f1()) == 4)
+ _ = unsafe.Alignof(f2()) // ERROR "too many arguments"
+}
+
+func Offsetof1() {
+ var x struct{ f int }
+ _ = unsafe.Offsetof() // ERROR "not enough arguments"
+ _ = unsafe.Offsetof(1, 2) // ERROR "too many arguments"
+ _ = unsafe.Offsetof(int /* ERROR "not a selector expression" */ )
+ _ = unsafe.Offsetof(x /* ERROR "not a selector expression" */ )
+ _ = unsafe.Offsetof(nil /* ERROR "not a selector expression" */ )
+ _ = unsafe.Offsetof(x.f)
+ _ = unsafe.Offsetof((x.f))
+ _ = unsafe.Offsetof((((((((x))).f)))))
+ unsafe /* ERROR "not used" */ .Offsetof(x.f)
+
+ var y0 S0
+ assert(unsafe.Offsetof(y0.a) == 0)
+ assert(unsafe.Offsetof(y0.b) == 4)
+ assert(unsafe.Offsetof(y0.c) == 8)
+ assert(unsafe.Offsetof(y0.d) == 16)
+ assert(unsafe.Offsetof(y0.e) == 24)
+
+ var y1 S1
+ assert(unsafe.Offsetof(y1.x) == 0)
+ assert(unsafe.Offsetof(y1.y) == 8)
+ assert(unsafe.Offsetof(y1.z) == 24)
+ assert(unsafe.Offsetof(y1.S0) == 32)
+
+ assert(unsafe.Offsetof(y1.S0.a) == 0) // relative to S0
+ assert(unsafe.Offsetof(y1.a) == 32) // relative to S1
+ assert(unsafe.Offsetof(y1.b) == 36) // relative to S1
+ assert(unsafe.Offsetof(y1.c) == 40) // relative to S1
+ assert(unsafe.Offsetof(y1.d) == 48) // relative to S1
+ assert(unsafe.Offsetof(y1.e) == 56) // relative to S1
+
+ var y1p *S1
+ assert(unsafe.Offsetof(y1p.S0) == 32)
+
+ type P *S1
+ var p P = y1p
+ assert(unsafe.Offsetof(p.S0) == 32)
+
+ var y2 S2
+ assert(unsafe.Offsetof(y2.S1) == 0)
+ _ = unsafe.Offsetof(y2 /* ERROR "embedded via a pointer" */ .x)
+ _ = unsafe.Offsetof(y2 /* ERROR "method value" */ .m)
+
+ var s []byte
+ _ = unsafe.Offsetof(s... /* ERROR "invalid use of ..." */ )
+}
+
+func Offsetof2() {
+ f1 := func() (x int32) { return }
+ f2 := func() (x, y int32) { return }
+ _ = unsafe.Offsetof(f0 /* ERROR "not a selector expression" */ ())
+ _ = unsafe.Offsetof(f1 /* ERROR "not a selector expression" */ ())
+ _ = unsafe.Offsetof(f2 /* ERROR "not a selector expression" */ ())
+}
+
+func Sizeof1() {
+ var x int
+ _ = unsafe.Sizeof() // ERROR "not enough arguments"
+ _ = unsafe.Sizeof(1, 2) // ERROR "too many arguments"
+ _ = unsafe.Sizeof(int /* ERROR "not an expression" */)
+ _ = unsafe.Sizeof(42)
+ _ = unsafe.Sizeof(new(complex128))
+ _ = unsafe.Sizeof(1<<10)
+ _ = unsafe.Sizeof(1 << /* ERROR "constant shift overflow" */ 1000)
+ _ = unsafe.Sizeof(nil /* ERROR "untyped nil" */ )
+ unsafe /* ERROR "not used" */ .Sizeof(x)
+
+ // basic types have size guarantees
+ assert(unsafe.Sizeof(byte(0)) == 1)
+ assert(unsafe.Sizeof(uint8(0)) == 1)
+ assert(unsafe.Sizeof(int8(0)) == 1)
+ assert(unsafe.Sizeof(uint16(0)) == 2)
+ assert(unsafe.Sizeof(int16(0)) == 2)
+ assert(unsafe.Sizeof(uint32(0)) == 4)
+ assert(unsafe.Sizeof(int32(0)) == 4)
+ assert(unsafe.Sizeof(float32(0)) == 4)
+ assert(unsafe.Sizeof(uint64(0)) == 8)
+ assert(unsafe.Sizeof(int64(0)) == 8)
+ assert(unsafe.Sizeof(float64(0)) == 8)
+ assert(unsafe.Sizeof(complex64(0)) == 8)
+ assert(unsafe.Sizeof(complex128(0)) == 16)
+
+ var y0 S0
+ assert(unsafe.Sizeof(y0.a) == 1)
+ assert(unsafe.Sizeof(y0.b) == 4)
+ assert(unsafe.Sizeof(y0.c) == 8)
+ assert(unsafe.Sizeof(y0.d) == 1)
+ assert(unsafe.Sizeof(y0.e) == 16)
+ assert(unsafe.Sizeof(y0) == 40)
+
+ var y1 S1
+ assert(unsafe.Sizeof(y1) == 72)
+
+ var y2 S2
+ assert(unsafe.Sizeof(y2) == 8)
+
+ var y3 S3
+ assert(unsafe.Sizeof(y3) == 12)
+
+ var y4 S4
+ assert(unsafe.Sizeof(y4) == 16)
+
+ var y5 S5
+ assert(unsafe.Sizeof(y5) == 16)
+
+ var a3 [10]S3
+ assert(unsafe.Sizeof(a3) == 156)
+
+ // test case for issue 5670
+ type T struct {
+ a int32
+ _ int32
+ c int32
+ }
+ assert(unsafe.Sizeof(T{}) == 12)
+
+ var s []byte
+ _ = unsafe.Sizeof(s)
+ _ = unsafe.Sizeof(s... /* ERROR "invalid use of ..." */ )
+}
+
+func Sizeof2() {
+ f1 := func() (x int64) { return }
+ f2 := func() (x, y int64) { return }
+ _ = unsafe.Sizeof(f0 /* ERROR "used as value" */ ())
+ assert(unsafe.Sizeof(f1()) == 8)
+ _ = unsafe.Sizeof(f2()) // ERROR "too many arguments"
+}
+
+func Slice1() {
+ var x int
+ unsafe.Slice() // ERROR "not enough arguments"
+ unsafe.Slice(1, 2, 3) // ERROR "too many arguments"
+ unsafe.Slice(1 /* ERROR "is not a pointer" */ , 2)
+ unsafe.Slice(nil /* ERROR "nil is not a pointer" */ , 0)
+ unsafe.Slice(&x, "foo" /* ERRORx `cannot convert .* to type int` */ )
+ unsafe.Slice(&x, 1.2 /* ERROR "truncated to int" */ )
+ unsafe.Slice(&x, - /* ERROR "must not be negative" */ 1)
+ unsafe /* ERROR "not used" */ .Slice(&x, 0)
+ var _ []byte = unsafe /* ERROR "value of type []int" */ .Slice(&x, 0)
+
+ var _ []int = unsafe.Slice(&x, 0)
+ _ = unsafe.Slice(&x, 1.0)
+ _ = unsafe.Slice((*int)(nil), 0)
+}
+
+func SliceData1() {
+ var s []int
+ unsafe.SliceData(0 /* ERROR "not a slice" */)
+ unsafe /* ERROR "not used" */ .SliceData(s)
+
+ type S []int
+ _ = unsafe.SliceData(s)
+ _ = unsafe.SliceData(S{})
+}
+
+func String1() {
+ var b byte
+ unsafe.String() // ERROR "not enough arguments"
+ unsafe.String(1, 2, 3) // ERROR "too many arguments"
+ unsafe.String(1 /* ERROR "cannot use 1" */ , 2)
+ unsafe.String(&b, "foo" /* ERRORx `cannot convert .* to type int` */ )
+ unsafe.String(&b, 1.2 /* ERROR "truncated to int" */ )
+ unsafe.String(&b, - /* ERROR "must not be negative" */ 1)
+ unsafe /* ERROR "not used" */ .String(&b, 0)
+ var _ []byte = unsafe /* ERROR "value of type string" */ .String(&b, 0)
+
+ var _ string = unsafe.String(&b, 0)
+ _ = unsafe.String(&b, 1.0)
+ _ = unsafe.String(nil, 0) // here we allow nil as ptr argument (in contrast to unsafe.Slice)
+}
+
+func StringData1() {
+ var s string
+ type S string
+ unsafe.StringData(0 /* ERROR "cannot use 0" */)
+ unsafe.StringData(S /* ERROR "cannot use S" */ ("foo"))
+ unsafe /* ERROR "not used" */ .StringData(s)
+
+ _ = unsafe.StringData(s)
+ _ = unsafe.StringData("foo")
+}
+
+// self-testing only
+func assert1() {
+ var x int
+ assert() /* ERROR "not enough arguments" */
+ assert(1, 2) /* ERROR "too many arguments" */
+ assert("foo" /* ERROR "boolean constant" */ )
+ assert(x /* ERROR "boolean constant" */)
+ assert(true)
+ assert /* ERROR "failed" */ (false)
+ _ = assert(true)
+
+ var s []byte
+ assert(s... /* ERROR "invalid use of ..." */ )
+}
+
+func assert2() {
+ f1 := func() (x bool) { return }
+ f2 := func() (x bool) { return }
+ assert(f0 /* ERROR "used as value" */ ())
+ assert(f1 /* ERROR "boolean constant" */ ())
+ assert(f2 /* ERROR "boolean constant" */ ())
+}
+
+// self-testing only
+func trace1() {
+ // Uncomment the code below to test trace - will produce console output
+ // _ = trace /* ERROR "no value" */ ()
+ // _ = trace(1)
+ // _ = trace(true, 1.2, '\'', "foo", 42i, "foo" <= "bar")
+
+ var s []byte
+ trace(s... /* ERROR "invalid use of ..." */ )
+}
+
+func trace2() {
+ f1 := func() (x int) { return }
+ f2 := func() (x int, y string) { return }
+ f3 := func() (x int, y string, z []int) { return }
+ _ = f1
+ _ = f2
+ _ = f3
+ // Uncomment the code below to test trace - will produce console output
+ // trace(f0())
+ // trace(f1())
+ // trace(f2())
+ // trace(f3())
+ // trace(f0(), 1)
+ // trace(f1(), 1, 2)
+ // trace(f2(), 1, 2, 3)
+ // trace(f3(), 1, 2, 3, 4)
+}
diff --git a/src/internal/types/testdata/check/builtins1.go b/src/internal/types/testdata/check/builtins1.go
new file mode 100644
index 0000000..f7ac72d
--- /dev/null
+++ b/src/internal/types/testdata/check/builtins1.go
@@ -0,0 +1,330 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file tests built-in calls on generic types.
+
+package builtins
+
+import "unsafe"
+
+// clear
+
+func _[T any](x T) {
+ clear(x /* ERROR "cannot clear x" */)
+}
+
+func _[T ~map[int]string | ~[]byte](x T) {
+ clear(x)
+}
+
+func _[T ~map[int]string | ~[]byte | ~*[10]int | string](x T) {
+ clear(x /* ERROR "cannot clear x" */)
+}
+
+// close
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | chan<- T }
+
+func _[T any](ch T) {
+ close(ch /* ERROR "cannot close non-channel" */)
+}
+
+func _[T C0](ch T) {
+ close(ch /* ERROR "cannot close non-channel" */)
+}
+
+func _[T C1](ch T) {
+ close(ch)
+}
+
+func _[T C2](ch T) {
+ close(ch /* ERROR "cannot close receive-only channel" */)
+}
+
+func _[T C3](ch T) {
+ close(ch)
+}
+
+func _[T C4](ch T) {
+ close(ch)
+}
+
+func _[T C5[X], X any](ch T) {
+ close(ch)
+}
+
+// copy
+
+func _[T any](x, y T) {
+ copy(x /* ERROR "copy expects slice arguments" */ , y)
+}
+
+func _[T ~[]byte](x, y T) {
+ copy(x, y)
+ copy(x, "foo")
+ copy("foo" /* ERROR "expects slice arguments" */ , y)
+
+ var x2 []byte
+ copy(x2, y) // element types are identical
+ copy(y, x2) // element types are identical
+
+ type myByte byte
+ var x3 []myByte
+ copy(x3 /* ERROR "different element types" */ , y)
+ copy(y /* ERROR "different element types" */ , x3)
+}
+
+func _[T ~[]E, E any](x T, y []E) {
+ copy(x, y)
+ copy(x /* ERROR "different element types" */ , "foo")
+}
+
+func _[T ~string](x []byte, y T) {
+ copy(x, y)
+ copy(y /* ERROR "expects slice arguments" */ , x)
+}
+
+func _[T ~[]byte|~string](x T, y []byte) {
+ copy(x /* ERROR "expects slice arguments" */ , y)
+ copy(y, x)
+}
+
+type L0 []int
+type L1 []int
+
+func _[T L0 | L1](x, y T) {
+ copy(x, y)
+}
+
+// delete
+
+type M0 interface{ int }
+type M1 interface{ map[string]int }
+type M2 interface { map[string]int | map[string]float64 }
+type M3 interface{ map[string]int | map[rune]int }
+type M4[K comparable, V any] interface{ map[K]V | map[rune]V }
+
+func _[T any](m T) {
+ delete(m /* ERROR "not a map" */, "foo")
+}
+
+func _[T M0](m T) {
+ delete(m /* ERROR "not a map" */, "foo")
+}
+
+func _[T M1](m T) {
+ delete(m, "foo")
+}
+
+func _[T M2](m T) {
+ delete(m, "foo")
+ delete(m, 0 /* ERRORx `cannot use .* as string` */)
+}
+
+func _[T M3](m T) {
+ delete(m /* ERROR "must have identical key types" */, "foo")
+}
+
+func _[T M4[rune, V], V any](m T) {
+ delete(m, 'k')
+}
+
+func _[T M4[K, V], K comparable, V any](m T) {
+ delete(m /* ERROR "must have identical key types" */, "foo")
+}
+
+// make
+
+type myChan chan int
+
+func _[
+ S1 ~[]int,
+ S2 ~[]int | ~chan int,
+
+ M1 ~map[string]int,
+ M2 ~map[string]int | ~chan int,
+
+ C1 ~chan int,
+ C2 ~chan int | ~chan string,
+ C3 chan int | myChan, // single underlying type
+]() {
+ type S0 []int
+ _ = make([]int, 10)
+ _ = make(S0, 10)
+ _ = make(S1, 10)
+ _ = make() /* ERROR "not enough arguments" */
+ _ = make /* ERROR "expects 2 or 3 arguments" */ (S1)
+ _ = make(S1, 10, 20)
+ _ = make /* ERROR "expects 2 or 3 arguments" */ (S1, 10, 20, 30)
+ _ = make(S2 /* ERROR "cannot make S2: no core type" */ , 10)
+
+ type M0 map[string]int
+ _ = make(map[string]int)
+ _ = make(M0)
+ _ = make(M1)
+ _ = make(M1, 10)
+ _ = make/* ERROR "expects 1 or 2 arguments" */(M1, 10, 20)
+ _ = make(M2 /* ERROR "cannot make M2: no core type" */ )
+
+ type C0 chan int
+ _ = make(chan int)
+ _ = make(C0)
+ _ = make(C1)
+ _ = make(C1, 10)
+ _ = make/* ERROR "expects 1 or 2 arguments" */(C1, 10, 20)
+ _ = make(C2 /* ERROR "cannot make C2: no core type" */ )
+ _ = make(C3)
+}
+
+// max
+
+func _[
+ P1 ~int|~float64,
+ P2 ~int|~string|~uint,
+ P3 ~int|bool,
+]() {
+ var x1 P1
+ _ = max(x1)
+ _ = max(x1, x1)
+ _ = max(1, x1, 2)
+ const _ = max /* ERROR "max(1, x1, 2) (value of type P1 constrained by ~int | ~float64) is not constant" */ (1, x1, 2)
+
+ var x2 P2
+ _ = max(x2)
+ _ = max(x2, x2)
+ _ = max(1, 2 /* ERROR "cannot convert 2 (untyped int constant) to type P2" */, x2) // error at 2 because max is 2
+
+ _ = max(x1, x2 /* ERROR "mismatched types P1 (previous argument) and P2 (type of x2)" */ )
+}
+
+// min
+
+func _[
+ P1 ~int|~float64,
+ P2 ~int|~string|~uint,
+ P3 ~int|bool,
+]() {
+ var x1 P1
+ _ = min(x1)
+ _ = min(x1, x1)
+ _ = min(1, x1, 2)
+ const _ = min /* ERROR "min(1, x1, 2) (value of type P1 constrained by ~int | ~float64) is not constant" */ (1, x1, 2)
+
+ var x2 P2
+ _ = min(x2)
+ _ = min(x2, x2)
+ _ = min(1 /* ERROR "cannot convert 1 (untyped int constant) to type P2" */ , 2, x2) // error at 1 because min is 1
+
+ _ = min(x1, x2 /* ERROR "mismatched types P1 (previous argument) and P2 (type of x2)" */ )
+}
+
+// unsafe.Alignof
+
+func _[T comparable]() {
+ var (
+ b int64
+ a [10]T
+ s struct{ f T }
+ p *T
+ l []T
+ f func(T)
+ i interface{ m() T }
+ c chan T
+ m map[T]T
+ t T
+ )
+
+ const bb = unsafe.Alignof(b)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR "not constant" */ .Alignof(a)
+ const _ = unsafe /* ERROR "not constant" */ .Alignof(s)
+ const pp = unsafe.Alignof(p)
+ assert(pp == 8)
+ const ll = unsafe.Alignof(l)
+ assert(ll == 8)
+ const ff = unsafe.Alignof(f)
+ assert(ff == 8)
+ const ii = unsafe.Alignof(i)
+ assert(ii == 8)
+ const cc = unsafe.Alignof(c)
+ assert(cc == 8)
+ const mm = unsafe.Alignof(m)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR "not constant" */ .Alignof(t)
+}
+
+// unsafe.Offsetof
+
+func _[T comparable]() {
+ var (
+ b struct{ _, f int64 }
+ a struct{ _, f [10]T }
+ s struct{ _, f struct{ f T } }
+ p struct{ _, f *T }
+ l struct{ _, f []T }
+ f struct{ _, f func(T) }
+ i struct{ _, f interface{ m() T } }
+ c struct{ _, f chan T }
+ m struct{ _, f map[T]T }
+ t struct{ _, f T }
+ )
+
+ const bb = unsafe.Offsetof(b.f)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR "not constant" */ .Alignof(a)
+ const _ = unsafe /* ERROR "not constant" */ .Alignof(s)
+ const pp = unsafe.Offsetof(p.f)
+ assert(pp == 8)
+ const ll = unsafe.Offsetof(l.f)
+ assert(ll == 24)
+ const ff = unsafe.Offsetof(f.f)
+ assert(ff == 8)
+ const ii = unsafe.Offsetof(i.f)
+ assert(ii == 16)
+ const cc = unsafe.Offsetof(c.f)
+ assert(cc == 8)
+ const mm = unsafe.Offsetof(m.f)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR "not constant" */ .Alignof(t)
+}
+
+// unsafe.Sizeof
+
+func _[T comparable]() {
+ var (
+ b int64
+ a [10]T
+ s struct{ f T }
+ p *T
+ l []T
+ f func(T)
+ i interface{ m() T }
+ c chan T
+ m map[T]T
+ t T
+ )
+
+ const bb = unsafe.Sizeof(b)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR "not constant" */ .Alignof(a)
+ const _ = unsafe /* ERROR "not constant" */ .Alignof(s)
+ const pp = unsafe.Sizeof(p)
+ assert(pp == 8)
+ const ll = unsafe.Sizeof(l)
+ assert(ll == 24)
+ const ff = unsafe.Sizeof(f)
+ assert(ff == 8)
+ const ii = unsafe.Sizeof(i)
+ assert(ii == 16)
+ const cc = unsafe.Sizeof(c)
+ assert(cc == 8)
+ const mm = unsafe.Sizeof(m)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR "not constant" */ .Alignof(t)
+}
diff --git a/src/internal/types/testdata/check/chans.go b/src/internal/types/testdata/check/chans.go
new file mode 100644
index 0000000..fad2bce
--- /dev/null
+++ b/src/internal/types/testdata/check/chans.go
@@ -0,0 +1,62 @@
+package chans
+
+import "runtime"
+
+// Ranger returns a Sender and a Receiver. The Receiver provides a
+// Next method to retrieve values. The Sender provides a Send method
+// to send values and a Close method to stop sending values. The Next
+// method indicates when the Sender has been closed, and the Send
+// method indicates when the Receiver has been freed.
+//
+// This is a convenient way to exit a goroutine sending values when
+// the receiver stops reading them.
+func Ranger[T any]() (*Sender[T], *Receiver[T]) {
+ c := make(chan T)
+ d := make(chan bool)
+ s := &Sender[T]{values: c, done: d}
+ r := &Receiver[T]{values: c, done: d}
+ runtime.SetFinalizer(r, r.finalize)
+ return s, r
+}
+
+// A Sender is used to send values to a Receiver.
+type Sender[T any] struct {
+ values chan<- T
+ done <-chan bool
+}
+
+// Send sends a value to the receiver. It returns whether any more
+// values may be sent; if it returns false the value was not sent.
+func (s *Sender[T]) Send(v T) bool {
+ select {
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+// Close tells the receiver that no more values will arrive.
+// After Close is called, the Sender may no longer be used.
+func (s *Sender[T]) Close() {
+ close(s.values)
+}
+
+// A Receiver receives values from a Sender.
+type Receiver[T any] struct {
+ values <-chan T
+ done chan<- bool
+}
+
+// Next returns the next value from the channel. The bool result
+// indicates whether the value is valid, or whether the Sender has
+// been closed and no more values will be received.
+func (r *Receiver[T]) Next() (T, bool) {
+ v, ok := <-r.values
+ return v, ok
+}
+
+// finalize is a finalizer for the receiver.
+func (r *Receiver[T]) finalize() {
+ close(r.done)
+}
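
As a rough usage sketch of the Ranger/Sender/Receiver API above (illustrative caller code, not part of the upstream file; it assumes the same package or an import of it): a producer goroutine pairs Send with a final Close, and the consumer loops on Next until the second result reports that the Sender has been closed.

// exampleRanger is a hypothetical caller exercising only the API declared
// above: Ranger, (*Sender).Send, (*Sender).Close, (*Receiver).Next.
func exampleRanger() {
	s, r := Ranger[int]()

	// Producer: stop if Send reports the receiver was dropped,
	// otherwise announce end-of-stream with Close.
	go func() {
		for i := 0; i < 5; i++ {
			if !s.Send(i) {
				return
			}
		}
		s.Close()
	}()

	// Consumer: Next's bool result turns false once the Sender is closed.
	for {
		v, ok := r.Next()
		if !ok {
			break
		}
		println(v)
	}
}
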
diff --git a/src/internal/types/testdata/check/compliterals.go b/src/internal/types/testdata/check/compliterals.go
new file mode 100644
index 0000000..60eac97
--- /dev/null
+++ b/src/internal/types/testdata/check/compliterals.go
@@ -0,0 +1,22 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Composite literals with parameterized types
+
+package comp_literals
+
+type myStruct struct {
+ f int
+}
+
+type slice[E any] []E
+
+func struct_literals[S struct{f int}|myStruct]() {
+ _ = S{}
+ _ = S{0}
+ _ = S{f: 0}
+
+ _ = slice[int]{1, 2, 3}
+ _ = slice[S]{{}, {0}, {f:0}}
+}
diff --git a/src/internal/types/testdata/check/const0.go b/src/internal/types/testdata/check/const0.go
new file mode 100644
index 0000000..49c62d6
--- /dev/null
+++ b/src/internal/types/testdata/check/const0.go
@@ -0,0 +1,382 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// constant declarations
+
+package const0
+
+import "unsafe"
+
+// constant declarations must be initialized by constants
+var x = 0
+const c0 = x /* ERROR "not constant" */
+
+// typed constants must have constant types
+const _ interface /* ERROR "invalid constant type" */ {} = 0
+
+func _ () {
+ const _ interface /* ERROR "invalid constant type" */ {} = 0
+ for i := 0; i < 10; i++ {} // don't crash with non-nil iota here
+}
+
+// untyped constants
+const (
+ // boolean values
+ ub0 = false
+ ub1 = true
+ ub2 = 2 < 1
+ ub3 = ui1 == uf1
+ ub4 = true == 0 /* ERROR "mismatched types untyped bool and untyped int" */
+
+ // integer values
+ ui0 = 0
+ ui1 = 1
+ ui2 = 42
+ ui3 = 3141592653589793238462643383279502884197169399375105820974944592307816406286
+ ui4 = -10
+
+ ui5 = ui0 + ui1
+ ui6 = ui1 - ui1
+ ui7 = ui2 * ui1
+ ui8 = ui3 / ui3
+ ui9 = ui3 % ui3
+
+ ui10 = 1 / 0 /* ERROR "division by zero" */
+ ui11 = ui1 / 0 /* ERROR "division by zero" */
+ ui12 = ui3 / ui0 /* ERROR "division by zero" */
+ ui13 = 1 % 0 /* ERROR "division by zero" */
+ ui14 = ui1 % 0 /* ERROR "division by zero" */
+ ui15 = ui3 % ui0 /* ERROR "division by zero" */
+
+ ui16 = ui2 & ui3
+ ui17 = ui2 | ui3
+ ui18 = ui2 ^ ui3
+ ui19 = 1 /* ERROR "invalid operation" */ % 1.0
+
+ // floating point values
+ uf0 = 0.
+ uf1 = 1.
+ uf2 = 4.2e1
+ uf3 = 3.141592653589793238462643383279502884197169399375105820974944592307816406286
+ uf4 = 1e-1
+
+ uf5 = uf0 + uf1
+ uf6 = uf1 - uf1
+ uf7 = uf2 * uf1
+ uf8 = uf3 / uf3
+ uf9 = uf3 /* ERROR "not defined" */ % uf3
+
+ uf10 = 1 / 0 /* ERROR "division by zero" */
+ uf11 = uf1 / 0 /* ERROR "division by zero" */
+ uf12 = uf3 / uf0 /* ERROR "division by zero" */
+
+ uf16 = uf2 /* ERROR "not defined" */ & uf3
+ uf17 = uf2 /* ERROR "not defined" */ | uf3
+ uf18 = uf2 /* ERROR "not defined" */ ^ uf3
+
+ // complex values
+ uc0 = 0.i
+ uc1 = 1.i
+ uc2 = 4.2e1i
+ uc3 = 3.141592653589793238462643383279502884197169399375105820974944592307816406286i
+ uc4 = 1e-1i
+
+ uc5 = uc0 + uc1
+ uc6 = uc1 - uc1
+ uc7 = uc2 * uc1
+ uc8 = uc3 / uc3
+ uc9 = uc3 /* ERROR "not defined" */ % uc3
+
+ uc10 = 1 / 0 /* ERROR "division by zero" */
+ uc11 = uc1 / 0 /* ERROR "division by zero" */
+ uc12 = uc3 / uc0 /* ERROR "division by zero" */
+
+ uc16 = uc2 /* ERROR "not defined" */ & uc3
+ uc17 = uc2 /* ERROR "not defined" */ | uc3
+ uc18 = uc2 /* ERROR "not defined" */ ^ uc3
+)
+
+type (
+ mybool bool
+ myint int
+ myfloat float64
+ mycomplex complex128
+)
+
+// typed constants
+const (
+ // boolean values
+ tb0 bool = false
+ tb1 bool = true
+ tb2 mybool = 2 < 1
+ tb3 mybool = ti1 == tf1 /* ERROR "mismatched types" */
+
+ // integer values
+ ti0 int8 = ui0
+ ti1 int32 = ui1
+ ti2 int64 = ui2
+ ti3 myint = ui3 /* ERROR "overflows" */
+ ti4 myint = ui4
+
+ ti5 = ti0 /* ERROR "mismatched types" */ + ti1
+ ti6 = ti1 - ti1
+ ti7 = ti2 /* ERROR "mismatched types" */ * ti1
+ ti8 = ti3 / ti3
+ ti9 = ti3 % ti3
+
+ ti10 = 1 / 0 /* ERROR "division by zero" */
+ ti11 = ti1 / 0 /* ERROR "division by zero" */
+ ti12 = ti3 /* ERROR "mismatched types" */ / ti0
+ ti13 = 1 % 0 /* ERROR "division by zero" */
+ ti14 = ti1 % 0 /* ERROR "division by zero" */
+ ti15 = ti3 /* ERROR "mismatched types" */ % ti0
+
+ ti16 = ti2 /* ERROR "mismatched types" */ & ti3
+ ti17 = ti2 /* ERROR "mismatched types" */ | ti4
+ ti18 = ti2 ^ ti5 // no mismatched types error because the type of ti5 is unknown
+
+ // floating point values
+ tf0 float32 = 0.
+ tf1 float32 = 1.
+ tf2 float64 = 4.2e1
+ tf3 myfloat = 3.141592653589793238462643383279502884197169399375105820974944592307816406286
+ tf4 myfloat = 1e-1
+
+ tf5 = tf0 + tf1
+ tf6 = tf1 - tf1
+ tf7 = tf2 /* ERROR "mismatched types" */ * tf1
+ tf8 = tf3 / tf3
+ tf9 = tf3 /* ERROR "not defined" */ % tf3
+
+ tf10 = 1 / 0 /* ERROR "division by zero" */
+ tf11 = tf1 / 0 /* ERROR "division by zero" */
+ tf12 = tf3 /* ERROR "mismatched types" */ / tf0
+
+ tf16 = tf2 /* ERROR "mismatched types" */ & tf3
+ tf17 = tf2 /* ERROR "mismatched types" */ | tf3
+ tf18 = tf2 /* ERROR "mismatched types" */ ^ tf3
+
+ // complex values
+ tc0 = 0.i
+ tc1 = 1.i
+ tc2 = 4.2e1i
+ tc3 = 3.141592653589793238462643383279502884197169399375105820974944592307816406286i
+ tc4 = 1e-1i
+
+ tc5 = tc0 + tc1
+ tc6 = tc1 - tc1
+ tc7 = tc2 * tc1
+ tc8 = tc3 / tc3
+ tc9 = tc3 /* ERROR "not defined" */ % tc3
+
+ tc10 = 1 / 0 /* ERROR "division by zero" */
+ tc11 = tc1 / 0 /* ERROR "division by zero" */
+ tc12 = tc3 / tc0 /* ERROR "division by zero" */
+
+ tc16 = tc2 /* ERROR "not defined" */ & tc3
+ tc17 = tc2 /* ERROR "not defined" */ | tc3
+ tc18 = tc2 /* ERROR "not defined" */ ^ tc3
+)
+
+// initialization cycles
+const (
+ a /* ERROR "initialization cycle" */ = a
+ b /* ERROR "initialization cycle" */ , c /* ERROR "initialization cycle" */, d, e = e, d, c, b // TODO(gri) should only have one cycle error
+ f float64 = d
+)
+
+// multiple initialization
+const (
+ a1, a2, a3 = 7, 3.1415926, "foo"
+ b1, b2, b3 = b3, b1, 42
+ c1, c2, c3 /* ERROR "missing init expr for c3" */ = 1, 2
+ d1, d2, d3 = 1, 2, 3, 4 /* ERROR "extra init expr 4" */
+ _p0 = assert(a1 == 7)
+ _p1 = assert(a2 == 3.1415926)
+ _p2 = assert(a3 == "foo")
+ _p3 = assert(b1 == 42)
+ _p4 = assert(b2 == 42)
+ _p5 = assert(b3 == 42)
+)
+
+func _() {
+ const (
+ a1, a2, a3 = 7, 3.1415926, "foo"
+ b1, b2, b3 = b3, b1, 42
+ c1, c2, c3 /* ERROR "missing init expr for c3" */ = 1, 2
+ d1, d2, d3 = 1, 2, 3, 4 /* ERROR "extra init expr 4" */
+ _p0 = assert(a1 == 7)
+ _p1 = assert(a2 == 3.1415926)
+ _p2 = assert(a3 == "foo")
+ _p3 = assert(b1 == 42)
+ _p4 = assert(b2 == 42)
+ _p5 = assert(b3 == 42)
+ )
+}
+
+// iota
+const (
+ iota0 = iota
+ iota1 = iota
+ iota2 = iota*2
+ _a0 = assert(iota0 == 0)
+ _a1 = assert(iota1 == 1)
+ _a2 = assert(iota2 == 4)
+ iota6 = iota*3
+
+ iota7
+ iota8
+ _a3 = assert(iota7 == 21)
+ _a4 = assert(iota8 == 24)
+)
+
+const (
+ _b0 = iota
+ _b1 = assert(iota + iota2 == 5)
+ _b2 = len([iota]int{}) // iota may appear in a type!
+ _b3 = assert(_b2 == 2)
+ _b4 = len(A{})
+)
+
+type A [iota /* ERROR "cannot use iota" */ ]int
+
+// constant expressions with operands across different
+// constant declarations must use the right iota values
+const (
+ _c0 = iota
+ _c1
+ _c2
+ _x = _c2 + _d1 + _e0 // 3
+)
+
+const (
+ _d0 = iota
+ _d1
+)
+
+const (
+ _e0 = iota
+)
+
+var _ = assert(_x == 3)
+
+// special cases
+const (
+ _n0 = nil /* ERROR "not constant" */
+ _n1 = [ /* ERROR "not constant" */ ]int{}
+)
+
+// iotas must not be usable in expressions outside constant declarations
+type _ [iota /* ERROR "iota outside constant decl" */ ]byte
+var _ = iota /* ERROR "iota outside constant decl" */
+func _() {
+ _ = iota /* ERROR "iota outside constant decl" */
+ const _ = iota
+ _ = iota /* ERROR "iota outside constant decl" */
+}
+
+func _() {
+ iota := 123
+ const x = iota /* ERROR "is not constant" */
+ var y = iota
+ _ = y
+}
+
+// iotas are usable inside closures in constant declarations (#22345)
+const (
+ _ = iota
+ _ = len([iota]byte{})
+ _ = unsafe.Sizeof(iota)
+ _ = unsafe.Sizeof(func() { _ = iota })
+ _ = unsafe.Sizeof(func() { var _ = iota })
+ _ = unsafe.Sizeof(func() { const _ = iota })
+ _ = unsafe.Sizeof(func() { type _ [iota]byte })
+ _ = unsafe.Sizeof(func() { func() int { return iota }() })
+)
+
+// verify inner and outer const declarations have distinct iotas
+const (
+ zero = iota
+ one = iota
+ _ = unsafe.Sizeof(func() {
+ var x [iota]int // [2]int
+ const (
+ Zero = iota
+ One
+ Two
+ _ = unsafe.Sizeof([iota-1]int{} == x) // assert types are equal
+ _ = unsafe.Sizeof([Two]int{} == x) // assert types are equal
+ )
+ var z [iota]int // [2]int
+ _ = unsafe.Sizeof([2]int{} == z) // assert types are equal
+ })
+ three = iota // the sequence continues
+)
+var _ [three]int = [3]int{} // assert 'three' has correct value
+
+var (
+ _ = iota /* ERROR "iota outside constant decl" */
+ _ = unsafe.Sizeof(iota /* ERROR "iota outside constant decl" */ )
+ _ = unsafe.Sizeof(func() { _ = iota /* ERROR "iota outside constant decl" */ })
+ _ = unsafe.Sizeof(func() { var _ = iota /* ERROR "iota outside constant decl" */ })
+ _ = unsafe.Sizeof(func() { type _ [iota /* ERROR "iota outside constant decl" */ ]byte })
+ _ = unsafe.Sizeof(func() { func() int { return iota /* ERROR "iota outside constant decl" */ }() })
+)
+
+// constant arithmetic precision and rounding must lead to expected (integer) results
+var _ = []int64{
+ 0.0005 * 1e9,
+ 0.001 * 1e9,
+ 0.005 * 1e9,
+ 0.01 * 1e9,
+ 0.05 * 1e9,
+ 0.1 * 1e9,
+ 0.5 * 1e9,
+ 1 * 1e9,
+ 5 * 1e9,
+}
+
+const _ = unsafe.Sizeof(func() {
+ const _ = 0
+ _ = iota
+
+ const (
+ zero = iota
+ one
+ )
+ assert(one == 1)
+ assert(iota == 0)
+})
+
+// issue #52438
+const i1 = iota
+const i2 = iota
+const i3 = iota
+
+func _() {
+ assert(i1 == 0)
+ assert(i2 == 0)
+ assert(i3 == 0)
+
+ const i4 = iota
+ const i5 = iota
+ const i6 = iota
+
+ assert(i4 == 0)
+ assert(i5 == 0)
+ assert(i6 == 0)
+}
+
+// untyped constants must not get arbitrarily large
+const prec = 512 // internal maximum precision for integers
+const maxInt = (1<<(prec/2) - 1) * (1<<(prec/2) + 1) // == 1<<prec - 1
+
+const _ = maxInt + /* ERROR "constant addition overflow" */ 1
+const _ = -maxInt - /* ERROR "constant subtraction overflow" */ 1
+const _ = maxInt ^ /* ERROR "constant bitwise XOR overflow" */ -1
+const _ = maxInt * /* ERROR "constant multiplication overflow" */ 2
+const _ = maxInt << /* ERROR "constant shift overflow" */ 2
+const _ = 1 << /* ERROR "constant shift overflow" */ prec
+
+const _ = ^ /* ERROR "constant bitwise complement overflow" */ maxInt
diff --git a/src/internal/types/testdata/check/const1.go b/src/internal/types/testdata/check/const1.go
new file mode 100644
index 0000000..c912801
--- /dev/null
+++ b/src/internal/types/testdata/check/const1.go
@@ -0,0 +1,334 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// constant conversions
+
+package const1
+
+import "math"
+
+const(
+ mi = ^int(0)
+ mu = ^uint(0)
+ mp = ^uintptr(0)
+
+ logSizeofInt = uint(mi>>8&1 + mi>>16&1 + mi>>32&1)
+ logSizeofUint = uint(mu>>8&1 + mu>>16&1 + mu>>32&1)
+ logSizeofUintptr = uint(mp>>8&1 + mp>>16&1 + mp>>32&1)
+)
+
+const (
+ minInt8 = -1<<(8<<iota - 1)
+ minInt16
+ minInt32
+ minInt64
+ minInt = -1<<(8<<logSizeofInt - 1)
+)
+
+const (
+ maxInt8 = 1<<(8<<iota - 1) - 1
+ maxInt16
+ maxInt32
+ maxInt64
+ maxInt = 1<<(8<<logSizeofInt - 1) - 1
+)
+
+const (
+ maxUint8 = 1<<(8<<iota) - 1
+ maxUint16
+ maxUint32
+ maxUint64
+ maxUint = 1<<(8<<logSizeofUint) - 1
+ maxUintptr = 1<<(8<<logSizeofUintptr) - 1
+)
+
+const (
+ smallestFloat32 = 1.0 / (1<<(127 - 1 + 23))
+ // TODO(gri) The compiler limits integers to 512 bit and thus
+ // we cannot compute the value (1<<(1023 - 1 + 52))
+ // without overflow. For now we match the compiler.
+ // See also issue #44057.
+ // smallestFloat64 = 1.0 / (1<<(1023 - 1 + 52))
+ smallestFloat64 = math.SmallestNonzeroFloat64
+)
+
+const (
+ _ = assert(smallestFloat32 > 0)
+ _ = assert(smallestFloat64 > 0)
+)
+
+const (
+ maxFloat32 = 1<<127 * (1<<24 - 1) / (1.0<<23)
+ // TODO(gri) The compiler limits integers to 512 bit and thus
+ // we cannot compute the value 1<<1023
+ // without overflow. For now we match the compiler.
+ // See also issue #44057.
+ // maxFloat64 = 1<<1023 * (1<<53 - 1) / (1.0<<52)
+ maxFloat64 = math.MaxFloat64
+)
+
+const (
+ _ int8 = minInt8 /* ERROR "overflows" */ - 1
+ _ int8 = minInt8
+ _ int8 = maxInt8
+ _ int8 = maxInt8 /* ERROR "overflows" */ + 1
+ _ int8 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int8(minInt8 /* ERROR "cannot convert" */ - 1)
+ _ = int8(minInt8)
+ _ = int8(maxInt8)
+ _ = int8(maxInt8 /* ERROR "cannot convert" */ + 1)
+ _ = int8(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int16 = minInt16 /* ERROR "overflows" */ - 1
+ _ int16 = minInt16
+ _ int16 = maxInt16
+ _ int16 = maxInt16 /* ERROR "overflows" */ + 1
+ _ int16 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int16(minInt16 /* ERROR "cannot convert" */ - 1)
+ _ = int16(minInt16)
+ _ = int16(maxInt16)
+ _ = int16(maxInt16 /* ERROR "cannot convert" */ + 1)
+ _ = int16(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int32 = minInt32 /* ERROR "overflows" */ - 1
+ _ int32 = minInt32
+ _ int32 = maxInt32
+ _ int32 = maxInt32 /* ERROR "overflows" */ + 1
+ _ int32 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int32(minInt32 /* ERROR "cannot convert" */ - 1)
+ _ = int32(minInt32)
+ _ = int32(maxInt32)
+ _ = int32(maxInt32 /* ERROR "cannot convert" */ + 1)
+ _ = int32(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int64 = minInt64 /* ERROR "overflows" */ - 1
+ _ int64 = minInt64
+ _ int64 = maxInt64
+ _ int64 = maxInt64 /* ERROR "overflows" */ + 1
+ _ int64 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int64(minInt64 /* ERROR "cannot convert" */ - 1)
+ _ = int64(minInt64)
+ _ = int64(maxInt64)
+ _ = int64(maxInt64 /* ERROR "cannot convert" */ + 1)
+ _ = int64(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int = minInt /* ERROR "overflows" */ - 1
+ _ int = minInt
+ _ int = maxInt
+ _ int = maxInt /* ERROR "overflows" */ + 1
+ _ int = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int(minInt /* ERROR "cannot convert" */ - 1)
+ _ = int(minInt)
+ _ = int(maxInt)
+ _ = int(maxInt /* ERROR "cannot convert" */ + 1)
+ _ = int(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint8 = 0 /* ERROR "overflows" */ - 1
+ _ uint8 = 0
+ _ uint8 = maxUint8
+ _ uint8 = maxUint8 /* ERROR "overflows" */ + 1
+ _ uint8 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint8(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint8(0)
+ _ = uint8(maxUint8)
+ _ = uint8(maxUint8 /* ERROR "cannot convert" */ + 1)
+ _ = uint8(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint16 = 0 /* ERROR "overflows" */ - 1
+ _ uint16 = 0
+ _ uint16 = maxUint16
+ _ uint16 = maxUint16 /* ERROR "overflows" */ + 1
+ _ uint16 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint16(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint16(0)
+ _ = uint16(maxUint16)
+ _ = uint16(maxUint16 /* ERROR "cannot convert" */ + 1)
+ _ = uint16(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint32 = 0 /* ERROR "overflows" */ - 1
+ _ uint32 = 0
+ _ uint32 = maxUint32
+ _ uint32 = maxUint32 /* ERROR "overflows" */ + 1
+ _ uint32 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint32(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint32(0)
+ _ = uint32(maxUint32)
+ _ = uint32(maxUint32 /* ERROR "cannot convert" */ + 1)
+ _ = uint32(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint64 = 0 /* ERROR "overflows" */ - 1
+ _ uint64 = 0
+ _ uint64 = maxUint64
+ _ uint64 = maxUint64 /* ERROR "overflows" */ + 1
+ _ uint64 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint64(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint64(0)
+ _ = uint64(maxUint64)
+ _ = uint64(maxUint64 /* ERROR "cannot convert" */ + 1)
+ _ = uint64(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint = 0 /* ERROR "overflows" */ - 1
+ _ uint = 0
+ _ uint = maxUint
+ _ uint = maxUint /* ERROR "overflows" */ + 1
+ _ uint = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint(0)
+ _ = uint(maxUint)
+ _ = uint(maxUint /* ERROR "cannot convert" */ + 1)
+ _ = uint(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uintptr = 0 /* ERROR "overflows" */ - 1
+ _ uintptr = 0
+ _ uintptr = maxUintptr
+ _ uintptr = maxUintptr /* ERROR "overflows" */ + 1
+ _ uintptr = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uintptr(0 /* ERROR "cannot convert" */ - 1)
+ _ = uintptr(0)
+ _ = uintptr(maxUintptr)
+ _ = uintptr(maxUintptr /* ERROR "cannot convert" */ + 1)
+ _ = uintptr(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ float32 = minInt64
+ _ float64 = minInt64
+ _ complex64 = minInt64
+ _ complex128 = minInt64
+
+ _ = float32(minInt64)
+ _ = float64(minInt64)
+ _ = complex64(minInt64)
+ _ = complex128(minInt64)
+)
+
+const (
+ _ float32 = maxUint64
+ _ float64 = maxUint64
+ _ complex64 = maxUint64
+ _ complex128 = maxUint64
+
+ _ = float32(maxUint64)
+ _ = float64(maxUint64)
+ _ = complex64(maxUint64)
+ _ = complex128(maxUint64)
+)
+
+// TODO(gri) find smaller deltas below
+
+const delta32 = maxFloat32/(1 << 23)
+
+const (
+ _ float32 = - /* ERROR "overflow" */ (maxFloat32 + delta32)
+ _ float32 = -maxFloat32
+ _ float32 = maxFloat32
+ _ float32 = maxFloat32 /* ERROR "overflow" */ + delta32
+
+ _ = float32(- /* ERROR "cannot convert" */ (maxFloat32 + delta32))
+ _ = float32(-maxFloat32)
+ _ = float32(maxFloat32)
+ _ = float32(maxFloat32 /* ERROR "cannot convert" */ + delta32)
+
+ _ = assert(float32(smallestFloat32) == smallestFloat32)
+ _ = assert(float32(smallestFloat32/2) == 0)
+ _ = assert(float32(smallestFloat64) == 0)
+ _ = assert(float32(smallestFloat64/2) == 0)
+)
+
+const delta64 = maxFloat64/(1 << 52)
+
+const (
+ _ float64 = - /* ERROR "overflow" */ (maxFloat64 + delta64)
+ _ float64 = -maxFloat64
+ _ float64 = maxFloat64
+ _ float64 = maxFloat64 /* ERROR "overflow" */ + delta64
+
+ _ = float64(- /* ERROR "cannot convert" */ (maxFloat64 + delta64))
+ _ = float64(-maxFloat64)
+ _ = float64(maxFloat64)
+ _ = float64(maxFloat64 /* ERROR "cannot convert" */ + delta64)
+
+ _ = assert(float64(smallestFloat32) == smallestFloat32)
+ _ = assert(float64(smallestFloat32/2) == smallestFloat32/2)
+ _ = assert(float64(smallestFloat64) == smallestFloat64)
+ _ = assert(float64(smallestFloat64/2) == 0)
+)
+
+const (
+ _ complex64 = - /* ERROR "overflow" */ (maxFloat32 + delta32)
+ _ complex64 = -maxFloat32
+ _ complex64 = maxFloat32
+ _ complex64 = maxFloat32 /* ERROR "overflow" */ + delta32
+
+ _ = complex64(- /* ERROR "cannot convert" */ (maxFloat32 + delta32))
+ _ = complex64(-maxFloat32)
+ _ = complex64(maxFloat32)
+ _ = complex64(maxFloat32 /* ERROR "cannot convert" */ + delta32)
+)
+
+const (
+ _ complex128 = - /* ERROR "overflow" */ (maxFloat64 + delta64)
+ _ complex128 = -maxFloat64
+ _ complex128 = maxFloat64
+ _ complex128 = maxFloat64 /* ERROR "overflow" */ + delta64
+
+ _ = complex128(- /* ERROR "cannot convert" */ (maxFloat64 + delta64))
+ _ = complex128(-maxFloat64)
+ _ = complex128(maxFloat64)
+ _ = complex128(maxFloat64 /* ERROR "cannot convert" */ + delta64)
+)
+
+// Initialization of typed constant and conversion are the same:
+const (
+ f32 = 1 + smallestFloat32
+ x32 float32 = f32
+ y32 = float32(f32)
+ _ = assert(x32 - y32 == 0)
+)
+
+const (
+ f64 = 1 + smallestFloat64
+ x64 float64 = f64
+ y64 = float64(f64)
+ _ = assert(x64 - y64 == 0)
+)
+
+const (
+ _ = int8(-1) << 7
+ _ = int8 /* ERROR "overflows" */ (-1) << 8
+
+ _ = uint32(1) << 31
+ _ = uint32 /* ERROR "overflows" */ (1) << 32
+)
diff --git a/src/internal/types/testdata/check/constdecl.go b/src/internal/types/testdata/check/constdecl.go
new file mode 100644
index 0000000..e7b871b
--- /dev/null
+++ b/src/internal/types/testdata/check/constdecl.go
@@ -0,0 +1,160 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package constdecl
+
+import "math"
+import "unsafe"
+
+var v int
+
+// Const decls must be initialized by constants.
+const _ = v /* ERROR "not constant" */
+const _ = math /* ERROR "not constant" */ .Sin(0)
+const _ = int /* ERROR "not an expression" */
+
+func _() {
+ const _ = v /* ERROR "not constant" */
+ const _ = math /* ERROR "not constant" */ .Sin(0)
+ const _ = int /* ERROR "not an expression" */
+}
+
+// Identifier and expression arity must match.
+const _ /* ERROR "missing init expr for _" */
+const _ = 1, 2 /* ERROR "extra init expr 2" */
+
+const _ /* ERROR "missing init expr for _" */ int
+const _ int = 1, 2 /* ERROR "extra init expr 2" */
+
+const (
+ _ /* ERROR "missing init expr for _" */
+ _ = 1, 2 /* ERROR "extra init expr 2" */
+
+ _ /* ERROR "missing init expr for _" */ int
+ _ int = 1, 2 /* ERROR "extra init expr 2" */
+)
+
+const (
+ _ = 1
+ _
+ _, _ /* ERROR "missing init expr for _" */
+ _
+)
+
+const (
+ _, _ = 1, 2
+ _, _
+ _ /* ERROR "extra init expr at" */
+ _, _
+ _, _, _ /* ERROR "missing init expr for _" */
+ _, _
+)
+
+func _() {
+ const _ /* ERROR "missing init expr for _" */
+ const _ = 1, 2 /* ERROR "extra init expr 2" */
+
+ const _ /* ERROR "missing init expr for _" */ int
+ const _ int = 1, 2 /* ERROR "extra init expr 2" */
+
+ const (
+ _ /* ERROR "missing init expr for _" */
+ _ = 1, 2 /* ERROR "extra init expr 2" */
+
+ _ /* ERROR "missing init expr for _" */ int
+ _ int = 1, 2 /* ERROR "extra init expr 2" */
+ )
+
+ const (
+ _ = 1
+ _
+ _, _ /* ERROR "missing init expr for _" */
+ _
+ )
+
+ const (
+ _, _ = 1, 2
+ _, _
+ _ /* ERROR "extra init expr at" */
+ _, _
+ _, _, _ /* ERROR "missing init expr for _" */
+ _, _
+ )
+}
+
+// Test case for constant with invalid initialization.
+// Caused panic because the constant value was not set up (gri - 7/8/2014).
+func _() {
+ const (
+ x string = missing /* ERROR "undefined" */
+ y = x + ""
+ )
+}
+
+// Test case for constants depending on function literals (see also #22992).
+const A /* ERROR "initialization cycle" */ = unsafe.Sizeof(func() { _ = A })
+
+func _() {
+ // The function literal below must not see a.
+ const a = unsafe.Sizeof(func() { _ = a /* ERROR "undefined" */ })
+ const b = unsafe.Sizeof(func() { _ = a })
+
+ // The function literal below must not see x, y, or z.
+ const x, y, z = 0, 1, unsafe.Sizeof(func() { _ = x /* ERROR "undefined" */ + y /* ERROR "undefined" */ + z /* ERROR "undefined" */ })
+}
+
+// Test cases for errors in inherited constant initialization expressions.
+// Errors related to inherited initialization expressions must appear at
+// the constant identifier being declared, not at the original expression
+// (issues #42991, #42992).
+const (
+ _ byte = 255 + iota
+ /* some gap */
+ _ // ERROR "overflows"
+ /* some gap */
+ /* some gap */ _ /* ERROR "overflows" */; _ /* ERROR "overflows" */
+ /* some gap */
+ _ = 255 + iota
+ _ = byte /* ERROR "overflows" */ (255) + iota
+ _ /* ERROR "overflows" */
+)
+
+// Test cases from issue.
+const (
+ ok = byte(iota + 253)
+ bad
+ barn
+ bard // ERROR "cannot convert"
+)
+
+const (
+ c = len([1 - iota]int{})
+ d
+ e // ERROR "invalid array length"
+ f // ERROR "invalid array length"
+)
+
+// Test that identifiers in implicit (omitted) RHS
+// expressions of constant declarations are resolved
+// in the correct context; see issues #49157, #53585.
+const X = 2
+
+func _() {
+ const (
+ A = iota // 0
+ iota = iota // 1
+ B // 1 (iota is declared locally on prev. line)
+ C // 1
+ )
+ assert(A == 0 && B == 1 && C == 1)
+
+ const (
+ X = X + X
+ Y
+ Z = iota
+ )
+ assert(X == 4 && Y == 8 && Z == 1)
+}
+
+// TODO(gri) move extra tests from testdata/const0.src into here
diff --git a/src/internal/types/testdata/check/conversions0.go b/src/internal/types/testdata/check/conversions0.go
new file mode 100644
index 0000000..e1336c0
--- /dev/null
+++ b/src/internal/types/testdata/check/conversions0.go
@@ -0,0 +1,93 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// conversions
+
+package conversions
+
+import "unsafe"
+
+// argument count
+var (
+ _ = int() /* ERROR "missing argument" */
+ _ = int(1, 2 /* ERROR "too many arguments" */ )
+)
+
+// numeric constant conversions are in const1.go.
+
+func string_conversions() {
+ const A = string(65)
+ assert(A == "A")
+ const E = string(-1)
+ assert(E == "\uFFFD")
+ assert(E == string(1234567890))
+
+ type myint int
+ assert(A == string(myint(65)))
+
+ type mystring string
+ const _ mystring = mystring("foo")
+
+ const _ = string(true /* ERROR "cannot convert" */ )
+ const _ = string(1.2 /* ERROR "cannot convert" */ )
+ const _ = string(nil /* ERROR "cannot convert" */ )
+
+ // issues 11357, 11353: argument must be of integer type
+ _ = string(0.0 /* ERROR "cannot convert" */ )
+ _ = string(0i /* ERROR "cannot convert" */ )
+ _ = string(1 /* ERROR "cannot convert" */ + 2i)
+}
+
+func interface_conversions() {
+ type E interface{}
+
+ type I1 interface{
+ m1()
+ }
+
+ type I2 interface{
+ m1()
+ m2(x int)
+ }
+
+ type I3 interface{
+ m1()
+ m2() int
+ }
+
+ var e E
+ var i1 I1
+ var i2 I2
+ var i3 I3
+
+ _ = E(0)
+ _ = E(nil)
+ _ = E(e)
+ _ = E(i1)
+ _ = E(i2)
+
+ _ = I1(0 /* ERROR "cannot convert" */ )
+ _ = I1(nil)
+ _ = I1(i1)
+ _ = I1(e /* ERROR "cannot convert" */ )
+ _ = I1(i2)
+
+ _ = I2(nil)
+ _ = I2(i1 /* ERROR "cannot convert" */ )
+ _ = I2(i2)
+ _ = I2(i3 /* ERROR "cannot convert" */ )
+
+ _ = I3(nil)
+ _ = I3(i1 /* ERROR "cannot convert" */ )
+ _ = I3(i2 /* ERROR "cannot convert" */ )
+ _ = I3(i3)
+
+ // TODO(gri) add more tests, improve error message
+}
+
+func issue6326() {
+ type T unsafe.Pointer
+ var x T
+ _ = uintptr(x) // see issue 6326
+}
diff --git a/src/internal/types/testdata/check/conversions1.go b/src/internal/types/testdata/check/conversions1.go
new file mode 100644
index 0000000..65aabde
--- /dev/null
+++ b/src/internal/types/testdata/check/conversions1.go
@@ -0,0 +1,313 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test various valid and invalid struct assignments and conversions.
+// Does not compile.
+
+package conversions2
+
+type I interface {
+ m()
+}
+
+// conversions between structs
+
+func _() {
+ type S struct{}
+ type T struct{}
+ var s S
+ var t T
+ var u struct{}
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u
+ s = S(s)
+ s = S(t)
+ s = S(u)
+ t = u
+ t = T(u)
+}
+
+func _() {
+ type S struct{ x int }
+ type T struct {
+ x int "foo"
+ }
+ var s S
+ var t T
+ var u struct {
+ x int "bar"
+ }
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = S(s)
+ s = S(t)
+ s = S(u)
+ t = u // ERRORx `cannot use .* in assignment`
+ t = T(u)
+}
+
+func _() {
+ type E struct{ x int }
+ type S struct{ x E }
+ type T struct {
+ x E "foo"
+ }
+ var s S
+ var t T
+ var u struct {
+ x E "bar"
+ }
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = S(s)
+ s = S(t)
+ s = S(u)
+ t = u // ERRORx `cannot use .* in assignment`
+ t = T(u)
+}
+
+func _() {
+ type S struct {
+ x struct {
+ x int "foo"
+ }
+ }
+ type T struct {
+ x struct {
+ x int "bar"
+ } "foo"
+ }
+ var s S
+ var t T
+ var u struct {
+ x struct {
+ x int "bar"
+ } "bar"
+ }
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = S(s)
+ s = S(t)
+ s = S(u)
+ t = u // ERRORx `cannot use .* in assignment`
+ t = T(u)
+}
+
+func _() {
+ type E1 struct {
+ x int "foo"
+ }
+ type E2 struct {
+ x int "bar"
+ }
+ type S struct{ x E1 }
+ type T struct {
+ x E2 "foo"
+ }
+ var s S
+ var t T
+ var u struct {
+ x E2 "bar"
+ }
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = S(s)
+ s = S(t /* ERROR "cannot convert" */ )
+ s = S(u /* ERROR "cannot convert" */ )
+ t = u // ERRORx `cannot use .* in assignment`
+ t = T(u)
+}
+
+func _() {
+ type E struct{ x int }
+ type S struct {
+ f func(struct {
+ x int "foo"
+ })
+ }
+ type T struct {
+ f func(struct {
+ x int "bar"
+ })
+ }
+ var s S
+ var t T
+ var u struct{ f func(E) }
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = S(s)
+ s = S(t)
+ s = S(u /* ERROR "cannot convert" */ )
+ t = u // ERRORx `cannot use .* in assignment`
+ t = T(u /* ERROR "cannot convert" */ )
+}
+
+// conversions between pointers to structs
+
+func _() {
+ type S struct{}
+ type T struct{}
+ var s *S
+ var t *T
+ var u *struct{}
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u)
+ t = u // ERRORx `cannot use .* in assignment`
+ t = (*T)(u)
+}
+
+func _() {
+ type S struct{ x int }
+ type T struct {
+ x int "foo"
+ }
+ var s *S
+ var t *T
+ var u *struct {
+ x int "bar"
+ }
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u)
+ t = u // ERRORx `cannot use .* in assignment`
+ t = (*T)(u)
+}
+
+func _() {
+ type E struct{ x int }
+ type S struct{ x E }
+ type T struct {
+ x E "foo"
+ }
+ var s *S
+ var t *T
+ var u *struct {
+ x E "bar"
+ }
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u)
+ t = u // ERRORx `cannot use .* in assignment`
+ t = (*T)(u)
+}
+
+func _() {
+ type S struct {
+ x struct {
+ x int "foo"
+ }
+ }
+ type T struct {
+ x struct {
+ x int "bar"
+ } "foo"
+ }
+ var s *S
+ var t *T
+ var u *struct {
+ x struct {
+ x int "bar"
+ } "bar"
+ }
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u)
+ t = u // ERRORx `cannot use .* in assignment`
+ t = (*T)(u)
+}
+
+func _() {
+ type E1 struct {
+ x int "foo"
+ }
+ type E2 struct {
+ x int "bar"
+ }
+ type S struct{ x E1 }
+ type T struct {
+ x E2 "foo"
+ }
+ var s *S
+ var t *T
+ var u *struct {
+ x E2 "bar"
+ }
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = (*S)(s)
+ s = (*S)(t /* ERROR "cannot convert" */ )
+ s = (*S)(u /* ERROR "cannot convert" */ )
+ t = u // ERRORx `cannot use .* in assignment`
+ t = (*T)(u)
+}
+
+func _() {
+ type E struct{ x int }
+ type S struct {
+ f func(struct {
+ x int "foo"
+ })
+ }
+ type T struct {
+ f func(struct {
+ x int "bar"
+ })
+ }
+ var s *S
+ var t *T
+ var u *struct{ f func(E) }
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u /* ERROR "cannot convert" */ )
+ t = u // ERRORx `cannot use .* in assignment`
+ t = (*T)(u /* ERROR "cannot convert" */ )
+}
+
+func _() {
+ type E struct{ x int }
+ type S struct {
+ f func(*struct {
+ x int "foo"
+ })
+ }
+ type T struct {
+ f func(*struct {
+ x int "bar"
+ })
+ }
+ var s *S
+ var t *T
+ var u *struct{ f func(E) }
+ s = s
+ s = t // ERRORx `cannot use .* in assignment`
+ s = u // ERRORx `cannot use .* in assignment`
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u /* ERROR "cannot convert" */ )
+ t = u // ERRORx `cannot use .* in assignment`
+ t = (*T)(u /* ERROR "cannot convert" */ )
+}
diff --git a/src/internal/types/testdata/check/cycles0.go b/src/internal/types/testdata/check/cycles0.go
new file mode 100644
index 0000000..8ad7877
--- /dev/null
+++ b/src/internal/types/testdata/check/cycles0.go
@@ -0,0 +1,175 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cycles
+
+import "unsafe"
+
+type (
+ T0 int
+ T1 /* ERROR "invalid recursive type: T1 refers to itself" */ T1
+ T2 *T2
+
+ T3 /* ERROR "invalid recursive type" */ T4
+ T4 T5
+ T5 T3
+
+ T6 T7
+ T7 *T8
+ T8 T6
+
+ // arrays
+ A0 /* ERROR "invalid recursive type" */ [10]A0
+ A1 [10]*A1
+
+ A2 /* ERROR "invalid recursive type" */ [10]A3
+ A3 [10]A4
+ A4 A2
+
+ A5 [10]A6
+ A6 *A5
+
+ // slices
+ L0 []L0
+
+ // structs
+ S0 /* ERROR "invalid recursive type: S0 refers to itself" */ struct{ _ S0 }
+ S1 /* ERROR "invalid recursive type: S1 refers to itself" */ struct{ S1 }
+ S2 struct{ _ *S2 }
+ S3 struct{ *S3 }
+
+ S4 /* ERROR "invalid recursive type" */ struct{ S5 }
+ S5 struct{ S6 }
+ S6 S4
+
+ // pointers
+ P0 *P0
+ PP *struct{ PP.f /* ERROR "PP.f is not a type" */ }
+
+ // functions
+ F0 func(F0)
+ F1 func() F1
+ F2 func(F2) F2
+
+ // interfaces
+ I0 /* ERROR "invalid recursive type: I0 refers to itself" */ interface{ I0 }
+
+ I1 /* ERROR "invalid recursive type" */ interface{ I2 }
+ I2 interface{ I3 }
+ I3 interface{ I1 }
+
+ I4 interface{ f(I4) }
+
+ // testcase for issue 5090
+ I5 interface{ f(I6) }
+ I6 interface{ I5 }
+
+ // maps
+ M0 map[M0 /* ERROR "invalid map key" */ ]M0
+
+ // channels
+ C0 chan C0
+)
+
+// test case for issue #34771
+type (
+ AA /* ERROR "invalid recursive type" */ B
+ B C
+ C [10]D
+ D E
+ E AA
+)
+
+func _() {
+ type (
+ t1 /* ERROR "invalid recursive type: t1 refers to itself" */ t1
+ t2 *t2
+
+ t3 t4 /* ERROR "undefined" */
+ t4 t5 /* ERROR "undefined" */
+ t5 t3
+
+ // arrays
+ a0 /* ERROR "invalid recursive type: a0 refers to itself" */ [10]a0
+ a1 [10]*a1
+
+ // slices
+ l0 []l0
+
+ // structs
+ s0 /* ERROR "invalid recursive type: s0 refers to itself" */ struct{ _ s0 }
+ s1 /* ERROR "invalid recursive type: s1 refers to itself" */ struct{ s1 }
+ s2 struct{ _ *s2 }
+ s3 struct{ *s3 }
+
+ // pointers
+ p0 *p0
+
+ // functions
+ f0 func(f0)
+ f1 func() f1
+ f2 func(f2) f2
+
+ // interfaces
+ i0 /* ERROR "invalid recursive type: i0 refers to itself" */ interface{ i0 }
+
+ // maps
+ m0 map[m0 /* ERROR "invalid map key" */ ]m0
+
+ // channels
+ c0 chan c0
+ )
+}
+
+// test cases for issue 6667
+
+type A [10]map[A /* ERROR "invalid map key" */ ]bool
+
+type S struct {
+ m map[S /* ERROR "invalid map key" */ ]bool
+}
+
+// test cases for issue 7236
+// (cycle detection must not be dependent on starting point of resolution)
+
+type (
+ P1 *T9
+ T9 /* ERROR "invalid recursive type: T9 refers to itself" */ T9
+
+ T10 /* ERROR "invalid recursive type: T10 refers to itself" */ T10
+ P2 *T10
+)
+
+func (T11) m() {}
+
+type T11 /* ERROR "invalid recursive type: T11 refers to itself" */ struct{ T11 }
+
+type T12 /* ERROR "invalid recursive type: T12 refers to itself" */ struct{ T12 }
+
+func (*T12) m() {}
+
+type (
+ P3 *T13
+ T13 /* ERROR "invalid recursive type" */ T13
+)
+
+// test cases for issue 18643
+// (type cycle detection when non-type expressions are involved)
+type (
+ T14 [len(T14 /* ERROR "invalid recursive type" */ {})]int
+ T15 [][len(T15 /* ERROR "invalid recursive type" */ {})]int
+ T16 map[[len(T16 /* ERROR "invalid recursive type" */ {1:2})]int]int
+ T17 map[int][len(T17 /* ERROR "invalid recursive type" */ {1:2})]int
+)
+
+// Test case for types depending on function literals (see also #22992).
+type T20 chan [unsafe.Sizeof(func(ch T20){ _ = <-ch })]byte
+type T22 = chan [unsafe.Sizeof(func(ch T20){ _ = <-ch })]byte
+
+func _() {
+ type T0 func(T0)
+ type T1 /* ERROR "invalid recursive type" */ = func(T1)
+ type T2 chan [unsafe.Sizeof(func(ch T2){ _ = <-ch })]byte
+ type T3 /* ERROR "invalid recursive type" */ = chan [unsafe.Sizeof(func(ch T3){ _ = <-ch })]byte
+}
diff --git a/src/internal/types/testdata/check/cycles1.go b/src/internal/types/testdata/check/cycles1.go
new file mode 100644
index 0000000..ae2b38e
--- /dev/null
+++ b/src/internal/types/testdata/check/cycles1.go
@@ -0,0 +1,77 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ A interface {
+ a() interface {
+ ABC1
+ }
+ }
+ B interface {
+ b() interface {
+ ABC2
+ }
+ }
+ C interface {
+ c() interface {
+ ABC3
+ }
+ }
+
+ AB interface {
+ A
+ B
+ }
+ BC interface {
+ B
+ C
+ }
+
+ ABC1 interface {
+ A
+ B
+ C
+ }
+ ABC2 interface {
+ AB
+ C
+ }
+ ABC3 interface {
+ A
+ BC
+ }
+)
+
+var (
+ x1 ABC1
+ x2 ABC2
+ x3 ABC3
+)
+
+func _() {
+ // all types have the same method set
+ x1 = x2
+ x2 = x1
+
+ x1 = x3
+ x3 = x1
+
+ x2 = x3
+ x3 = x2
+
+ // all methods return the same type again
+ x1 = x1.a()
+ x1 = x1.b()
+ x1 = x1.c()
+
+ x2 = x2.a()
+ x2 = x2.b()
+ x2 = x2.c()
+
+ x3 = x3.a()
+ x3 = x3.b()
+ x3 = x3.c()
+}
diff --git a/src/internal/types/testdata/check/cycles2.go b/src/internal/types/testdata/check/cycles2.go
new file mode 100644
index 0000000..a932d28
--- /dev/null
+++ b/src/internal/types/testdata/check/cycles2.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+// Test case for issue 5090
+
+type t interface {
+ f(u)
+}
+
+type u interface {
+ t
+}
+
+func _() {
+ var t t
+ var u u
+
+ t.f(t)
+ t.f(u)
+
+ u.f(t)
+ u.f(u)
+}
+
+
+// Test case for issues #6589, #33656.
+
+type A interface {
+ a() interface {
+ AB
+ }
+}
+
+type B interface {
+ b() interface {
+ AB
+ }
+}
+
+type AB interface {
+ a() interface {
+ A
+ B
+ }
+ b() interface {
+ A
+ B
+ }
+}
+
+var x AB
+var y interface {
+ A
+ B
+}
+
+var _ = x == y
+
+
+// Test case for issue 6638.
+
+type T interface {
+ m() [T(nil).m /* ERROR "undefined" */ ()[0]]int
+}
+
+// Variations of this test case.
+
+type T1 /* ERROR "invalid recursive type" */ interface {
+ m() [x1.m()[0]]int
+}
+
+var x1 T1
+
+type T2 /* ERROR "invalid recursive type" */ interface {
+ m() [len(x2.m())]int
+}
+
+var x2 T2
+
+type T3 /* ERROR "invalid recursive type" */ interface {
+ m() [unsafe.Sizeof(x3.m)]int
+}
+
+var x3 T3
+
+type T4 /* ERROR "invalid recursive type" */ interface {
+ m() [unsafe.Sizeof(cast4(x4.m))]int // cast is invalid but we have a cycle, so all bets are off
+}
+
+var x4 T4
+var _ = cast4(x4.m)
+
+type cast4 func()
diff --git a/src/internal/types/testdata/check/cycles3.go b/src/internal/types/testdata/check/cycles3.go
new file mode 100644
index 0000000..3ed999c
--- /dev/null
+++ b/src/internal/types/testdata/check/cycles3.go
@@ -0,0 +1,60 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+var (
+ _ A = A(nil).a().b().c().d().e().f()
+ _ A = A(nil).b().c().d().e().f()
+ _ A = A(nil).c().d().e().f()
+ _ A = A(nil).d().e().f()
+ _ A = A(nil).e().f()
+ _ A = A(nil).f()
+ _ A = A(nil)
+)
+
+type (
+ A interface {
+ a() B
+ B
+ }
+
+ B interface {
+ b() C
+ C
+ }
+
+ C interface {
+ c() D
+ D
+ }
+
+ D interface {
+ d() E
+ E
+ }
+
+ E interface {
+ e() F
+ F
+ }
+
+ F interface {
+ f() A
+ }
+)
+
+type (
+ U /* ERROR "invalid recursive type" */ interface {
+ V
+ }
+
+ V interface {
+ v() [unsafe.Sizeof(u)]int
+ }
+)
+
+var u U
diff --git a/src/internal/types/testdata/check/cycles4.go b/src/internal/types/testdata/check/cycles4.go
new file mode 100644
index 0000000..e823001
--- /dev/null
+++ b/src/internal/types/testdata/check/cycles4.go
@@ -0,0 +1,121 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+// Check that all methods of T are collected before
+// determining the result type of m (which embeds
+// all methods of T).
+
+type T interface {
+ m() interface {T}
+ E
+}
+
+var _ int = T.m(nil).m().e()
+
+type E interface {
+ e() int
+}
+
+// Check that unresolved forward chains are followed
+// (see also comment in resolver.go, checker.typeDecl).
+
+var _ int = C.m(nil).m().e()
+
+type A B
+
+type B interface {
+ m() interface{C}
+ E
+}
+
+type C A
+
+// Check that interface type comparison for identity
+// does not recur endlessly.
+
+type T1 interface {
+ m() interface{T1}
+}
+
+type T2 interface {
+ m() interface{T2}
+}
+
+func _(x T1, y T2) {
+ // Checking for assignability of interfaces must check
+ // if all methods of x are present in y, and that they
+ // have identical signatures. The signatures recur via
+ // the result type, which is an interface that embeds
+ // a single method m that refers to the very interface
+ // that contains it. This requires cycle detection in
+ // identity checks for interface types.
+ x = y
+}
+
+type T3 interface {
+ m() interface{T4}
+}
+
+type T4 interface {
+ m() interface{T3}
+}
+
+func _(x T1, y T3) {
+ x = y
+}
+
+// Check that interfaces are type-checked in order of
+// (embedded interface) dependencies (was issue 7158).
+
+var x1 T5 = T7(nil)
+
+type T5 interface {
+ T6
+}
+
+type T6 interface {
+ m() T7
+}
+type T7 interface {
+ T5
+}
+
+// Actual test case from issue 7158.
+
+func wrapNode() Node {
+ return wrapElement()
+}
+
+func wrapElement() Element {
+ return nil
+}
+
+type EventTarget interface {
+ AddEventListener(Event)
+}
+
+type Node interface {
+ EventTarget
+}
+
+type Element interface {
+ Node
+}
+
+type Event interface {
+ Target() Element
+}
+
+// Check that accessing an interface method too early doesn't lead
+// to follow-on errors due to an incorrectly computed type set.
+
+type T8 interface {
+ m() [unsafe.Sizeof(T8.m /* ERROR "undefined" */ )]int
+}
+
+var _ = T8.m // no error expected here
diff --git a/src/internal/types/testdata/check/cycles5.go b/src/internal/types/testdata/check/cycles5.go
new file mode 100644
index 0000000..a863aa8
--- /dev/null
+++ b/src/internal/types/testdata/check/cycles5.go
@@ -0,0 +1,200 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+// test case from issue #18395
+
+type (
+ A interface { B }
+ B interface { C }
+ C interface { D; F() A }
+ D interface { G() B }
+)
+
+var _ = A(nil).G // G must be found
+
+
+// test case from issue #21804
+
+type sourceBridge interface {
+ listVersions() ([]Version, error)
+}
+
+type Constraint interface {
+ copyTo(*ConstraintMsg)
+}
+
+type ConstraintMsg struct{}
+
+func (m *ConstraintMsg) asUnpairedVersion() UnpairedVersion {
+ return nil
+}
+
+type Version interface {
+ Constraint
+}
+
+type UnpairedVersion interface {
+ Version
+}
+
+var _ Constraint = UnpairedVersion(nil)
+
+
+// derived test case from issue #21804
+
+type (
+ _ interface{ m(B1) }
+ A1 interface{ a(D1) }
+ B1 interface{ A1 }
+ C1 interface{ B1 }
+ D1 interface{ C1 }
+)
+
+var _ A1 = C1(nil)
+
+
+// derived test case from issue #22701
+
+func F(x I4) interface{} {
+ return x.Method()
+}
+
+type Unused interface {
+ RefersToI1(a I1)
+}
+
+type I1 interface {
+ I2
+ I3
+}
+
+type I2 interface {
+ RefersToI4() I4
+}
+
+type I3 interface {
+ Method() interface{}
+}
+
+type I4 interface {
+ I1
+}
+
+
+// check embedding of error interface
+
+type Error interface{ error }
+
+var err Error
+var _ = err.Error()
+
+
+// more esoteric cases
+
+type (
+ T1 interface { T2 }
+ T2 /* ERROR "invalid recursive type" */ T2
+)
+
+type (
+ T3 interface { T4 }
+ T4 /* ERROR "invalid recursive type" */ T5
+ T5 = T6
+ T6 = T7
+ T7 = T4
+)
+
+
+// arbitrary code may appear inside an interface
+
+const n = unsafe.Sizeof(func(){})
+
+type I interface {
+ m([unsafe.Sizeof(func() { I.m(nil, [n]byte{}) })]byte)
+}
+
+
+// test cases for various alias cycles
+
+type T10 /* ERROR "invalid recursive type" */ = *T10 // issue #25141
+type T11 /* ERROR "invalid recursive type" */ = interface{ f(T11) } // issue #23139
+
+// issue #18640
+type (
+ aa = bb
+ bb struct {
+ *aa
+ }
+)
+
+type (
+ a struct{ *b }
+ b = c
+ c struct{ *b /* ERROR "invalid use of type alias" */ }
+)
+
+// issue #24939
+type (
+ _ interface {
+ M(P)
+ }
+
+ M interface {
+ F() P // ERROR "invalid use of type alias"
+ }
+
+ P = interface {
+ I() M
+ }
+)
+
+// issue #8699
+type T12 /* ERROR "invalid recursive type" */ [len(a12)]int
+var a12 = makeArray()
+func makeArray() (res T12) { return }
+
+// issue #20770
+var r /* ERROR "invalid cycle in declaration of r" */ = newReader()
+func newReader() r
+
+// variations of the theme of #8699 and #20770
+var arr /* ERROR "cycle" */ = f()
+func f() [len(arr)]int
+
+// issue #25790
+func ff(ff /* ERROR "not a type" */ )
+func gg((gg /* ERROR "not a type" */ ))
+
+type T13 /* ERROR "invalid recursive type T13" */ [len(b13)]int
+var b13 T13
+
+func g1() [unsafe.Sizeof(g1)]int
+func g2() [unsafe.Sizeof(x2)]int
+var x2 = g2
+
+// verify that we get the correct sizes for the functions above
+// (note: assert is statically evaluated in go/types test mode)
+func init() {
+ assert(unsafe.Sizeof(g1) == 8)
+ assert(unsafe.Sizeof(x2) == 8)
+}
+
+func h() [h /* ERROR "no value" */ ()[0]]int { panic(0) }
+
+var c14 /* ERROR "cycle" */ T14
+type T14 [uintptr(unsafe.Sizeof(&c14))]byte
+
+// issue #34333
+type T15 /* ERROR "invalid recursive type T15" */ struct {
+ f func() T16
+ b T16
+}
+
+type T16 struct {
+ T15
+} \ No newline at end of file
diff --git a/src/internal/types/testdata/check/decls0.go b/src/internal/types/testdata/check/decls0.go
new file mode 100644
index 0000000..0b99faa
--- /dev/null
+++ b/src/internal/types/testdata/check/decls0.go
@@ -0,0 +1,210 @@
+// -lang=go1.17
+
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// type declarations
+
+package p // don't permit non-interface elements in interfaces
+
+import "unsafe"
+
+const pi = 3.1415
+
+type (
+ N undefined /* ERROR "undefined" */
+ B bool
+ I int32
+ A [10]P
+ T struct {
+ x, y P
+ }
+ P *T
+ R (*R)
+ F func(A) I
+ Y interface {
+ f(A) I
+ }
+ S [](((P)))
+ M map[I]F
+ C chan<- I
+
+ // blank types must be typechecked
+ _ pi /* ERROR "not a type" */
+ _ struct{}
+ _ struct{ pi /* ERROR "not a type" */ }
+)
+
+
+// declarations of init
+const _, init /* ERROR "cannot declare init" */ , _ = 0, 1, 2
+type init /* ERROR "cannot declare init" */ struct{}
+var _, init /* ERROR "cannot declare init" */ int
+
+func init() {}
+func init /* ERROR "missing function body" */ ()
+
+func _() { const init = 0 }
+func _() { type init int }
+func _() { var init int; _ = init }
+
+// invalid array types
+type (
+ iA0 [... /* ERROR "invalid use of [...] array" */ ]byte
+ // The error message below could be better. At the moment
+ // we believe an integer that is too large is not an integer.
+ // But at least we get an error.
+ iA1 [1 /* ERROR "invalid array length" */ <<100]int
+ iA2 [- /* ERROR "invalid array length" */ 1]complex128
+ iA3 ["foo" /* ERROR "must be integer" */ ]string
+ iA4 [float64 /* ERROR "must be integer" */ (0)]int
+)
+
+
+type (
+ p1 pi.foo /* ERROR "pi.foo is not a type" */
+ p2 unsafe.Pointer
+)
+
+
+type (
+ Pi pi /* ERROR "not a type" */
+
+ a /* ERROR "invalid recursive type" */ a
+ a /* ERROR "redeclared" */ int
+
+ b /* ERROR "invalid recursive type" */ c
+ c d
+ d e
+ e b
+
+ t *t
+
+ U V
+ V *W
+ W U
+
+ P1 *S2
+ P2 P1
+
+ S0 struct {
+ }
+ S1 struct {
+ a, b, c int
+ u, v, a /* ERROR "redeclared" */ float32
+ }
+ S2 struct {
+ S0 // embedded field
+ S0 /* ERROR "redeclared" */ int
+ }
+ S3 struct {
+ x S2
+ }
+ S4/* ERROR "invalid recursive type" */ struct {
+ S4
+ }
+ S5 /* ERROR "invalid recursive type" */ struct {
+ S6
+ }
+ S6 struct {
+ field S7
+ }
+ S7 struct {
+ S5
+ }
+
+ L1 []L1
+ L2 []int
+
+ A1 [10.0]int
+ A2 /* ERROR "invalid recursive type" */ [10]A2
+ A3 /* ERROR "invalid recursive type" */ [10]struct {
+ x A4
+ }
+ A4 [10]A3
+
+ F1 func()
+ F2 func(x, y, z float32)
+ F3 func(x, y, x /* ERROR "redeclared" */ float32)
+ F4 func() (x, y, x /* ERROR "redeclared" */ float32)
+ F5 func(x int) (x /* ERROR "redeclared" */ float32)
+ F6 func(x ...int)
+
+ I1 interface{}
+ I2 interface {
+ m1()
+ }
+ I3 interface {
+ m1()
+ m1 /* ERROR "duplicate method" */ ()
+ }
+ I4 interface {
+ m1(x, y, x /* ERROR "redeclared" */ float32)
+ m2() (x, y, x /* ERROR "redeclared" */ float32)
+ m3(x int) (x /* ERROR "redeclared" */ float32)
+ }
+ I5 interface {
+ m1(I5)
+ }
+ I6 interface {
+ S0 /* ERROR "non-interface type S0" */
+ }
+ I7 interface {
+ I1
+ I1
+ }
+ I8 /* ERROR "invalid recursive type" */ interface {
+ I8
+ }
+ I9 /* ERROR "invalid recursive type" */ interface {
+ I10
+ }
+ I10 interface {
+ I11
+ }
+ I11 interface {
+ I9
+ }
+
+ C1 chan int
+ C2 <-chan int
+ C3 chan<- C3
+ C4 chan C5
+ C5 chan C6
+ C6 chan C4
+
+ M1 map[Last]string
+ M2 map[string]M2
+
+ Last int
+)
+
+// cycles in function/method declarations
+// (test cases for issues #5217, #25790 and variants)
+func f1(x f1 /* ERROR "not a type" */ ) {}
+func f2(x *f2 /* ERROR "not a type" */ ) {}
+func f3() (x f3 /* ERROR "not a type" */ ) { return }
+func f4() (x *f4 /* ERROR "not a type" */ ) { return }
+// TODO(#43215) this should be detected as a cycle error
+func f5([unsafe.Sizeof(f5)]int) {}
+
+func (S0) m1 (x S0.m1 /* ERROR "S0.m1 is not a type" */ ) {}
+func (S0) m2 (x *S0.m2 /* ERROR "S0.m2 is not a type" */ ) {}
+func (S0) m3 () (x S0.m3 /* ERROR "S0.m3 is not a type" */ ) { return }
+func (S0) m4 () (x *S0.m4 /* ERROR "S0.m4 is not a type" */ ) { return }
+
+// interfaces may not have any blank methods
+type BlankI interface {
+ _ /* ERROR "methods must have a unique non-blank name" */ ()
+ _ /* ERROR "methods must have a unique non-blank name" */ (float32) int
+ m()
+}
+
+// non-interface types may have multiple blank methods
+type BlankT struct{}
+
+func (BlankT) _() {}
+func (BlankT) _(int) {}
+func (BlankT) _() int { return 0 }
+func (BlankT) _(int) int { return 0}
diff --git a/src/internal/types/testdata/check/decls1.go b/src/internal/types/testdata/check/decls1.go
new file mode 100644
index 0000000..06f3b2e
--- /dev/null
+++ b/src/internal/types/testdata/check/decls1.go
@@ -0,0 +1,146 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// variable declarations
+
+package decls1
+
+import (
+ "math"
+)
+
+// Global variables without initialization
+var (
+ a, b bool
+ c byte
+ d uint8
+ r rune
+ i int
+ j, k, l int
+ x, y float32
+ xx, yy float64
+ u, v complex64
+ uu, vv complex128
+ s, t string
+ array []byte
+ iface interface{}
+
+ blank _ /* ERROR "cannot use _" */
+)
+
+// Global variables with initialization
+var (
+ s1 = i + j
+ s2 = i /* ERROR "mismatched types" */ + x
+ s3 = c + d
+ s4 = s + t
+ s5 = s /* ERROR "invalid operation" */ / t
+ s6 = array[t1]
+ s7 = array[x /* ERROR "integer" */]
+ s8 = &a
+ s10 = &42 /* ERROR "cannot take address" */
+ s11 = &v
+ s12 = -(u + *t11) / *&v
+ s13 = a /* ERROR "shifted operand" */ << d
+ s14 = i << j
+ s18 = math.Pi * 10.0
+ s19 = s1 /* ERROR "cannot call" */ ()
+ s20 = f0 /* ERROR "no value" */ ()
+ s21 = f6(1, s1, i)
+ s22 = f6(1, s1, uu /* ERRORx `cannot use .* in argument` */ )
+
+ t1 int = i + j
+ t2 int = i /* ERROR "mismatched types" */ + x
+ t3 int = c /* ERRORx `cannot use .* variable declaration` */ + d
+ t4 string = s + t
+ t5 string = s /* ERROR "invalid operation" */ / t
+ t6 byte = array[t1]
+ t7 byte = array[x /* ERROR "must be integer" */]
+ t8 *int = & /* ERRORx `cannot use .* variable declaration` */ a
+ t10 *int = &42 /* ERROR "cannot take address" */
+ t11 *complex64 = &v
+ t12 complex64 = -(u + *t11) / *&v
+ t13 int = a /* ERROR "shifted operand" */ << d
+ t14 int = i << j
+ t15 math /* ERROR "not in selector" */
+ t16 math.xxx /* ERROR "undefined" */
+ t17 math /* ERROR "not a type" */ .Pi
+ t18 float64 = math.Pi * 10.0
+ t19 int = t1 /* ERROR "cannot call" */ ()
+ t20 int = f0 /* ERROR "no value" */ ()
+ t21 int = a /* ERRORx `cannot use .* variable declaration` */
+)
+
+// Various more complex expressions
+var (
+ u1 = x /* ERROR "not an interface" */ .(int)
+ u2 = iface.([]int)
+ u3 = iface.(a /* ERROR "not a type" */ )
+ u4, ok = iface.(int)
+ u5, ok2, ok3 = iface /* ERROR "assignment mismatch" */ .(int)
+)
+
+// Constant expression initializations
+var (
+ v1 = 1 /* ERROR "mismatched types untyped int and untyped string" */ + "foo"
+ v2 = c + 255
+ v3 = c + 256 /* ERROR "overflows" */
+ v4 = r + 2147483647
+ v5 = r + 2147483648 /* ERROR "overflows" */
+ v6 = 42
+ v7 = v6 + 9223372036854775807
+ v8 = v6 + 9223372036854775808 /* ERROR "overflows" */
+ v9 = i + 1 << 10
+ v10 byte = 1024 /* ERROR "overflows" */
+ v11 = xx/yy*yy - xx
+ v12 = true && false
+ v13 = nil /* ERROR "use of untyped nil" */
+ v14 string = 257 // ERRORx `cannot use 257 .* as string value in variable declaration$`
+ v15 int8 = 257 // ERRORx `cannot use 257 .* as int8 value in variable declaration .*overflows`
+)
+
+// Multiple assignment expressions
+var (
+ m1a, m1b = 1, 2
+ m2a, m2b, m2c /* ERROR "missing init expr for m2c" */ = 1, 2
+ m3a, m3b = 1, 2, 3 /* ERROR "extra init expr 3" */
+)
+
+func _() {
+ var (
+ m1a, m1b = 1, 2
+ m2a, m2b, m2c /* ERROR "missing init expr for m2c" */ = 1, 2
+ m3a, m3b = 1, 2, 3 /* ERROR "extra init expr 3" */
+ )
+
+ _, _ = m1a, m1b
+ _, _, _ = m2a, m2b, m2c
+ _, _ = m3a, m3b
+}
+
+// Declaration of parameters and results
+func f0() {}
+func f1(a /* ERROR "not a type" */) {}
+func f2(a, b, c d /* ERROR "not a type" */) {}
+
+func f3() int { return 0 }
+func f4() a /* ERROR "not a type" */ { return 0 }
+func f5() (a, b, c d /* ERROR "not a type" */) { return }
+
+func f6(a, b, c int) complex128 { return 0 }
+
+// Declaration of receivers
+type T struct{}
+
+func (T) m0() {}
+func (*T) m1() {}
+func (x T) m2() {}
+func (x *T) m3() {}
+
+// Initialization functions
+func init() {}
+func init /* ERROR "no arguments and no return values" */ (int) {}
+func init /* ERROR "no arguments and no return values" */ () int { return 0 }
+func init /* ERROR "no arguments and no return values" */ (int) int { return 0 }
+func (T) init(int) int { return 0 }
diff --git a/src/internal/types/testdata/check/decls2/decls2a.go b/src/internal/types/testdata/check/decls2/decls2a.go
new file mode 100644
index 0000000..c2fb421
--- /dev/null
+++ b/src/internal/types/testdata/check/decls2/decls2a.go
@@ -0,0 +1,111 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// method declarations
+
+package decls2
+
+import "time"
+import "unsafe"
+
+// T1 declared before its methods.
+type T1 struct{
+ f int
+}
+
+func (T1) m() {}
+func (T1) m /* ERROR "already declared" */ () {}
+func (x *T1) f /* ERROR "field and method" */ () {}
+
+// Conflict between embedded field and method name,
+// with the embedded field being a basic type.
+type T1b struct {
+ int
+}
+
+func (T1b) int /* ERROR "field and method" */ () {}
+
+type T1c struct {
+ time.Time
+}
+
+func (T1c) Time /* ERROR "field and method" */ () int { return 0 }
+
+// Disabled for now: LookupFieldOrMethod will find Pointer even though
+// it's double-declared (it would cost extra in the common case to verify
+// this). But the MethodSet computation will not find it due to the name
+// collision caused by the double-declaration, leading to an internal
+// inconsistency while we are verifying one computation against the other.
+// var _ = T1c{}.Pointer
+
+// T2's method declared before the type.
+func (*T2) f /* ERROR "field and method" */ () {}
+
+type T2 struct {
+ f int
+}
+
+// Methods declared without a declared type.
+func (undefined /* ERROR "undefined" */) m() {}
+func (x *undefined /* ERROR "undefined" */) m() {}
+
+func (pi /* ERROR "not a type" */) m1() {}
+func (x pi /* ERROR "not a type" */) m2() {}
+func (x *pi /* ERROR "not a type" */ ) m3() {}
+
+// Blank types.
+type _ struct { m int }
+type _ struct { m int }
+
+func (_ /* ERROR "cannot use _" */) m() {}
+func m(_ /* ERROR "cannot use _" */) {}
+
+// Methods with receiver base type declared in another file.
+func (T3) m1() {}
+func (*T3) m2() {}
+func (x T3) m3() {}
+func (x *T3) f /* ERROR "field and method" */ () {}
+
+// Methods of non-struct type.
+type T4 func()
+
+func (self T4) m() func() { return self }
+
+// Methods associated with an interface.
+type T5 interface {
+ m() int
+}
+
+func (T5 /* ERROR "invalid receiver" */ ) m1() {}
+func (T5 /* ERROR "invalid receiver" */ ) m2() {}
+
+// Methods associated with a named pointer type.
+type ptr *int
+func (ptr /* ERROR "invalid receiver" */ ) _() {}
+func (* /* ERROR "invalid receiver" */ ptr) _() {}
+
+// Methods with zero or multiple receivers.
+func ( /* ERROR "method has no receiver" */ ) _() {}
+func (T3, * /* ERROR "method has multiple receivers" */ T3) _() {}
+func (T3, T3, T3 /* ERROR "method has multiple receivers" */ ) _() {}
+func (a, b /* ERROR "method has multiple receivers" */ T3) _() {}
+func (a, b, c /* ERROR "method has multiple receivers" */ T3) _() {}
+
+// Methods associated with non-local or unnamed types.
+func (int /* ERROR "cannot define new methods on non-local type int" */ ) m() {}
+func ([ /* ERROR "invalid receiver" */ ]int) m() {}
+func (time /* ERROR "cannot define new methods on non-local type time.Time" */ .Time) m() {}
+func (* /* ERROR "cannot define new methods on non-local type time.Time" */ time.Time) m() {}
+func (x /* ERROR "invalid receiver" */ interface{}) m() {}
+
+// Unsafe.Pointer is treated like a pointer when used as receiver type.
+type UP unsafe.Pointer
+func (UP /* ERROR "invalid" */ ) m1() {}
+func (* /* ERROR "invalid" */ UP) m2() {}
+
+// Double declarations across package files
+const c_double = 0
+type t_double int
+var v_double int
+func f_double() {}
diff --git a/src/internal/types/testdata/check/decls2/decls2b.go b/src/internal/types/testdata/check/decls2/decls2b.go
new file mode 100644
index 0000000..dd6cd44
--- /dev/null
+++ b/src/internal/types/testdata/check/decls2/decls2b.go
@@ -0,0 +1,75 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// method declarations
+
+package decls2
+
+import "io"
+
+const pi = 3.1415
+
+func (T1) m /* ERROR "already declared" */ () {}
+func (T2) m(io.Writer) {}
+
+type T3 struct {
+ f *T3
+}
+
+type T6 struct {
+ x int
+}
+
+func (t *T6) m1() int {
+ return t.x
+}
+
+func f() {
+ var t *T6
+ t.m1()
+}
+
+// Double declarations across package files
+const c_double /* ERROR "redeclared" */ = 0
+type t_double /* ERROR "redeclared" */ int
+var v_double /* ERROR "redeclared" */ int
+func f_double /* ERROR "redeclared" */ () {}
+
+// Blank methods need to be type-checked.
+// Verify by checking that errors are reported.
+func (T /* ERROR "undefined" */ ) _() {}
+func (T1) _(undefined /* ERROR "undefined" */ ) {}
+func (T1) _() int { return "foo" /* ERRORx "cannot use .* in return statement" */ }
+
+// Methods with undefined receiver type can still be checked.
+// Verify by checking that errors are reported.
+func (Foo /* ERROR "undefined" */ ) m() {}
+func (Foo /* ERROR "undefined" */ ) m(undefined /* ERROR "undefined" */ ) {}
+func (Foo /* ERRORx `undefined` */ ) m() int { return "foo" /* ERRORx "cannot use .* in return statement" */ }
+
+func (Foo /* ERROR "undefined" */ ) _() {}
+func (Foo /* ERROR "undefined" */ ) _(undefined /* ERROR "undefined" */ ) {}
+func (Foo /* ERROR "undefined" */ ) _() int { return "foo" /* ERRORx "cannot use .* in return statement" */ }
+
+// Receiver declarations are regular parameter lists;
+// receiver types may use parentheses, and the list
+// may have a trailing comma.
+type T7 struct {}
+
+func (T7) m1() {}
+func ((T7)) m2() {}
+func ((*T7)) m3() {}
+func (x *(T7),) m4() {}
+func (x (*(T7)),) m5() {}
+func (x ((*((T7)))),) m6() {}
+
+// Check that methods with parenthesized receiver are actually present (issue #23130).
+var (
+ _ = T7.m1
+ _ = T7.m2
+ _ = (*T7).m3
+ _ = (*T7).m4
+ _ = (*T7).m5
+ _ = (*T7).m6
+)
diff --git a/src/internal/types/testdata/check/decls3.go b/src/internal/types/testdata/check/decls3.go
new file mode 100644
index 0000000..3d00a58
--- /dev/null
+++ b/src/internal/types/testdata/check/decls3.go
@@ -0,0 +1,309 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// embedded types
+
+package decls3
+
+import "unsafe"
+import "fmt"
+
+// fields with the same name at the same level cancel each other out
+
+func _() {
+ type (
+ T1 struct { X int }
+ T2 struct { X int }
+ T3 struct { T1; T2 } // X is embedded twice at the same level via T1->X, T2->X
+ )
+
+ var t T3
+ _ = t.X /* ERROR "ambiguous selector t.X" */
+}
+
+func _() {
+ type (
+ T1 struct { X int }
+ T2 struct { T1 }
+ T3 struct { T1 }
+ T4 struct { T2; T3 } // X is embedded twice at the same level via T2->T1->X, T3->T1->X
+ )
+
+ var t T4
+ _ = t.X /* ERROR "ambiguous selector t.X" */
+}
+
+func issue4355() {
+ type (
+ T1 struct {X int}
+ T2 struct {T1}
+ T3 struct {T2}
+ T4 struct {T2}
+ T5 struct {T3; T4} // X is embedded twice at the same level via T3->T2->T1->X, T4->T2->T1->X
+ )
+
+ var t T5
+ _ = t.X /* ERROR "ambiguous selector t.X" */
+}
+
+func _() {
+ type State int
+ type A struct{ State }
+ type B struct{ fmt.State }
+ type T struct{ A; B }
+
+ var t T
+ _ = t.State /* ERROR "ambiguous selector t.State" */
+}
+
+// Embedded fields can be predeclared types.
+
+func _() {
+ type T0 struct{
+ int
+ float32
+ f int
+ }
+ var x T0
+ _ = x.int
+ _ = x.float32
+ _ = x.f
+
+ type T1 struct{
+ T0
+ }
+ var y T1
+ _ = y.int
+ _ = y.float32
+ _ = y.f
+}
+
+// Restrictions on embedded field types.
+
+func _() {
+ type I1 interface{}
+ type I2 interface{}
+ type P1 *int
+ type P2 *int
+ type UP unsafe.Pointer
+
+ type T1 struct {
+ I1
+ * /* ERROR "cannot be a pointer to an interface" */ I2
+ * /* ERROR "cannot be a pointer to an interface" */ error
+ P1 /* ERROR "cannot be a pointer" */
+ * /* ERROR "cannot be a pointer" */ P2
+ }
+
+ // unsafe.Pointers are treated like regular pointers when embedded
+ type T2 struct {
+ unsafe /* ERROR "cannot be unsafe.Pointer" */ .Pointer
+ */* ERROR "cannot be unsafe.Pointer" */ unsafe.Pointer /* ERROR "Pointer redeclared" */
+ UP /* ERROR "cannot be unsafe.Pointer" */
+ * /* ERROR "cannot be unsafe.Pointer" */ UP /* ERROR "UP redeclared" */
+ }
+}
+
+// Named types that are pointers.
+
+type S struct{ x int }
+func (*S) m() {}
+type P *S
+
+func _() {
+ var s *S
+ _ = s.x
+ _ = s.m
+
+ var p P
+ _ = p.x
+ _ = p.m /* ERROR "no field or method" */
+ _ = P.m /* ERROR "no field or method" */
+}
+
+// Borrowed from the FieldByName test cases in reflect/all_test.go.
+
+type D1 struct {
+ d int
+}
+type D2 struct {
+ d int
+}
+
+type S0 struct {
+ A, B, C int
+ D1
+ D2
+}
+
+type S1 struct {
+ B int
+ S0
+}
+
+type S2 struct {
+ A int
+ *S1
+}
+
+type S1x struct {
+ S1
+}
+
+type S1y struct {
+ S1
+}
+
+type S3 struct {
+ S1x
+ S2
+ D, E int
+ *S1y
+}
+
+type S4 struct {
+ *S4
+ A int
+}
+
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+ S6
+ S7
+ S8
+}
+
+type S6 struct {
+ X int
+}
+
+type S7 S6
+
+type S8 struct {
+ S9
+}
+
+type S9 struct {
+ X int
+ Y int
+}
+
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+ S11
+ S12
+ S13
+}
+
+type S11 struct {
+ S6
+}
+
+type S12 struct {
+ S6
+}
+
+type S13 struct {
+ S8
+}
+
+func _() {
+ _ = struct{}{}.Foo /* ERROR "no field or method" */
+ _ = S0{}.A
+ _ = S0{}.D /* ERROR "no field or method" */
+ _ = S1{}.A
+ _ = S1{}.B
+ _ = S1{}.S0
+ _ = S1{}.C
+ _ = S2{}.A
+ _ = S2{}.S1
+ _ = S2{}.B
+ _ = S2{}.C
+ _ = S2{}.D /* ERROR "no field or method" */
+ _ = S3{}.S1 /* ERROR "ambiguous selector S3{}.S1" */
+ _ = S3{}.A
+ _ = S3{}.B /* ERROR "ambiguous selector S3{}.B" */
+ _ = S3{}.D
+ _ = S3{}.E
+ _ = S4{}.A
+ _ = S4{}.B /* ERROR "no field or method" */
+ _ = S5{}.X /* ERROR "ambiguous selector S5{}.X" */
+ _ = S5{}.Y
+ _ = S10{}.X /* ERROR "ambiguous selector S10{}.X" */
+ _ = S10{}.Y
+}
+
+// Borrowed from the FieldByName benchmark in reflect/all_test.go.
+
+type R0 struct {
+ *R1
+ *R2
+ *R3
+ *R4
+}
+
+type R1 struct {
+ *R5
+ *R6
+ *R7
+ *R8
+}
+
+type R2 R1
+type R3 R1
+type R4 R1
+
+type R5 struct {
+ *R9
+ *R10
+ *R11
+ *R12
+}
+
+type R6 R5
+type R7 R5
+type R8 R5
+
+type R9 struct {
+ *R13
+ *R14
+ *R15
+ *R16
+}
+
+type R10 R9
+type R11 R9
+type R12 R9
+
+type R13 struct {
+ *R17
+ *R18
+ *R19
+ *R20
+}
+
+type R14 R13
+type R15 R13
+type R16 R13
+
+type R17 struct {
+ *R21
+ *R22
+ *R23
+ *R24
+}
+
+type R18 R17
+type R19 R17
+type R20 R17
+
+type R21 struct {
+ X int
+}
+
+type R22 R21
+type R23 R21
+type R24 R21
+
+var _ = R0{}.X /* ERROR "ambiguous selector R0{}.X" */ \ No newline at end of file
diff --git a/src/internal/types/testdata/check/decls4.go b/src/internal/types/testdata/check/decls4.go
new file mode 100644
index 0000000..c47a68d
--- /dev/null
+++ b/src/internal/types/testdata/check/decls4.go
@@ -0,0 +1,199 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// type aliases
+
+package decls4
+
+type (
+ T0 [10]int
+ T1 []byte
+ T2 struct {
+ x int
+ }
+ T3 interface{
+ m() T2
+ }
+ T4 func(int, T0) chan T2
+)
+
+type (
+ Ai = int
+ A0 = T0
+ A1 = T1
+ A2 = T2
+ A3 = T3
+ A4 = T4
+
+ A10 = [10]int
+ A11 = []byte
+ A12 = struct {
+ x int
+ }
+ A13 = interface{
+ m() A2
+ }
+ A14 = func(int, A0) chan A2
+)
+
+// check assignment compatibility due to equality of types
+var (
+ xi_ int
+ ai Ai = xi_
+
+ x0 T0
+ a0 A0 = x0
+
+ x1 T1
+ a1 A1 = x1
+
+ x2 T2
+ a2 A2 = x2
+
+ x3 T3
+ a3 A3 = x3
+
+ x4 T4
+ a4 A4 = x4
+)
+
+// alias receiver types
+func (Ai /* ERROR "cannot define new methods on non-local type int" */) m1() {}
+func (T0) m1() {}
+func (A0) m1 /* ERROR "already declared" */ () {}
+func (A0) m2 () {}
+func (A3 /* ERROR "invalid receiver" */ ) m1 () {}
+func (A10 /* ERROR "invalid receiver" */ ) m1() {}
+
+// x0 has methods m1, m2 declared via receiver type names T0 and A0
+var _ interface{ m1(); m2() } = x0
+
+// alias receiver types (test case for issue #23042)
+type T struct{}
+
+var (
+ _ = T.m
+ _ = T{}.m
+ _ interface{m()} = T{}
+)
+
+var (
+ _ = T.n
+ _ = T{}.n
+ _ interface{m(); n()} = T{}
+)
+
+type U = T
+func (U) m() {}
+
+// alias receiver types (long type declaration chains)
+type (
+ V0 = V1
+ V1 = (V2)
+ V2 = ((V3))
+ V3 = T
+)
+
+func (V0) m /* ERROR "already declared" */ () {}
+func (V1) n() {}
+
+// alias receiver types (invalid due to cycles)
+type (
+ W0 /* ERROR "invalid recursive type" */ = W1
+ W1 = (W2)
+ W2 = ((W0))
+)
+
+func (W0) m() {} // no error expected (due to above cycle error)
+func (W1) n() {}
+
+// alias receiver types (invalid due to builtin underlying type)
+type (
+ B0 = B1
+ B1 = B2
+ B2 = int
+)
+
+func (B0 /* ERROR "cannot define new methods on non-local type int" */ ) m() {}
+func (B1 /* ERROR "cannot define new methods on non-local type int" */ ) n() {}
+
+// cycles
+type (
+ C2 /* ERROR "invalid recursive type" */ = C2
+ C3 /* ERROR "invalid recursive type" */ = C4
+ C4 = C3
+ C5 struct {
+ f *C6
+ }
+ C6 = C5
+ C7 /* ERROR "invalid recursive type" */ struct {
+ f C8
+ }
+ C8 = C7
+)
+
+// embedded fields
+var (
+ s0 struct { T0 }
+ s1 struct { A0 } = s0 /* ERROR "cannot use" */ // embedded field names are different
+)
+
+// embedding and lookup of fields and methods
+func _(s struct{A0}) { s.A0 = x0 }
+
+type eX struct{xf int}
+
+func (eX) xm()
+
+type eY = struct{eX} // field/method set of eY includes xf, xm
+
+type eZ = *struct{eX} // field/method set of eZ includes xf, xm
+
+type eA struct {
+ eX // eX contributes xf, xm to eA
+}
+
+type eA2 struct {
+	*eX // *eX contributes xf, xm to eA2
+}
+
+type eB struct {
+ eY // eY contributes xf, xm to eB
+}
+
+type eB2 struct {
+	*eY // *eY contributes xf, xm to eB2
+}
+
+type eC struct {
+ eZ // eZ contributes xf, xm to eC
+}
+
+var (
+ _ = eA{}.xf
+ _ = eA{}.xm
+ _ = eA2{}.xf
+ _ = eA2{}.xm
+ _ = eB{}.xf
+ _ = eB{}.xm
+ _ = eB2{}.xf
+ _ = eB2{}.xm
+ _ = eC{}.xf
+ _ = eC{}.xm
+)
+
+// ambiguous selectors due to embedding via type aliases
+type eD struct {
+ eY
+ eZ
+}
+
+var (
+ _ = eD{}.xf /* ERROR "ambiguous selector eD{}.xf" */
+ _ = eD{}.xm /* ERROR "ambiguous selector eD{}.xm" */
+)
+
+var (
+ _ interface{ xm() } = eD /* ERROR "ambiguous selector eD.xm" */ {}
+) \ No newline at end of file
diff --git a/src/internal/types/testdata/check/decls5.go b/src/internal/types/testdata/check/decls5.go
new file mode 100644
index 0000000..88d3194
--- /dev/null
+++ b/src/internal/types/testdata/check/decls5.go
@@ -0,0 +1,10 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// declarations of main
+const _, main /* ERROR "cannot declare main" */ , _ = 0, 1, 2
+type main /* ERROR "cannot declare main" */ struct{}
+var _, main /* ERROR "cannot declare main" */ int
diff --git a/src/internal/types/testdata/check/errors.go b/src/internal/types/testdata/check/errors.go
new file mode 100644
index 0000000..10b6a22
--- /dev/null
+++ b/src/internal/types/testdata/check/errors.go
@@ -0,0 +1,66 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package errors
+
+// Testing precise operand formatting in error messages
+// (matching messages are regular expressions, hence the \'s).
+func f(x int, m map[string]int) {
+ // no values
+ _ = f /* ERROR "f(0, m) (no value) used as value" */ (0, m)
+
+ // built-ins
+ _ = println // ERROR "println (built-in) must be called"
+
+ // types
+ _ = complex128 // ERROR "complex128 (type) is not an expression"
+
+ // constants
+ const c1 = 991
+ const c2 float32 = 0.5
+ const c3 = "foo"
+ 0 // ERROR "0 (untyped int constant) is not used"
+ 0.5 // ERROR "0.5 (untyped float constant) is not used"
+ "foo" // ERROR `"foo" (untyped string constant) is not used`
+ c1 // ERROR "c1 (untyped int constant 991) is not used"
+ c2 // ERROR "c2 (constant 0.5 of type float32) is not used"
+ c1 /* ERROR "c1 + c2 (constant 991.5 of type float32) is not used" */ + c2
+ c3 // ERROR `c3 (untyped string constant "foo") is not used`
+
+ // variables
+ x // ERROR "x (variable of type int) is not used"
+
+ // values
+ nil // ERROR "nil is not used"
+ ( /* ERROR "(*int)(nil) (value of type *int) is not used" */ *int)(nil)
+ x /* ERROR "x != x (untyped bool value) is not used" */ != x
+ x /* ERROR "x + x (value of type int) is not used" */ + x
+
+ // value, ok's
+ const s = "foo"
+ m /* ERROR "m[s] (map index expression of type int) is not used" */ [s]
+}
+
+// Valid ERROR comments can have a variety of forms.
+func _() {
+ 0 /* ERRORx "0 .* is not used" */
+ 0 /* ERRORx "0 .* is not used" */
+ 0 // ERRORx "0 .* is not used"
+ 0 // ERRORx "0 .* is not used"
+}
+
+// Don't report spurious errors as a consequence of earlier errors.
+// Add more tests as needed.
+func _() {
+ if err := foo /* ERROR "undefined" */ (); err != nil /* "no error here" */ {}
+}
+
+// Use unqualified names for package-local objects.
+type T struct{}
+var _ int = T /* ERROR "value of type T" */ {} // use T in error message rather than errors.T
+
+// Don't report errors containing "invalid type" (issue #24182).
+func _(x *missing /* ERROR "undefined: missing" */ ) {
+ x.m() // there shouldn't be an error here referring to *invalid type
+}
diff --git a/src/internal/types/testdata/check/expr0.go b/src/internal/types/testdata/check/expr0.go
new file mode 100644
index 0000000..eba991e
--- /dev/null
+++ b/src/internal/types/testdata/check/expr0.go
@@ -0,0 +1,196 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// unary expressions
+
+package expr0
+
+type mybool bool
+
+var (
+ // bool
+ b0 = true
+ b1 bool = b0
+ b2 = !true
+ b3 = !b1
+ b4 bool = !true
+ b5 bool = !b4
+ b6 = +b0 /* ERROR "not defined" */
+ b7 = -b0 /* ERROR "not defined" */
+ b8 = ^b0 /* ERROR "not defined" */
+ b9 = *b0 /* ERROR "cannot indirect" */
+ b10 = &true /* ERROR "cannot take address" */
+ b11 = &b0
+ b12 = <-b0 /* ERROR "cannot receive" */
+ b13 = & & /* ERROR "cannot take address" */ b0
+ b14 = ~ /* ERROR "cannot use ~ outside of interface or type constraint" */ b0
+
+ // byte
+ _ = byte(0)
+ _ = byte(- /* ERROR "cannot convert" */ 1)
+ _ = - /* ERROR "-byte(1) (constant -1 of type byte) overflows byte" */ byte(1) // test for issue 11367
+ _ = byte /* ERROR "overflows byte" */ (0) - byte(1)
+ _ = ~ /* ERROR "cannot use ~ outside of interface or type constraint (use ^ for bitwise complement)" */ byte(0)
+
+ // int
+ i0 = 1
+ i1 int = i0
+ i2 = +1
+ i3 = +i0
+ i4 int = +1
+ i5 int = +i4
+ i6 = -1
+ i7 = -i0
+ i8 int = -1
+ i9 int = -i4
+ i10 = !i0 /* ERROR "not defined" */
+ i11 = ^1
+ i12 = ^i0
+ i13 int = ^1
+ i14 int = ^i4
+ i15 = *i0 /* ERROR "cannot indirect" */
+ i16 = &i0
+ i17 = *i16
+ i18 = <-i16 /* ERROR "cannot receive" */
+ i19 = ~ /* ERROR "cannot use ~ outside of interface or type constraint (use ^ for bitwise complement)" */ i0
+
+ // uint
+ u0 = uint(1)
+ u1 uint = u0
+ u2 = +1
+ u3 = +u0
+ u4 uint = +1
+ u5 uint = +u4
+ u6 = -1
+ u7 = -u0
+ u8 uint = - /* ERROR "overflows" */ 1
+ u9 uint = -u4
+ u10 = !u0 /* ERROR "not defined" */
+ u11 = ^1
+ u12 = ^i0
+ u13 uint = ^ /* ERROR "overflows" */ 1
+ u14 uint = ^u4
+ u15 = *u0 /* ERROR "cannot indirect" */
+ u16 = &u0
+ u17 = *u16
+ u18 = <-u16 /* ERROR "cannot receive" */
+ u19 = ^uint(0)
+ u20 = ~ /* ERROR "cannot use ~ outside of interface or type constraint (use ^ for bitwise complement)" */ u0
+
+ // float64
+ f0 = float64(1)
+ f1 float64 = f0
+ f2 = +1
+ f3 = +f0
+ f4 float64 = +1
+ f5 float64 = +f4
+ f6 = -1
+ f7 = -f0
+ f8 float64 = -1
+ f9 float64 = -f4
+ f10 = !f0 /* ERROR "not defined" */
+ f11 = ^1
+ f12 = ^i0
+ f13 float64 = ^1
+ f14 float64 = ^f4 /* ERROR "not defined" */
+ f15 = *f0 /* ERROR "cannot indirect" */
+ f16 = &f0
+ f17 = *u16
+ f18 = <-u16 /* ERROR "cannot receive" */
+ f19 = ~ /* ERROR "cannot use ~ outside of interface or type constraint" */ f0
+
+ // complex128
+ c0 = complex128(1)
+ c1 complex128 = c0
+ c2 = +1
+ c3 = +c0
+ c4 complex128 = +1
+ c5 complex128 = +c4
+ c6 = -1
+ c7 = -c0
+ c8 complex128 = -1
+ c9 complex128 = -c4
+ c10 = !c0 /* ERROR "not defined" */
+ c11 = ^1
+ c12 = ^i0
+ c13 complex128 = ^1
+ c14 complex128 = ^c4 /* ERROR "not defined" */
+ c15 = *c0 /* ERROR "cannot indirect" */
+ c16 = &c0
+ c17 = *u16
+ c18 = <-u16 /* ERROR "cannot receive" */
+ c19 = ~ /* ERROR "cannot use ~ outside of interface or type constraint" */ c0
+
+ // string
+ s0 = "foo"
+ s1 = +"foo" /* ERROR "not defined" */
+ s2 = -s0 /* ERROR "not defined" */
+ s3 = !s0 /* ERROR "not defined" */
+ s4 = ^s0 /* ERROR "not defined" */
+ s5 = *s4
+ s6 = &s4
+ s7 = *s6
+ s8 = <-s7
+ s9 = ~ /* ERROR "cannot use ~ outside of interface or type constraint" */ s0
+
+ // channel
+ ch chan int
+ rc <-chan float64
+ sc chan <- string
+ ch0 = +ch /* ERROR "not defined" */
+ ch1 = -ch /* ERROR "not defined" */
+ ch2 = !ch /* ERROR "not defined" */
+ ch3 = ^ch /* ERROR "not defined" */
+ ch4 = *ch /* ERROR "cannot indirect" */
+ ch5 = &ch
+ ch6 = *ch5
+ ch7 = <-ch
+ ch8 = <-rc
+ ch9 = <-sc /* ERROR "cannot receive" */
+ ch10, ok = <-ch
+ // ok is of type bool
+ ch11, myok = <-ch
+ _ mybool = myok /* ERRORx `cannot use .* in variable declaration` */
+ ch12 = ~ /* ERROR "cannot use ~ outside of interface or type constraint" */ ch
+
+)
+
+// address of composite literals
+type T struct{x, y int}
+
+func f() T { return T{} }
+
+var (
+ _ = &T{1, 2}
+ _ = &[...]int{}
+ _ = &[]int{}
+ _ = &[]int{}
+ _ = &map[string]T{}
+ _ = &(T{1, 2})
+ _ = &((((T{1, 2}))))
+ _ = &f /* ERROR "cannot take address" */ ()
+)
+
+// recursive pointer types
+type P *P
+
+var (
+ p1 P = new(P)
+ p2 P = *p1
+ p3 P = &p2
+)
+
+func g() (a, b int) { return }
+
+func _() {
+ _ = -g /* ERROR "multiple-value g" */ ()
+ _ = <-g /* ERROR "multiple-value g" */ ()
+}
+
+// ~ is accepted as a unary operator but is only permitted in interface type elements
+var (
+ _ = ~ /* ERROR "cannot use ~ outside of interface or type constraint" */ 0
+ _ = ~ /* ERROR "cannot use ~ outside of interface or type constraint" */ "foo"
+ _ = ~ /* ERROR "cannot use ~ outside of interface or type constraint" */ i0
+)
diff --git a/src/internal/types/testdata/check/expr1.go b/src/internal/types/testdata/check/expr1.go
new file mode 100644
index 0000000..1c04c8f
--- /dev/null
+++ b/src/internal/types/testdata/check/expr1.go
@@ -0,0 +1,127 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// binary expressions
+
+package expr1
+
+type mybool bool
+
+func _(x, y bool, z mybool) {
+ x = x || y
+ x = x || true
+ x = x || false
+ x = x && y
+ x = x && true
+ x = x && false
+
+ z = z /* ERROR "mismatched types" */ || y
+ z = z || true
+ z = z || false
+ z = z /* ERROR "mismatched types" */ && y
+ z = z && true
+ z = z && false
+}
+
+type myint int
+
+func _(x, y int, z myint) {
+ x = x + 1
+ x = x + 1.0
+ x = x + 1.1 // ERROR "truncated to int"
+ x = x + y
+ x = x - y
+ x = x * y
+ x = x / y
+ x = x % y
+ x = x << y
+ x = x >> y
+
+ z = z + 1
+ z = z + 1.0
+ z = z + 1.1 // ERROR "truncated to int"
+ z = z /* ERROR "mismatched types" */ + y
+ z = z /* ERROR "mismatched types" */ - y
+ z = z /* ERROR "mismatched types" */ * y
+ z = z /* ERROR "mismatched types" */ / y
+ z = z /* ERROR "mismatched types" */ % y
+ z = z << y
+ z = z >> y
+}
+
+type myuint uint
+
+func _(x, y uint, z myuint) {
+ x = x + 1
+ x = x + - /* ERROR "overflows uint" */ 1
+ x = x + 1.0
+ x = x + 1.1 // ERROR "truncated to uint"
+ x = x + y
+ x = x - y
+ x = x * y
+ x = x / y
+ x = x % y
+ x = x << y
+ x = x >> y
+
+ z = z + 1
+ z = x + - /* ERROR "overflows uint" */ 1
+ z = z + 1.0
+ z = z + 1.1 // ERROR "truncated to uint"
+ z = z /* ERROR "mismatched types" */ + y
+ z = z /* ERROR "mismatched types" */ - y
+ z = z /* ERROR "mismatched types" */ * y
+ z = z /* ERROR "mismatched types" */ / y
+ z = z /* ERROR "mismatched types" */ % y
+ z = z << y
+ z = z >> y
+}
+
+type myfloat64 float64
+
+func _(x, y float64, z myfloat64) {
+ x = x + 1
+ x = x + -1
+ x = x + 1.0
+ x = x + 1.1
+ x = x + y
+ x = x - y
+ x = x * y
+ x = x / y
+ x = x /* ERROR "not defined" */ % y
+ x = x /* ERRORx `operand x .* must be integer` */ << y
+ x = x /* ERRORx `operand x .* must be integer` */ >> y
+
+ z = z + 1
+ z = z + -1
+ z = z + 1.0
+ z = z + 1.1
+ z = z /* ERROR "mismatched types" */ + y
+ z = z /* ERROR "mismatched types" */ - y
+ z = z /* ERROR "mismatched types" */ * y
+ z = z /* ERROR "mismatched types" */ / y
+ z = z /* ERROR "mismatched types" */ % y
+ z = z /* ERRORx `operand z .* must be integer` */ << y
+ z = z /* ERRORx `operand z .* must be integer` */ >> y
+}
+
+type mystring string
+
+func _(x, y string, z mystring) {
+ x = x + "foo"
+ x = x /* ERROR "not defined" */ - "foo"
+ x = x /* ERROR "mismatched types string and untyped int" */ + 1
+ x = x + y
+ x = x /* ERROR "not defined" */ - y
+ x = x /* ERROR "mismatched types string and untyped int" */* 10
+}
+
+func f() (a, b int) { return }
+
+func _(x int) {
+ _ = f /* ERROR "multiple-value f" */ () + 1
+ _ = x + f /* ERROR "multiple-value f" */ ()
+ _ = f /* ERROR "multiple-value f" */ () + f
+ _ = f /* ERROR "multiple-value f" */ () + f /* ERROR "multiple-value f" */ ()
+}
diff --git a/src/internal/types/testdata/check/expr2.go b/src/internal/types/testdata/check/expr2.go
new file mode 100644
index 0000000..ebb85eb
--- /dev/null
+++ b/src/internal/types/testdata/check/expr2.go
@@ -0,0 +1,260 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// comparisons
+
+package expr2
+
+func _bool() {
+ const t = true == true
+ const f = true == false
+ _ = t /* ERRORx `operator .* not defined` */ < f
+ _ = 0 == t /* ERROR "mismatched types untyped int and untyped bool" */
+ var b bool
+ var x, y float32
+ b = x < y
+ _ = b
+ _ = struct{b bool}{x < y}
+}
+
+// corner cases
+var (
+ v0 = nil == nil // ERROR "operator == not defined on untyped nil"
+)
+
+func arrays() {
+ // basics
+ var a, b [10]int
+ _ = a == b
+ _ = a != b
+ _ = a /* ERROR "< not defined" */ < b
+ _ = a == nil /* ERROR "mismatched types" */
+
+ type C [10]int
+ var c C
+ _ = a == c
+
+ type D [10]int
+ var d D
+ _ = c == d /* ERROR "mismatched types" */
+
+ var e [10]func() int
+ _ = e /* ERROR "[10]func() int cannot be compared" */ == e
+}
+
+func structs() {
+ // basics
+ var s, t struct {
+ x int
+ a [10]float32
+ _ bool
+ }
+ _ = s == t
+ _ = s != t
+ _ = s /* ERROR "< not defined" */ < t
+ _ = s == nil /* ERROR "mismatched types" */
+
+ type S struct {
+ x int
+ a [10]float32
+ _ bool
+ }
+ type T struct {
+ x int
+ a [10]float32
+ _ bool
+ }
+ var ss S
+ var tt T
+ _ = s == ss
+ _ = ss == tt /* ERROR "mismatched types" */
+
+ var u struct {
+ x int
+ a [10]map[string]int
+ }
+ _ = u /* ERROR "cannot be compared" */ == u
+}
+
+func pointers() {
+ // nil
+ _ = nil == nil // ERROR "operator == not defined on untyped nil"
+ _ = nil != nil // ERROR "operator != not defined on untyped nil"
+ _ = nil /* ERROR "< not defined" */ < nil
+ _ = nil /* ERROR "<= not defined" */ <= nil
+ _ = nil /* ERROR "> not defined" */ > nil
+ _ = nil /* ERROR ">= not defined" */ >= nil
+
+ // basics
+ var p, q *int
+ _ = p == q
+ _ = p != q
+
+ _ = p == nil
+ _ = p != nil
+ _ = nil == q
+ _ = nil != q
+
+ _ = p /* ERROR "< not defined" */ < q
+ _ = p /* ERROR "<= not defined" */ <= q
+ _ = p /* ERROR "> not defined" */ > q
+ _ = p /* ERROR ">= not defined" */ >= q
+
+ // various element types
+ type (
+ S1 struct{}
+ S2 struct{}
+ P1 *S1
+ P2 *S2
+ )
+ var (
+ ps1 *S1
+ ps2 *S2
+ p1 P1
+ p2 P2
+ )
+ _ = ps1 == ps1
+ _ = ps1 == ps2 /* ERROR "mismatched types" */
+ _ = ps2 == ps1 /* ERROR "mismatched types" */
+
+ _ = p1 == p1
+ _ = p1 == p2 /* ERROR "mismatched types" */
+
+ _ = p1 == ps1
+}
+
+func channels() {
+ // basics
+ var c, d chan int
+ _ = c == d
+ _ = c != d
+ _ = c == nil
+ _ = c /* ERROR "< not defined" */ < d
+
+ // various element types (named types)
+ type (
+ C1 chan int
+ C1r <-chan int
+ C1s chan<- int
+ C2 chan float32
+ )
+ var (
+ c1 C1
+ c1r C1r
+ c1s C1s
+ c1a chan int
+ c2 C2
+ )
+ _ = c1 == c1
+ _ = c1 == c1r /* ERROR "mismatched types" */
+ _ = c1 == c1s /* ERROR "mismatched types" */
+ _ = c1r == c1s /* ERROR "mismatched types" */
+ _ = c1 == c1a
+ _ = c1a == c1
+ _ = c1 == c2 /* ERROR "mismatched types" */
+ _ = c1a == c2 /* ERROR "mismatched types" */
+
+ // various element types (unnamed types)
+ var (
+ d1 chan int
+ d1r <-chan int
+ d1s chan<- int
+ d1a chan<- int
+ d2 chan float32
+ )
+ _ = d1 == d1
+ _ = d1 == d1r
+ _ = d1 == d1s
+ _ = d1r == d1s /* ERROR "mismatched types" */
+ _ = d1 == d1a
+ _ = d1a == d1
+ _ = d1 == d2 /* ERROR "mismatched types" */
+ _ = d1a == d2 /* ERROR "mismatched types" */
+}
+
+// for interfaces test
+type S1 struct{}
+type S11 struct{}
+type S2 struct{}
+func (*S1) m() int
+func (*S11) m() int
+func (*S11) n()
+func (*S2) m() float32
+
+func interfaces() {
+ // basics
+ var i, j interface{ m() int }
+ _ = i == j
+ _ = i != j
+ _ = i == nil
+ _ = i /* ERROR "< not defined" */ < j
+
+ // various interfaces
+ var ii interface { m() int; n() }
+ var k interface { m() float32 }
+ _ = i == ii
+ _ = i == k /* ERROR "mismatched types" */
+
+ // interfaces vs values
+ var s1 S1
+ var s11 S11
+ var s2 S2
+
+ _ = i == 0 /* ERROR "cannot convert" */
+ _ = i == s1 /* ERROR "mismatched types" */
+ _ = i == &s1
+ _ = i == &s11
+
+ _ = i == s2 /* ERROR "mismatched types" */
+ _ = i == & /* ERROR "mismatched types" */ s2
+
+ // issue #28164
+ // testcase from issue
+ _ = interface{}(nil) == [ /* ERROR "slice can only be compared to nil" */ ]int(nil)
+
+ // related cases
+ var e interface{}
+ var s []int
+ var x int
+ _ = e == s // ERROR "slice can only be compared to nil"
+ _ = s /* ERROR "slice can only be compared to nil" */ == e
+ _ = e /* ERROR "operator < not defined on interface" */ < x
+ _ = x < e // ERROR "operator < not defined on interface"
+}
+
+func slices() {
+ // basics
+ var s []int
+ _ = s == nil
+ _ = s != nil
+ _ = s /* ERROR "< not defined" */ < nil
+
+ // slices are not otherwise comparable
+ _ = s /* ERROR "slice can only be compared to nil" */ == s
+ _ = s /* ERROR "< not defined" */ < s
+}
+
+func maps() {
+ // basics
+ var m map[string]int
+ _ = m == nil
+ _ = m != nil
+ _ = m /* ERROR "< not defined" */ < nil
+
+ // maps are not otherwise comparable
+ _ = m /* ERROR "map can only be compared to nil" */ == m
+ _ = m /* ERROR "< not defined" */ < m
+}
+
+func funcs() {
+ // basics
+ var f func(int) float32
+ _ = f == nil
+ _ = f != nil
+ _ = f /* ERROR "< not defined" */ < nil
+
+ // funcs are not otherwise comparable
+ _ = f /* ERROR "func can only be compared to nil" */ == f
+ _ = f /* ERROR "< not defined" */ < f
+}
diff --git a/src/internal/types/testdata/check/expr3.go b/src/internal/types/testdata/check/expr3.go
new file mode 100644
index 0000000..91534cd
--- /dev/null
+++ b/src/internal/types/testdata/check/expr3.go
@@ -0,0 +1,564 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expr3
+
+import "time"
+
+func indexes() {
+ _ = 1 /* ERROR "cannot index" */ [0]
+ _ = indexes /* ERROR "cannot index" */ [0]
+ _ = ( /* ERROR "cannot slice" */ 12 + 3)[1:2]
+
+ var a [10]int
+ _ = a[true /* ERROR "cannot convert" */ ]
+ _ = a["foo" /* ERROR "cannot convert" */ ]
+ _ = a[1.1 /* ERROR "truncated" */ ]
+ _ = a[1.0]
+ _ = a[- /* ERROR "negative" */ 1]
+ _ = a[- /* ERROR "negative" */ 1 :]
+ _ = a[: - /* ERROR "negative" */ 1]
+ _ = a[: /* ERROR "middle index required" */ : /* ERROR "final index required" */ ]
+ _ = a[0: /* ERROR "middle index required" */ : /* ERROR "final index required" */ ]
+ _ = a[0: /* ERROR "middle index required" */ :10]
+ _ = a[:10:10]
+
+ var a0 int
+ a0 = a[0]
+ _ = a0
+ var a1 int32
+ a1 = a /* ERRORx `cannot use .* in assignment` */ [1]
+ _ = a1
+
+ _ = a[9]
+ _ = a[10 /* ERRORx `index .* out of bounds` */ ]
+ _ = a[1 /* ERROR "overflows" */ <<100]
+ _ = a[1<< /* ERROR "constant shift overflow" */ 1000] // no out-of-bounds follow-on error
+ _ = a[10:]
+ _ = a[:10]
+ _ = a[10:10]
+ _ = a[11 /* ERRORx `index .* out of bounds` */ :]
+ _ = a[: 11 /* ERRORx `index .* out of bounds` */ ]
+ _ = a[: 1 /* ERROR "overflows" */ <<100]
+ _ = a[:10:10]
+ _ = a[:11 /* ERRORx `index .* out of bounds` */ :10]
+ _ = a[:10:11 /* ERRORx `index .* out of bounds` */ ]
+ _ = a[10:0 /* ERROR "invalid slice indices" */ :10]
+ _ = a[0:10:0 /* ERROR "invalid slice indices" */ ]
+ _ = a[10:0 /* ERROR "invalid slice indices" */:0]
+ _ = &a /* ERROR "cannot take address" */ [:10]
+
+ pa := &a
+ _ = pa[9]
+ _ = pa[10 /* ERRORx `index .* out of bounds` */ ]
+ _ = pa[1 /* ERROR "overflows" */ <<100]
+ _ = pa[10:]
+ _ = pa[:10]
+ _ = pa[10:10]
+ _ = pa[11 /* ERRORx `index .* out of bounds` */ :]
+ _ = pa[: 11 /* ERRORx `index .* out of bounds` */ ]
+ _ = pa[: 1 /* ERROR "overflows" */ <<100]
+ _ = pa[:10:10]
+ _ = pa[:11 /* ERRORx `index .* out of bounds` */ :10]
+ _ = pa[:10:11 /* ERRORx `index .* out of bounds` */ ]
+ _ = pa[10:0 /* ERROR "invalid slice indices" */ :10]
+ _ = pa[0:10:0 /* ERROR "invalid slice indices" */ ]
+ _ = pa[10:0 /* ERROR "invalid slice indices" */ :0]
+ _ = &pa /* ERROR "cannot take address" */ [:10]
+
+ var b [0]int
+ _ = b[0 /* ERRORx `index .* out of bounds` */ ]
+ _ = b[:]
+ _ = b[0:]
+ _ = b[:0]
+ _ = b[0:0]
+ _ = b[0:0:0]
+ _ = b[1 /* ERRORx `index .* out of bounds` */ :0:0]
+
+ var s []int
+ _ = s[- /* ERROR "negative" */ 1]
+ _ = s[- /* ERROR "negative" */ 1 :]
+ _ = s[: - /* ERROR "negative" */ 1]
+ _ = s[0]
+ _ = s[1:2]
+ _ = s[2:1 /* ERROR "invalid slice indices" */ ]
+ _ = s[2:]
+ _ = s[: 1 /* ERROR "overflows" */ <<100]
+ _ = s[1 /* ERROR "overflows" */ <<100 :]
+ _ = s[1 /* ERROR "overflows" */ <<100 : 1 /* ERROR "overflows" */ <<100]
+ _ = s[: /* ERROR "middle index required" */ : /* ERROR "final index required" */ ]
+ _ = s[:10:10]
+ _ = s[10:0 /* ERROR "invalid slice indices" */ :10]
+ _ = s[0:10:0 /* ERROR "invalid slice indices" */ ]
+ _ = s[10:0 /* ERROR "invalid slice indices" */ :0]
+ _ = &s /* ERROR "cannot take address" */ [:10]
+
+ var m map[string]int
+ _ = m[0 /* ERRORx `cannot use .* in map index` */ ]
+ _ = m /* ERROR "cannot slice" */ ["foo" : "bar"]
+ _ = m["foo"]
+ // ok is of type bool
+ type mybool bool
+ var ok mybool
+ _, ok = m["bar"]
+ _ = ok
+ _ = m/* ERROR "mismatched types int and untyped string" */[0 /* ERROR "cannot use 0" */ ] + "foo"
+
+ var t string
+ _ = t[- /* ERROR "negative" */ 1]
+ _ = t[- /* ERROR "negative" */ 1 :]
+ _ = t[: - /* ERROR "negative" */ 1]
+ _ = t[1:2:3 /* ERROR "3-index slice of string" */ ]
+ _ = "foo"[1:2:3 /* ERROR "3-index slice of string" */ ]
+ var t0 byte
+ t0 = t[0]
+ _ = t0
+ var t1 rune
+ t1 = t /* ERRORx `cannot use .* in assignment` */ [2]
+ _ = t1
+ _ = ("foo" + "bar")[5]
+ _ = ("foo" + "bar")[6 /* ERRORx `index .* out of bounds` */ ]
+
+ const c = "foo"
+ _ = c[- /* ERROR "negative" */ 1]
+ _ = c[- /* ERROR "negative" */ 1 :]
+ _ = c[: - /* ERROR "negative" */ 1]
+ var c0 byte
+ c0 = c[0]
+ _ = c0
+ var c2 float32
+ c2 = c /* ERRORx `cannot use .* in assignment` */ [2]
+ _ = c[3 /* ERRORx `index .* out of bounds` */ ]
+ _ = ""[0 /* ERRORx `index .* out of bounds` */ ]
+ _ = c2
+
+ _ = s[1<<30] // no compile-time error here
+
+ // issue 4913
+ type mystring string
+ var ss string
+ var ms mystring
+ var i, j int
+ ss = "foo"[1:2]
+ ss = "foo"[i:j]
+ ms = "foo" /* ERRORx `cannot use .* in assignment` */ [1:2]
+ ms = "foo" /* ERRORx `cannot use .* in assignment` */ [i:j]
+ _, _ = ss, ms
+}
+
+type T struct {
+ x int
+ y func()
+}
+
+func (*T) m() {}
+
+func method_expressions() {
+ _ = T.a /* ERROR "no field or method" */
+ _ = T.x /* ERROR "has no method" */
+ _ = T.m /* ERROR "invalid method expression T.m (needs pointer receiver (*T).m)" */
+ _ = (*T).m
+
+ var f func(*T) = T.m /* ERROR "invalid method expression T.m (needs pointer receiver (*T).m)" */
+ var g func(*T) = (*T).m
+ _, _ = f, g
+
+ _ = T.y /* ERROR "has no method" */
+ _ = (*T).y /* ERROR "has no method" */
+}
+
+func struct_literals() {
+ type T0 struct {
+ a, b, c int
+ }
+
+ type T1 struct {
+ T0
+ a, b int
+ u float64
+ s string
+ }
+
+ // keyed elements
+ _ = T1{}
+ _ = T1{a: 0, 1 /* ERRORx `mixture of .* elements` */ }
+ _ = T1{aa /* ERROR "unknown field" */ : 0}
+ _ = T1{1 /* ERROR "invalid field name" */ : 0}
+ _ = T1{a: 0, s: "foo", u: 0, a /* ERROR "duplicate field" */: 10}
+ _ = T1{a: "foo" /* ERRORx `cannot use .* in struct literal` */ }
+ _ = T1{c /* ERROR "unknown field" */ : 0}
+ _ = T1{T0: { /* ERROR "missing type" */ }} // struct literal element type may not be elided
+ _ = T1{T0: T0{}}
+ _ = T1{T0 /* ERROR "invalid field name" */ .a: 0}
+
+ // unkeyed elements
+ _ = T0{1, 2, 3}
+ _ = T0{1, b /* ERROR "mixture" */ : 2, 3}
+ _ = T0{1, 2} /* ERROR "too few values" */
+ _ = T0{1, 2, 3, 4 /* ERROR "too many values" */ }
+ _ = T0{1, "foo" /* ERRORx `cannot use .* in struct literal` */, 3.4 /* ERRORx `cannot use .*\(truncated\)` */}
+
+ // invalid type
+ type P *struct{
+ x int
+ }
+ _ = P /* ERROR "invalid composite literal type" */ {}
+
+ // unexported fields
+ _ = time.Time{}
+ _ = time.Time{sec /* ERROR "unknown field" */ : 0}
+ _ = time.Time{
+ 0 /* ERROR "implicit assignment to unexported field wall in struct literal" */,
+ 0 /* ERROR "implicit assignment" */ ,
+ nil /* ERROR "implicit assignment" */ ,
+ }
+}
+
+func array_literals() {
+ type A0 [0]int
+ _ = A0{}
+ _ = A0{0 /* ERRORx `index .* out of bounds` */}
+ _ = A0{0 /* ERRORx `index .* out of bounds` */ : 0}
+
+ type A1 [10]int
+ _ = A1{}
+ _ = A1{0, 1, 2}
+ _ = A1{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ _ = A1{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 /* ERRORx `index .* out of bounds` */ }
+ _ = A1{- /* ERROR "negative" */ 1: 0}
+ _ = A1{8: 8, 9}
+ _ = A1{8: 8, 9, 10 /* ERRORx `index .* out of bounds` */ }
+ _ = A1{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ _ = A1{5: 5, 6, 7, 3: 3, 4}
+ _ = A1{5: 5, 6, 7, 3: 3, 4, 5 /* ERROR "duplicate index" */ }
+ _ = A1{10 /* ERRORx `index .* out of bounds` */ : 10, 10 /* ERRORx `index .* out of bounds` */ : 10}
+ _ = A1{5: 5, 6, 7, 3: 3, 1 /* ERROR "overflows" */ <<100: 4, 5 /* ERROR "duplicate index" */ }
+ _ = A1{5: 5, 6, 7, 4: 4, 1 /* ERROR "overflows" */ <<100: 4}
+ _ = A1{2.0}
+ _ = A1{2.1 /* ERROR "truncated" */ }
+ _ = A1{"foo" /* ERRORx `cannot use .* in array or slice literal` */ }
+
+ // indices must be integer constants
+ i := 1
+ const f = 2.1
+ const s = "foo"
+ _ = A1{i /* ERROR "index i must be integer constant" */ : 0}
+ _ = A1{f /* ERROR "truncated" */ : 0}
+ _ = A1{s /* ERROR "cannot convert" */ : 0}
+
+ a0 := [...]int{}
+ assert(len(a0) == 0)
+
+ a1 := [...]int{0, 1, 2}
+ assert(len(a1) == 3)
+ var a13 [3]int
+ var a14 [4]int
+ a13 = a1
+ a14 = a1 /* ERRORx `cannot use .* in assignment` */
+ _, _ = a13, a14
+
+ a2 := [...]int{- /* ERROR "negative" */ 1: 0}
+ _ = a2
+
+ a3 := [...]int{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ assert(len(a3) == 5) // somewhat arbitrary
+
+ a4 := [...]complex128{0, 1, 2, 1<<10-2: -1i, 1i, 400: 10, 12, 14}
+ assert(len(a4) == 1024)
+
+ // composite literal element types may be elided
+ type T []int
+ _ = [10]T{T{}, {}, 5: T{1, 2, 3}, 7: {1, 2, 3}}
+ a6 := [...]T{T{}, {}, 5: T{1, 2, 3}, 7: {1, 2, 3}}
+ assert(len(a6) == 8)
+
+ // recursively so
+ _ = [10][10]T{{}, [10]T{{}}, {{1, 2, 3}}}
+
+ // from the spec
+ type Point struct { x, y float32 }
+ _ = [...]Point{Point{1.5, -3.5}, Point{0, 0}}
+ _ = [...]Point{{1.5, -3.5}, {0, 0}}
+ _ = [][]int{[]int{1, 2, 3}, []int{4, 5}}
+ _ = [][]int{{1, 2, 3}, {4, 5}}
+ _ = [...]*Point{&Point{1.5, -3.5}, &Point{0, 0}}
+ _ = [...]*Point{{1.5, -3.5}, {0, 0}}
+}
+
+func slice_literals() {
+ type S0 []int
+ _ = S0{}
+ _ = S0{0, 1, 2}
+ _ = S0{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ _ = S0{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+ _ = S0{- /* ERROR "negative" */ 1: 0}
+ _ = S0{8: 8, 9}
+ _ = S0{8: 8, 9, 10}
+ _ = S0{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ _ = S0{5: 5, 6, 7, 3: 3, 4}
+ _ = S0{5: 5, 6, 7, 3: 3, 4, 5 /* ERROR "duplicate index" */ }
+ _ = S0{10: 10, 10 /* ERROR "duplicate index" */ : 10}
+ _ = S0{5: 5, 6, 7, 3: 3, 1 /* ERROR "overflows" */ <<100: 4, 5 /* ERROR "duplicate index" */ }
+ _ = S0{5: 5, 6, 7, 4: 4, 1 /* ERROR "overflows" */ <<100: 4}
+ _ = S0{2.0}
+ _ = S0{2.1 /* ERROR "truncated" */ }
+ _ = S0{"foo" /* ERRORx `cannot use .* in array or slice literal` */ }
+
+ // indices must be resolved correctly
+ const index1 = 1
+ _ = S0{index1: 1}
+ _ = S0{index2: 2}
+ _ = S0{index3 /* ERROR "undefined" */ : 3}
+
+ // indices must be integer constants
+ i := 1
+ const f = 2.1
+ const s = "foo"
+ _ = S0{i /* ERROR "index i must be integer constant" */ : 0}
+ _ = S0{f /* ERROR "truncated" */ : 0}
+ _ = S0{s /* ERROR "cannot convert" */ : 0}
+
+ // composite literal element types may be elided
+ type T []int
+ _ = []T{T{}, {}, 5: T{1, 2, 3}, 7: {1, 2, 3}}
+ _ = [][]int{{1, 2, 3}, {4, 5}}
+
+ // recursively so
+ _ = [][]T{{}, []T{{}}, {{1, 2, 3}}}
+
+ // issue 17954
+ type T0 *struct { s string }
+ _ = []T0{{}}
+ _ = []T0{{"foo"}}
+
+ type T1 *struct{ int }
+ _ = []T1{}
+ _ = []T1{{0}, {1}, {2}}
+
+ type T2 T1
+ _ = []T2{}
+ _ = []T2{{0}, {1}, {2}}
+
+ _ = map[T0]T2{}
+ _ = map[T0]T2{{}: {}}
+}
+
+const index2 int = 2
+
+type N int
+func (N) f() {}
+
+func map_literals() {
+ type M0 map[string]int
+ type M1 map[bool]int
+ type M2 map[*int]int
+
+ _ = M0{}
+ _ = M0{1 /* ERROR "missing key" */ }
+ _ = M0{1 /* ERRORx `cannot use .* in map literal` */ : 2}
+ _ = M0{"foo": "bar" /* ERRORx `cannot use .* in map literal` */ }
+ _ = M0{"foo": 1, "bar": 2, "foo" /* ERROR "duplicate key" */ : 3 }
+
+ _ = map[interface{}]int{2: 1, 2 /* ERROR "duplicate key" */ : 1}
+ _ = map[interface{}]int{int(2): 1, int16(2): 1}
+ _ = map[interface{}]int{int16(2): 1, int16 /* ERROR "duplicate key" */ (2): 1}
+
+ type S string
+
+ _ = map[interface{}]int{"a": 1, "a" /* ERROR "duplicate key" */ : 1}
+ _ = map[interface{}]int{"a": 1, S("a"): 1}
+ _ = map[interface{}]int{S("a"): 1, S /* ERROR "duplicate key" */ ("a"): 1}
+ _ = map[interface{}]int{1.0: 1, 1.0 /* ERROR "duplicate key" */: 1}
+ _ = map[interface{}]int{int64(-1): 1, int64 /* ERROR "duplicate key" */ (-1) : 1}
+ _ = map[interface{}]int{^uint64(0): 1, ^ /* ERROR "duplicate key" */ uint64(0): 1}
+ _ = map[interface{}]int{complex(1,2): 1, complex /* ERROR "duplicate key" */ (1,2) : 1}
+
+ type I interface {
+ f()
+ }
+
+ _ = map[I]int{N(0): 1, N(2): 1}
+ _ = map[I]int{N(2): 1, N /* ERROR "duplicate key" */ (2): 1}
+
+ // map keys must be resolved correctly
+ key1 := "foo"
+ _ = M0{key1: 1}
+ _ = M0{key2: 2}
+ _ = M0{key3 /* ERROR "undefined" */ : 2}
+
+ var value int
+ _ = M1{true: 1, false: 0}
+ _ = M2{nil: 0, &value: 1}
+
+ // composite literal element types may be elided
+ type T [2]int
+ _ = map[int]T{0: T{3, 4}, 1: {5, 6}}
+
+ // recursively so
+ _ = map[int][]T{0: {}, 1: {{}, T{1, 2}}}
+
+ // composite literal key types may be elided
+ _ = map[T]int{T{3, 4}: 0, {5, 6}: 1}
+
+ // recursively so
+ _ = map[[2]T]int{{}: 0, {{}}: 1, [2]T{{}}: 2, {T{1, 2}}: 3}
+
+ // composite literal element and key types may be elided
+ _ = map[T]T{{}: {}, {1, 2}: T{3, 4}, T{4, 5}: {}}
+ _ = map[T]M0{{} : {}, T{1, 2}: M0{"foo": 0}, {1, 3}: {"foo": 1}}
+
+ // recursively so
+ _ = map[[2]T][]T{{}: {}, {{}}: {{}, T{1, 2}}, [2]T{{}}: nil, {T{1, 2}}: {{}, {}}}
+
+ // from the spec
+ type Point struct { x, y float32 }
+ _ = map[string]Point{"orig": {0, 0}}
+ _ = map[*Point]string{{0, 0}: "orig"}
+
+ // issue 17954
+ type T0 *struct{ s string }
+ type T1 *struct{ int }
+ type T2 T1
+
+ _ = map[T0]T2{}
+ _ = map[T0]T2{{}: {}}
+}
+
+var key2 string = "bar"
+
+type I interface {
+ m()
+}
+
+type I2 interface {
+ m(int)
+}
+
+type T1 struct{}
+type T2 struct{}
+
+func (T2) m(int) {}
+
+type mybool bool
+
+func type_asserts() {
+ var x int
+ _ = x /* ERROR "not an interface" */ .(int)
+
+ var e interface{}
+ var ok bool
+ x, ok = e.(int)
+ _ = ok
+
+ // ok value is of type bool
+ var myok mybool
+ _, myok = e.(int)
+ _ = myok
+
+ var t I
+ _ = t /* ERRORx `use of .* outside type switch` */ .(type)
+ _ = t /* ERROR "m has pointer receiver" */ .(T)
+ _ = t.(*T)
+ _ = t /* ERROR "missing method m" */ .(T1)
+ _ = t /* ERROR "wrong type for method m" */ .(T2)
+ _ = t /* STRICT "wrong type for method m" */ .(I2) // only an error in strict mode (issue 8561)
+
+ // e doesn't statically have an m, but may have one dynamically.
+ _ = e.(I2)
+}
+
+func f0() {}
+func f1(x int) {}
+func f2(u float32, s string) {}
+func fs(s []byte) {}
+func fv(x ...int) {}
+func fi(x ... interface{}) {}
+func (T) fm(x ...int)
+
+func g0() {}
+func g1() int { return 0}
+func g2() (u float32, s string) { return }
+func gs() []byte { return nil }
+
+func _calls() {
+ var x int
+ var y float32
+ var s []int
+
+ f0()
+ _ = f0 /* ERROR "used as value" */ ()
+ f0(g0 /* ERROR "too many arguments" */ )
+
+ f1(0)
+ f1(x)
+ f1(10.0)
+ f1() /* ERROR "not enough arguments in call to f1\n\thave ()\n\twant (int)" */
+ f1(x, y /* ERROR "too many arguments in call to f1\n\thave (int, float32)\n\twant (int)" */ )
+ f1(s /* ERRORx `cannot use .* in argument` */ )
+ f1(x ... /* ERROR "cannot use ..." */ )
+ f1(g0 /* ERROR "used as value" */ ())
+ f1(g1())
+ f1(g2 /* ERROR "too many arguments in call to f1\n\thave (float32, string)\n\twant (int)" */ ())
+
+ f2() /* ERROR "not enough arguments in call to f2\n\thave ()\n\twant (float32, string)" */
+ f2(3.14) /* ERROR "not enough arguments in call to f2\n\thave (number)\n\twant (float32, string)" */
+ f2(3.14, "foo")
+ f2(x /* ERRORx `cannot use .* in argument` */ , "foo")
+ f2(g0 /* ERROR "used as value" */ ()) /* ERROR "not enough arguments in call to f2\n\thave (func())\n\twant (float32, string)" */
+ f2(g1()) /* ERROR "not enough arguments in call to f2\n\thave (int)\n\twant (float32, string)" */
+ f2(g2())
+
+ fs() /* ERROR "not enough arguments" */
+ fs(g0 /* ERROR "used as value" */ ())
+ fs(g1 /* ERRORx `cannot use .* in argument` */ ())
+ fs(g2 /* ERROR "too many arguments" */ ())
+ fs(gs())
+
+ fv()
+ fv(1, 2.0, x)
+ fv(s /* ERRORx `cannot use .* in argument` */ )
+ fv(s...)
+ fv(x /* ERROR "cannot use" */ ...)
+ fv(1, s /* ERROR "too many arguments" */ ...)
+ fv(gs /* ERRORx `cannot use .* in argument` */ ())
+ fv(gs /* ERRORx `cannot use .* in argument` */ ()...)
+
+ var t T
+ t.fm()
+ t.fm(1, 2.0, x)
+ t.fm(s /* ERRORx `cannot use .* in argument` */ )
+ t.fm(g1())
+ t.fm(1, s /* ERROR "too many arguments" */ ...)
+ t.fm(gs /* ERRORx `cannot use .* in argument` */ ())
+ t.fm(gs /* ERRORx `cannot use .* in argument` */ ()...)
+
+ T.fm(t, )
+ T.fm(t, 1, 2.0, x)
+ T.fm(t, s /* ERRORx `cannot use .* in argument` */ )
+ T.fm(t, g1())
+ T.fm(t, 1, s /* ERROR "too many arguments" */ ...)
+ T.fm(t, gs /* ERRORx `cannot use .* in argument` */ ())
+ T.fm(t, gs /* ERRORx `cannot use .* in argument` */ ()...)
+
+ var i interface{ fm(x ...int) } = t
+ i.fm()
+ i.fm(1, 2.0, x)
+ i.fm(s /* ERRORx `cannot use .* in argument` */ )
+ i.fm(g1())
+ i.fm(1, s /* ERROR "too many arguments" */ ...)
+ i.fm(gs /* ERRORx `cannot use .* in argument` */ ())
+ i.fm(gs /* ERRORx `cannot use .* in argument` */ ()...)
+
+ fi()
+ fi(1, 2.0, x, 3.14, "foo")
+ fi(g2())
+ fi(0, g2)
+ fi(0, g2 /* ERROR "multiple-value g2" */ ())
+}
+
+func issue6344() {
+ type T []interface{}
+ var x T
+ fi(x...) // ... applies also to named slices
+}
diff --git a/src/internal/types/testdata/check/funcinference.go b/src/internal/types/testdata/check/funcinference.go
new file mode 100644
index 0000000..e0e978f
--- /dev/null
+++ b/src/internal/types/testdata/check/funcinference.go
@@ -0,0 +1,112 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package funcInference
+
+import "strconv"
+
+type any interface{}
+
+func f0[A any, B interface{*C}, C interface{*D}, D interface{*A}](A, B, C, D) {}
+func _() {
+ f := f0[string]
+ f("a", nil, nil, nil)
+ f0("a", nil, nil, nil)
+}
+
+func f1[A any, B interface{*A}](A, B) {}
+func _() {
+ f := f1[int]
+ f(int(0), new(int))
+ f1(int(0), new(int))
+}
+
+func f2[A any, B interface{[]A}](A, B) {}
+func _() {
+ f := f2[byte]
+ f(byte(0), []byte{})
+ f2(byte(0), []byte{})
+}
+
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+// func f3[A any, B interface{~C}, C interface{~*A}](A, B, C)
+// func _() {
+// f := f3[int]
+// var x int
+// f(x, &x, &x)
+// f3(x, &x, &x)
+// }
+
+func f4[A any, B interface{[]C}, C interface{*A}](A, B, C) {}
+func _() {
+ f := f4[int]
+ var x int
+ f(x, []*int{}, &x)
+ f4(x, []*int{}, &x)
+}
+
+func f5[A interface{struct{b B; c C}}, B any, C interface{*B}](x B) A { panic(0) }
+func _() {
+ x := f5(1.2)
+ var _ float64 = x.b
+ var _ float64 = *x.c
+}
+
+func f6[A any, B interface{~struct{f []A}}](B) A { panic(0) }
+func _() {
+ x := f6(struct{f []string}{})
+ var _ string = x
+}
+
+func f7[A interface{*B}, B interface{~*A}]() {}
+
+// More realistic examples
+
+func Double[S interface{ ~[]E }, E interface{ ~int | ~int8 | ~int16 | ~int32 | ~int64 }](s S) S {
+ r := make(S, len(s))
+ for i, v := range s {
+ r[i] = v + v
+ }
+ return r
+}
+
+type MySlice []int
+
+var _ = Double(MySlice{1})
+
+// From the draft design.
+
+type Setter[B any] interface {
+ Set(string)
+ *B
+}
+
+func FromStrings[T interface{}, PT Setter[T]](s []string) []T {
+ result := make([]T, len(s))
+ for i, v := range s {
+ // The type of &result[i] is *T which is in the type set
+ // of Setter, so we can convert it to PT.
+ p := PT(&result[i])
+ // PT has a Set method.
+ p.Set(v)
+ }
+ return result
+}
+
+type Settable int
+
+func (p *Settable) Set(s string) {
+ i, _ := strconv.Atoi(s) // real code should not ignore the error
+ *p = Settable(i)
+}
+
+var _ = FromStrings[Settable]([]string{"1", "2"})
+
+// Suitable error message when the type parameter is provided (rather than inferred).
+
+func f8[P, Q any](P, Q) {}
+
+func _(s string) {
+ f8[int](s /* ERROR "cannot use s (variable of type string) as int value in argument to f8[int]" */ , s)
+}
diff --git a/src/internal/types/testdata/check/go1_12.go b/src/internal/types/testdata/check/go1_12.go
new file mode 100644
index 0000000..b47d3de
--- /dev/null
+++ b/src/internal/types/testdata/check/go1_12.go
@@ -0,0 +1,36 @@
+// -lang=go1.12
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package p
+
+// numeric literals
+const (
+ _ = 1_000 // ERROR "underscores in numeric literals requires go1.13 or later"
+ _ = 0b111 // ERROR "binary literals requires go1.13 or later"
+ _ = 0o567 // ERROR "0o/0O-style octal literals requires go1.13 or later"
+ _ = 0xabc // ok
+ _ = 0x0p1 // ERROR "hexadecimal floating-point literals requires go1.13 or later"
+
+ _ = 0B111 // ERROR "binary"
+ _ = 0O567 // ERROR "octal"
+ _ = 0Xabc // ok
+ _ = 0X0P1 // ERROR "hexadecimal floating-point"
+
+ _ = 1_000i // ERROR "underscores"
+ _ = 0b111i // ERROR "binary"
+ _ = 0o567i // ERROR "octal"
+ _ = 0xabci // ERROR "hexadecimal floating-point"
+ _ = 0x0p1i // ERROR "hexadecimal floating-point"
+)
+
+// signed shift counts
+var (
+ s int
+ _ = 1 << s // ERROR "invalid operation: signed shift count s (variable of type int) requires go1.13 or later"
+ _ = 1 >> s // ERROR "signed shift count"
+)
diff --git a/src/internal/types/testdata/check/go1_13.go b/src/internal/types/testdata/check/go1_13.go
new file mode 100644
index 0000000..cc7861d
--- /dev/null
+++ b/src/internal/types/testdata/check/go1_13.go
@@ -0,0 +1,23 @@
+// -lang=go1.13
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package p
+
+// interface embedding
+
+type I interface { m() }
+
+type _ interface {
+ m()
+ I // ERROR "duplicate method m"
+}
+
+type _ interface {
+ I
+ I // ERROR "duplicate method m"
+}
diff --git a/src/internal/types/testdata/check/go1_16.go b/src/internal/types/testdata/check/go1_16.go
new file mode 100644
index 0000000..9675b29
--- /dev/null
+++ b/src/internal/types/testdata/check/go1_16.go
@@ -0,0 +1,15 @@
+// -lang=go1.16
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package p
+
+type Slice []byte
+type Array [8]byte
+
+var s Slice
+var p = (*Array)(s /* ERROR "requires go1.17 or later" */ )
diff --git a/src/internal/types/testdata/check/go1_19.go b/src/internal/types/testdata/check/go1_19.go
new file mode 100644
index 0000000..b6cff4f
--- /dev/null
+++ b/src/internal/types/testdata/check/go1_19.go
@@ -0,0 +1,15 @@
+// -lang=go1.19
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package p
+
+type Slice []byte
+type Array [8]byte
+
+var s Slice
+var p = (Array)(s /* ERROR "requires go1.20 or later" */)
diff --git a/src/internal/types/testdata/check/go1_19_20.go b/src/internal/types/testdata/check/go1_19_20.go
new file mode 100644
index 0000000..52e5dfd
--- /dev/null
+++ b/src/internal/types/testdata/check/go1_19_20.go
@@ -0,0 +1,17 @@
+// -lang=go1.19
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+//go:build go1.20
+
+package p
+
+type Slice []byte
+type Array [8]byte
+
+var s Slice
+var p = (Array)(s /* ok */)
diff --git a/src/internal/types/testdata/check/go1_20_19.go b/src/internal/types/testdata/check/go1_20_19.go
new file mode 100644
index 0000000..08365a7
--- /dev/null
+++ b/src/internal/types/testdata/check/go1_20_19.go
@@ -0,0 +1,17 @@
+// -lang=go1.20
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+//go:build go1.19
+
+package p
+
+type Slice []byte
+type Array [8]byte
+
+var s Slice
+var p = (Array)(s /* ok because Go 1.20 ignored the //go:build go1.19 */)
diff --git a/src/internal/types/testdata/check/go1_21_19.go b/src/internal/types/testdata/check/go1_21_19.go
new file mode 100644
index 0000000..2acd258
--- /dev/null
+++ b/src/internal/types/testdata/check/go1_21_19.go
@@ -0,0 +1,17 @@
+// -lang=go1.21
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+//go:build go1.19
+
+package p
+
+type Slice []byte
+type Array [8]byte
+
+var s Slice
+var p = (Array)(s /* ERROR "requires go1.20 or later" */)
diff --git a/src/internal/types/testdata/check/go1_8.go b/src/internal/types/testdata/check/go1_8.go
new file mode 100644
index 0000000..6a7e639
--- /dev/null
+++ b/src/internal/types/testdata/check/go1_8.go
@@ -0,0 +1,12 @@
+// -lang=go1.8
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package p
+
+// type alias declarations
+type any = /* ERROR "type aliases requires go1.9 or later" */ interface{}
diff --git a/src/internal/types/testdata/check/go1_xx_19.go b/src/internal/types/testdata/check/go1_xx_19.go
new file mode 100644
index 0000000..01f6b7d
--- /dev/null
+++ b/src/internal/types/testdata/check/go1_xx_19.go
@@ -0,0 +1,15 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+//go:build go1.19
+
+package p
+
+type Slice []byte
+type Array [8]byte
+
+var s Slice
+var p = (Array)(s /* ok because Go 1.X prior to Go 1.21 ignored the //go:build go1.19 */)
diff --git a/src/internal/types/testdata/check/gotos.go b/src/internal/types/testdata/check/gotos.go
new file mode 100644
index 0000000..069a94b
--- /dev/null
+++ b/src/internal/types/testdata/check/gotos.go
@@ -0,0 +1,560 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a modified copy of $GOROOT/test/goto.go.
+
+package gotos
+
+var (
+ i, n int
+ x []int
+ c chan int
+ m map[int]int
+ s string
+)
+
+// goto after declaration okay
+func _() {
+ x := 1
+ goto L
+L:
+ _ = x
+}
+
+// goto before declaration okay
+func _() {
+ goto L
+L:
+ x := 1
+ _ = x
+}
+
+// goto across declaration not okay
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration at line 36" */
+ x := 1
+ _ = x
+L:
+}
+
+// goto across declaration in inner scope okay
+func _() {
+ goto L
+ {
+ x := 1
+ _ = x
+ }
+L:
+}
+
+// goto across declaration after inner scope not okay
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration at line 58" */
+ {
+ x := 1
+ _ = x
+ }
+ x := 1
+ _ = x
+L:
+}
+
+// goto across declaration in reverse okay
+func _() {
+L:
+ x := 1
+ _ = x
+ goto L
+}
+
+func _() {
+L: L1:
+ x := 1
+ _ = x
+ goto L
+ goto L1
+}
+
+// error shows first offending variable
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration at line 84" */
+ x := 1
+ _ = x
+ y := 1
+ _ = y
+L:
+}
+
+// goto not okay even if code path is dead
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration" */
+ x := 1
+ _ = x
+ y := 1
+ _ = y
+ return
+L:
+}
+
+// goto into outer block okay
+func _() {
+ {
+ goto L
+ }
+L:
+}
+
+func _() {
+ {
+ goto L
+ goto L1
+ }
+L: L1:
+}
+
+// goto backward into outer block okay
+func _() {
+L:
+ {
+ goto L
+ }
+}
+
+func _() {
+L: L1:
+ {
+ goto L
+ goto L1
+ }
+}
+
+// goto into inner block not okay
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ {
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ goto L1 /* ERROR "goto L1 jumps into block" */
+ {
+ L: L1:
+ }
+}
+
+// goto backward into inner block still not okay
+func _() {
+ {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ {
+ L: L1:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+ goto L1 /* ERROR "goto L1 jumps into block" */
+}
+
+// error shows first (outermost) offending block
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ {
+ {
+ {
+ L:
+ }
+ }
+ }
+}
+
+// error prefers block diagnostic over declaration diagnostic
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ x := 1
+ _ = x
+ {
+ L:
+ }
+}
+
+// many kinds of blocks, all invalid to jump into or among,
+// but valid to jump out of
+
+// if
+
+func _() {
+L:
+ if true {
+ goto L
+ }
+}
+
+func _() {
+L:
+ if true {
+ goto L
+ } else {
+ }
+}
+
+func _() {
+L:
+ if false {
+ } else {
+ goto L
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ if true {
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ if true {
+ L:
+ } else {
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ if true {
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if false {
+ L:
+ } else {
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else if false {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else if false {
+ L:
+ } else {
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else if false {
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ L:
+ } else {
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
+
+// for
+
+func _() {
+ for {
+ goto L
+ }
+L:
+}
+
+func _() {
+ for {
+ goto L
+ L:
+ }
+}
+
+func _() {
+ for {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for {
+ goto L
+ L1:
+ }
+L:
+ goto L1 /* ERROR "goto L1 jumps into block" */
+}
+
+func _() {
+ for i < n {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = 0; i < n; i++ {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range x {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range c {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range m {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range s {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+// switch
+
+func _() {
+L:
+ switch i {
+ case 0:
+ goto L
+ }
+}
+
+func _() {
+L:
+ switch i {
+ case 0:
+
+ default:
+ goto L
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+
+ default:
+ L:
+ goto L
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+
+ default:
+ goto L
+ L:
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+ goto L
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ switch i {
+ case 0:
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ switch i {
+ case 0:
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ switch i {
+ case 0:
+ default:
+ L:
+ }
+}
+
+func _() {
+ switch i {
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ case 0:
+ L:
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+ L:
+ ;
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
+
+// select
+// different from switch. the statement has no implicit block around it.
+
+func _() {
+L:
+ select {
+ case <-c:
+ goto L
+ }
+}
+
+func _() {
+L:
+ select {
+ case c <- 1:
+
+ default:
+ goto L
+ }
+}
+
+func _() {
+ select {
+ case <-c:
+
+ default:
+ L:
+ goto L
+ }
+}
+
+func _() {
+ select {
+ case c <- 1:
+
+ default:
+ goto L
+ L:
+ }
+}
+
+func _() {
+ select {
+ case <-c:
+ goto L
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ select {
+ case c <- 1:
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ select {
+ case c <- 1:
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ select {
+ case <-c:
+ default:
+ L:
+ }
+}
+
+func _() {
+ select {
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ case <-c:
+ L:
+ }
+}
+
+func _() {
+ select {
+ case <-c:
+ L:
+ ;
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
diff --git a/src/internal/types/testdata/check/importC.go b/src/internal/types/testdata/check/importC.go
new file mode 100644
index 0000000..2cdf383
--- /dev/null
+++ b/src/internal/types/testdata/check/importC.go
@@ -0,0 +1,56 @@
+// -fakeImportC
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importC
+
+import "C"
+import _ /* ERROR `cannot rename import "C"` */ "C"
+import foo /* ERROR `cannot rename import "C"` */ "C"
+import . /* ERROR `cannot rename import "C"` */ "C"
+
+// Test cases extracted from issue #22090.
+
+import "unsafe"
+
+const _ C.int = 0xff // no error due to invalid constant type
+
+type T struct {
+ Name string
+ Ordinal int
+}
+
+func _(args []T) {
+ var s string
+ for i, v := range args {
+ cname := C.CString(v.Name)
+ args[i].Ordinal = int(C.sqlite3_bind_parameter_index(s, cname)) // no error due to i not being "used"
+ C.free(unsafe.Pointer(cname))
+ }
+}
+
+type CType C.Type
+
+const _ CType = C.X // no error due to invalid constant type
+const _ = C.X
+
+// Test cases extracted from issue #23712.
+
+func _() {
+ var a [C.ArrayLength]byte
+ _ = a[0] // no index out of bounds error here
+}
+
+// Additional tests to verify fix for #23712.
+
+func _() {
+ var a [C.ArrayLength1]byte
+ _ = 1 / len(a) // no division by zero error here and below
+ _ = 1 / cap(a)
+ _ = uint(unsafe.Sizeof(a)) // must not be negative
+
+ var b [C.ArrayLength2]byte
+ a = b // should be valid
+}
diff --git a/src/internal/types/testdata/check/importdecl0/importdecl0a.go b/src/internal/types/testdata/check/importdecl0/importdecl0a.go
new file mode 100644
index 0000000..d514ae4
--- /dev/null
+++ b/src/internal/types/testdata/check/importdecl0/importdecl0a.go
@@ -0,0 +1,53 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importdecl0
+
+import ()
+
+import (
+ // we can have multiple blank imports (was bug)
+ _ "math"
+ _ "net/rpc"
+ init /* ERROR "cannot import package as init" */ "fmt"
+ // reflect defines a type "flag" which shows up in the gc export data
+ "reflect"
+ . /* ERROR "imported and not used" */ "reflect"
+)
+
+import "math" /* ERROR "imported and not used" */
+import m /* ERROR "imported as m and not used" */ "math"
+import _ "math"
+
+import (
+ "math/big" /* ERROR "imported and not used" */
+ b /* ERROR "imported as b and not used" */ "math/big"
+ _ "math/big"
+)
+
+import "fmt"
+import f1 "fmt"
+import f2 "fmt"
+
+// reflect.flag must not be visible in this package
+type flag int
+type _ reflect.flag /* ERROR "not exported" */
+
+// imported package name may conflict with local objects
+type reflect /* ERROR "reflect already declared" */ int
+
+// dot-imported exported objects may conflict with local objects
+type Value /* ERROR "Value already declared through dot-import of package reflect" */ struct{}
+
+var _ = fmt.Println // use "fmt"
+
+func _() {
+ f1.Println() // use "fmt"
+}
+
+func _() {
+ _ = func() {
+ f2.Println() // use "fmt"
+ }
+}
diff --git a/src/internal/types/testdata/check/importdecl0/importdecl0b.go b/src/internal/types/testdata/check/importdecl0/importdecl0b.go
new file mode 100644
index 0000000..99e1d1e
--- /dev/null
+++ b/src/internal/types/testdata/check/importdecl0/importdecl0b.go
@@ -0,0 +1,30 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importdecl0
+
+import "math"
+import m "math"
+
+import . "testing" // declares T in file scope
+import . /* ERRORx `.unsafe. imported and not used` */ "unsafe"
+import . "fmt" // declares Println in file scope
+
+import (
+ "" /* ERROR "invalid import path" */
+ "a!b" /* ERROR "invalid import path" */
+ "abc\xffdef" /* ERROR "invalid import path" */
+)
+
+// using "math" in this file doesn't affect its use in other files
+const Pi0 = math.Pi
+const Pi1 = m.Pi
+
+type _ T // use "testing"
+
+func _() func() interface{} {
+ return func() interface{} {
+ return Println // use "fmt"
+ }
+}
diff --git a/src/internal/types/testdata/check/importdecl1/importdecl1a.go b/src/internal/types/testdata/check/importdecl1/importdecl1a.go
new file mode 100644
index 0000000..d377c01
--- /dev/null
+++ b/src/internal/types/testdata/check/importdecl1/importdecl1a.go
@@ -0,0 +1,22 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 8969.
+
+package importdecl1
+
+import "go/ast"
+import . "unsafe"
+
+var _ Pointer // use dot-imported package unsafe
+
+// Test cases for issue 23914.
+
+type A interface {
+ // Methods m1, m2 must be type-checked in this file scope
+ // even when embedded in an interface in a different
+ // file of the same package.
+ m1() ast.Node
+ m2() Pointer
+}
diff --git a/src/internal/types/testdata/check/importdecl1/importdecl1b.go b/src/internal/types/testdata/check/importdecl1/importdecl1b.go
new file mode 100644
index 0000000..49ac2d5
--- /dev/null
+++ b/src/internal/types/testdata/check/importdecl1/importdecl1b.go
@@ -0,0 +1,11 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importdecl1
+
+import . /* ERRORx ".unsafe. imported and not used" */ "unsafe"
+
+type B interface {
+ A
+}
diff --git a/src/internal/types/testdata/check/init0.go b/src/internal/types/testdata/check/init0.go
new file mode 100644
index 0000000..ee2175e
--- /dev/null
+++ b/src/internal/types/testdata/check/init0.go
@@ -0,0 +1,106 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// initialization cycles
+
+package init0
+
+// initialization cycles (we don't know the types)
+const (
+ s0 /* ERROR "initialization cycle: s0 refers to itself" */ = s0
+
+ x0 /* ERROR "initialization cycle for x0" */ = y0
+ y0 = x0
+
+ a0 = b0
+ b0 /* ERROR "initialization cycle for b0" */ = c0
+ c0 = d0
+ d0 = b0
+)
+
+var (
+ s1 /* ERROR "initialization cycle: s1 refers to itself" */ = s1
+
+ x1 /* ERROR "initialization cycle for x1" */ = y1
+ y1 = x1
+
+ a1 = b1
+ b1 /* ERROR "initialization cycle for b1" */ = c1
+ c1 = d1
+ d1 = b1
+)
+
+// initialization cycles (we know the types)
+const (
+ s2 /* ERROR "initialization cycle: s2 refers to itself" */ int = s2
+
+ x2 /* ERROR "initialization cycle for x2" */ int = y2
+ y2 = x2
+
+ a2 = b2
+ b2 /* ERROR "initialization cycle for b2" */ int = c2
+ c2 = d2
+ d2 = b2
+)
+
+var (
+ s3 /* ERROR "initialization cycle: s3 refers to itself" */ int = s3
+
+ x3 /* ERROR "initialization cycle for x3" */ int = y3
+ y3 = x3
+
+ a3 = b3
+ b3 /* ERROR "initialization cycle for b3" */ int = c3
+ c3 = d3
+ d3 = b3
+)
+
+// cycles via struct fields
+
+type S1 struct {
+ f int
+}
+const cx3 S1 /* ERROR "invalid constant type" */ = S1{cx3.f}
+var vx3 /* ERROR "initialization cycle: vx3 refers to itself" */ S1 = S1{vx3.f}
+
+// cycles via functions
+
+var x4 = x5
+var x5 /* ERROR "initialization cycle for x5" */ = f1()
+func f1() int { return x5*10 }
+
+var x6, x7 /* ERROR "initialization cycle" */ = f2()
+var x8 = x7
+func f2() (int, int) { return f3() + f3(), 0 }
+func f3() int { return x8 }
+
+// cycles via function literals
+
+var x9 /* ERROR "initialization cycle: x9 refers to itself" */ = func() int { return x9 }()
+
+var x10 /* ERROR "initialization cycle for x10" */ = f4()
+
+func f4() int {
+ _ = func() {
+ _ = x10
+ }
+ return 0
+}
+
+// cycles via method expressions
+
+type T1 struct{}
+
+func (T1) m() bool { _ = x11; return false }
+
+var x11 /* ERROR "initialization cycle for x11" */ = T1.m(T1{})
+
+// cycles via method values
+
+type T2 struct{}
+
+func (T2) m() bool { _ = x12; return false }
+
+var t1 T2
+var x12 /* ERROR "initialization cycle for x12" */ = t1.m
diff --git a/src/internal/types/testdata/check/init1.go b/src/internal/types/testdata/check/init1.go
new file mode 100644
index 0000000..c89032a
--- /dev/null
+++ b/src/internal/types/testdata/check/init1.go
@@ -0,0 +1,97 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// initialization cycles
+
+package init1
+
+// issue 6683 (marked as WorkingAsIntended)
+
+type T0 struct{}
+
+func (T0) m() int { return y0 }
+
+var x0 = T0{}
+
+var y0 /* ERROR "initialization cycle" */ = x0.m()
+
+type T1 struct{}
+
+func (T1) m() int { return y1 }
+
+var x1 interface {
+ m() int
+} = T1{}
+
+var y1 = x1.m() // no cycle reported, x1 is of interface type
+
+// issue 6703 (modified)
+
+var x2 /* ERROR "initialization cycle" */ = T2.m
+
+var y2 = x2
+
+type T2 struct{}
+
+func (T2) m() int {
+ _ = y2
+ return 0
+}
+
+var x3 /* ERROR "initialization cycle" */ = T3.m(T3{}) // <<<< added (T3{})
+
+var y3 = x3
+
+type T3 struct{}
+
+func (T3) m() int {
+ _ = y3
+ return 0
+}
+
+var x4 /* ERROR "initialization cycle" */ = T4{}.m // <<<< added {}
+
+var y4 = x4
+
+type T4 struct{}
+
+func (T4) m() int {
+ _ = y4
+ return 0
+}
+
+var x5 /* ERROR "initialization cycle" */ = T5{}.m() // <<<< added ()
+
+var y5 = x5
+
+type T5 struct{}
+
+func (T5) m() int {
+ _ = y5
+ return 0
+}
+
+// issue 4847
+// simplified test case
+
+var x6 = f6
+var y6 /* ERROR "initialization cycle" */ = f6
+func f6() { _ = y6 }
+
+// full test case
+
+type (
+ E int
+ S int
+)
+
+type matcher func(s *S) E
+
+func matchList(s *S) E { return matcher(matchAnyFn)(s) }
+
+var foo = matcher(matchList)
+
+var matchAny /* ERROR "initialization cycle" */ = matcher(matchList)
+
+func matchAnyFn(s *S) (err E) { return matchAny(s) }
\ No newline at end of file
diff --git a/src/internal/types/testdata/check/init2.go b/src/internal/types/testdata/check/init2.go
new file mode 100644
index 0000000..24e9277
--- /dev/null
+++ b/src/internal/types/testdata/check/init2.go
@@ -0,0 +1,139 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// initialization cycles
+
+package init2
+
+// cycles through functions
+
+func f1() int { _ = x1; return 0 }
+var x1 /* ERROR "initialization cycle" */ = f1
+
+func f2() int { _ = x2; return 0 }
+var x2 /* ERROR "initialization cycle" */ = f2()
+
+// cycles through method expressions
+
+type T3 int
+func (T3) m() int { _ = x3; return 0 }
+var x3 /* ERROR "initialization cycle" */ = T3.m
+
+type T4 int
+func (T4) m() int { _ = x4; return 0 }
+var x4 /* ERROR "initialization cycle" */ = T4.m(0)
+
+type T3p int
+func (*T3p) m() int { _ = x3p; return 0 }
+var x3p /* ERROR "initialization cycle" */ = (*T3p).m
+
+type T4p int
+func (*T4p) m() int { _ = x4p; return 0 }
+var x4p /* ERROR "initialization cycle" */ = (*T4p).m(nil)
+
+// cycles through method expressions of embedded methods
+
+type T5 struct { E5 }
+type E5 int
+func (E5) m() int { _ = x5; return 0 }
+var x5 /* ERROR "initialization cycle" */ = T5.m
+
+type T6 struct { E6 }
+type E6 int
+func (E6) m() int { _ = x6; return 0 }
+var x6 /* ERROR "initialization cycle" */ = T6.m(T6{0})
+
+type T5p struct { E5p }
+type E5p int
+func (*E5p) m() int { _ = x5p; return 0 }
+var x5p /* ERROR "initialization cycle" */ = (*T5p).m
+
+type T6p struct { E6p }
+type E6p int
+func (*E6p) m() int { _ = x6p; return 0 }
+var x6p /* ERROR "initialization cycle" */ = (*T6p).m(nil)
+
+// cycles through method values
+
+type T7 int
+func (T7) m() int { _ = x7; return 0 }
+var x7 /* ERROR "initialization cycle" */ = T7(0).m
+
+type T8 int
+func (T8) m() int { _ = x8; return 0 }
+var x8 /* ERROR "initialization cycle" */ = T8(0).m()
+
+type T7p int
+func (*T7p) m() int { _ = x7p; return 0 }
+var x7p /* ERROR "initialization cycle" */ = new(T7p).m
+
+type T8p int
+func (*T8p) m() int { _ = x8p; return 0 }
+var x8p /* ERROR "initialization cycle" */ = new(T8p).m()
+
+type T7v int
+func (T7v) m() int { _ = x7v; return 0 }
+var x7var T7v
+var x7v /* ERROR "initialization cycle" */ = x7var.m
+
+type T8v int
+func (T8v) m() int { _ = x8v; return 0 }
+var x8var T8v
+var x8v /* ERROR "initialization cycle" */ = x8var.m()
+
+type T7pv int
+func (*T7pv) m() int { _ = x7pv; return 0 }
+var x7pvar *T7pv
+var x7pv /* ERROR "initialization cycle" */ = x7pvar.m
+
+type T8pv int
+func (*T8pv) m() int { _ = x8pv; return 0 }
+var x8pvar *T8pv
+var x8pv /* ERROR "initialization cycle" */ = x8pvar.m()
+
+// cycles through method values of embedded methods
+
+type T9 struct { E9 }
+type E9 int
+func (E9) m() int { _ = x9; return 0 }
+var x9 /* ERROR "initialization cycle" */ = T9{0}.m
+
+type T10 struct { E10 }
+type E10 int
+func (E10) m() int { _ = x10; return 0 }
+var x10 /* ERROR "initialization cycle" */ = T10{0}.m()
+
+type T9p struct { E9p }
+type E9p int
+func (*E9p) m() int { _ = x9p; return 0 }
+var x9p /* ERROR "initialization cycle" */ = new(T9p).m
+
+type T10p struct { E10p }
+type E10p int
+func (*E10p) m() int { _ = x10p; return 0 }
+var x10p /* ERROR "initialization cycle" */ = new(T10p).m()
+
+type T9v struct { E9v }
+type E9v int
+func (E9v) m() int { _ = x9v; return 0 }
+var x9var T9v
+var x9v /* ERROR "initialization cycle" */ = x9var.m
+
+type T10v struct { E10v }
+type E10v int
+func (E10v) m() int { _ = x10v; return 0 }
+var x10var T10v
+var x10v /* ERROR "initialization cycle" */ = x10var.m()
+
+type T9pv struct { E9pv }
+type E9pv int
+func (*E9pv) m() int { _ = x9pv; return 0 }
+var x9pvar *T9pv
+var x9pv /* ERROR "initialization cycle" */ = x9pvar.m
+
+type T10pv struct { E10pv }
+type E10pv int
+func (*E10pv) m() int { _ = x10pv; return 0 }
+var x10pvar *T10pv
+var x10pv /* ERROR "initialization cycle" */ = x10pvar.m()
diff --git a/src/internal/types/testdata/check/issue25008/issue25008a.go b/src/internal/types/testdata/check/issue25008/issue25008a.go
new file mode 100644
index 0000000..cf71ca1
--- /dev/null
+++ b/src/internal/types/testdata/check/issue25008/issue25008a.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "io"
+
+type A interface {
+ io.Reader
+}
+
+func f(a A) {
+ a.Read(nil)
+}
diff --git a/src/internal/types/testdata/check/issue25008/issue25008b.go b/src/internal/types/testdata/check/issue25008/issue25008b.go
new file mode 100644
index 0000000..f132b7f
--- /dev/null
+++ b/src/internal/types/testdata/check/issue25008/issue25008b.go
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type B interface {
+ A
+}
diff --git a/src/internal/types/testdata/check/issues0.go b/src/internal/types/testdata/check/issues0.go
new file mode 100644
index 0000000..6039df9
--- /dev/null
+++ b/src/internal/types/testdata/check/issues0.go
@@ -0,0 +1,373 @@
+// -lang=go1.17
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p // don't permit non-interface elements in interfaces
+
+import (
+ "fmt"
+ syn "regexp/syntax"
+ t1 "text/template"
+ t2 "html/template"
+)
+
+func issue7035() {
+ type T struct{ X int }
+ _ = func() {
+ fmt.Println() // must refer to imported fmt rather than the fmt below
+ }
+ fmt := new(T)
+ _ = fmt.X
+}
+
+func issue8066() {
+ const (
+ _ = float32(340282356779733661637539395458142568447)
+ _ = float32(340282356779733661637539395458142568448 /* ERROR "cannot convert" */ )
+ )
+}
+
+// Check that a missing identifier doesn't lead to a spurious error cascade.
+func issue8799a() {
+ x, ok := missing /* ERROR "undefined" */ ()
+ _ = !ok
+ _ = x
+}
+
+func issue8799b(x int, ok bool) {
+ x, ok = missing /* ERROR "undefined" */ ()
+ _ = !ok
+ _ = x
+}
+
+func issue9182() {
+ type Point C /* ERROR "undefined" */ .Point
+ // no error for composite literal based on unknown type
+ _ = Point{x: 1, y: 2}
+}
+
+func f0() (a []int) { return }
+func f1() (a []int, b int) { return }
+func f2() (a, b []int) { return }
+
+func append_([]int, ...int) {}
+
+func issue9473(a []int, b ...int) {
+ // variadic builtin function
+ _ = append(f0())
+ _ = append(f0(), f0()...)
+ _ = append(f1())
+ _ = append(f2 /* ERRORx `cannot use .* in argument` */ ())
+ _ = append(f2()... /* ERROR "cannot use ..." */ )
+ _ = append(f0(), f1 /* ERROR "multiple-value f1" */ ())
+ _ = append(f0(), f2 /* ERROR "multiple-value f2" */ ())
+ _ = append(f0(), f1 /* ERROR "multiple-value f1" */ ()...)
+ _ = append(f0(), f2 /* ERROR "multiple-value f2" */ ()...)
+
+ // variadic user-defined function
+ append_(f0())
+ append_(f0(), f0()...)
+ append_(f1())
+ append_(f2 /* ERRORx `cannot use .* in argument` */ ())
+ append_(f2()... /* ERROR "cannot use ..." */ )
+ append_(f0(), f1 /* ERROR "multiple-value f1" */ ())
+ append_(f0(), f2 /* ERROR "multiple-value f2" */ ())
+ append_(f0(), f1 /* ERROR "multiple-value f1" */ ()...)
+ append_(f0(), f2 /* ERROR "multiple-value f2" */ ()...)
+}
+
+// Check that embedding a non-interface type in an interface results in a good error message.
+func issue10979() {
+ type _ interface {
+ int /* ERROR "non-interface type int" */
+ }
+ type T struct{}
+ type _ interface {
+ T /* ERROR "non-interface type T" */
+ }
+ type _ interface {
+ nosuchtype /* ERROR "undefined: nosuchtype" */
+ }
+ type _ interface {
+ fmt.Nosuchtype /* ERROR "undefined: fmt.Nosuchtype" */
+ }
+ type _ interface {
+ nosuchpkg /* ERROR "undefined: nosuchpkg" */ .Nosuchtype
+ }
+ type I interface {
+ I.m /* ERROR "I.m is not a type" */
+ m()
+ }
+}
+
+// issue11347
+// These should not crash.
+var a1, b1 /* ERROR "cycle" */ , c1 /* ERROR "cycle" */ b1 = 0 > 0<<""[""[c1]]>c1
+var a2, b2 /* ERROR "cycle" */ = 0 /* ERROR "assignment mismatch" */ /* ERROR "assignment mismatch" */ > 0<<""[b2]
+var a3, b3 /* ERROR "cycle" */ = int /* ERROR "assignment mismatch" */ /* ERROR "assignment mismatch" */ (1<<""[b3])
+
+// issue10260
+// Check that error messages explain reason for interface assignment failures.
+type (
+ I0 interface{}
+ I1 interface{ foo() }
+ I2 interface{ foo(x int) }
+ T0 struct{}
+ T1 struct{}
+ T2 struct{}
+)
+
+func (*T1) foo() {}
+func (*T2) foo(x int) {}
+
+func issue10260() {
+ var (
+ i0 I0
+ i1 I1
+ i2 I2
+ t0 *T0
+ t1 *T1
+ t2 *T2
+ )
+
+ var x I1
+ x = T1 /* ERRORx `cannot use T1{} .* as I1 value in assignment: T1 does not implement I1 \(method foo has pointer receiver\)` */ {}
+ _ = x /* ERROR "impossible type assertion: x.(T1)\n\tT1 does not implement I1 (method foo has pointer receiver)" */ .(T1)
+
+ T1{}.foo /* ERROR "cannot call pointer method foo on T1" */ ()
+ x.Foo /* ERROR "x.Foo undefined (type I1 has no field or method Foo, but does have foo)" */ ()
+
+ _ = i2 /* ERROR "impossible type assertion: i2.(*T1)\n\t*T1 does not implement I2 (wrong type for method foo)\n\t\thave foo()\n\t\twant foo(int)" */ .(*T1)
+
+ i1 = i0 /* ERRORx `cannot use i0 .* as I1 value in assignment: I0 does not implement I1 \(missing method foo\)` */
+ i1 = t0 /* ERRORx `.* t0 .* as I1 .*: \*T0 does not implement I1 \(missing method foo\)` */
+ i1 = i2 /* ERRORx `.* i2 .* as I1 .*: I2 does not implement I1 \(wrong type for method foo\)\n\t\thave foo\(int\)\n\t\twant foo\(\)` */
+ i1 = t2 /* ERRORx `.* t2 .* as I1 .*: \*T2 does not implement I1 \(wrong type for method foo\)\n\t\thave foo\(int\)\n\t\twant foo\(\)` */
+ i2 = i1 /* ERRORx `.* i1 .* as I2 .*: I1 does not implement I2 \(wrong type for method foo\)\n\t\thave foo\(\)\n\t\twant foo\(int\)` */
+ i2 = t1 /* ERRORx `.* t1 .* as I2 .*: \*T1 does not implement I2 \(wrong type for method foo\)\n\t\thave foo\(\)\n\t\twant foo\(int\)` */
+
+ _ = func() I1 { return i0 /* ERRORx `cannot use i0 .* as I1 value in return statement: I0 does not implement I1 \(missing method foo\)` */ }
+ _ = func() I1 { return t0 /* ERRORx `.* t0 .* as I1 .*: \*T0 does not implement I1 \(missing method foo\)` */ }
+ _ = func() I1 { return i2 /* ERRORx `.* i2 .* as I1 .*: I2 does not implement I1 \(wrong type for method foo\)\n\t\thave foo\(int\)\n\t\twant foo\(\)` */ }
+ _ = func() I1 { return t2 /* ERRORx `.* t2 .* as I1 .*: \*T2 does not implement I1 \(wrong type for method foo\)\n\t\thave foo\(int\)\n\t\twant foo\(\)` */ }
+ _ = func() I2 { return i1 /* ERRORx `.* i1 .* as I2 .*: I1 does not implement I2 \(wrong type for method foo\)\n\t\thave foo\(\)\n\t\twant foo\(int\)` */ }
+ _ = func() I2 { return t1 /* ERRORx `.* t1 .* as I2 .*: \*T1 does not implement I2 \(wrong type for method foo\)\n\t\thave foo\(\)\n\t\twant foo\(int\)` */ }
+
+ // a few more - less exhaustive now
+
+ f := func(I1, I2){}
+ f(i0 /* ERROR "missing method foo" */ , i1 /* ERROR "wrong type for method foo" */ )
+
+ _ = [...]I1{i0 /* ERRORx `cannot use i0 .* as I1 value in array or slice literal: I0 does not implement I1 \(missing method foo\)` */ }
+ _ = [...]I1{i2 /* ERRORx `cannot use i2 .* as I1 value in array or slice literal: I2 does not implement I1 \(wrong type for method foo\)\n\t\thave foo\(int\)\n\t\twant foo\(\)` */ }
+ _ = []I1{i0 /* ERROR "missing method foo" */ }
+ _ = []I1{i2 /* ERROR "wrong type for method foo" */ }
+ _ = map[int]I1{0: i0 /* ERROR "missing method foo" */ }
+ _ = map[int]I1{0: i2 /* ERROR "wrong type for method foo" */ }
+
+ make(chan I1) <- i0 /* ERROR "missing method foo" */
+ make(chan I1) <- i2 /* ERROR "wrong type for method foo" */
+}
+
+// Check that constants representable as integers are in integer form
+// before being used in operations that are only defined on integers.
+func issue14229() {
+ // from the issue
+ const _ = int64(-1<<63) % 1e6
+
+ // related
+ const (
+ a int = 3
+ b = 4.0
+ _ = a / b
+ _ = a % b
+ _ = b / a
+ _ = b % a
+ )
+}
+
+// Check that in an n:1 variable declaration with type and initialization
+// expression, the type is distributed to all variables of the lhs before
+// the initialization expression assignment is checked.
+func issue15755() {
+ // from issue
+ var i interface{}
+ type b bool
+ var x, y b = i.(b)
+ _ = x == y
+
+ // related: we should see an error since the result of f1 is ([]int, int)
+ var u, v []int = f1 /* ERROR "cannot use f1" */ ()
+ _ = u
+ _ = v
+}
+
+// Test that we don't get "declared and not used"
+// errors in the context of invalid/C objects.
+func issue20358() {
+ var F C /* ERROR "undefined" */ .F
+ var A C /* ERROR "undefined" */ .A
+ var S C /* ERROR "undefined" */ .S
+ type T C /* ERROR "undefined" */ .T
+ type P C /* ERROR "undefined" */ .P
+
+ // these variables must be "used" even though
+ // the LHS expressions/types below in which
+ // context they are used are unknown/invalid
+ var f, a, s1, s2, s3, t, p int
+
+ _ = F(f)
+ _ = A[a]
+ _ = S[s1:s2:s3]
+ _ = T{t}
+ _ = P{f: p}
+}
+
+// Test that we don't declare lhs variables in short variable
+// declarations before we type-check function literals on the
+// rhs.
+func issue24026() {
+ f := func() int { f(0) /* must refer to outer f */; return 0 }
+ _ = f
+
+ _ = func() {
+ f := func() { _ = f() /* must refer to outer f */ }
+ _ = f
+ }
+
+ // b and c must not be visible inside function literal
+ a := 0
+ a, b, c := func() (int, int, int) {
+ return a, b /* ERROR "undefined" */ , c /* ERROR "undefined" */
+ }()
+ _, _ = b, c
+}
+
+func f(int) {} // for issue24026
+
+// Test that we don't report a "missing return statement" error
+// (due to incorrect context when type-checking interfaces).
+func issue24140(x interface{}) int {
+ switch x.(type) {
+ case interface{}:
+ return 0
+ default:
+ panic(0)
+ }
+}
+
+// Test that we don't crash when the 'if' condition is missing.
+func issue25438() {
+ if { /* ERROR "missing condition" */ }
+ if x := 0; /* ERROR "missing condition" */ { _ = x }
+ if
+ { /* ERROR "missing condition" */ }
+}
+
+// Test that we can embed alias type names in interfaces.
+type issue25301 interface {
+ E
+}
+
+type E = interface {
+ m()
+}
+
+// Test case from issue.
+// cmd/compile reports a cycle as well.
+type issue25301b /* ERROR "invalid recursive type" */ = interface {
+ m() interface{ issue25301b }
+}
+
+type issue25301c interface {
+ notE // ERROR "non-interface type struct{}"
+}
+
+type notE = struct{}
+
+// Test that method declarations don't introduce artificial cycles
+// (issue #26124).
+const CC TT = 1
+type TT int
+func (TT) MM() [CC]TT
+
+// Reduced test case from issue #26124.
+const preloadLimit LNumber = 128
+type LNumber float64
+func (LNumber) assertFunction() *LFunction
+type LFunction struct {
+ GFunction LGFunction
+}
+type LGFunction func(*LState)
+type LState struct {
+ reg *registry
+}
+type registry struct {
+ alloc *allocator
+}
+type allocator struct {
+ _ [int(preloadLimit)]int
+}
+
+// Test that we don't crash when type-checking composite literals
+// containing errors in the type.
+var issue27346 = [][n /* ERROR "undefined" */ ]int{
+ 0: {},
+}
+
+var issue22467 = map[int][... /* ERROR "invalid use of [...] array" */ ]int{0: {}}
+
+// Test that invalid use of ... in parameter lists is recognized
+// (issue #28281).
+func issue28281a(int, int, ...int)
+func issue28281b(a, b int, c ...int)
+func issue28281c(a, b, c ... /* ERROR "can only use ... with final parameter" */ int)
+func issue28281d(... /* ERROR "can only use ... with final parameter" */ int, int)
+func issue28281e(a, b, c ... /* ERROR "can only use ... with final parameter" */ int, d int)
+func issue28281f(... /* ERROR "can only use ... with final parameter" */ int, ... /* ERROR "can only use ... with final parameter" */ int, int)
+func (... /* ERROR "can only use ... with final parameter" */ TT) f()
+func issue28281g() (... /* ERROR "can only use ... with final parameter" */ TT)
+
+// Issue #26234: Make various field/method lookup errors easier to read by matching cmd/compile's output
+func issue26234a(f *syn.Prog) {
+ // The error message below should refer to the actual package name (syntax)
+ // not the local package name (syn).
+ f.foo /* ERROR "f.foo undefined (type *syntax.Prog has no field or method foo)" */
+}
+
+type T struct {
+ x int
+ E1
+ E2
+}
+
+type E1 struct{ f int }
+type E2 struct{ f int }
+
+func issue26234b(x T) {
+ _ = x.f /* ERROR "ambiguous selector x.f" */
+}
+
+func issue26234c() {
+ T.x /* ERROR "T.x undefined (type T has no method x)" */ ()
+}
+
+func issue35895() {
+ // T is defined in this package, don't qualify its name with the package name.
+ var _ T = 0 // ERROR "cannot use 0 (untyped int constant) as T"
+
+ // There is only one package with name syntax imported, only use the (global) package name in error messages.
+ var _ *syn.Prog = 0 // ERROR "cannot use 0 (untyped int constant) as *syntax.Prog"
+
+ // Because both t1 and t2 have the same global package name (template),
+ // qualify packages with full path name in this case.
+ var _ t1.Template = t2 /* ERRORx `cannot use .* \(value of type .html/template.\.Template\) as .text/template.\.Template` */ .Template{}
+}
+
+func issue42989(s uint) {
+ var m map[int]string
+ delete(m, 1<<s)
+ delete(m, 1.<<s)
+}
diff --git a/src/internal/types/testdata/check/issues1.go b/src/internal/types/testdata/check/issues1.go
new file mode 100644
index 0000000..72c6cf7
--- /dev/null
+++ b/src/internal/types/testdata/check/issues1.go
@@ -0,0 +1,250 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains regression tests for bugs found.
+
+package p
+
+import "io"
+import "context"
+
+func eql[T comparable](x, y T) bool {
+ return x == y
+}
+
+func _[X comparable, Y interface{comparable; m()}]() {
+ var x X
+ var y Y
+ eql(x, y /* ERROR "does not match" */ ) // interfaces of different types
+ eql(x, x)
+ eql(y, y)
+ eql(y, nil /* ERROR "cannot use nil as Y value in argument to eql" */ )
+ eql[io.Reader](nil, nil)
+}
+
+// If we have a receiver of pointer-to-type-parameter type (below: *T),
+// we don't have any methods, just as for interfaces.
+type C[T any] interface {
+ m()
+}
+
+// using type bound C
+func _[T C[T]](x *T) {
+ x.m /* ERROR "x.m undefined" */ ()
+}
+
+// using an interface literal as bound
+func _[T interface{ m() }](x *T) {
+ x.m /* ERROR "x.m undefined" */ ()
+}
+
+func f2[_ interface{ m1(); m2() }]() {}
+
+type T struct{}
+func (T) m1()
+func (*T) m2()
+
+func _() {
+ f2[T /* ERROR "m2 has pointer receiver" */ ]()
+ f2[*T]()
+}
+
+// When a type parameter is used as an argument to instantiate a parameterized
+// type, the type argument's type set must be a subset of the instantiated type
+// parameter's type set.
+type T1[P interface{~uint}] struct{}
+
+func _[P any]() {
+ _ = T1[P /* ERROR "P does not satisfy interface{~uint}" */ ]{}
+}
+
+// This is the original (simplified) program causing the same issue.
+type Unsigned interface {
+ ~uint
+}
+
+type T2[U Unsigned] struct {
+ s U
+}
+
+func (u T2[U]) Add1() U {
+ return u.s + 1
+}
+
+func NewT2[U any]() T2[U /* ERROR "U does not satisfy Unsigned" */ ] {
+ return T2[U /* ERROR "U does not satisfy Unsigned" */ ]{}
+}
+
+func _() {
+ u := NewT2[string]()
+ _ = u.Add1()
+}
+
+// When we encounter an instantiated type such as Elem[T] we must
+// not "expand" the instantiation when the type to be instantiated
+// (Elem in this case) is not yet fully set up.
+type Elem[T any] struct {
+ next *Elem[T]
+ list *List[T]
+}
+
+type List[T any] struct {
+ root Elem[T]
+}
+
+func (l *List[T]) Init() {
+ l.root.next = &l.root
+}
+
+// This is the original program causing the same issue.
+type Element2[TElem any] struct {
+ next, prev *Element2[TElem]
+ list *List2[TElem]
+ Value TElem
+}
+
+type List2[TElem any] struct {
+ root Element2[TElem]
+ len int
+}
+
+func (l *List2[TElem]) Init() *List2[TElem] {
+ l.root.next = &l.root
+ l.root.prev = &l.root
+ l.len = 0
+ return l
+}
+
+// Self-recursive instantiations must work correctly.
+type A[P any] struct { _ *A[P] }
+
+type AB[P any] struct { _ *BA[P] }
+type BA[P any] struct { _ *AB[P] }
+
+// And a variation that also caused a problem with an
+// unresolved underlying type.
+type Element3[TElem any] struct {
+ next, prev *Element3[TElem]
+ list *List3[TElem]
+ Value TElem
+}
+
+func (e *Element3[TElem]) Next() *Element3[TElem] {
+ if p := e.next; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+type List3[TElem any] struct {
+ root Element3[TElem]
+ len int
+}
+
+// Infinite generic type declarations must lead to an error.
+type inf1[T any] struct{ _ inf1 /* ERROR "invalid recursive type" */ [T] }
+type inf2[T any] struct{ inf2 /* ERROR "invalid recursive type" */ [T] }
+
+// The implementation of conversions T(x) between integers and floating-point
+// numbers checks that both T and x have either integer or floating-point
+// type. When the type of T or x is a type parameter, the respective simple
+// predicate disjunction in the implementation was wrong because if a type set
+// contains both an integer and a floating-point type, the type parameter is
+// neither an integer nor a floating-point number.
+func convert[T1, T2 interface{~int | ~uint | ~float32}](v T1) T2 {
+ return T2(v)
+}
+
+func _() {
+ convert[int, uint](5)
+}
+
+// When testing binary operators, for +, the operand types must either be
+// both numeric, or both strings. The implementation had the same problem
+// with this check as the conversion issue above (issue #39623).
+
+func issue39623[T interface{~int | ~string}](x, y T) T {
+ return x + y
+}
+
+// Simplified, from https://go2goplay.golang.org/p/efS6x6s-9NI:
+func Sum[T interface{~int | ~string}](s []T) (sum T) {
+ for _, v := range s {
+ sum += v
+ }
+ return
+}
+
+// Assignability of an unnamed pointer type to a type parameter that
+// has a matching underlying type.
+func _[T interface{}, PT interface{~*T}] (x T) PT {
+ return &x
+}
+
+// Indexing of type parameters containing type parameters in their constraint terms:
+func at[T interface{ ~[]E }, E interface{}](x T, i int) E {
+ return x[i]
+}
+
+// Conversion of a local type to a type parameter.
+func _[T interface{~int}](x T) {
+ type myint int
+ var _ int = int(x)
+ var _ T = 42
+ var _ T = T(myint(42))
+}
+
+// Indexing a type parameter with an array type bound checks length.
+// (Example by mdempsky@.)
+func _[T interface { ~[10]int }](x T) {
+ _ = x[9] // ok
+ _ = x[20 /* ERROR "out of bounds" */ ]
+}
+
+// Pointer indirection of a type parameter.
+func _[T interface{ ~*int }](p T) int {
+ return *p
+}
+
+// Channel sends and receives on type parameters.
+func _[T interface{ ~chan int }](ch T) int {
+ ch <- 0
+ return <- ch
+}
+
+// Calling of a generic variable.
+func _[T interface{ ~func() }](f T) {
+ f()
+ go f()
+}
+
+type F1 func()
+type F2 func()
+func _[T interface{ func()|F1|F2 }](f T) {
+ f()
+ go f()
+}
+
+// We must compare against the (possibly underlying) types of term list
+// elements when checking if a constraint is satisfied by a type.
+// The underlying type of each term must be computed after the
+// interface has been instantiated as its constraint may contain
+// a type parameter that was substituted with a defined type.
+// Test case from an (originally) failing example.
+
+type sliceOf[E any] interface{ ~[]E }
+
+func append[T interface{}, S sliceOf[T], T2 interface{}](s S, t ...T2) S { panic(0) }
+
+var f func()
+var cancelSlice []context.CancelFunc
+var _ = append[context.CancelFunc, []context.CancelFunc, context.CancelFunc](cancelSlice, f)
+
+// A generic function must be instantiated with a type, not a value.
+
+func g[T any](T) T { panic(0) }
+
+var _ = g[int]
+var _ = g[nil /* ERROR "is not a type" */ ]
+var _ = g(0)
diff --git a/src/internal/types/testdata/check/labels.go b/src/internal/types/testdata/check/labels.go
new file mode 100644
index 0000000..5948952
--- /dev/null
+++ b/src/internal/types/testdata/check/labels.go
@@ -0,0 +1,207 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a modified concatenation of the files
+// $GOROOT/test/label.go and $GOROOT/test/label1.go.
+
+package labels
+
+var x int
+
+func f0() {
+L1 /* ERROR "label L1 declared and not used" */ :
+ for {
+ }
+L2 /* ERROR "label L2 declared and not used" */ :
+ select {
+ }
+L3 /* ERROR "label L3 declared and not used" */ :
+ switch {
+ }
+L4 /* ERROR "label L4 declared and not used" */ :
+ if true {
+ }
+L5 /* ERROR "label L5 declared and not used" */ :
+ f0()
+L6:
+ f0()
+L6 /* ERROR "label L6 already declared" */ :
+ f0()
+ if x == 20 {
+ goto L6
+ }
+
+L7:
+ for {
+ break L7
+ break L8 /* ERROR "invalid break label L8" */
+ }
+
+// A label must be directly associated with a switch, select, or
+// for statement; it cannot be the label of a labeled statement.
+
+L7a /* ERROR "declared and not used" */ : L7b:
+ for {
+ break L7a /* ERROR "invalid break label L7a" */
+ continue L7a /* ERROR "invalid continue label L7a" */
+ continue L7b
+ }
+
+L8:
+ for {
+ if x == 21 {
+ continue L8
+ continue L7 /* ERROR "invalid continue label L7" */
+ }
+ }
+
+L9:
+ switch {
+ case true:
+ break L9
+ defalt /* ERROR "label defalt declared and not used" */ :
+ }
+
+L10:
+ select {
+ default:
+ break L10
+ break L9 /* ERROR "invalid break label L9" */
+ }
+
+ goto L10a
+L10a: L10b:
+ select {
+ default:
+ break L10a /* ERROR "invalid break label L10a" */
+ break L10b
+ continue L10b /* ERROR "invalid continue label L10b" */
+ }
+}
+
+func f1() {
+L1:
+ for {
+ if x == 0 {
+ break L1
+ }
+ if x == 1 {
+ continue L1
+ }
+ goto L1
+ }
+
+L2:
+ select {
+ default:
+ if x == 0 {
+ break L2
+ }
+ if x == 1 {
+ continue L2 /* ERROR "invalid continue label L2" */
+ }
+ goto L2
+ }
+
+L3:
+ switch {
+ case x > 10:
+ if x == 11 {
+ break L3
+ }
+ if x == 12 {
+ continue L3 /* ERROR "invalid continue label L3" */
+ }
+ goto L3
+ }
+
+L4:
+ if true {
+ if x == 13 {
+ break L4 /* ERROR "invalid break label L4" */
+ }
+ if x == 14 {
+ continue L4 /* ERROR "invalid continue label L4" */
+ }
+ if x == 15 {
+ goto L4
+ }
+ }
+
+L5:
+ f1()
+ if x == 16 {
+ break L5 /* ERROR "invalid break label L5" */
+ }
+ if x == 17 {
+ continue L5 /* ERROR "invalid continue label L5" */
+ }
+ if x == 18 {
+ goto L5
+ }
+
+ for {
+ if x == 19 {
+ break L1 /* ERROR "invalid break label L1" */
+ }
+ if x == 20 {
+ continue L1 /* ERROR "invalid continue label L1" */
+ }
+ if x == 21 {
+ goto L1
+ }
+ }
+}
+
+// Additional tests not in the original files.
+
+func f2() {
+L1 /* ERROR "label L1 declared and not used" */ :
+ if x == 0 {
+ for {
+ continue L1 /* ERROR "invalid continue label L1" */
+ }
+ }
+}
+
+func f3() {
+L1:
+L2:
+L3:
+ for {
+ break L1 /* ERROR "invalid break label L1" */
+ break L2 /* ERROR "invalid break label L2" */
+ break L3
+ continue L1 /* ERROR "invalid continue label L1" */
+ continue L2 /* ERROR "invalid continue label L2" */
+ continue L3
+ goto L1
+ goto L2
+ goto L3
+ }
+}
+
+// Blank labels are never declared.
+
+func f4() {
+_:
+_: // multiple blank labels are ok
+ goto _ /* ERROR "label _ not declared" */
+}
+
+func f5() {
+_:
+ for {
+ break _ /* ERROR "invalid break label _" */
+ continue _ /* ERROR "invalid continue label _" */
+ }
+}
+
+func f6() {
+_:
+ switch {
+ default:
+ break _ /* ERROR "invalid break label _" */
+ }
+}
diff --git a/src/internal/types/testdata/check/linalg.go b/src/internal/types/testdata/check/linalg.go
new file mode 100644
index 0000000..f02e773
--- /dev/null
+++ b/src/internal/types/testdata/check/linalg.go
@@ -0,0 +1,82 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package linalg
+
+// Numeric is a type bound that matches any numeric type.
+// It would likely be in a constraints package in the standard library.
+type Numeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
+}
+
+func DotProduct[T Numeric](s1, s2 []T) T {
+ if len(s1) != len(s2) {
+ panic("DotProduct: slices of unequal length")
+ }
+ var r T
+ for i := range s1 {
+ r += s1[i] * s2[i]
+ }
+ return r
+}
+
+// NumericAbs matches numeric types with an Abs method.
+type NumericAbs[T any] interface {
+ Numeric
+
+ Abs() T
+}
+
+// AbsDifference computes the absolute value of the difference of
+// a and b, where the absolute value is determined by the Abs method.
+func AbsDifference[T NumericAbs[T]](a, b T) T {
+ d := a - b
+ return d.Abs()
+}
+
+// OrderedNumeric is a type bound that matches numeric types that support the < operator.
+type OrderedNumeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
+}
+
+// Complex is a type bound that matches the two complex types, which do not have a < operator.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// // OrderedAbs is a helper type that defines an Abs method for
+// // ordered numeric types.
+// type OrderedAbs[T OrderedNumeric] T
+//
+// func (a OrderedAbs[T]) Abs() OrderedAbs[T] {
+// if a < 0 {
+// return -a
+// }
+// return a
+// }
+//
+// // ComplexAbs is a helper type that defines an Abs method for
+// // complex types.
+// type ComplexAbs[T Complex] T
+//
+// func (a ComplexAbs[T]) Abs() ComplexAbs[T] {
+// r := float64(real(a))
+// i := float64(imag(a))
+// d := math.Sqrt(r * r + i * i)
+// return ComplexAbs[T](complex(d, 0))
+// }
+//
+// func OrderedAbsDifference[T OrderedNumeric](a, b T) T {
+// return T(AbsDifference(OrderedAbs[T](a), OrderedAbs[T](b)))
+// }
+//
+// func ComplexAbsDifference[T Complex](a, b T) T {
+// return T(AbsDifference(ComplexAbs[T](a), ComplexAbs[T](b)))
+// }
diff --git a/src/internal/types/testdata/check/literals.go b/src/internal/types/testdata/check/literals.go
new file mode 100644
index 0000000..494a465
--- /dev/null
+++ b/src/internal/types/testdata/check/literals.go
@@ -0,0 +1,111 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file tests various representations of literals
+// and compares them with literals or constant expressions
+// of equal values.
+
+package literals
+
+func _() {
+ // 0-octals
+ assert(0_123 == 0123)
+ assert(0123_456 == 0123456)
+
+ // decimals
+ assert(1_234 == 1234)
+ assert(1_234_567 == 1234567)
+
+ // hexadecimals
+ assert(0X_0 == 0)
+ assert(0X_1234 == 0x1234)
+ assert(0X_CAFE_f00d == 0xcafef00d)
+
+ // octals
+ assert(0o0 == 0)
+ assert(0o1234 == 01234)
+ assert(0o01234567 == 01234567)
+
+ assert(0O0 == 0)
+ assert(0O1234 == 01234)
+ assert(0O01234567 == 01234567)
+
+ assert(0o_0 == 0)
+ assert(0o_1234 == 01234)
+ assert(0o0123_4567 == 01234567)
+
+ assert(0O_0 == 0)
+ assert(0O_1234 == 01234)
+ assert(0O0123_4567 == 01234567)
+
+ // binaries
+ assert(0b0 == 0)
+ assert(0b1011 == 0xb)
+ assert(0b00101101 == 0x2d)
+
+ assert(0B0 == 0)
+ assert(0B1011 == 0xb)
+ assert(0B00101101 == 0x2d)
+
+ assert(0b_0 == 0)
+ assert(0b10_11 == 0xb)
+ assert(0b_0010_1101 == 0x2d)
+
+ // decimal floats
+ assert(1_2_3. == 123.)
+ assert(0_123. == 123.)
+
+ assert(0_0e0 == 0.)
+ assert(1_2_3e0 == 123.)
+ assert(0_123e0 == 123.)
+
+ assert(0e-0_0 == 0.)
+ assert(1_2_3E+0 == 123.)
+ assert(0123E1_2_3 == 123e123)
+
+ assert(0.e+1 == 0.)
+ assert(123.E-1_0 == 123e-10)
+ assert(01_23.e123 == 123e123)
+
+ assert(.0e-1 == .0)
+ assert(.123E+10 == .123e10)
+ assert(.0123E123 == .0123e123)
+
+ assert(1_2_3.123 == 123.123)
+ assert(0123.01_23 == 123.0123)
+
+ // hexadecimal floats
+ assert(0x0.p+0 == 0.)
+ assert(0Xdeadcafe.p-10 == 0xdeadcafe/1024.0)
+ assert(0x1234.P84 == 0x1234000000000000000000000)
+
+ assert(0x.1p-0 == 1./16)
+ assert(0X.deadcafep4 == 1.0*0xdeadcafe/0x10000000)
+ assert(0x.1234P+12 == 1.0*0x1234/0x10)
+
+ assert(0x0p0 == 0.)
+ assert(0Xdeadcafep+1 == 0x1bd5b95fc)
+ assert(0x1234P-10 == 0x1234/1024.0)
+
+ assert(0x0.0p0 == 0.)
+ assert(0Xdead.cafep+1 == 1.0*0x1bd5b95fc/0x10000)
+ assert(0x12.34P-10 == 1.0*0x1234/0x40000)
+
+ assert(0Xdead_cafep+1 == 0xdeadcafep+1)
+ assert(0x_1234P-10 == 0x1234p-10)
+
+ assert(0X_dead_cafe.p-10 == 0xdeadcafe.p-10)
+ assert(0x12_34.P1_2_3 == 0x1234.p123)
+
+ assert(1_234i == 1234i)
+ assert(1_234_567i == 1234567i)
+
+ assert(0.i == 0i)
+ assert(123.i == 123i)
+ assert(0123.i == 123i)
+
+ assert(0.e+1i == 0i)
+ assert(123.E-1_0i == 123e-10i)
+ assert(01_23.e123i == 123e123i)
+}
diff --git a/src/internal/types/testdata/check/main0.go b/src/internal/types/testdata/check/main0.go
new file mode 100644
index 0000000..95a8ed1
--- /dev/null
+++ b/src/internal/types/testdata/check/main0.go
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main()
+func main /* ERROR "no arguments and no return values" */ /* ERROR "redeclared" */ (int)
+func main /* ERROR "no arguments and no return values" */ /* ERROR "redeclared" */ () int
diff --git a/src/internal/types/testdata/check/main1.go b/src/internal/types/testdata/check/main1.go
new file mode 100644
index 0000000..fb567a0
--- /dev/null
+++ b/src/internal/types/testdata/check/main1.go
@@ -0,0 +1,7 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main[T /* ERROR "func main must have no type parameters" */ any]() {}
diff --git a/src/internal/types/testdata/check/map0.go b/src/internal/types/testdata/check/map0.go
new file mode 100644
index 0000000..21c989c
--- /dev/null
+++ b/src/internal/types/testdata/check/map0.go
@@ -0,0 +1,113 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package orderedmap provides an ordered map, implemented as a binary tree.
+package orderedmap
+
+// TODO(gri) fix imports for tests
+import "chans" // ERROR "could not import"
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+ root *node[K, V]
+ compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+ key K
+ val V
+ left, right *node[K, V]
+}
+
+// New returns a new map.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+ return &Map[K, V]{compare: compare}
+}
+
+// find looks up key in the map, and returns either a pointer
+// to the node holding key, or a pointer to the location where
+// such a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+ pn := &m.root
+ for *pn != nil {
+ switch cmp := m.compare(key, (*pn).key); {
+ case cmp < 0:
+ pn = &(*pn).left
+ case cmp > 0:
+ pn = &(*pn).right
+ default:
+ return pn
+ }
+ }
+ return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Returns true if this is a new key, false if already present.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+ pn := m.find(key)
+ if *pn != nil {
+ (*pn).val = val
+ return false
+ }
+ *pn = &node[K, V]{key: key, val: val}
+ return true
+}
+
+// Find returns the value associated with a key, or zero if not present.
+// The found result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+ pn := m.find(key)
+ if *pn == nil {
+ var zero V // see the discussion of zero values, above
+ return zero, false
+ }
+ return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used when iterating.
+type keyValue[K, V any] struct {
+ key K
+ val V
+}
+
+// InOrder returns an iterator that does an in-order traversal of the map.
+func (m *Map[K, V]) InOrder() *Iterator[K, V] {
+ sender, receiver := chans.Ranger[keyValue[K, V]]()
+ var f func(*node[K, V]) bool
+ f = func(n *node[K, V]) bool {
+ if n == nil {
+ return true
+ }
+ // Stop sending values if sender.Send returns false,
+ // meaning that nothing is listening at the receiver end.
+ return f(n.left) &&
+ sender.Send(keyValue[K, V]{n.key, n.val}) &&
+ f(n.right)
+ }
+ go func() {
+ f(m.root)
+ sender.Close()
+ }()
+ return &Iterator[K, V]{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+ r *chans.Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean indicating
+// whether they are valid or whether we have reached the end.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+ keyval, ok := it.r.Next()
+ if !ok {
+ var zerok K
+ var zerov V
+ return zerok, zerov, false
+ }
+ return keyval.key, keyval.val, true
+}
diff --git a/src/internal/types/testdata/check/map1.go b/src/internal/types/testdata/check/map1.go
new file mode 100644
index 0000000..e13bf33
--- /dev/null
+++ b/src/internal/types/testdata/check/map1.go
@@ -0,0 +1,146 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is like map.go2, but instead of importing chans, it contains
+// the necessary functionality at the end of the file.
+
+// Package orderedmap provides an ordered map, implemented as a binary tree.
+package orderedmap
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+ root *node[K, V]
+ compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+ key K
+ val V
+ left, right *node[K, V]
+}
+
+// New returns a new map.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+ return &Map[K, V]{compare: compare}
+}
+
+// find looks up key in the map, and returns either a pointer
+// to the node holding key, or a pointer to the location where
+// such a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+ pn := &m.root
+ for *pn != nil {
+ switch cmp := m.compare(key, (*pn).key); {
+ case cmp < 0:
+ pn = &(*pn).left
+ case cmp > 0:
+ pn = &(*pn).right
+ default:
+ return pn
+ }
+ }
+ return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Returns true if this is a new key, false if already present.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+ pn := m.find(key)
+ if *pn != nil {
+ (*pn).val = val
+ return false
+ }
+ *pn = &node[K, V]{key: key, val: val}
+ return true
+}
+
+// Find returns the value associated with a key, or zero if not present.
+// The found result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+ pn := m.find(key)
+ if *pn == nil {
+ var zero V // see the discussion of zero values, above
+ return zero, false
+ }
+ return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used when iterating.
+type keyValue[K, V any] struct {
+ key K
+ val V
+}
+
+// InOrder returns an iterator that does an in-order traversal of the map.
+func (m *Map[K, V]) InOrder() *Iterator[K, V] {
+ sender, receiver := chans_Ranger[keyValue[K, V]]()
+ var f func(*node[K, V]) bool
+ f = func(n *node[K, V]) bool {
+ if n == nil {
+ return true
+ }
+ // Stop sending values if sender.Send returns false,
+ // meaning that nothing is listening at the receiver end.
+ return f(n.left) &&
+ sender.Send(keyValue[K, V]{n.key, n.val}) &&
+ f(n.right)
+ }
+ go func() {
+ f(m.root)
+ sender.Close()
+ }()
+ return &Iterator[K, V]{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+ r *chans_Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean indicating
+// whether they are valid or whether we have reached the end.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+ keyval, ok := it.r.Next()
+ if !ok {
+ var zerok K
+ var zerov V
+ return zerok, zerov, false
+ }
+ return keyval.key, keyval.val, true
+}
+
+// chans
+
+func chans_Ranger[T any]() (*chans_Sender[T], *chans_Receiver[T]) { panic(0) }
+
+// A sender is used to send values to a Receiver.
+type chans_Sender[T any] struct {
+ values chan<- T
+ done <-chan bool
+}
+
+func (s *chans_Sender[T]) Send(v T) bool {
+ select {
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+func (s *chans_Sender[T]) Close() {
+ close(s.values)
+}
+
+type chans_Receiver[T any] struct {
+ values <-chan T
+ done chan<- bool
+}
+
+func (r *chans_Receiver[T]) Next() (T, bool) {
+ v, ok := <-r.values
+ return v, ok
+}
diff --git a/src/internal/types/testdata/check/methodsets.go b/src/internal/types/testdata/check/methodsets.go
new file mode 100644
index 0000000..5b3e4a2
--- /dev/null
+++ b/src/internal/types/testdata/check/methodsets.go
@@ -0,0 +1,214 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package methodsets
+
+type T0 struct {}
+
+func (T0) v0() {}
+func (*T0) p0() {}
+
+type T1 struct {} // like T0 with different method names
+
+func (T1) v1() {}
+func (*T1) p1() {}
+
+type T2 interface {
+ v2()
+ p2()
+}
+
+type T3 struct {
+ T0
+ *T1
+ T2
+}
+
+// Method expressions
+func _() {
+ var (
+ _ func(T0) = T0.v0
+ _ = T0.p0 /* ERROR "invalid method expression T0.p0 (needs pointer receiver (*T0).p0)" */
+
+ _ func (*T0) = (*T0).v0
+ _ func (*T0) = (*T0).p0
+
+ // T1 is like T0
+
+ _ func(T2) = T2.v2
+ _ func(T2) = T2.p2
+
+ _ func(T3) = T3.v0
+ _ func(T3) = T3.p0 /* ERROR "invalid method expression T3.p0 (needs pointer receiver (*T3).p0)" */
+ _ func(T3) = T3.v1
+ _ func(T3) = T3.p1
+ _ func(T3) = T3.v2
+ _ func(T3) = T3.p2
+
+ _ func(*T3) = (*T3).v0
+ _ func(*T3) = (*T3).p0
+ _ func(*T3) = (*T3).v1
+ _ func(*T3) = (*T3).p1
+ _ func(*T3) = (*T3).v2
+ _ func(*T3) = (*T3).p2
+ )
+}
+
+// Method values with addressable receivers
+func _() {
+ var (
+ v0 T0
+ _ func() = v0.v0
+ _ func() = v0.p0
+ )
+
+ var (
+ p0 *T0
+ _ func() = p0.v0
+ _ func() = p0.p0
+ )
+
+ // T1 is like T0
+
+ var (
+ v2 T2
+ _ func() = v2.v2
+ _ func() = v2.p2
+ )
+
+ var (
+ v4 T3
+ _ func() = v4.v0
+ _ func() = v4.p0
+ _ func() = v4.v1
+ _ func() = v4.p1
+ _ func() = v4.v2
+ _ func() = v4.p2
+ )
+
+ var (
+ p4 *T3
+ _ func() = p4.v0
+ _ func() = p4.p0
+ _ func() = p4.v1
+ _ func() = p4.p1
+ _ func() = p4.v2
+ _ func() = p4.p2
+ )
+}
+
+// Method calls with addressable receivers
+func _() {
+ var v0 T0
+ v0.v0()
+ v0.p0()
+
+ var p0 *T0
+ p0.v0()
+ p0.p0()
+
+ // T1 is like T0
+
+ var v2 T2
+ v2.v2()
+ v2.p2()
+
+ var v4 T3
+ v4.v0()
+ v4.p0()
+ v4.v1()
+ v4.p1()
+ v4.v2()
+ v4.p2()
+
+ var p4 *T3
+ p4.v0()
+ p4.p0()
+ p4.v1()
+ p4.p1()
+ p4.v2()
+ p4.p2()
+}
+
+// Method values with value receivers
+func _() {
+ var (
+ _ func() = T0{}.v0
+ _ func() = T0{}.p0 /* ERROR "cannot call pointer method p0 on T0" */
+
+ _ func() = (&T0{}).v0
+ _ func() = (&T0{}).p0
+
+ // T1 is like T0
+
+ // no values for T2
+
+ _ func() = T3{}.v0
+ _ func() = T3{}.p0 /* ERROR "cannot call pointer method p0 on T3" */
+ _ func() = T3{}.v1
+ _ func() = T3{}.p1
+ _ func() = T3{}.v2
+ _ func() = T3{}.p2
+
+ _ func() = (&T3{}).v0
+ _ func() = (&T3{}).p0
+ _ func() = (&T3{}).v1
+ _ func() = (&T3{}).p1
+ _ func() = (&T3{}).v2
+ _ func() = (&T3{}).p2
+ )
+}
+
+// Method calls with value receivers
+func _() {
+ T0{}.v0()
+ T0{}.p0 /* ERROR "cannot call pointer method p0 on T0" */ ()
+
+ (&T0{}).v0()
+ (&T0{}).p0()
+
+ // T1 is like T0
+
+ // no values for T2
+
+ T3{}.v0()
+ T3{}.p0 /* ERROR "cannot call pointer method p0 on T3" */ ()
+ T3{}.v1()
+ T3{}.p1()
+ T3{}.v2()
+ T3{}.p2()
+
+ (&T3{}).v0()
+ (&T3{}).p0()
+ (&T3{}).v1()
+ (&T3{}).p1()
+ (&T3{}).v2()
+ (&T3{}).p2()
+}
+
+// *T has no methods if T is an interface type
+func issue5918() {
+ var (
+ err error
+ _ = err.Error()
+ _ func() string = err.Error
+ _ func(error) string = error.Error
+
+ perr = &err
+ _ = perr.Error /* ERROR "type *error is pointer to interface, not interface" */ ()
+ _ func() string = perr.Error /* ERROR "type *error is pointer to interface, not interface" */
+ _ func(*error) string = (*error).Error /* ERROR "type *error is pointer to interface, not interface" */
+ )
+
+ type T *interface{ m() int }
+ var (
+ x T
+ _ = (*x).m()
+ _ = (*x).m
+
+ _ = x.m /* ERROR "type T is pointer to interface, not interface" */ ()
+ _ = x.m /* ERROR "type T is pointer to interface, not interface" */
+ _ = T.m /* ERROR "type T is pointer to interface, not interface" */
+ )
+}
diff --git a/src/internal/types/testdata/check/shifts.go b/src/internal/types/testdata/check/shifts.go
new file mode 100644
index 0000000..6ae3985
--- /dev/null
+++ b/src/internal/types/testdata/check/shifts.go
@@ -0,0 +1,399 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package shifts
+
+func shifts0() {
+ // basic constant shifts
+ const (
+ s = 10
+ _ = 0<<0
+ _ = 1<<s
+ _ = 1<<- /* ERROR "negative shift count" */ 1
+ // For the test below we may decide to convert to int
+ // rather than uint and then report a negative shift
+ // count instead, which might be a better error. The
+ // (minor) difference is that this would restrict the
+ // shift count range by half (from all uint values to
+ // the positive int values).
+ // This depends on the exact spec wording which is not
+ // done yet.
+ // TODO(gri) revisit and adjust when spec change is done
+ _ = 1<<- /* ERROR "negative shift count" */ 1.0
+ _ = 1<<1075 /* ERROR "invalid shift" */
+ _ = 2.0<<1
+ _ = 1<<1.0
+ _ = 1<<(1+0i)
+
+ _ int = 2<<s
+ _ float32 = 2<<s
+ _ complex64 = 2<<s
+
+ _ int = 2.0<<s
+ _ float32 = 2.0<<s
+ _ complex64 = 2.0<<s
+
+ _ int = 'a'<<s
+ _ float32 = 'a'<<s
+ _ complex64 = 'a'<<s
+ )
+}
+
+func shifts1() {
+ // basic non-constant shifts
+ var (
+ i int
+ u uint
+
+ _ = 1<<0
+ _ = 1<<i
+ _ = 1<<u
+ _ = 1<<"foo" /* ERROR "cannot convert" */
+ _ = i<<0
+ _ = i<<- /* ERROR "negative shift count" */ 1
+ _ = i<<1.0
+ _ = 1<<(1+0i)
+ _ = 1 /* ERROR "overflows" */ <<100
+
+ _ uint = 1 << 0
+ _ uint = 1 << u
+ _ float32 = 1 /* ERROR "must be integer" */ << u
+
+ // issue #14822
+ _ = 1<<( /* ERROR "overflows uint" */ 1<<64)
+ _ = 1<<( /* ERROR "invalid shift count" */ 1<<64-1)
+
+ // issue #43697
+ _ = u<<( /* ERROR "overflows uint" */ 1<<64)
+ _ = u<<(1<<64-1)
+ )
+}
+
+func shifts2() {
+ // from the spec
+ var (
+ s uint = 33
+ i = 1<<s // 1 has type int
+ j int32 = 1<<s // 1 has type int32; j == 0
+ k = uint64(1<<s) // 1 has type uint64; k == 1<<33
+ m int = 1.0<<s // 1.0 has type int
+ n = 1.0<<s != i // 1.0 has type int; n == false if ints are 32bits in size
+ o = 1<<s == 2<<s // 1 and 2 have type int; o == true if ints are 32bits in size
+ p = 1<<s == 1<<33 // illegal if ints are 32bits in size: 1 has type int, but 1<<33 overflows int
+ u = 1.0 /* ERROR "must be integer" */ <<s // illegal: 1.0 has type float64, cannot shift
+ u1 = 1.0 /* ERROR "must be integer" */ <<s != 0 // illegal: 1.0 has type float64, cannot shift
+ u2 = 1 /* ERROR "must be integer" */ <<s != 1.0 // illegal: 1 has type float64, cannot shift
+ v float32 = 1 /* ERROR "must be integer" */ <<s // illegal: 1 has type float32, cannot shift
+ w int64 = 1.0<<33 // 1.0<<33 is a constant shift expression
+ )
+ _, _, _, _, _, _, _, _, _, _, _, _ = i, j, k, m, n, o, p, u, u1, u2, v, w
+}
+
+func shifts3(a int16, b float32) {
+ // random tests
+ var (
+ s uint = 11
+ u = 1 /* ERROR "must be integer" */ <<s + 1.0
+ v complex128 = 1 /* ERROR "must be integer" */ << s + 1.0 /* ERROR "must be integer" */ << s + 1
+ )
+ x := 1.0 /* ERROR "must be integer" */ <<s + 1
+ shifts3(1.0 << s, 1 /* ERROR "must be integer" */ >> s)
+ _, _, _ = u, v, x
+}
+
+func shifts4() {
+ // shifts in comparisons w/ untyped operands
+ var s uint
+
+ _ = 1<<s == 1
+ _ = 1 /* ERROR "integer" */ <<s == 1.
+ _ = 1. /* ERROR "integer" */ <<s == 1
+ _ = 1. /* ERROR "integer" */ <<s == 1.
+
+ _ = 1<<s + 1 == 1
+ _ = 1 /* ERROR "integer" */ <<s + 1 == 1.
+ _ = 1 /* ERROR "integer" */ <<s + 1. == 1
+ _ = 1 /* ERROR "integer" */ <<s + 1. == 1.
+ _ = 1. /* ERROR "integer" */ <<s + 1 == 1
+ _ = 1. /* ERROR "integer" */ <<s + 1 == 1.
+ _ = 1. /* ERROR "integer" */ <<s + 1. == 1
+ _ = 1. /* ERROR "integer" */ <<s + 1. == 1.
+
+ _ = 1<<s == 1<<s
+ _ = 1 /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s
+
+ _ = 1<<s + 1<<s == 1
+ _ = 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1.
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1.
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1.
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1.
+
+ _ = 1<<s + 1<<s == 1<<s + 1<<s
+ _ = 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+}
+
+func shifts5() {
+ // shifts in comparisons w/ typed operands
+ var s uint
+ var x int
+
+ _ = 1<<s == x
+ _ = 1.<<s == x
+ _ = 1.1 /* ERROR "int" */ <<s == x
+
+ _ = 1<<s + x == 1
+ _ = 1<<s + x == 1.
+ _ = 1<<s + x == 1.1 /* ERROR "int" */
+ _ = 1.<<s + x == 1
+ _ = 1.<<s + x == 1.
+ _ = 1.<<s + x == 1.1 /* ERROR "int" */
+ _ = 1.1 /* ERROR "int" */ <<s + x == 1
+ _ = 1.1 /* ERROR "int" */ <<s + x == 1.
+ _ = 1.1 /* ERROR "int" */ <<s + x == 1.1
+
+ _ = 1<<s == x<<s
+ _ = 1.<<s == x<<s
+ _ = 1.1 /* ERROR "int" */ <<s == x<<s
+}
+
+func shifts6() {
+ // shifts as operands in non-arithmetic operations and as arguments
+ var a [10]int
+ var s uint
+
+ _ = a[1<<s]
+ _ = a[1.0]
+ _ = a[1.0<<s]
+
+ _ = make([]int, 1.0)
+ _ = make([]int, 1.0<<s)
+ _ = make([]int, 1.1 /* ERROR "must be integer" */ <<s)
+
+ _ = float32(1)
+ _ = float32(1 /* ERROR "must be integer" */ <<s)
+ _ = float32(1.0)
+ _ = float32(1.0 /* ERROR "must be integer" */ <<s)
+ _ = float32(1.1 /* ERROR "must be integer" */ <<s)
+
+ // TODO(gri) Re-enable these tests once types2 has the go/types fixes.
+ // Issue #52080.
+ // _ = int32(0x80000000 /* ERROR "overflows int32" */ << s)
+ // TODO(rfindley) Eliminate the redundant error here.
+ // _ = int32(( /* ERROR "truncated to int32" */ 0x80000000 /* ERROR "truncated to int32" */ + 0i) << s)
+
+ _ = int(1+0i<<0)
+ // _ = int((1+0i)<<s)
+ // _ = int(1.0<<s)
+ // _ = int(complex(1, 0)<<s)
+ _ = int(float32/* ERROR "must be integer" */(1.0) <<s)
+ _ = int(1.1 /* ERROR "must be integer" */ <<s)
+ _ = int(( /* ERROR "must be integer" */ 1+1i) <<s)
+
+ _ = complex(1 /* ERROR "must be integer" */ <<s, 0)
+
+ var b []int
+ _ = append(b, 1<<s)
+ _ = append(b, 1.0<<s)
+ _ = append(b, (1+0i)<<s)
+ _ = append(b, 1.1 /* ERROR "must be integer" */ <<s)
+ _ = append(b, (1 + 0i) <<s)
+ _ = append(b, ( /* ERROR "must be integer" */ 1 + 1i) <<s)
+
+ _ = complex(1.0 /* ERROR "must be integer" */ <<s, 0)
+ _ = complex(1.1 /* ERROR "must be integer" */ <<s, 0)
+ _ = complex(0, 1.0 /* ERROR "must be integer" */ <<s)
+ _ = complex(0, 1.1 /* ERROR "must be integer" */ <<s)
+
+ // TODO(gri) The delete below is not type-checked correctly yet.
+ // var m1 map[int]string
+ // delete(m1, 1<<s)
+}
+
+func shifts7() {
+ // shifts of shifts
+ var s uint
+ var x int
+ _ = x
+
+ _ = 1<<(1<<s)
+ _ = 1<<(1.<<s)
+ _ = 1. /* ERROR "integer" */ <<(1<<s)
+ _ = 1. /* ERROR "integer" */ <<(1.<<s)
+
+ x = 1<<(1<<s)
+ x = 1<<(1.<<s)
+ x = 1.<<(1<<s)
+ x = 1.<<(1.<<s)
+
+ _ = (1<<s)<<(1<<s)
+ _ = (1<<s)<<(1.<<s)
+ _ = ( /* ERROR "integer" */ 1.<<s)<<(1<<s)
+ _ = ( /* ERROR "integer" */ 1.<<s)<<(1.<<s)
+
+ x = (1<<s)<<(1<<s)
+ x = (1<<s)<<(1.<<s)
+ x = ( /* ERROR "integer" */ 1.<<s)<<(1<<s)
+ x = ( /* ERROR "integer" */ 1.<<s)<<(1.<<s)
+}
+
+func shifts8() {
+ // shift examples from shift discussion: better error messages
+ var s uint
+ _ = 1.0 /* ERROR "shifted operand 1.0 (type float64) must be integer" */ <<s == 1
+ _ = 1.0 /* ERROR "shifted operand 1.0 (type float64) must be integer" */ <<s == 1.0
+ _ = 1 /* ERROR "shifted operand 1 (type float64) must be integer" */ <<s == 1.0
+ _ = 1 /* ERROR "shifted operand 1 (type float64) must be integer" */ <<s + 1.0 == 1
+ _ = 1 /* ERROR "shifted operand 1 (type float64) must be integer" */ <<s + 1.1 == 1
+ _ = 1 /* ERROR "shifted operand 1 (type float64) must be integer" */ <<s + 1 == 1.0
+
+ // additional cases
+ _ = complex(1.0 /* ERROR "shifted operand 1.0 (type float64) must be integer" */ <<s, 1)
+ _ = complex(1.0, 1 /* ERROR "shifted operand 1 (type float64) must be integer" */ <<s)
+
+ _ = int(1.<<s)
+ _ = int(1.1 /* ERRORx `shifted operand .* must be integer` */ <<s)
+ _ = float32(1 /* ERRORx `shifted operand .* must be integer` */ <<s)
+ _ = float32(1. /* ERRORx `shifted operand .* must be integer` */ <<s)
+ _ = float32(1.1 /* ERRORx `shifted operand .* must be integer` */ <<s)
+ // TODO(gri) the error messages for these two are incorrect - disabled for now
+ // _ = complex64(1<<s)
+ // _ = complex64(1.<<s)
+ _ = complex64(1.1 /* ERRORx `shifted operand .* must be integer` */ <<s)
+}
+
+func shifts9() {
+ // various originally failing snippets of code from the std library
+ // from src/compress/lzw/reader.go:90
+ {
+ var d struct {
+ bits uint32
+ width uint
+ }
+ _ = uint16(d.bits & (1<<d.width - 1))
+ }
+
+ // from src/debug/dwarf/buf.go:116
+ {
+ var ux uint64
+ var bits uint
+ x := int64(ux)
+ if x&(1<<(bits-1)) != 0 {}
+ }
+
+ // from src/encoding/asn1/asn1.go:160
+ {
+ var bytes []byte
+ if bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {}
+ }
+
+ // from src/math/big/rat.go:140
+ {
+ var exp int
+ var mantissa uint64
+ shift := uint64(-1022 - (exp - 1)) // [1..53)
+ _ = mantissa & (1<<shift - 1)
+ }
+
+ // from src/net/interface.go:51
+ {
+ type Flags uint
+ var f Flags
+ var i int
+ if f&(1<<uint(i)) != 0 {}
+ }
+
+ // from src/runtime/softfloat64.go:234
+ {
+ var gm uint64
+ var shift uint
+ _ = gm & (1<<shift - 1)
+ }
+
+ // from src/strconv/atof.go:326
+ {
+ var mant uint64
+ var mantbits uint
+ if mant == 2<<mantbits {}
+ }
+
+ // from src/route_bsd.go:82
+ {
+ var Addrs int32
+ const rtaRtMask = 1
+ var i uint
+ if Addrs&rtaRtMask&(1<<i) == 0 {}
+ }
+
+ // from src/text/scanner/scanner.go:540
+ {
+ var s struct { Whitespace uint64 }
+ var ch rune
+ for s.Whitespace&(1<<uint(ch)) != 0 {}
+ }
+}
+
+func issue5895() {
+ var x = 'a' << 1 // type of x must be rune
+ var _ rune = x
+}
+
+func issue11325() {
+ var _ = 0 >> 1.1 /* ERROR "truncated to uint" */ // example from issue 11325
+ _ = 0 >> 1.1 /* ERROR "truncated to uint" */
+ _ = 0 << 1.1 /* ERROR "truncated to uint" */
+ _ = 0 >> 1.
+ _ = 1 >> 1.1 /* ERROR "truncated to uint" */
+ _ = 1 >> 1.
+ _ = 1. >> 1
+ _ = 1. >> 1.
+ _ = 1.1 /* ERROR "must be integer" */ >> 1
+}
+
+func issue11594() {
+ var _ = complex64 /* ERROR "must be integer" */ (1) << 2 // example from issue 11594
+ _ = float32 /* ERROR "must be integer" */ (0) << 1
+ _ = float64 /* ERROR "must be integer" */ (0) >> 2
+ _ = complex64 /* ERROR "must be integer" */ (0) << 3
+ _ = complex64 /* ERROR "must be integer" */ (0) >> 4
+}
+
+func issue21727() {
+ var s uint
+ var a = make([]int, 1<<s + 1.2 /* ERROR "truncated to int" */ )
+ var _ = a[1<<s - 2.3 /* ERROR "truncated to int" */ ]
+ var _ int = 1<<s + 3.4 /* ERROR "truncated to int" */
+ var _ = string(1 /* ERRORx `shifted operand 1 .* must be integer` */ << s)
+ var _ = string(1.0 /* ERROR "cannot convert" */ << s)
+}
+
+func issue22969() {
+ var s uint
+ var a []byte
+ _ = a[0xffffffffffffffff /* ERROR "overflows int" */ <<s] // example from issue 22969
+ _ = make([]int, 0xffffffffffffffff /* ERROR "overflows int" */ << s)
+ _ = make([]int, 0, 0xffffffffffffffff /* ERROR "overflows int" */ << s)
+ var _ byte = 0x100 /* ERROR "overflows byte" */ << s
+ var _ int8 = 0xff /* ERROR "overflows int8" */ << s
+ var _ int16 = 0xffff /* ERROR "overflows int16" */ << s
+ var _ int32 = 0x80000000 /* ERROR "overflows int32" */ << s
+}
diff --git a/src/internal/types/testdata/check/slices.go b/src/internal/types/testdata/check/slices.go
new file mode 100644
index 0000000..2c33518
--- /dev/null
+++ b/src/internal/types/testdata/check/slices.go
@@ -0,0 +1,68 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices implements various slice algorithms.
+package slices
+
+// Map turns a []T1 to a []T2 using a mapping function.
+func Map[T1, T2 any](s []T1, f func(T1) T2) []T2 {
+ r := make([]T2, len(s))
+ for i, v := range s {
+ r[i] = f(v)
+ }
+ return r
+}
+
+// Reduce reduces a []T1 to a single value using a reduction function.
+func Reduce[T1, T2 any](s []T1, initializer T2, f func(T2, T1) T2) T2 {
+ r := initializer
+ for _, v := range s {
+ r = f(r, v)
+ }
+ return r
+}
+
+// Filter filters values from a slice using a filter function.
+func Filter[T any](s []T, f func(T) bool) []T {
+ var r []T
+ for _, v := range s {
+ if f(v) {
+ r = append(r, v)
+ }
+ }
+ return r
+}
+
+// Example uses
+
+func limiter(x int) byte {
+ switch {
+ case x < 0:
+ return 0
+ default:
+ return byte(x)
+ case x > 255:
+ return 255
+ }
+}
+
+var input = []int{-4, 68954, 7, 44, 0, -555, 6945}
+var limited1 = Map[int, byte](input, limiter)
+var limited2 = Map(input, limiter) // using type inference
+
+func reducer(x float64, y int) float64 {
+ return x + float64(y)
+}
+
+var reduced1 = Reduce[int, float64](input, 0, reducer)
+var reduced2 = Reduce(input, 1i /* ERROR "overflows" */, reducer) // using type inference
+var reduced3 = Reduce(input, 1, reducer) // using type inference
+
+func filter(x int) bool {
+ return x&1 != 0
+}
+
+var filtered1 = Filter[int](input, filter)
+var filtered2 = Filter(input, filter) // using type inference
+
diff --git a/src/internal/types/testdata/check/stmt0.go b/src/internal/types/testdata/check/stmt0.go
new file mode 100644
index 0000000..5232285
--- /dev/null
+++ b/src/internal/types/testdata/check/stmt0.go
@@ -0,0 +1,994 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// statements
+
+package stmt0
+
+func assignments0() (int, int) {
+ var a, b, c int
+ var ch chan int
+ f0 := func() {}
+ f1 := func() int { return 1 }
+ f2 := func() (int, int) { return 1, 2 }
+ f3 := func() (int, int, int) { return 1, 2, 3 }
+
+ a, b, c = 1, 2, 3
+ a, b, c = 1 /* ERROR "assignment mismatch: 3 variables but 2 values" */ , 2
+ a, b, c = 1 /* ERROR "assignment mismatch: 3 variables but 4 values" */ , 2, 3, 4
+ _, _, _ = a, b, c
+
+ a = f0 /* ERROR "used as value" */ ()
+ a = f1()
+ a = f2 /* ERROR "assignment mismatch: 1 variable but f2 returns 2 values" */ ()
+ a, b = f2()
+ a, b, c = f2 /* ERROR "assignment mismatch: 3 variables but f2 returns 2 values" */ ()
+ a, b, c = f3()
+ a, b = f3 /* ERROR "assignment mismatch: 2 variables but f3 returns 3 values" */ ()
+
+ a, b, c = <- /* ERROR "assignment mismatch: 3 variables but 1 value" */ ch
+
+ return /* ERROR "not enough return values\n\thave ()\n\twant (int, int)" */
+ return 1 /* ERROR "not enough return values\n\thave (number)\n\twant (int, int)" */
+ return 1, 2
+ return 1, 2, 3 /* ERROR "too many return values\n\thave (number, number, number)\n\twant (int, int)" */
+}
+
+func assignments1() {
+ b, i, f, c, s := false, 1, 1.0, 1i, "foo"
+ b = i /* ERRORx `cannot use .* in assignment` */
+ i = f /* ERRORx `cannot use .* in assignment` */
+ f = c /* ERRORx `cannot use .* in assignment` */
+ c = s /* ERRORx `cannot use .* in assignment` */
+ s = b /* ERRORx `cannot use .* in assignment` */
+
+ v0, v1, v2 := 1 /* ERROR "assignment mismatch" */ , 2, 3, 4
+ _, _, _ = v0, v1, v2
+
+ b = true
+
+ i += 1
+ i /* ERROR "mismatched types int and untyped string" */+= "foo"
+
+ f -= 1
+ f /= 0
+ f = float32(0)/0 /* ERROR "division by zero" */
+ f /* ERROR "mismatched types float64 and untyped string" */-= "foo"
+
+ c *= 1
+ c /= 0
+
+ s += "bar"
+ s /* ERROR "mismatched types string and untyped int" */+= 1
+
+ var u64 uint64
+ u64 += 1<<u64
+
+ undefined /* ERROR "undefined" */ = 991
+
+ // test cases for issue 5800
+ var (
+ _ int = nil /* ERROR "cannot use nil as int value in variable declaration" */
+ _ [10]int = nil /* ERROR "cannot use nil as [10]int value in variable declaration" */
+ _ []byte = nil
+ _ struct{} = nil /* ERROR "cannot use nil as struct{} value in variable declaration" */
+ _ func() = nil
+ _ map[int]string = nil
+ _ chan int = nil
+ )
+
+ // test cases for issue 5500
+ _ = func() (int, bool) {
+ var m map[int]int
+ return m /* ERROR "not enough return values" */ [0]
+ }
+
+ g := func(int, bool){}
+ var m map[int]int
+ g(m[0]) /* ERROR "not enough arguments" */
+
+ // assignments to _
+ _ = nil /* ERROR "use of untyped nil" */
+ _ = 1 << /* ERROR "constant shift overflow" */ 1000
+ (_) = 0
+}
+
+func assignments2() {
+ type mybool bool
+ var m map[string][]bool
+ var s []bool
+ var b bool
+ var d mybool
+ _ = s
+ _ = b
+ _ = d
+
+ // assignments to map index expressions are ok
+ s, b = m["foo"]
+ _, d = m["bar"]
+ m["foo"] = nil
+ m["foo"] = nil /* ERROR "assignment mismatch: 1 variable but 2 values" */ , false
+ _ = append(m["foo"])
+ _ = append(m["foo"], true)
+
+ var c chan int
+ _, b = <-c
+ _, d = <-c
+ <- /* ERROR "cannot assign" */ c = 0
+ <-c = 0 /* ERROR "assignment mismatch: 1 variable but 2 values" */ , false
+
+ var x interface{}
+ _, b = x.(int)
+ x /* ERROR "cannot assign" */ .(int) = 0
+ x.(int) = 0 /* ERROR "assignment mismatch: 1 variable but 2 values" */ , false
+
+ assignments2 /* ERROR "used as value" */ () = nil
+ int /* ERROR "not an expression" */ = 0
+}
+
+func issue6487() {
+ type S struct{x int}
+ _ = &S /* ERROR "cannot take address" */ {}.x
+ _ = &( /* ERROR "cannot take address" */ S{}.x)
+ _ = (&S{}).x
+ S /* ERROR "cannot assign" */ {}.x = 0
+ (&S{}).x = 0
+
+ type M map[string]S
+ var m M
+ m /* ERROR "cannot assign to struct field" */ ["foo"].x = 0
+ _ = &( /* ERROR "cannot take address" */ m["foo"].x)
+ _ = &m /* ERROR "cannot take address" */ ["foo"].x
+}
+
+func issue6766a() {
+ a, a /* ERROR "a repeated on left side of :=" */ := 1, 2
+ _ = a
+ a, b, b /* ERROR "b repeated on left side of :=" */ := 1, 2, 3
+ _ = b
+ c, c /* ERROR "c repeated on left side of :=" */, b := 1, 2, 3
+ _ = c
+ a, b := /* ERROR "no new variables" */ 1, 2
+}
+
+func shortVarDecls1() {
+ const c = 0
+ type d int
+ a, b, c /* ERROR "cannot assign" */ , d /* ERROR "cannot assign" */ := 1, "zwei", 3.0, 4
+ var _ int = a // a is of type int
+ var _ string = b // b is of type string
+}
+
+func incdecs() {
+ const c = 3.14
+ c /* ERROR "cannot assign" */ ++
+ s := "foo"
+ s /* ERROR "invalid operation" */ --
+ 3.14 /* ERROR "cannot assign" */ ++
+ var (
+ x int
+ y float32
+ z complex128
+ )
+ x++
+ y--
+ z++
+}
+
+func sends() {
+ var ch chan int
+ var rch <-chan int
+ var x int
+ x <- /* ERROR "cannot send" */ x
+ rch <- /* ERROR "cannot send" */ x
+ ch <- "foo" /* ERRORx `cannot use .* in send` */
+ ch <- x
+}
+
+func selects() {
+ select {}
+ var (
+ ch chan int
+ sc chan <- bool
+ )
+ select {
+ case <-ch:
+ case (<-ch):
+ case t := <-ch:
+ _ = t
+ case t := (<-ch):
+ _ = t
+ case t, ok := <-ch:
+ _, _ = t, ok
+ case t, ok := (<-ch):
+ _, _ = t, ok
+ case <-sc /* ERROR "cannot receive from send-only channel" */ :
+ }
+ select {
+ default:
+ default /* ERROR "multiple defaults" */ :
+ }
+ select {
+ case a, b := <-ch:
+ _, b = a, b
+ case x /* ERROR "send or receive" */ :
+ case a /* ERROR "send or receive" */ := ch:
+ }
+
+ // test for issue 9570: ch2 in second case falsely resolved to
+ // ch2 declared in body of first case
+ ch1 := make(chan int)
+ ch2 := make(chan int)
+ select {
+ case <-ch1:
+ var ch2 /* ERROR "ch2 declared and not used" */ chan bool
+ case i := <-ch2:
+ print(i + 1)
+ }
+}
+
+func gos() {
+ go 1; /* ERROR "must be function call" */
+ go int /* ERROR "go requires function call, not conversion" */ (0)
+ go ( /* ERROR "expression in go must not be parenthesized" */ gos())
+ go gos()
+ var c chan int
+ go close(c)
+ go len /* ERROR "go discards result" */ (c)
+}
+
+func defers() {
+ defer 1; /* ERROR "must be function call" */
+ defer int /* ERROR "defer requires function call, not conversion" */ (0)
+ defer ( /* ERROR "expression in defer must not be parenthesized" */ defers())
+ defer defers()
+ var c chan int
+ defer close(c)
+ defer len /* ERROR "defer discards result" */ (c)
+}
+
+func breaks() {
+ var x, y int
+
+ break /* ERROR "break" */
+ {
+ break /* ERROR "break" */
+ }
+ if x < y {
+ break /* ERROR "break" */
+ }
+
+ switch x {
+ case 0:
+ break
+ case 1:
+ if x == y {
+ break
+ }
+ default:
+ break
+ break
+ }
+
+ var z interface{}
+ switch z.(type) {
+ case int:
+ break
+ }
+
+ for {
+ break
+ }
+
+ var a []int
+ for _ = range a {
+ break
+ }
+
+ for {
+ if x == y {
+ break
+ }
+ }
+
+ var ch chan int
+ select {
+ case <-ch:
+ break
+ }
+
+ select {
+ case <-ch:
+ if x == y {
+ break
+ }
+ default:
+ break
+ }
+}
+
+func continues() {
+ var x, y int
+
+ continue /* ERROR "continue" */
+ {
+ continue /* ERROR "continue" */
+ }
+
+ if x < y {
+ continue /* ERROR "continue" */
+ }
+
+ switch x {
+ case 0:
+ continue /* ERROR "continue" */
+ }
+
+ var z interface{}
+ switch z.(type) {
+ case int:
+ continue /* ERROR "continue" */
+ }
+
+ var ch chan int
+ select {
+ case <-ch:
+ continue /* ERROR "continue" */
+ }
+
+ for i := 0; i < 10; i++ {
+ continue
+ if x < y {
+ continue
+ break
+ }
+ switch x {
+ case y:
+ continue
+ default:
+ break
+ }
+ select {
+ case <-ch:
+ continue
+ }
+ }
+
+ var a []int
+ for _ = range a {
+ continue
+ if x < y {
+ continue
+ break
+ }
+ switch x {
+ case y:
+ continue
+ default:
+ break
+ }
+ select {
+ case <-ch:
+ continue
+ }
+ }
+}
+
+func returns0() {
+ return
+ return 0 /* ERROR "too many return values" */
+}
+
+func returns1(x float64) (int, *float64) {
+ return 0, &x
+ return /* ERROR "not enough return values" */
+ return "foo" /* ERRORx `cannot .* in return statement` */, x /* ERRORx `cannot use .* in return statement` */
+ return 0, &x, 1 /* ERROR "too many return values" */
+}
+
+func returns2() (a, b int) {
+ return
+ return 1, "foo" /* ERRORx `cannot use .* in return statement` */
+ return 1, 2, 3 /* ERROR "too many return values" */
+ {
+ type a int
+ return 1, 2
+ return /* ERROR "a not in scope at return" */
+ }
+}
+
+func returns3() (_ int) {
+ return
+ {
+ var _ int // blank (_) identifiers never shadow since they are in no scope
+ return
+ }
+}
+
+func switches0() {
+ var x int
+
+ switch x {
+ }
+
+ switch x {
+ default:
+ default /* ERROR "multiple defaults" */ :
+ }
+
+ switch {
+ case 1 /* ERROR "cannot convert" */ :
+ }
+
+ true := "false"
+ _ = true
+ // A tagless switch is equivalent to the bool
+ // constant true, not the identifier 'true'.
+ switch {
+ case "false" /* ERROR "cannot convert" */:
+ }
+
+ switch int32(x) {
+ case 1, 2:
+ case x /* ERROR "invalid case x in switch on int32(x) (mismatched types int and int32)" */ :
+ }
+
+ switch x {
+ case 1 /* ERROR "overflows" */ << 100:
+ }
+
+ switch x {
+ case 1:
+ case 1 /* ERROR "duplicate case" */ :
+ case ( /* ERROR "duplicate case" */ 1):
+ case 2, 3, 4:
+ case 5, 1 /* ERROR "duplicate case" */ :
+ }
+
+ switch uint64(x) {
+ case 1<<64 - 1:
+ case 1 /* ERROR "duplicate case" */ <<64 - 1:
+ case 2, 3, 4:
+ case 5, 1 /* ERROR "duplicate case" */ <<64 - 1:
+ }
+
+ var y32 float32
+ switch y32 {
+ case 1.1:
+ case 11/10: // integer division!
+ case 11. /* ERROR "duplicate case" */ /10:
+ case 2, 3.0, 4.1:
+ case 5.2, 1.10 /* ERROR "duplicate case" */ :
+ }
+
+ var y64 float64
+ switch y64 {
+ case 1.1:
+ case 11/10: // integer division!
+ case 11. /* ERROR "duplicate case" */ /10:
+ case 2, 3.0, 4.1:
+ case 5.2, 1.10 /* ERROR "duplicate case" */ :
+ }
+
+ var s string
+ switch s {
+ case "foo":
+ case "foo" /* ERROR "duplicate case" */ :
+ case "f" /* ERROR "duplicate case" */ + "oo":
+ case "abc", "def", "ghi":
+ case "jkl", "foo" /* ERROR "duplicate case" */ :
+ }
+
+ type T int
+ type F float64
+ type S string
+ type B bool
+ var i interface{}
+ switch i {
+ case nil:
+ case nil: // no duplicate detection
+ case (*int)(nil):
+ case (*int)(nil): // do duplicate detection
+ case 1:
+ case byte(1):
+ case int /* ERROR "duplicate case" */ (1):
+ case T(1):
+ case 1.0:
+ case F(1.0):
+ case F /* ERROR "duplicate case" */ (1.0):
+ case "hello":
+ case S("hello"):
+ case S /* ERROR "duplicate case" */ ("hello"):
+ case 1==1, B(false):
+ case false, B(2==2):
+ }
+
+ // switch on array
+ var a [3]int
+ switch a {
+ case [3]int{1, 2, 3}:
+ case [3]int{1, 2, 3}: // no duplicate detection
+ case [ /* ERROR "mismatched types" */ 4]int{4, 5, 6}:
+ }
+
+ // switch on channel
+ var c1, c2 chan int
+ switch c1 {
+ case nil:
+ case c1:
+ case c2:
+ case c1, c2: // no duplicate detection
+ }
+}
+
+func switches1() {
+ fallthrough /* ERROR "fallthrough statement out of place" */
+
+ var x int
+ switch x {
+ case 0:
+ fallthrough /* ERROR "fallthrough statement out of place" */
+ break
+ case 1:
+ fallthrough
+ case 2:
+ fallthrough; ; ; // trailing empty statements are ok
+ case 3:
+ default:
+ fallthrough; ;
+ case 4:
+ fallthrough /* ERROR "cannot fallthrough final case in switch" */
+ }
+
+ var y interface{}
+ switch y.(type) {
+ case int:
+ fallthrough /* ERROR "cannot fallthrough in type switch" */ ; ; ;
+ default:
+ }
+
+ switch x {
+ case 0:
+ if x == 0 {
+ fallthrough /* ERROR "fallthrough statement out of place" */
+ }
+ }
+
+ switch x {
+ case 0:
+ goto L1
+ L1: fallthrough; ;
+ case 1:
+ goto L2
+ goto L3
+ goto L4
+ L2: L3: L4: fallthrough
+ default:
+ }
+
+ switch x {
+ case 0:
+ goto L5
+ L5: fallthrough
+ default:
+ goto L6
+ goto L7
+ goto L8
+ L6: L7: L8: fallthrough /* ERROR "cannot fallthrough final case in switch" */
+ }
+
+ switch x {
+ case 0:
+ fallthrough; ;
+ case 1:
+ {
+ fallthrough /* ERROR "fallthrough statement out of place" */
+ }
+ case 2:
+ fallthrough
+ case 3:
+ fallthrough /* ERROR "fallthrough statement out of place" */
+ { /* empty block is not an empty statement */ }; ;
+ default:
+ fallthrough /* ERROR "cannot fallthrough final case in switch" */
+ }
+
+ switch x {
+ case 0:
+ {
+ fallthrough /* ERROR "fallthrough statement out of place" */
+ }
+ }
+}
+
+func switches2() {
+ // untyped nil is not permitted as switch expression
+ switch nil /* ERROR "use of untyped nil" */ {
+ case 1, 2, "foo": // don't report additional errors here
+ }
+
+ // untyped constants are converted to default types
+ switch 1<<63-1 {
+ }
+ switch 1 /* ERRORx `cannot use .* as int value.*\(overflows\)` */ << 63 {
+ }
+ var x int
+ switch 1.0 {
+ case 1.0, 2.0, x /* ERROR "mismatched types int and float64" */ :
+ }
+ switch x {
+ case 1.0:
+ }
+
+ // untyped bools become of type bool
+ type B bool
+ var b B = true
+ switch x == x {
+ case b /* ERROR "mismatched types B and bool" */ :
+ }
+ switch {
+ case b /* ERROR "mismatched types B and bool" */ :
+ }
+}
+
+func issue11667() {
+ switch 9223372036854775808 /* ERRORx `cannot use .* as int value.*\(overflows\)` */ {
+ }
+ switch 9223372036854775808 /* ERRORx `cannot use .* as int value.*\(overflows\)` */ {
+ case 9223372036854775808:
+ }
+ var x int
+ switch x {
+ case 9223372036854775808 /* ERROR "overflows int" */ :
+ }
+ var y float64
+ switch y {
+ case 9223372036854775808:
+ }
+}
+
+func issue11687() {
+ f := func() (_, _ int) { return }
+ switch f /* ERROR "multiple-value f" */ () {
+ }
+ var x int
+ switch f /* ERROR "multiple-value f" */ () {
+ case x:
+ }
+ switch x {
+ case f /* ERROR "multiple-value f" */ ():
+ }
+}
+
+type I interface {
+ m()
+}
+
+type I2 interface {
+ m(int)
+}
+
+type T struct{}
+type T1 struct{}
+type T2 struct{}
+
+func (T) m() {}
+func (T2) m(int) {}
+
+func typeswitches() {
+ var i int
+ var x interface{}
+
+ switch x.(type) {}
+ switch (x /* ERROR "outside type switch" */ .(type)) {}
+
+ switch x.(type) {
+ default:
+ default /* ERROR "multiple defaults" */ :
+ }
+
+ switch x /* ERROR "declared and not used" */ := x.(type) {}
+ switch _ /* ERROR "no new variable on left side of :=" */ := x.(type) {}
+
+ switch x := x.(type) {
+ case int:
+ var y int = x
+ _ = y
+ }
+
+ switch x /* ERROR "x declared and not used" */ := i /* ERROR "not an interface" */ .(type) {}
+
+ switch t := x.(type) {
+ case nil:
+ var v bool = t /* ERRORx `cannot use .* in variable declaration` */
+ _ = v
+ case int:
+ var v int = t
+ _ = v
+ case float32, complex64:
+ var v float32 = t /* ERRORx `cannot use .* in variable declaration` */
+ _ = v
+ default:
+ var v float32 = t /* ERRORx `cannot use .* in variable declaration` */
+ _ = v
+ }
+
+ var t I
+ switch t.(type) {
+ case T:
+ case T1 /* ERROR "missing method m" */ :
+ case T2 /* ERROR "wrong type for method m" */ :
+ case I2 /* STRICT "wrong type for method m" */ : // only an error in strict mode (issue 8561)
+ }
+
+
+ {
+ x := 1
+ v := 2
+ switch v /* ERROR "v (variable of type int) is not an interface" */ .(type) {
+ case int:
+ println(x)
+ println(x / 0 /* ERROR "invalid operation: division by zero" */)
+ case 1 /* ERROR "1 is not a type" */:
+ }
+ }
+}
+
+// Test that each case clause uses the correct type of the variable
+// declared by the type switch (issue 5504).
+func typeswitch0() {
+ switch y := interface{}(nil).(type) {
+ case int:
+ func() int { return y + 0 }()
+ case float32:
+ func() float32 { return y }()
+ }
+}
+
+// Test correct scope setup.
+// (no redeclaration errors expected in the type switch)
+func typeswitch1() {
+ var t I
+ switch t := t; t := t.(type) {
+ case nil:
+ var _ I = t
+ case T:
+ var _ T = t
+ default:
+ var _ I = t
+ }
+}
+
+// Test correct typeswitch against interface types.
+type A interface { a() }
+type B interface { b() }
+type C interface { a(int) }
+
+func typeswitch2() {
+ switch A(nil).(type) {
+ case A:
+ case B:
+ case C /* STRICT "cannot have dynamic type" */: // only an error in strict mode (issue 8561)
+ }
+}
+
+func typeswitch3(x interface{}) {
+ switch x.(type) {
+ case int:
+ case float64:
+ case int /* ERROR "duplicate case" */ :
+ }
+
+ switch x.(type) {
+ case nil:
+ case int:
+ case nil /* ERROR "duplicate case" */ , nil /* ERROR "duplicate case" */ :
+ }
+
+ type F func(int)
+ switch x.(type) {
+ case nil:
+ case int, func(int):
+ case float32, func /* ERROR "duplicate case" */ (x int):
+ case F:
+ }
+}
+
+func fors1() {
+ for {}
+ var i string
+ _ = i
+ for i := 0; i < 10; i++ {}
+ for i := 0; i < 10; j /* ERROR "cannot declare" */ := 0 {}
+}
+
+func rangeloops1() {
+ var (
+ x int
+ a [10]float32
+ b []string
+ p *[10]complex128
+ pp **[10]complex128
+ s string
+ m map[int]bool
+ c chan int
+ sc chan<- int
+ rc <-chan int
+ )
+
+ for range x /* ERROR "cannot range over" */ {}
+ for _ = range x /* ERROR "cannot range over" */ {}
+ for i := range x /* ERROR "cannot range over" */ {}
+
+ for range a {}
+ for i := range a {
+ var ii int
+ ii = i
+ _ = ii
+ }
+ for i, x := range a {
+ var ii int
+ ii = i
+ _ = ii
+ var xx float64
+ xx = x /* ERRORx `cannot use .* in assignment` */
+ _ = xx
+ }
+ var ii int
+ var xx float32
+ for ii, xx = range a {}
+ _, _ = ii, xx
+
+ for range b {}
+ for i := range b {
+ var ii int
+ ii = i
+ _ = ii
+ }
+ for i, x := range b {
+ var ii int
+ ii = i
+ _ = ii
+ var xx string
+ xx = x
+ _ = xx
+ }
+
+ for range s {}
+ for i := range s {
+ var ii int
+ ii = i
+ _ = ii
+ }
+ for i, x := range s {
+ var ii int
+ ii = i
+ _ = ii
+ var xx rune
+ xx = x
+ _ = xx
+ }
+
+ for range p {}
+ for _, x := range p {
+ var xx complex128
+ xx = x
+ _ = xx
+ }
+
+ for range pp /* ERROR "cannot range over" */ {}
+ for _, x := range pp /* ERROR "cannot range over" */ {}
+
+ for range m {}
+ for k := range m {
+ var kk int32
+ kk = k /* ERRORx `cannot use .* in assignment` */
+ _ = kk
+ }
+ for k, v := range m {
+ var kk int
+ kk = k
+ _ = kk
+ if v {}
+ }
+
+ for range c {}
+ for _, _ /* ERROR "only one iteration variable" */ = range c {}
+ for e := range c {
+ var ee int
+ ee = e
+ _ = ee
+ }
+ for _ = range sc /* ERROR "cannot range over" */ {}
+ for _ = range rc {}
+
+ // constant strings
+ const cs = "foo"
+ for range cs {}
+ for range "" {}
+ for i, x := range cs { _, _ = i, x }
+ for i, x := range "" {
+ var ii int
+ ii = i
+ _ = ii
+ var xx rune
+ xx = x
+ _ = xx
+ }
+}
+
+func rangeloops2() {
+ type I int
+ type R rune
+
+ var a [10]int
+ var i I
+ _ = i
+ for i /* ERRORx `cannot use .* in assignment` */ = range a {}
+ for i /* ERRORx `cannot use .* in assignment` */ = range &a {}
+ for i /* ERRORx `cannot use .* in assignment` */ = range a[:] {}
+
+ var s string
+ var r R
+ _ = r
+ for i /* ERRORx `cannot use .* in assignment` */ = range s {}
+ for i /* ERRORx `cannot use .* in assignment` */ = range "foo" {}
+ for _, r /* ERRORx `cannot use .* in assignment` */ = range s {}
+ for _, r /* ERRORx `cannot use .* in assignment` */ = range "foo" {}
+}
+
+func issue6766b() {
+ for _ := /* ERROR "no new variables" */ range "" {}
+ for a, a /* ERROR "redeclared" */ := range "" { _ = a }
+ var a int
+ _ = a
+ for a, a /* ERROR "redeclared" */ := range []int{1, 2, 3} { _ = a }
+}
+
+// Test that despite errors in the range clause,
+// the loop body is still type-checked (and thus
+// errors reported).
+func issue10148() {
+ for y /* ERROR "declared and not used" */ := range "" {
+ _ = "" /* ERROR "mismatched types untyped string and untyped int" */ + 1
+ }
+ for range 1 /* ERROR "cannot range over 1" */ {
+ _ = "" /* ERROR "mismatched types untyped string and untyped int" */ + 1
+ }
+ for y := range 1 /* ERROR "cannot range over 1" */ {
+ _ = "" /* ERROR "mismatched types untyped string and untyped int" */ + 1
+ }
+}
+
+func labels0() {
+ goto L0
+ goto L1
+ L0:
+ L1:
+ L1 /* ERROR "already declared" */ :
+ if true {
+ goto L2
+ L2:
+ L0 /* ERROR "already declared" */ :
+ }
+ _ = func() {
+ goto L0
+ goto L1
+ goto L2
+ L0:
+ L1:
+ L2:
+ }
+}
+
+func expression_statements(ch chan int) {
+ expression_statements(ch)
+ <-ch
+ println()
+
+ 0 /* ERROR "not used" */
+ 1 /* ERROR "not used" */ +2
+ cap /* ERROR "not used" */ (ch)
+ println /* ERROR "must be called" */
+}
diff --git a/src/internal/types/testdata/check/stmt1.go b/src/internal/types/testdata/check/stmt1.go
new file mode 100644
index 0000000..f79f920
--- /dev/null
+++ b/src/internal/types/testdata/check/stmt1.go
@@ -0,0 +1,259 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// terminating statements
+
+package stmt1
+
+func _() {}
+
+func _() int {} /* ERROR "missing return" */
+
+func _() int { panic(0) }
+func _() int { (panic(0)) }
+
+// block statements
+func _(x, y int) (z int) {
+ {
+ return
+ }
+}
+
+func _(x, y int) (z int) {
+ {
+ return; ; ; // trailing empty statements are ok
+ }
+ ; ; ;
+}
+
+func _(x, y int) (z int) {
+ {
+ }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ {
+ ; ; ;
+ }
+ ; ; ;
+} /* ERROR "missing return" */
+
+// if statements
+func _(x, y int) (z int) {
+ if x < y { return }
+ return 1
+}
+
+func _(x, y int) (z int) {
+ if x < y { return; ; ; ; }
+ return 1
+}
+
+func _(x, y int) (z int) {
+ if x < y { return }
+ return 1; ;
+}
+
+func _(x, y int) (z int) {
+ if x < y { return }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ if x < y {
+ } else { return 1
+ }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ if x < y { return
+ } else { return
+ }
+}
+
+// for statements
+func _(x, y int) (z int) {
+ for x < y {
+ return
+ }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ for {
+ return
+ }
+}
+
+func _(x, y int) (z int) {
+ for {
+ return; ; ; ;
+ }
+}
+
+func _(x, y int) (z int) {
+ for {
+ return
+ break
+ }
+ ; ; ;
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ for {
+ for { break }
+ return
+ }
+}
+
+func _(x, y int) (z int) {
+ for {
+ for { break }
+ return ; ;
+ }
+ ;
+}
+
+func _(x, y int) (z int) {
+L: for {
+ for { break L }
+ return
+ }
+} /* ERROR "missing return" */
+
+// switch statements
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return
+ default: return
+ }
+}
+
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return;
+ default: return; ; ;
+ }
+}
+
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return
+ }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return
+ case 1: break
+ }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return
+ default:
+ switch y {
+ case 0: break
+ }
+ panic(0)
+ }
+}
+
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return
+ default:
+ switch y {
+ case 0: break
+ }
+ panic(0); ; ;
+ }
+ ;
+}
+
+func _(x, y int) (z int) {
+L: switch x {
+ case 0: return
+ default:
+ switch y {
+ case 0: break L
+ }
+ panic(0)
+ }
+} /* ERROR "missing return" */
+
+// select statements
+func _(ch chan int) (z int) {
+ select {}
+} // nice!
+
+func _(ch chan int) (z int) {
+ select {}
+ ; ;
+}
+
+func _(ch chan int) (z int) {
+ select {
+ default: break
+ }
+} /* ERROR "missing return" */
+
+func _(ch chan int) (z int) {
+ select {
+ case <-ch: return
+ default: break
+ }
+} /* ERROR "missing return" */
+
+func _(ch chan int) (z int) {
+ select {
+ case <-ch: return
+ default:
+ for i := 0; i < 10; i++ {
+ break
+ }
+ return
+ }
+}
+
+func _(ch chan int) (z int) {
+ select {
+ case <-ch: return; ; ;
+ default:
+ for i := 0; i < 10; i++ {
+ break
+ }
+ return; ; ;
+ }
+ ; ; ;
+}
+
+func _(ch chan int) (z int) {
+L: select {
+ case <-ch: return
+ default:
+ for i := 0; i < 10; i++ {
+ break L
+ }
+ return
+ }
+ ; ; ;
+} /* ERROR "missing return" */
+
+func parenPanic() int {
+ ((((((panic)))(0))))
+}
+
+func issue23218a() int {
+ {
+ panic := func(interface{}){}
+ panic(0)
+ }
+} /* ERROR "missing return" */
+
+func issue23218b() int {
+ {
+ panic := func(interface{}){}
+ ((((panic))))(0)
+ }
+} /* ERROR "missing return" */
diff --git a/src/internal/types/testdata/check/typeinference.go b/src/internal/types/testdata/check/typeinference.go
new file mode 100644
index 0000000..0478d93
--- /dev/null
+++ b/src/internal/types/testdata/check/typeinference.go
@@ -0,0 +1,49 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeInference
+
+// As of issue #51527, type-type inference has been disabled.
+
+// basic inference
+type Tb[P ~*Q, Q any] int
+func _() {
+ var x Tb /* ERROR "got 1 arguments" */ [*int]
+ var y Tb[*int, int]
+ x = y /* ERRORx `cannot use y .* in assignment` */
+ _ = x
+}
+
+// recursive inference
+type Tr[A any, B *C, C *D, D *A] int
+func _() {
+ var x Tr /* ERROR "got 1 arguments" */ [string]
+ var y Tr[string, ***string, **string, *string]
+ var z Tr[int, ***int, **int, *int]
+ x = y /* ERRORx `cannot use y .* in assignment` */
+ x = z // ERRORx `cannot use z .* as Tr`
+ _ = x
+}
+
+// other patterns of inference
+type To0[A any, B []A] int
+type To1[A any, B struct{a A}] int
+type To2[A any, B [][]A] int
+type To3[A any, B [3]*A] int
+type To4[A any, B any, C struct{a A; b B}] int
+func _() {
+ var _ To0 /* ERROR "got 1 arguments" */ [int]
+ var _ To1 /* ERROR "got 1 arguments" */ [int]
+ var _ To2 /* ERROR "got 1 arguments" */ [int]
+ var _ To3 /* ERROR "got 1 arguments" */ [int]
+ var _ To4 /* ERROR "got 2 arguments" */ [int, string]
+}
+
+// failed inference
+type Tf0[A, B any] int
+type Tf1[A any, B ~struct{a A; c C}, C any] int
+func _() {
+ var _ Tf0 /* ERROR "got 1 arguments but 2 type parameters" */ [int]
+ var _ Tf1 /* ERROR "got 1 arguments but 3 type parameters" */ [int]
+}
diff --git a/src/internal/types/testdata/check/typeinst0.go b/src/internal/types/testdata/check/typeinst0.go
new file mode 100644
index 0000000..bbcdaec
--- /dev/null
+++ b/src/internal/types/testdata/check/typeinst0.go
@@ -0,0 +1,62 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type myInt int
+
+// Parameterized type declarations
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+type T1[P any] P // ERROR "cannot use a type parameter as RHS in type declaration"
+
+type T2[P any] struct {
+ f P
+ g int // int should still be in scope chain
+}
+
+type List[P any] []P
+
+// Alias type declarations cannot have type parameters.
+// Issue #46477 proposes to change that.
+type A1[P any] = /* ERROR "cannot be alias" */ struct{}
+
+// Pending clarification of #46477 we disallow aliases
+// of generic types.
+type A2 = List // ERROR "cannot use generic type"
+var _ A2[int]
+var _ A2
+
+type A3 = List[int]
+var _ A3
+
+// Parameterized type instantiations
+
+var x int
+type _ x /* ERROR "not a type" */ [int]
+
+type _ int /* ERROR "not a generic type" */ [] // ERROR "expected type argument list"
+type _ myInt /* ERROR "not a generic type" */ [] // ERROR "expected type argument list"
+
+// TODO(gri) better error messages
+type _ T1[] // ERROR "expected type argument list"
+type _ T1[x /* ERROR "not a type" */ ]
+type _ T1 /* ERROR "got 2 arguments but 1 type parameters" */ [int, float32]
+
+var _ T2[int] = T2[int]{}
+
+var _ List[int] = []int{1, 2, 3}
+var _ List[[]int] = [][]int{{1, 2, 3}}
+var _ List[List[List[int]]]
+
+// Parameterized types containing parameterized types
+
+type T3[P any] List[P]
+
+var _ T3[int] = T3[int](List[int]{1, 2, 3})
+
+// Self-recursive generic types are not permitted
+
+type self1[P any] self1 /* ERROR "invalid recursive type" */ [P]
+type self2[P any] *self2[P] // this is ok
diff --git a/src/internal/types/testdata/check/typeinst1.go b/src/internal/types/testdata/check/typeinst1.go
new file mode 100644
index 0000000..0e09e70
--- /dev/null
+++ b/src/internal/types/testdata/check/typeinst1.go
@@ -0,0 +1,282 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type List[E any] []E
+var _ List[List[List[int]]]
+var _ List[List[List[int]]] = []List[List[int]]{}
+
+type (
+ T1[P1 any] struct {
+ f1 T2[P1, float32]
+ }
+
+ T2[P2, P3 any] struct {
+ f2 P2
+ f3 P3
+ }
+)
+
+func _() {
+ var x1 T1[int]
+ var x2 T2[int, float32]
+
+ x1.f1.f2 = 0
+ x1.f1 = x2
+}
+
+type T3[P any] T1[T2[P, P]]
+
+func _() {
+ var x1 T3[int]
+ var x2 T2[int, int]
+ x1.f1.f2 = x2
+}
+
+func f[P any] (x P) List[P] {
+ return List[P]{x}
+}
+
+var (
+ _ []int = f(0)
+ _ []float32 = f[float32](10)
+ _ List[complex128] = f(1i)
+ _ []List[int] = f(List[int]{})
+ _ List[List[int]] = []List[int]{}
+ _ = []List[int]{}
+)
+
+// Parameterized types with methods
+
+func (l List[E]) Head() (_ E, _ bool) {
+ if len(l) > 0 {
+ return l[0], true
+ }
+ return
+}
+
+// A test case for instantiating types with other types (extracted from map.go2)
+
+type Pair[K any] struct {
+ key K
+}
+
+type Receiver[T any] struct {
+ values T
+}
+
+type Iterator[K any] struct {
+ r Receiver[Pair[K]]
+}
+
+func Values [T any] (r Receiver[T]) T {
+ return r.values
+}
+
+func (it Iterator[K]) Next() K {
+ return Values[Pair[K]](it.r).key
+}
+
+// A more complex test case testing type bounds (extracted from linalg.go2 and reduced to essence)
+
+type NumericAbs[T any] interface {
+ Abs() T
+}
+
+func AbsDifference[T NumericAbs[T]](x T) { panic(0) }
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type OrderedAbs[T any] T
+//
+// func (a OrderedAbs[T]) Abs() OrderedAbs[T]
+//
+// func OrderedAbsDifference[T any](x T) {
+// AbsDifference(OrderedAbs[T](x))
+// }
+
+// same code, reduced to essence
+
+func g[P interface{ m() P }](x P) { panic(0) }
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type T4[P any] P
+//
+// func (_ T4[P]) m() T4[P]
+//
+// func _[Q any](x Q) {
+// g(T4[Q](x))
+// }
+
+// Another test case that caused problems in the past
+
+type T5[_ interface { a() }, _ interface{}] struct{}
+
+type A[P any] struct{ x P }
+
+func (_ A[P]) a() {}
+
+var _ T5[A[int], int]
+
+// Invoking methods with parameterized receiver types uses
+// type inference to determine the actual type arguments matching
+// the receiver type parameters from the actual receiver argument.
+// Go does implicit address-taking and dereferencing depending
+// on the actual receiver and the method's receiver type. To make
+// type inference work, the type-checker matches "pointer-ness"
+// of the actual receiver and the method's receiver type.
+// The following code tests this mechanism.
+
+type R1[A any] struct{}
+func (_ R1[A]) vm()
+func (_ *R1[A]) pm()
+
+func _[T any](r R1[T], p *R1[T]) {
+ r.vm()
+ r.pm()
+ p.vm()
+ p.pm()
+}
+
+type R2[A, B any] struct{}
+func (_ R2[A, B]) vm()
+func (_ *R2[A, B]) pm()
+
+func _[T any](r R2[T, int], p *R2[string, T]) {
+ r.vm()
+ r.pm()
+ p.vm()
+ p.pm()
+}
+
+// It is ok to have multiple embedded unions.
+type _ interface {
+ m0()
+ ~int | ~string | ~bool
+ ~float32 | ~float64
+ m1()
+ m2()
+ ~complex64 | ~complex128
+ ~rune
+}
+
+// Type sets may contain each type at most once.
+type _ interface {
+ ~int|~ /* ERROR "overlapping terms ~int" */ int
+ ~int|int /* ERROR "overlapping terms int" */
+ int|int /* ERROR "overlapping terms int" */
+}
+
+type _ interface {
+ ~struct{f int} | ~struct{g int} | ~ /* ERROR "overlapping terms" */ struct{f int}
+}
+
+// Interface term lists can contain any type, incl. *Named types.
+// Verify that we use the underlying type(s) of the type(s) in the
+// term list when determining if an operation is permitted.
+
+type MyInt int
+func add1[T interface{MyInt}](x T) T {
+ return x + 1
+}
+
+type MyString string
+func double[T interface{MyInt|MyString}](x T) T {
+ return x + x
+}
+
+// Embedding of interfaces with term lists leads to interfaces
+// with term lists that are the intersection of the embedded
+// term lists.
+
+type E0 interface {
+ ~int | ~bool | ~string
+}
+
+type E1 interface {
+ ~int | ~float64 | ~string
+}
+
+type E2 interface {
+ ~float64
+}
+
+type I0 interface {
+ E0
+}
+
+func f0[T I0]() {}
+var _ = f0[int]
+var _ = f0[bool]
+var _ = f0[string]
+var _ = f0[float64 /* ERROR "does not satisfy I0" */ ]
+
+type I01 interface {
+ E0
+ E1
+}
+
+func f01[T I01]() {}
+var _ = f01[int]
+var _ = f01[bool /* ERROR "does not satisfy I0" */ ]
+var _ = f01[string]
+var _ = f01[float64 /* ERROR "does not satisfy I0" */ ]
+
+type I012 interface {
+ E0
+ E1
+ E2
+}
+
+func f012[T I012]() {}
+var _ = f012[int /* ERRORx `cannot satisfy I012.*empty type set` */ ]
+var _ = f012[bool /* ERRORx `cannot satisfy I012.*empty type set` */ ]
+var _ = f012[string /* ERRORx `cannot satisfy I012.*empty type set` */ ]
+var _ = f012[float64 /* ERRORx `cannot satisfy I012.*empty type set` */ ]
+
+type I12 interface {
+ E1
+ E2
+}
+
+func f12[T I12]() {}
+var _ = f12[int /* ERROR "does not satisfy I12" */ ]
+var _ = f12[bool /* ERROR "does not satisfy I12" */ ]
+var _ = f12[string /* ERROR "does not satisfy I12" */ ]
+var _ = f12[float64]
+
+type I0_ interface {
+ E0
+ ~int
+}
+
+func f0_[T I0_]() {}
+var _ = f0_[int]
+var _ = f0_[bool /* ERROR "does not satisfy I0_" */ ]
+var _ = f0_[string /* ERROR "does not satisfy I0_" */ ]
+var _ = f0_[float64 /* ERROR "does not satisfy I0_" */ ]
+
+// Using a function instance as a type is an error.
+var _ f0 // ERROR "not a type"
+var _ f0 /* ERROR "not a type" */ [int]
+
+// Empty type sets can only be satisfied by empty type sets.
+type none interface {
+ // force an empty type set
+ int
+ string
+}
+
+func ff[T none]() {}
+func gg[T any]() {}
+func hh[T ~int]() {}
+
+func _[T none]() {
+ _ = ff[int /* ERROR "cannot satisfy none (empty type set)" */ ]
+ _ = ff[T] // pathological but ok because T's type set is empty, too
+ _ = gg[int]
+ _ = gg[T]
+ _ = hh[int]
+ _ = hh[T]
+}
diff --git a/src/internal/types/testdata/check/typeinstcycles.go b/src/internal/types/testdata/check/typeinstcycles.go
new file mode 100644
index 0000000..74fe191
--- /dev/null
+++ b/src/internal/types/testdata/check/typeinstcycles.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+func F1[T any](_ [unsafe.Sizeof(F1[int])]T) (res T) { return }
+func F2[T any](_ T) (res [unsafe.Sizeof(F2[string])]int) { return }
+func F3[T any](_ [unsafe.Sizeof(F1[string])]int) {}
diff --git a/src/internal/types/testdata/check/typeparams.go b/src/internal/types/testdata/check/typeparams.go
new file mode 100644
index 0000000..b002377
--- /dev/null
+++ b/src/internal/types/testdata/check/typeparams.go
@@ -0,0 +1,508 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// import "io" // for type assertion tests
+
+var _ any // ok to use any anywhere
+func _[_ any, _ interface{any}](any) {
+ var _ any
+}
+
+func identity[T any](x T) T { return x }
+
+func _[_ any](x int) int { panic(0) }
+func _[T any](T /* ERROR "redeclared" */ T)() {}
+func _[T, T /* ERROR "redeclared" */ any]() {}
+
+// Constraints (incl. any) may be parenthesized.
+func _[_ (any)]() {}
+func _[_ (interface{})]() {}
+
+func reverse[T any](list []T) []T {
+ rlist := make([]T, len(list))
+ i := len(list)
+ for _, x := range list {
+ i--
+ rlist[i] = x
+ }
+ return rlist
+}
+
+var _ = reverse /* ERROR "cannot use generic function reverse" */
+var _ = reverse[int, float32 /* ERROR "got 2 type arguments" */ ] ([]int{1, 2, 3})
+var _ = reverse[int]([ /* ERROR "cannot use" */ ]float32{1, 2, 3})
+var f = reverse[chan int]
+var _ = f(0 /* ERRORx `cannot use 0 .* as \[\]chan int` */ )
+
+func swap[A, B any](a A, b B) (B, A) { return b, a }
+
+var _ = swap /* ERROR "multiple-value" */ [int, float32](1, 2)
+var f32, i = swap[int, float32](swap[float32, int](1, 2))
+var _ float32 = f32
+var _ int = i
+
+func swapswap[A, B any](a A, b B) (A, B) {
+ return swap[B, A](b, a)
+}
+
+type F[A, B any] func(A, B) (B, A)
+
+func min[T interface{ ~int }](x, y T) T {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func _[T interface{~int | ~float32}](x, y T) bool { return x < y }
+func _[T any](x, y T) bool { return x /* ERROR "type parameter T is not comparable" */ < y }
+func _[T interface{~int | ~float32 | ~bool}](x, y T) bool { return x /* ERROR "type parameter T is not comparable" */ < y }
+
+func _[T C1[T]](x, y T) bool { return x /* ERROR "type parameter T is not comparable" */ < y }
+func _[T C2[T]](x, y T) bool { return x < y }
+
+type C1[T any] interface{}
+type C2[T any] interface{ ~int | ~float32 }
+
+func new[T any]() *T {
+ var x T
+ return &x
+}
+
+var _ = new /* ERROR "cannot use generic function new" */
+var _ *int = new[int]()
+
+func _[T any](map[T /* ERROR "invalid map key type T (missing comparable constraint)" */]int) {} // w/o constraint we don't know if T is comparable
+
+func f1[T1 any](struct{T1 /* ERRORx `cannot be a .* type parameter` */ }) int { panic(0) }
+var _ = f1[int](struct{T1}{})
+type T1 = int
+
+func f2[t1 any](struct{t1 /* ERRORx `cannot be a .* type parameter` */ ; x float32}) int { panic(0) }
+var _ = f2[t1](struct{t1; x float32}{})
+type t1 = int
+
+
+func f3[A, B, C any](A, struct{x B}, func(A, struct{x B}, *C)) int { panic(0) }
+
+var _ = f3[int, rune, bool](1, struct{x rune}{}, nil)
+
+// indexing
+
+func _[T any] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[10]int | ~*[20]int | ~map[int]int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] } // map and non-map types
+func _[T interface{ ~string | ~[]byte }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int | ~[1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string | ~[]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+
+// indexing with various combinations of map types in type sets (see issue #42616)
+func _[T interface{ ~[]E | ~map[int]E }, E any](x T, i int) { _ = x /* ERROR "cannot index" */ [i] } // map and non-map types
+func _[T interface{ ~[]E }, E any](x T, i int) { _ = &x[i] }
+func _[T interface{ ~map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
+func _[T interface{ ~map[int]E }, E any](x T, i int) { _ = &x /* ERROR "cannot take address" */ [i] }
+func _[T interface{ ~map[int]E | ~map[uint]E }, E any](x T, i int) { _ = x /* ERROR "cannot index" */ [i] } // different map element types
+func _[T interface{ ~[]E | ~map[string]E }, E any](x T, i int) { _ = x /* ERROR "cannot index" */ [i] } // map and non-map types
+
+// indexing with various combinations of array and other types in type sets
+func _[T interface{ [10]int }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR "out of bounds" */ ] }
+func _[T interface{ [10]byte | string }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR "out of bounds" */ ] }
+func _[T interface{ [10]int | *[20]int | []int }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR "out of bounds" */ ] }
+
+// indexing with strings and non-variable arrays (assignment not permitted)
+func _[T string](x T) { _ = x[0]; x /* ERROR "cannot assign" */ [0] = 0 }
+func _[T []byte | string](x T) { x /* ERROR "cannot assign" */ [0] = 0 }
+func _[T [10]byte]() { f := func() (x T) { return }; f /* ERROR "cannot assign" */ ()[0] = 0 }
+func _[T [10]byte]() { f := func() (x *T) { return }; f /* ERROR "cannot index" */ ()[0] = 0 }
+func _[T [10]byte]() { f := func() (x *T) { return }; (*f())[0] = 0 }
+func _[T *[10]byte]() { f := func() (x T) { return }; f()[0] = 0 }
+
+// slicing
+
+func _[T interface{ ~[10]E }, E any] (x T, i, j, k int) { var _ []E = x[i:j] }
+func _[T interface{ ~[10]E }, E any] (x T, i, j, k int) { var _ []E = x[i:j:k] }
+func _[T interface{ ~[]byte }] (x T, i, j, k int) { var _ T = x[i:j] }
+func _[T interface{ ~[]byte }] (x T, i, j, k int) { var _ T = x[i:j:k] }
+func _[T interface{ ~string }] (x T, i, j, k int) { var _ T = x[i:j] }
+func _[T interface{ ~string }] (x T, i, j, k int) { var _ T = x[i:j:k /* ERROR "3-index slice of string" */ ] }
+
+type myByte1 []byte
+type myByte2 []byte
+func _[T interface{ []byte | myByte1 | myByte2 }] (x T, i, j, k int) { var _ T = x[i:j:k] }
+func _[T interface{ []byte | myByte1 | []int }] (x T, i, j, k int) { var _ T = x /* ERROR "no core type" */ [i:j:k] }
+
+func _[T interface{ []byte | myByte1 | myByte2 | string }] (x T, i, j, k int) { var _ T = x[i:j] }
+func _[T interface{ []byte | myByte1 | myByte2 | string }] (x T, i, j, k int) { var _ T = x[i:j:k /* ERROR "3-index slice of string" */ ] }
+func _[T interface{ []byte | myByte1 | []int | string }] (x T, i, j, k int) { var _ T = x /* ERROR "no core type" */ [i:j] }
+
+// len/cap built-ins
+
+func _[T any](x T) { _ = len(x /* ERROR "invalid argument" */ ) }
+func _[T interface{ ~int }](x T) { _ = len(x /* ERROR "invalid argument" */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = len(x /* ERROR "invalid argument" */ ) }
+func _[T interface{ ~string }](x T) { _ = len(x) }
+func _[T interface{ ~[10]int }](x T) { _ = len(x) }
+func _[T interface{ ~[]byte }](x T) { _ = len(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = len(x) }
+func _[T interface{ ~chan int }](x T) { _ = len(x) }
+func _[T interface{ ~string | ~[]byte | ~chan int }](x T) { _ = len(x) }
+
+func _[T any](x T) { _ = cap(x /* ERROR "invalid argument" */ ) }
+func _[T interface{ ~int }](x T) { _ = cap(x /* ERROR "invalid argument" */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = cap(x /* ERROR "invalid argument" */ ) }
+func _[T interface{ ~string }](x T) { _ = cap(x /* ERROR "invalid argument" */ ) }
+func _[T interface{ ~[10]int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte }](x T) { _ = cap(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = cap(x /* ERROR "invalid argument" */ ) }
+func _[T interface{ ~chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte | ~chan int }](x T) { _ = cap(x) }
+
+// range iteration
+
+func _[T interface{}](x T) {
+ for range x /* ERROR "cannot range" */ {}
+}
+
+type myString string
+
+func _[
+ B1 interface{ string },
+ B2 interface{ string | myString },
+
+ C1 interface{ chan int },
+ C2 interface{ chan int | <-chan int },
+ C3 interface{ chan<- int },
+
+ S1 interface{ []int },
+ S2 interface{ []int | [10]int },
+
+ A1 interface{ [10]int },
+ A2 interface{ [10]int | []int },
+
+ P1 interface{ *[10]int },
+ P2 interface{ *[10]int | *[]int },
+
+ M1 interface{ map[string]int },
+ M2 interface{ map[string]int | map[string]string },
+]() {
+ var b0 string
+ for range b0 {}
+ for _ = range b0 {}
+ for _, _ = range b0 {}
+
+ var b1 B1
+ for range b1 {}
+ for _ = range b1 {}
+ for _, _ = range b1 {}
+
+ var b2 B2
+ for range b2 {}
+
+ var c0 chan int
+ for range c0 {}
+ for _ = range c0 {}
+ for _, _ /* ERROR "permits only one iteration variable" */ = range c0 {}
+
+ var c1 C1
+ for range c1 {}
+ for _ = range c1 {}
+ for _, _ /* ERROR "permits only one iteration variable" */ = range c1 {}
+
+ var c2 C2
+ for range c2 {}
+
+ var c3 C3
+ for range c3 /* ERROR "receive from send-only channel" */ {}
+
+ var s0 []int
+ for range s0 {}
+ for _ = range s0 {}
+ for _, _ = range s0 {}
+
+ var s1 S1
+ for range s1 {}
+ for _ = range s1 {}
+ for _, _ = range s1 {}
+
+ var s2 S2
+ for range s2 /* ERRORx `cannot range over s2.*no core type` */ {}
+
+ var a0 []int
+ for range a0 {}
+ for _ = range a0 {}
+ for _, _ = range a0 {}
+
+ var a1 A1
+ for range a1 {}
+ for _ = range a1 {}
+ for _, _ = range a1 {}
+
+ var a2 A2
+ for range a2 /* ERRORx `cannot range over a2.*no core type` */ {}
+
+ var p0 *[10]int
+ for range p0 {}
+ for _ = range p0 {}
+ for _, _ = range p0 {}
+
+ var p1 P1
+ for range p1 {}
+ for _ = range p1 {}
+ for _, _ = range p1 {}
+
+ var p2 P2
+ for range p2 /* ERRORx `cannot range over p2.*no core type` */ {}
+
+ var m0 map[string]int
+ for range m0 {}
+ for _ = range m0 {}
+ for _, _ = range m0 {}
+
+ var m1 M1
+ for range m1 {}
+ for _ = range m1 {}
+ for _, _ = range m1 {}
+
+ var m2 M2
+ for range m2 /* ERRORx `cannot range over m2.*no core type` */ {}
+}
+
+// type inference checks
+
+var _ = new /* ERROR "cannot infer T" */ ()
+
+func f4[A, B, C any](A, B) C { panic(0) }
+
+var _ = f4 /* ERROR "cannot infer C" */ (1, 2)
+var _ = f4[int, float32, complex128](1, 2)
+
+func f5[A, B, C any](A, []*B, struct{f []C}) int { panic(0) }
+
+var _ = f5[int, float32, complex128](0, nil, struct{f []complex128}{})
+var _ = f5 /* ERROR "cannot infer" */ (0, nil, struct{f []complex128}{})
+var _ = f5(0, []*float32{new[float32]()}, struct{f []complex128}{})
+
+func f6[A any](A, []A) int { panic(0) }
+
+var _ = f6(0, nil)
+
+func f6nil[A any](A) int { panic(0) }
+
+var _ = f6nil /* ERROR "cannot infer" */ (nil)
+
+// type inference with variadic functions
+
+func f7[T any](...T) T { panic(0) }
+
+var _ int = f7 /* ERROR "cannot infer T" */ ()
+var _ int = f7(1)
+var _ int = f7(1, 2)
+var _ int = f7([]int{}...)
+var _ int = f7 /* ERROR "cannot use" */ ([]float64{}...)
+var _ float64 = f7([]float64{}...)
+var _ = f7[float64](1, 2.3)
+var _ = f7(float64(1), 2.3)
+var _ = f7(1, 2.3)
+var _ = f7(1.2, 3)
+
+func f8[A, B any](A, B, ...B) int { panic(0) }
+
+var _ = f8(1) /* ERROR "not enough arguments" */
+var _ = f8(1, 2.3)
+var _ = f8(1, 2.3, 3.4, 4.5)
+var _ = f8(1, 2.3, 3.4, 4)
+var _ = f8[int, float64](1, 2.3, 3.4, 4)
+
+var _ = f8[int, float64](0, 0, nil...) // test case for #18268
+
+// init functions cannot have type parameters
+
+func init() {}
+func init[_ /* ERROR "func init must have no type parameters" */ any]() {}
+func init[P /* ERROR "func init must have no type parameters" */ any]() {}
+
+type T struct {}
+
+func (T) m1() {}
+func (T) m2[ /* ERROR "method must have no type parameters" */ _ any]() {}
+func (T) m3[ /* ERROR "method must have no type parameters" */ P any]() {}
+
+// type inference across parameterized types
+
+type S1[P any] struct { f P }
+
+func f9[P any](x S1[P]) {}
+
+func _() {
+ f9[int](S1[int]{42})
+ f9(S1[int]{42})
+}
+
+type S2[A, B, C any] struct{}
+
+func f10[X, Y, Z any](a S2[X, int, Z], b S2[X, Y, bool]) {}
+
+func _[P any]() {
+ f10[int, float32, string](S2[int, int, string]{}, S2[int, float32, bool]{})
+ f10(S2[int, int, string]{}, S2[int, float32, bool]{})
+ f10(S2[P, int, P]{}, S2[P, float32, bool]{})
+}
+
+// corner case for type inference
+// (was bug: after instantiating f11, the type-checker didn't mark f11 as non-generic)
+
+func f11[T any]() {}
+
+func _() {
+ f11[int]()
+}
+
+// the previous example was extracted from
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// func f12[T interface{m() T}]() {}
+//
+// type A[T any] T
+//
+// func (a A[T]) m() A[T]
+//
+// func _[T any]() {
+// f12[A[T]]()
+// }
+
+// method expressions
+
+func (_ S1[P]) m()
+
+func _() {
+ m := S1[int].m
+ m(struct { f int }{42})
+}
+
+func _[T any] (x T) {
+ m := S1[T].m
+ m(S1[T]{x})
+}
+
+type I1[A any] interface {
+ m1(A)
+}
+
+var _ I1[int] = r1[int]{}
+
+type r1[T any] struct{}
+
+func (_ r1[T]) m1(T)
+
+type I2[A, B any] interface {
+ m1(A)
+ m2(A) B
+}
+
+var _ I2[int, float32] = R2[int, float32]{}
+
+type R2[P, Q any] struct{}
+
+func (_ R2[X, Y]) m1(X)
+func (_ R2[X, Y]) m2(X) Y
+
+// type assertions and type switches over generic types
+// NOTE: These are currently disabled because it's unclear what the correct
+// approach is, and one can always work around by assigning the variable to
+// an interface first.
+
+// // ReadByte1 corresponds to the ReadByte example in the draft design.
+// func ReadByte1[T io.Reader](r T) (byte, error) {
+// if br, ok := r.(io.ByteReader); ok {
+// return br.ReadByte()
+// }
+// var b [1]byte
+// _, err := r.Read(b[:])
+// return b[0], err
+// }
+//
+// // ReadBytes2 is like ReadByte1 but uses a type switch instead.
+// func ReadByte2[T io.Reader](r T) (byte, error) {
+// switch br := r.(type) {
+// case io.ByteReader:
+// return br.ReadByte()
+// }
+// var b [1]byte
+// _, err := r.Read(b[:])
+// return b[0], err
+// }
+//
+// // type assertions and type switches over generic types are strict
+// type I3 interface {
+// m(int)
+// }
+//
+// type I4 interface {
+// m() int // different signature from I3.m
+// }
+//
+// func _[T I3](x I3, p T) {
+// // type assertions and type switches over interfaces are not strict
+// _ = x.(I4)
+// switch x.(type) {
+// case I4:
+// }
+//
+// // type assertions and type switches over generic types are strict
+// _ = p /* ERROR "cannot have dynamic type I4" */.(I4)
+// switch p.(type) {
+// case I4 /* ERROR "cannot have dynamic type I4" */ :
+// }
+// }
+
+// type assertions and type switches over generic types lead to errors for now
+
+func _[T any](x T) {
+ _ = x /* ERROR "cannot use type assertion" */ .(int)
+ switch x /* ERROR "cannot use type switch" */ .(type) {
+ }
+
+ // work-around
+ var t interface{} = x
+ _ = t.(int)
+ switch t.(type) {
+ }
+}
+
+func _[T interface{~int}](x T) {
+ _ = x /* ERROR "cannot use type assertion" */ .(int)
+ switch x /* ERROR "cannot use type switch" */ .(type) {
+ }
+
+ // work-around
+ var t interface{} = x
+ _ = t.(int)
+ switch t.(type) {
+ }
+}
+
+// error messages related to type bounds mention those bounds
+type C[P any] interface{}
+
+func _[P C[P]] (x P) {
+ x.m /* ERROR "x.m undefined" */ ()
+}
+
+type I interface {}
+
+func _[P I] (x P) {
+ x.m /* ERROR "type P has no field or method m" */ ()
+}
+
+func _[P interface{}] (x P) {
+ x.m /* ERROR "type P has no field or method m" */ ()
+}
+
+func _[P any] (x P) {
+ x.m /* ERROR "type P has no field or method m" */ ()
+}
diff --git a/src/internal/types/testdata/check/unions.go b/src/internal/types/testdata/check/unions.go
new file mode 100644
index 0000000..5a3a9ed
--- /dev/null
+++ b/src/internal/types/testdata/check/unions.go
@@ -0,0 +1,66 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check that overlong unions don't bog down type checking.
+// Disallow them for now.
+
+package p
+
+type t int
+
+type (
+ t00 t; t01 t; t02 t; t03 t; t04 t; t05 t; t06 t; t07 t; t08 t; t09 t
+ t10 t; t11 t; t12 t; t13 t; t14 t; t15 t; t16 t; t17 t; t18 t; t19 t
+ t20 t; t21 t; t22 t; t23 t; t24 t; t25 t; t26 t; t27 t; t28 t; t29 t
+ t30 t; t31 t; t32 t; t33 t; t34 t; t35 t; t36 t; t37 t; t38 t; t39 t
+ t40 t; t41 t; t42 t; t43 t; t44 t; t45 t; t46 t; t47 t; t48 t; t49 t
+ t50 t; t51 t; t52 t; t53 t; t54 t; t55 t; t56 t; t57 t; t58 t; t59 t
+ t60 t; t61 t; t62 t; t63 t; t64 t; t65 t; t66 t; t67 t; t68 t; t69 t
+ t70 t; t71 t; t72 t; t73 t; t74 t; t75 t; t76 t; t77 t; t78 t; t79 t
+ t80 t; t81 t; t82 t; t83 t; t84 t; t85 t; t86 t; t87 t; t88 t; t89 t
+ t90 t; t91 t; t92 t; t93 t; t94 t; t95 t; t96 t; t97 t; t98 t; t99 t
+)
+
+type u99 interface {
+ t00|t01|t02|t03|t04|t05|t06|t07|t08|t09|
+ t10|t11|t12|t13|t14|t15|t16|t17|t18|t19|
+ t20|t21|t22|t23|t24|t25|t26|t27|t28|t29|
+ t30|t31|t32|t33|t34|t35|t36|t37|t38|t39|
+ t40|t41|t42|t43|t44|t45|t46|t47|t48|t49|
+ t50|t51|t52|t53|t54|t55|t56|t57|t58|t59|
+ t60|t61|t62|t63|t64|t65|t66|t67|t68|t69|
+ t70|t71|t72|t73|t74|t75|t76|t77|t78|t79|
+ t80|t81|t82|t83|t84|t85|t86|t87|t88|t89|
+ t90|t91|t92|t93|t94|t95|t96|t97|t98
+}
+
+type u100a interface {
+ u99|float32
+}
+
+type u100b interface {
+ u99|float64
+}
+
+type u101 interface {
+ t00|t01|t02|t03|t04|t05|t06|t07|t08|t09|
+ t10|t11|t12|t13|t14|t15|t16|t17|t18|t19|
+ t20|t21|t22|t23|t24|t25|t26|t27|t28|t29|
+ t30|t31|t32|t33|t34|t35|t36|t37|t38|t39|
+ t40|t41|t42|t43|t44|t45|t46|t47|t48|t49|
+ t50|t51|t52|t53|t54|t55|t56|t57|t58|t59|
+ t60|t61|t62|t63|t64|t65|t66|t67|t68|t69|
+ t70|t71|t72|t73|t74|t75|t76|t77|t78|t79|
+ t80|t81|t82|t83|t84|t85|t86|t87|t88|t89|
+ t90|t91|t92|t93|t94|t95|t96|t97|t98|t99|
+ int // ERROR "cannot handle more than 100 union terms"
+}
+
+type u102 interface {
+ int /* ERROR "cannot handle more than 100 union terms" */ |string|u100a
+}
+
+type u200 interface {
+ u100a /* ERROR "cannot handle more than 100 union terms" */ |u100b
+}
diff --git a/src/internal/types/testdata/check/vardecl.go b/src/internal/types/testdata/check/vardecl.go
new file mode 100644
index 0000000..726b619
--- /dev/null
+++ b/src/internal/types/testdata/check/vardecl.go
@@ -0,0 +1,215 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vardecl
+
+// Prerequisites.
+import "math"
+func f() {}
+func g() (x, y int) { return }
+var m map[string]int
+
+// Var decls must have a type or an initializer.
+var _ int
+var _, _ int
+
+var _; /* ERROR "expected type" */
+var _, _; /* ERROR "expected type" */
+var _, _, _; /* ERROR "expected type" */
+
+// The initializer must be an expression.
+var _ = int /* ERROR "not an expression" */
+var _ = f /* ERROR "used as value" */ ()
+
+// Identifier and expression arity must match.
+var _, _ = 1, 2
+var _ = 1, 2 /* ERROR "extra init expr 2" */
+var _, _ = 1 /* ERRORx `assignment mismatch: [1-9]+ variables but.*[1-9]+ value(s)?` */
+var _, _, _ /* ERROR "missing init expr for _" */ = 1, 2
+
+var _ = g /* ERROR "multiple-value g" */ ()
+var _, _ = g()
+var _, _, _ = g /* ERRORx `assignment mismatch: [1-9]+ variables but.*[1-9]+ value(s)?` */ ()
+
+var _ = m["foo"]
+var _, _ = m["foo"]
+var _, _, _ = m /* ERRORx `assignment mismatch: [1-9]+ variables but.*[1-9]+ value(s)?` */ ["foo"]
+
+var _, _ int = 1, 2
+var _ int = 1, 2 /* ERROR "extra init expr 2" */
+var _, _ int = 1 /* ERRORx `assignment mismatch: [1-9]+ variables but.*[1-9]+ value(s)?` */
+var _, _, _ /* ERROR "missing init expr for _" */ int = 1, 2
+
+var (
+ _, _ = 1, 2
+ _ = 1, 2 /* ERROR "extra init expr 2" */
+ _, _ = 1 /* ERRORx `assignment mismatch: [1-9]+ variables but.*[1-9]+ value(s)?` */
+ _, _, _ /* ERROR "missing init expr for _" */ = 1, 2
+
+ _ = g /* ERROR "multiple-value g" */ ()
+ _, _ = g()
+ _, _, _ = g /* ERRORx `assignment mismatch: [1-9]+ variables but.*[1-9]+ value(s)?` */ ()
+
+ _ = m["foo"]
+ _, _ = m["foo"]
+ _, _, _ = m /* ERRORx `assignment mismatch: [1-9]+ variables but.*[1-9]+ value(s)?` */ ["foo"]
+
+ _, _ int = 1, 2
+ _ int = 1, 2 /* ERROR "extra init expr 2" */
+ _, _ int = 1 /* ERRORx `assignment mismatch: [1-9]+ variables but.*[1-9]+ value(s)?` */
+ _, _, _ /* ERROR "missing init expr for _" */ int = 1, 2
+)
+
+// Variables declared in function bodies must be 'used'.
+type T struct{}
+func (r T) _(a, b, c int) (u, v, w int) {
+ var x1 /* ERROR "declared and not used" */ int
+ var x2 /* ERROR "declared and not used" */ int
+ x1 = 1
+ (x2) = 2
+
+ y1 /* ERROR "declared and not used" */ := 1
+ y2 /* ERROR "declared and not used" */ := 2
+ y1 = 1
+ (y1) = 2
+
+ {
+ var x1 /* ERROR "declared and not used" */ int
+ var x2 /* ERROR "declared and not used" */ int
+ x1 = 1
+ (x2) = 2
+
+ y1 /* ERROR "declared and not used" */ := 1
+ y2 /* ERROR "declared and not used" */ := 2
+ y1 = 1
+ (y1) = 2
+ }
+
+ if x /* ERROR "declared and not used" */ := 0; a < b {}
+
+ switch x /* ERROR "declared and not used" */, y := 0, 1; a {
+ case 0:
+ _ = y
+ case 1:
+ x /* ERROR "declared and not used" */ := 0
+ }
+
+ var t interface{}
+ switch t /* ERROR "declared and not used" */ := t.(type) {}
+
+ switch t /* ERROR "declared and not used" */ := t.(type) {
+ case int:
+ }
+
+ switch t /* ERROR "declared and not used" */ := t.(type) {
+ case int:
+ case float32, complex64:
+ t = nil
+ }
+
+ switch t := t.(type) {
+ case int:
+ case float32, complex64:
+ _ = t
+ }
+
+ switch t := t.(type) {
+ case int:
+ case float32:
+ case string:
+ _ = func() string {
+ return t
+ }
+ }
+
+ switch t := t; t /* ERROR "declared and not used" */ := t.(type) {}
+
+ var z1 /* ERROR "declared and not used" */ int
+ var z2 int
+ _ = func(a, b, c int) (u, v, w int) {
+ z1 = a
+ (z1) = b
+ a = z2
+ return
+ }
+
+ var s []int
+ var i /* ERROR "declared and not used" */ , j int
+ for i, j = range s {
+ _ = j
+ }
+
+ for i, j /* ERROR "declared and not used" */ := range s {
+ _ = func() int {
+ return i
+ }
+ }
+ return
+}
+
+// Unused variables in function literals must lead to only one error (issue #22524).
+func _() {
+ _ = func() {
+ var x /* ERROR "declared and not used" */ int
+ }
+}
+
+// Invalid variable declarations must not lead to "declared and not used errors".
+// TODO(gri) enable these tests once go/types follows types2 logic for declared and not used variables
+// func _() {
+// var a x // DISABLED_ERROR undefined: x
+// var b = x // DISABLED_ERROR undefined: x
+// var c int = x // DISABLED_ERROR undefined: x
+// var d, e, f x /* DISABLED_ERROR x */ /* DISABLED_ERROR x */ /* DISABLED_ERROR x */
+// var g, h, i = x, x, x /* DISABLED_ERROR x */ /* DISABLED_ERROR x */ /* DISABLED_ERROR x */
+// var j, k, l float32 = x, x, x /* DISABLED_ERROR x */ /* DISABLED_ERROR x */ /* DISABLED_ERROR x */
+// // but no "declared and not used" errors
+// }
+
+// Invalid (unused) expressions must not lead to spurious "declared and not used errors".
+func _() {
+ var a, b, c int
+ var x, y int
+ x, y = a /* ERRORx `assignment mismatch: [1-9]+ variables but.*[1-9]+ value(s)?` */ , b, c
+ _ = x
+ _ = y
+}
+
+func _() {
+ var x int
+ return x /* ERROR "too many return values" */
+ return math /* ERROR "too many return values" */ .Sin(0)
+}
+
+func _() int {
+ var x, y int
+ return x, y /* ERROR "too many return values" */
+}
+
+// Short variable declarations must declare at least one new non-blank variable.
+func _() {
+ _ := /* ERROR "no new variables" */ 0
+ _, a := 0, 1
+ _, a := /* ERROR "no new variables" */ 0, 1
+ _, a, b := 0, 1, 2
+ _, _, _ := /* ERROR "no new variables" */ 0, 1, 2
+
+ _ = a
+ _ = b
+}
+
+// Test case for variables depending on function literals (see also #22992).
+var A /* ERROR "initialization cycle" */ = func() int { return A }()
+
+func _() {
+ // The function literal below must not see a.
+ var a = func() int { return a /* ERROR "undefined" */ }()
+ var _ = func() int { return a }()
+
+ // The function literal below must not see x, y, or z.
+ var x, y, z = 0, 1, func() int { return x /* ERROR "undefined" */ + y /* ERROR "undefined" */ + z /* ERROR "undefined" */ }()
+ _, _, _ = x, y, z
+}
+
+// TODO(gri) consolidate other var decl checks in this file
\ No newline at end of file
diff --git a/src/internal/types/testdata/examples/constraints.go b/src/internal/types/testdata/examples/constraints.go
new file mode 100644
index 0000000..4c97a40
--- /dev/null
+++ b/src/internal/types/testdata/examples/constraints.go
@@ -0,0 +1,80 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of generic constraint interfaces.
+
+package p
+
+type MyInt int
+
+type (
+ // Arbitrary types may be embedded like interfaces.
+ _ interface{int}
+ _ interface{~int}
+
+ // Types may be combined into a union.
+ union interface{int|~string}
+
+ // Union terms must describe disjoint (non-overlapping) type sets.
+ _ interface{int|int /* ERROR "overlapping terms int" */ }
+ _ interface{int|~ /* ERROR "overlapping terms ~int" */ int }
+ _ interface{~int|~ /* ERROR "overlapping terms ~int" */ int }
+ _ interface{~int|MyInt /* ERROR "overlapping terms p.MyInt and ~int" */ }
+ _ interface{int|any}
+ _ interface{int|~string|union}
+ _ interface{int|~string|interface{int}}
+ _ interface{union|int} // interfaces (here: union) are ignored when checking for overlap
+ _ interface{union|union} // ditto
+
+ // For now we do not permit interfaces with methods in unions.
+ _ interface{~ /* ERROR "invalid use of ~" */ any}
+ _ interface{int|interface /* ERRORx `cannot use .* in union` */ { m() }}
+)
+
+type (
+ // Tilde is not permitted on defined types or interfaces.
+ foo int
+ bar any
+ _ interface{foo}
+ _ interface{~ /* ERROR "invalid use of ~" */ foo }
+ _ interface{~ /* ERROR "invalid use of ~" */ bar }
+)
+
+// Stand-alone type parameters are not permitted as elements or terms in unions.
+type (
+ _[T interface{ *T } ] struct{} // ok
+ _[T interface{ int | *T } ] struct{} // ok
+ _[T interface{ T /* ERROR "term cannot be a type parameter" */ } ] struct{}
+ _[T interface{ ~T /* ERROR "type in term ~T cannot be a type parameter" */ } ] struct{}
+ _[T interface{ int|T /* ERROR "term cannot be a type parameter" */ }] struct{}
+)
+
+// Multiple embedded union elements are intersected. The order in which they
+// appear in the interface doesn't matter since intersection is a symmetric
+// operation.
+
+type myInt1 int
+type myInt2 int
+
+func _[T interface{ myInt1|myInt2; ~int }]() T { return T(0) }
+func _[T interface{ ~int; myInt1|myInt2 }]() T { return T(0) }
+
+// Here the intersections are empty - there's no type that's in the type set of T.
+func _[T interface{ myInt1|myInt2; int }]() T { return T(0 /* ERROR "cannot convert" */ ) }
+func _[T interface{ int; myInt1|myInt2 }]() T { return T(0 /* ERROR "cannot convert" */ ) }
+
+// Union elements may be interfaces as long as they don't define
+// any methods or embed comparable.
+
+type (
+ Integer interface{ ~int|~int8|~int16|~int32|~int64 }
+ Unsigned interface{ ~uint|~uint8|~uint16|~uint32|~uint64 }
+ Floats interface{ ~float32|~float64 }
+ Complex interface{ ~complex64|~complex128 }
+ Number interface{ Integer|Unsigned|Floats|Complex }
+ Ordered interface{ Integer|Unsigned|Floats|~string }
+
+ _ interface{ Number | error /* ERROR "cannot use error in union" */ }
+ _ interface{ Ordered | comparable /* ERROR "cannot use comparable in union" */ }
+)
diff --git a/src/internal/types/testdata/examples/functions.go b/src/internal/types/testdata/examples/functions.go
new file mode 100644
index 0000000..fdc67e7
--- /dev/null
+++ b/src/internal/types/testdata/examples/functions.go
@@ -0,0 +1,219 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of type-parameterized functions.
+
+package p
+
+// Reverse is a generic function that takes a []T argument and
+// reverses that slice in place.
+func Reverse[T any](list []T) {
+ i := 0
+ j := len(list)-1
+ for i < j {
+ list[i], list[j] = list[j], list[i]
+ i++
+ j--
+ }
+}
+
+func _() {
+ // Reverse can be called with an explicit type argument.
+ Reverse[int](nil)
+ Reverse[string]([]string{"foo", "bar"})
+ Reverse[struct{x, y int}]([]struct{x, y int}{{1, 2}, {2, 3}, {3, 4}})
+
+ // Since the type parameter is used for an incoming argument,
+ // it can be inferred from the provided argument's type.
+ Reverse([]string{"foo", "bar"})
+ Reverse([]struct{x, y int}{{1, 2}, {2, 3}, {3, 4}})
+
+ // But the incoming argument must have a type, even if it's a
+ // default type. An untyped nil won't work.
+ // Reverse(nil) // this won't type-check
+
+ // A typed nil will work, though.
+ Reverse([]int(nil))
+}
+
+// Certain functions, such as the built-in `new` could be written using
+// type parameters.
+func new[T any]() *T {
+ var x T
+ return &x
+}
+
+// When calling our own `new`, we need to pass the type parameter
+// explicitly since there is no (value) argument from which the
+// result type could be inferred. We don't try to infer the
+// result type from the assignment to keep things simple and
+// easy to understand.
+var _ = new[int]()
+var _ *float64 = new[float64]() // the result type is indeed *float64
+
+// A function may have multiple type parameters, of course.
+func foo[A, B, C any](a A, b []B, c *C) B {
+ // do something here
+ return b[0]
+}
+
+// As before, we can pass type parameters explicitly.
+var s = foo[int, string, float64](1, []string{"first"}, new[float64]())
+
+// Or we can use type inference.
+var _ float64 = foo(42, []float64{1.0}, &s)
+
+// Type inference works in a straight-forward manner even
+// for variadic functions.
+func variadic[A, B any](A, B, ...B) int { panic(0) }
+
+// var _ = variadic(1) // ERROR "not enough arguments"
+var _ = variadic(1, 2.3)
+var _ = variadic(1, 2.3, 3.4, 4.5)
+var _ = variadic[int, float64](1, 2.3, 3.4, 4)
+
+// Type inference also works in recursive function calls where
+// the inferred type is the type parameter of the caller.
+func f1[T any](x T) {
+ f1(x)
+}
+
+func f2a[T any](x, y T) {
+ f2a(x, y)
+}
+
+func f2b[T any](x, y T) {
+ f2b(y, x)
+}
+
+func g2a[P, Q any](x P, y Q) {
+ g2a(x, y)
+}
+
+func g2b[P, Q any](x P, y Q) {
+ g2b(y, x)
+}
+
+// Here's an example of a recursive function call with variadic
+// arguments and type inference inferring the type parameter of
+// the caller (i.e., itself).
+func max[T interface{ ~int }](x ...T) T {
+ var x0 T
+ if len(x) > 0 {
+ x0 = x[0]
+ }
+ if len(x) > 1 {
+ x1 := max(x[1:]...)
+ if x1 > x0 {
+ return x1
+ }
+ }
+ return x0
+}
+
+// When inferring channel types, the channel direction is ignored
+// for the purpose of type inference. Once the type has been
+// inferred, the usual parameter passing rules are applied.
+// Thus, even if a type can be inferred successfully, the function
+// call may not be valid.
+
+func fboth[T any](chan T) {}
+func frecv[T any](<-chan T) {}
+func fsend[T any](chan<- T) {}
+
+func _() {
+ var both chan int
+ var recv <-chan int
+ var send chan<-int
+
+ fboth(both)
+ fboth(recv /* ERROR "cannot use" */ )
+ fboth(send /* ERROR "cannot use" */ )
+
+ frecv(both)
+ frecv(recv)
+ frecv(send /* ERROR "cannot use" */ )
+
+ fsend(both)
+ fsend(recv /* ERROR "cannot use" */)
+ fsend(send)
+}
+
+func ffboth[T any](func(chan T)) {}
+func ffrecv[T any](func(<-chan T)) {}
+func ffsend[T any](func(chan<- T)) {}
+
+func _() {
+ var both func(chan int)
+ var recv func(<-chan int)
+ var send func(chan<- int)
+
+ ffboth(both)
+ ffboth(recv /* ERROR "does not match" */ )
+ ffboth(send /* ERROR "does not match" */ )
+
+ ffrecv(both /* ERROR "does not match" */ )
+ ffrecv(recv)
+ ffrecv(send /* ERROR "does not match" */ )
+
+ ffsend(both /* ERROR "does not match" */ )
+ ffsend(recv /* ERROR "does not match" */ )
+ ffsend(send)
+}
+
+// When inferring elements of unnamed composite parameter types,
+// if the arguments are defined types, use their underlying types.
+// Even though the matching types are not exactly structurally the
+// same (one is a type literal, the other a named type), because
+// assignment is permitted, parameter passing is permitted as well,
+// so type inference should be able to handle these cases well.
+
+func g1[T any]([]T) {}
+func g2[T any]([]T, T) {}
+func g3[T any](*T, ...T) {}
+
+func _() {
+ type intSlice []int
+ g1([]int{})
+ g1(intSlice{})
+ g2(nil, 0)
+
+ type myString string
+ var s1 string
+ g3(nil, "1", myString("2"), "3")
+ g3(& /* ERROR "cannot use &s1 (value of type *string) as *myString value in argument to g3" */ s1, "1", myString("2"), "3")
+ _ = s1
+
+ type myStruct struct{x int}
+ var s2 myStruct
+ g3(nil, struct{x int}{}, myStruct{})
+ g3(&s2, struct{x int}{}, myStruct{})
+ g3(nil, myStruct{}, struct{x int}{})
+ g3(&s2, myStruct{}, struct{x int}{})
+}
+
+// Here's a realistic example.
+
+func append[T any](s []T, t ...T) []T { panic(0) }
+
+func _() {
+ var f func()
+ type Funcs []func()
+ var funcs Funcs
+ _ = append(funcs, f)
+}
+
+// Generic type declarations cannot have empty type parameter lists
+// (that would indicate a slice type). Thus, generic functions cannot
+// have empty type parameter lists, either. This is a syntax error.
+
+func h[] /* ERROR "empty type parameter list" */ () {}
+
+func _() {
+ h /* ERROR "cannot index" */ [] /* ERROR "operand" */ ()
+}
+
+// Generic functions must have a function body.
+
+func _ /* ERROR "generic function is missing function body" */ [P any]()
diff --git a/src/internal/types/testdata/examples/inference.go b/src/internal/types/testdata/examples/inference.go
new file mode 100644
index 0000000..0aaaa82
--- /dev/null
+++ b/src/internal/types/testdata/examples/inference.go
@@ -0,0 +1,163 @@
+// -lang=go1.20
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of type inference.
+
+package p
+
+type Ordered interface {
+ ~int | ~float64 | ~string
+}
+
+func min[T Ordered](x, y T) T { panic(0) }
+
+func _() {
+ // min can be called with explicit instantiation.
+ _ = min[int](1, 2)
+
+ // Alternatively, the type argument can be inferred from
+ // one of the arguments. Untyped arguments will be considered
+ // last.
+ var x int
+ _ = min(x, x)
+ _ = min(x, 1)
+ _ = min(x, 1.0)
+ _ = min(1, 2)
+ _ = min(1, 2.3)
+
+ var y float64
+ _ = min(1, y)
+ _ = min(1.2, y)
+ _ = min(1.2, 3.4)
+ _ = min(1.2, 3)
+
+ var s string
+ _ = min(s, "foo")
+ _ = min("foo", "bar")
+}
+
+func mixed[T1, T2, T3 any](T1, T2, T3) {}
+
+func _() {
+ // mixed can be called with explicit instantiation.
+ mixed[int, string, bool](0, "", false)
+
+ // Alternatively, partial type arguments may be provided
+ // (from left to right), and the remaining ones may be inferred.
+ mixed[int, string](0, "", false)
+ mixed[int](0, "", false)
+ mixed(0, "", false)
+
+ // Provided type arguments always take precedence over
+ // inferred types.
+ mixed[int, string](1.1 /* ERROR "cannot use 1.1" */, "", false)
+}
+
+func related1[Slice interface{ ~[]Elem }, Elem any](s Slice, e Elem) {}
+
+func _() {
+ // related1 can be called with explicit instantiation.
+ var si []int
+ related1[[]int, int](si, 0)
+
+ // Alternatively, the 2nd type argument can be inferred
+ // from the first one through constraint type inference.
+ var ss []string
+ _ = related1[[]string]
+ related1[[]string](ss, "foo")
+
+ // A type argument inferred from another explicitly provided
+ // type argument overrides whatever value argument type is given.
+ related1[[]string](ss, 0 /* ERROR "cannot use 0" */)
+
+ // A type argument may be inferred from a value argument
+ // and then help infer another type argument via constraint
+ // type inference.
+ related1(si, 0)
+ related1(si, "foo" /* ERROR `cannot use "foo"` */)
+}
+
+func related2[Elem any, Slice interface{ []Elem }](e Elem, s Slice) {}
+
+func _() {
+ // related2 can be called with explicit instantiation.
+ var si []int
+ related2[int, []int](0, si)
+
+ // Alternatively, the 2nd type argument can be inferred
+ // from the first one through constraint type inference.
+ var ss []string
+ _ = related2[string]
+ related2[string]("foo", ss)
+
+ // A type argument may be inferred from a value argument
+ // and then help infer another type argument via constraint
+ // type inference. Untyped arguments are always considered
+ // last.
+ related2(1.2, []float64{})
+ related2(1.0, []int{})
+ related2 /* ERROR "Slice (type []int) does not satisfy interface{[]Elem}" */ (float64(1.0), []int{}) // TODO(gri) better error message
+}
+
+type List[P any] []P
+
+func related3[Elem any, Slice []Elem | List[Elem]]() Slice { return nil }
+
+func _() {
+ // related3 can be instantiated explicitly
+ related3[int, []int]()
+ related3[byte, List[byte]]()
+
+ // The 2nd type argument cannot be inferred from the first
+ // one because there are two possible choices: []Elem and
+ // List[Elem].
+ related3 /* ERROR "cannot infer Slice" */ [int]()
+}
+
+func wantsMethods[P interface {
+ m1(Q)
+ m2() R
+}, Q, R any](P) {
+}
+
+type hasMethods1 struct{}
+
+func (hasMethods1) m1(int)
+func (hasMethods1) m2() string
+
+type hasMethods2 struct{}
+
+func (*hasMethods2) m1(int)
+func (*hasMethods2) m2() string
+
+type hasMethods3 interface {
+ m1(float64)
+ m2() complex128
+}
+
+type hasMethods4 interface {
+ m1()
+}
+
+func _() {
+ // wantsMethods can be called with arguments that have the relevant methods
+ // and wantsMethods' type arguments are inferred from those types' method
+ // signatures.
+ wantsMethods(hasMethods1{})
+ wantsMethods(&hasMethods1{})
+ wantsMethods /* ERROR "P (type hasMethods2) does not satisfy interface{m1(Q); m2() R} (method m1 has pointer receiver)" */ (hasMethods2{})
+ wantsMethods(&hasMethods2{})
+ wantsMethods(hasMethods3(nil))
+ wantsMethods /* ERROR "P (type any) does not satisfy interface{m1(Q); m2() R} (missing method m1)" */ (any(nil))
+ wantsMethods /* ERROR "P (type hasMethods4) does not satisfy interface{m1(Q); m2() R} (wrong type for method m1)" */ (hasMethods4(nil))
+}
+
+// "Reverse" type inference is not yet permitted.
+
+func f[P any](P) {}
+
+// This must not crash.
+var _ func(int) = f // ERROR "implicitly instantiated function in assignment requires go1.21 or later"
diff --git a/src/internal/types/testdata/examples/inference2.go b/src/internal/types/testdata/examples/inference2.go
new file mode 100644
index 0000000..6097c2b
--- /dev/null
+++ b/src/internal/types/testdata/examples/inference2.go
@@ -0,0 +1,100 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of "reverse" type inference
+// where the type arguments for generic functions are determined
+// from assigning the functions.
+
+package p
+
+func f1[P any](P) {}
+func f2[P any]() P { var x P; return x }
+func f3[P, Q any](P) Q { var x Q; return x }
+func f4[P any](P, P) {}
+func f5[P any](P) []P { return nil }
+func f6[P any](int) P { var x P; return x }
+func f7[P any](P) string { return "" }
+
+// initialization expressions
+var (
+ v1 = f1 // ERROR "cannot use generic function f1 without instantiation"
+ v2 func(int) = f2 // ERROR "cannot infer P"
+
+ v3 func(int) = f1
+ v4 func() int = f2
+ v5 func(int) int = f3
+ _ func(int) int = f3[int]
+
+ v6 func(int, int) = f4
+ v7 func(int, string) = f4 // ERROR "type func(int, string) of variable in assignment does not match inferred type func(int, int) for func(P, P)"
+ v8 func(int) []int = f5
+ v9 func(string) []int = f5 // ERROR "type func(string) []int of variable in assignment does not match inferred type func(string) []string for func(P) []P"
+
+ _, _ func(int) = f1, f1
+ _, _ func(int) = f1, f2 // ERROR "cannot infer P"
+)
+
+// Regular assignments
+func _() {
+ v1 = f1 // no error here because v1 is invalid (we don't know its type) due to the error above
+ var v1_ func() int
+ _ = v1_
+ v1_ = f1 // ERROR "cannot infer P"
+ v2 = f2 // ERROR "cannot infer P"
+
+ v3 = f1
+ v4 = f2
+ v5 = f3
+ v5 = f3[int]
+
+ v6 = f4
+ v7 = f4 // ERROR "type func(int, string) of variable in assignment does not match inferred type func(int, int) for func(P, P)"
+ v8 = f5
+ v9 = f5 // ERROR "type func(string) []int of variable in assignment does not match inferred type func(string) []string for func(P) []P"
+}
+
+// Return statements
+func _() func(int) { return f1 }
+func _() func() int { return f2 }
+func _() func(int) int { return f3 }
+func _() func(int) int { return f3[int] }
+
+func _() func(int, int) { return f4 }
+func _() func(int, string) {
+ return f4 /* ERROR "type func(int, string) of variable in assignment does not match inferred type func(int, int) for func(P, P)" */
+}
+func _() func(int) []int { return f5 }
+func _() func(string) []int {
+ return f5 /* ERROR "type func(string) []int of variable in assignment does not match inferred type func(string) []string for func(P) []P" */
+}
+
+func _() (_, _ func(int)) { return f1, f1 }
+func _() (_, _ func(int)) { return f1, f2 /* ERROR "cannot infer P" */ }
+
+// Argument passing
+func g1(func(int)) {}
+func g2(func(int, int)) {}
+func g3(func(int) string) {}
+func g4[P any](func(P) string) {}
+func g5[P, Q any](func(P) string, func(P) Q) {}
+func g6(func(int), func(string)) {}
+
+func _() {
+ g1(f1)
+ g1(f2 /* ERROR "cannot infer P" */)
+ g2(f4)
+ g4(f6)
+ g5(f6, f7)
+ g6(f1, f1)
+}
+
+// Argument passing of partially instantiated functions
+func h(func(int, string), func(string, int)) {}
+
+func p[P, Q any](P, Q) {}
+
+func _() {
+ h(p, p)
+ h(p[int], p[string])
+}
diff --git a/src/internal/types/testdata/examples/methods.go b/src/internal/types/testdata/examples/methods.go
new file mode 100644
index 0000000..e92dc50
--- /dev/null
+++ b/src/internal/types/testdata/examples/methods.go
@@ -0,0 +1,112 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of methods on type-parameterized types.
+
+package p
+
+// Parameterized types may have methods.
+type T1[A any] struct{ a A }
+
+// When declaring a method for a parameterized type, the "instantiated"
+// receiver type acts as an implicit declaration of the type parameters
+// for the receiver type. In the example below, method m1 on type T1 has
+// the receiver type T1[A] which declares the type parameter A for use
+// with this method. That is, within the method m1, A stands for the
+// actual type argument provided to an instantiated T1.
+func (t T1[A]) m1() A { return t.a }
+
+// For instance, if T1 is instantiated with the type int, the type
+// parameter A in m1 assumes that type (int) as well and we can write
+// code like this:
+var x T1[int]
+var _ int = x.m1()
+
+// Because the type parameter provided to a parameterized receiver type
+// is declared through that receiver declaration, it must be an identifier.
+// It cannot possibly be some other type because the receiver type is not
+// instantiated with concrete types; it stands for the parameterized
+// receiver type.
+func (t T1[[ /* ERROR "must be an identifier" */ ]int]) m2() {}
+
+// Note that using what looks like a predeclared identifier, say int,
+// as type parameter in this situation is deceptive and considered bad
+// style. In m3 below, int is the name of the local receiver type parameter
+// and it shadows the predeclared identifier int which then cannot be used
+// anymore as expected.
+// This is no different from locally re-declaring a predeclared identifier
+// and usually should be avoided. There are some notable exceptions; e.g.,
+// sometimes it makes sense to use the identifier "copy" which happens to
+// also be the name of a predeclared built-in function.
+func (t T1[int]) m3() { var _ int = 42 /* ERRORx `cannot use 42 .* as int` */ }
+
+// The names of the type parameters used in a parameterized receiver
+// type don't have to match the type parameter names in the declaration
+// of the type used for the receiver. In our example, even though T1 is
+// declared with type parameter named A, methods using that receiver type
+// are free to use their own name for that type parameter. That is, the
+// name of type parameters is always local to the declaration where they
+// are introduced. In our example we can write a method m4 and use the
+// name X instead of A for the type parameter without any difference.
+func (t T1[X]) m4() X { return t.a }
+
+// If the receiver type is parameterized, type parameters must always be
+// provided: this simply follows from the general rule that a parameterized
+// type must be instantiated before it can be used. A method receiver
+// declaration using a parameterized receiver type is no exception. It is
+// simply that such receiver type expressions perform two tasks simultaneously:
+// they declare the (local) type parameters and then use them to instantiate
+// the receiver type. Forgetting to provide a type parameter leads to an error.
+func (t T1 /* ERRORx `generic type .* without instantiation` */ ) m5() {}
+
+// However, sometimes we don't need the type parameter, and thus it is
+// inconvenient to have to choose a name. Since the receiver type expression
+// serves as a declaration for its type parameters, we are free to choose the
+// blank identifier:
+func (t T1[_]) m6() {}
+
+// Naturally, these rules apply to any number of type parameters on the receiver
+// type. Here are some more complex examples.
+type T2[A, B, C any] struct {
+ a A
+ b B
+ c C
+}
+
+// Naming of the type parameters is local and has no semantic impact:
+func (t T2[A, B, C]) m1() (A, B, C) { return t.a, t.b, t.c }
+func (t T2[C, B, A]) m2() (C, B, A) { return t.a, t.b, t.c }
+func (t T2[X, Y, Z]) m3() (X, Y, Z) { return t.a, t.b, t.c }
+
+// Type parameters may be left blank if they are not needed:
+func (t T2[A, _, C]) m4() (A, C) { return t.a, t.c }
+func (t T2[_, _, X]) m5() X { return t.c }
+func (t T2[_, _, _]) m6() {}
+
+// As usual, blank names may be used for any object which we don't care about
+// using later. For instance, we may write an unnamed method with a receiver
+// that cannot be accessed:
+func (_ T2[_, _, _]) _() int { return 42 }
+
+// Because a receiver parameter list is simply a parameter list, we can
+// omit the receiver name, just as for any other parameter.
+type T0 struct{}
+func (T0) _() {}
+func (T1[A]) _() {}
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// // A generic receiver type may constrain its type parameter such
+// // that it must be a pointer type. Such receiver types are not
+// // permitted.
+// type T3a[P interface{ ~int | ~string | ~float64 }] P
+//
+// func (T3a[_]) m() {} // this is ok
+//
+// type T3b[P interface{ ~unsafe.Pointer }] P
+//
+// func (T3b /* ERROR "invalid receiver" */ [_]) m() {}
+//
+// type T3c[P interface{ *int | *string }] P
+//
+// func (T3c /* ERROR "invalid receiver" */ [_]) m() {}
diff --git a/src/internal/types/testdata/examples/operations.go b/src/internal/types/testdata/examples/operations.go
new file mode 100644
index 0000000..9fb95d0
--- /dev/null
+++ b/src/internal/types/testdata/examples/operations.go
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// indirection
+
+func _[P any](p P) {
+ _ = *p // ERROR "cannot indirect p"
+}
+
+func _[P interface{ int }](p P) {
+ _ = *p // ERROR "cannot indirect p"
+}
+
+func _[P interface{ *int }](p P) {
+ _ = *p
+}
+
+func _[P interface{ *int | *string }](p P) {
+ _ = *p // ERROR "must have identical base types"
+}
+
+type intPtr *int
+
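+// Both *int and intPtr have the base type int, so indirection is
+// permitted here (unlike the *int | *string case above).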
+func _[P interface{ *int | intPtr } ](p P) {
+ var _ int = *p
+}
diff --git a/src/internal/types/testdata/examples/types.go b/src/internal/types/testdata/examples/types.go
new file mode 100644
index 0000000..67f1534
--- /dev/null
+++ b/src/internal/types/testdata/examples/types.go
@@ -0,0 +1,315 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of generic types.
+
+package p
+
+// List is just what it says - a slice of E elements.
+type List[E any] []E
+
+// A generic (parameterized) type must always be instantiated
+// before it can be used to designate the type of a variable
+// (including a struct field or function parameter); though
+// for the latter cases, the provided type may be another type
+// parameter. So:
+var _ List[byte] = []byte{}
+
+// A generic binary tree might be declared as follows.
+type Tree[E any] struct {
+ left, right *Tree[E]
+ payload E
+}
+
+// A simple instantiation of Tree:
+var root1 Tree[int]
+
+// The actual type parameter provided may be a generic type itself:
+var root2 Tree[List[int]]
+
+// A couple of more complex examples.
+// We don't need extra parentheses around the element type of the slices on
+// the right (unlike when we use ()'s rather than []'s for type parameters).
+var _ List[List[int]] = []List[int]{}
+var _ List[List[List[Tree[int]]]] = []List[List[Tree[int]]]{}
+
+// Type parameters act like type aliases when used in generic types
+// in the sense that we can "emulate" a specific type instantiation
+// with type aliases.
+type T1[P any] struct {
+ f P
+}
+
+type T2[P any] struct {
+ f struct {
+ g P
+ }
+}
+
+var x1 T1[struct{ g int }]
+var x2 T2[int]
+
+func _() {
+ // This assignment is invalid because the types of x1, x2 are T1(...)
+ // and T2(...) respectively, which are two different defined types.
+ x1 = x2 // ERROR "assignment"
+
+ // This assignment is valid because the types of x1.f and x2.f are
+ // both struct { g int }; the type parameters act like type aliases
+ // and their actual names don't come into play here.
+ x1.f = x2.f
+}
+
+// We can verify this behavior using type aliases instead:
+type T1a struct {
+ f A1
+}
+type A1 = struct { g int }
+
+type T2a struct {
+ f struct {
+ g A2
+ }
+}
+type A2 = int
+
+var x1a T1a
+var x2a T2a
+
+func _() {
+ x1a = x2a // ERROR "assignment"
+ x1a.f = x2a.f
+}
+
+// Another interesting corner case is generic types that don't use
+// their type arguments. For instance:
+type T[P any] struct{}
+
+var xint T[int]
+var xbool T[bool]
+
+// Are these two variables of the same type? After all, their underlying
+// types are identical. We consider them to be different because each type
+// instantiation creates a new named type, in this case T<int> and T<bool>
+// even if their underlying types are identical. This is sensible because
+// we might still have methods that have different signatures or behave
+// differently depending on the type arguments, and thus we can't possibly
+// consider such types identical. Consequently:
+func _() {
+ xint = xbool // ERROR "assignment"
+}
+
+// Generic types cannot be used without instantiation.
+var _ T // ERROR "cannot use generic type T"
+var _ = T /* ERROR "cannot use generic type T" */ (0)
+
+// In type context, generic (parameterized) types cannot be parenthesized before
+// being instantiated. See also NOTES entry from 12/4/2019.
+var _ (T /* ERROR "cannot use generic type T" */ )[ /* ERRORx `unexpected \[|expected ';'` */ int]
+
+// All types may be parameterized, including interfaces.
+type I1[T any] interface{
+ m1(T)
+}
+
+// There is no such thing as a variadic generic type.
+type _[T ... /* ERROR "invalid use of '...'" */ any] struct{}
+
+// Generic interfaces may be embedded as one would expect.
+type I2 interface {
+ I1(int) // method!
+ I1[string] // embedded I1
+}
+
+func _() {
+ var x I2
+ x.I1(0)
+ x.m1("foo")
+}
+
+type I0 interface {
+ m0()
+}
+
+type I3 interface {
+ I0
+ I1[bool]
+ m(string)
+}
+
+func _() {
+ var x I3
+ x.m0()
+ x.m1(true)
+ x.m("foo")
+}
+
+type _ struct {
+ ( /* ERROR "cannot parenthesize" */ int8)
+ ( /* ERROR "cannot parenthesize" */ *int16)
+ *( /* ERROR "cannot parenthesize" */ int32)
+ List[int]
+
+ int8 /* ERROR "int8 redeclared" */
+ *int16 /* ERROR "int16 redeclared" */
+ List /* ERROR "List redeclared" */ [int]
+}
+
+// Issue #45639: We don't allow this anymore. Keep this code
+// in case we decide to revisit this decision.
+//
+// It's possible to declare local types whose underlying types
+// are type parameters. As with ordinary type definitions, the
+// type's underlying properties are "inherited" but the methods
+// are not.
+// func _[T interface{ m(); ~int }]() {
+// type L T
+// var x L
+//
+// // m is not defined on L (it is not "inherited" from
+// // its underlying type).
+// x.m /* ERROR "x.m undefined" */ ()
+//
+// // But the properties of T, such as that it supports
+// // the operations of the types given by its type bound,
+// // are also the properties of L.
+// x++
+// _ = x - x
+//
+// // On the other hand, if we define a local alias for T,
+// // that alias stands for T as expected.
+// type A = T
+// var y A
+// y.m()
+// _ = y < 0
+// }
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// // It is not permitted to declare a local type whose underlying
+// // type is a type parameter not declared by that type declaration.
+// func _[T any]() {
+// type _ T // ERROR "cannot use function type parameter T as RHS in type declaration"
+// type _ [_ any] T // ERROR "cannot use function type parameter T as RHS in type declaration"
+// }
+
+// As a special case, an explicit type argument may be omitted
+// from a type parameter bound if the type bound expects exactly
+// one type argument. In that case, the type argument is the
+// respective type parameter to which the type bound applies.
+// Note: We may not permit this syntactic sugar at first.
+// Note: This is now disabled. All examples below are adjusted.
+type Adder[T any] interface {
+ Add(T) T
+}
+
+// We don't need to explicitly instantiate the Adder bound
+// if we have exactly one type parameter.
+func Sum[T Adder[T]](list []T) T {
+ var sum T
+ for _, x := range list {
+ sum = sum.Add(x)
+ }
+ return sum
+}
+
+// Valid and invalid variations.
+type B0 any
+type B1[_ any] any
+type B2[_, _ any] any
+
+func _[T1 B0]() {}
+func _[T1 B1[T1]]() {}
+func _[T1 B2 /* ERRORx `cannot use generic type .* without instantiation` */ ]() {}
+
+func _[T1, T2 B0]() {}
+func _[T1 B1[T1], T2 B1[T2]]() {}
+func _[T1, T2 B2 /* ERRORx `cannot use generic type .* without instantiation` */ ]() {}
+
+func _[T1 B0, T2 B1[T2]]() {} // here B1 applies to T2
+
+// When the type argument is omitted, the type bound is
+// instantiated for each type parameter with that type
+// parameter.
+// Note: We may not permit this syntactic sugar at first.
+func _[A Adder[A], B Adder[B], C Adder[A]]() {
+ var a A // A's type bound is Adder[A]
+ a = a.Add(a)
+ var b B // B's type bound is Adder[B]
+ b = b.Add(b)
+ var c C // C's type bound is Adder[A]
+ a = c.Add(a)
+}
+
+// The type of variables (incl. parameters and return values) cannot
+// be an interface with type constraints or be/embed comparable.
+type I interface {
+ ~int
+}
+
+var (
+ _ interface /* ERROR "contains type constraints" */ {~int}
+ _ I /* ERROR "contains type constraints" */
+)
+
+func _(I /* ERROR "contains type constraints" */ )
+func _(x, y, z I /* ERROR "contains type constraints" */ )
+func _() I /* ERROR "contains type constraints" */
+
+func _() {
+ var _ I /* ERROR "contains type constraints" */
+}
+
+type C interface {
+ comparable
+}
+
+var _ comparable /* ERROR "comparable" */
+var _ C /* ERROR "comparable" */
+
+func _(_ comparable /* ERROR "comparable" */ , _ C /* ERROR "comparable" */ )
+
+func _() {
+ var _ comparable /* ERROR "comparable" */
+ var _ C /* ERROR "comparable" */
+}
+
+// Type parameters are never const types, i.e., it's
+// not possible to declare a constant of type parameter type.
+// (If a type set contains just a single const type, we could
+// allow it, but such type sets don't make much sense in the
+// first place.)
+func _[T interface{~int|~float64}]() {
+ // not valid
+ const _ = T /* ERROR "not constant" */ (0)
+ const _ T /* ERROR "invalid constant type T" */ = 1
+
+ // valid
+ var _ = T(0)
+ var _ T = 1
+ _ = T(0)
+}
+
+// It is possible to create composite literals of type parameter
+// type as long as it's possible to create a composite literal
+// of the core type of the type parameter's constraint.
+func _[P interface{ ~[]int }]() P {
+ return P{}
+ return P{1, 2, 3}
+}
+
+func _[P interface{ ~[]E }, E interface{ map[string]P } ]() P {
+ x := P{}
+ return P{{}}
+ return P{E{}}
+ return P{E{"foo": x}}
+ return P{{"foo": x}, {}}
+}
+
+// This is a degenerate case with a singleton type set, but we can create
+// composite literals even if the core type is a defined type.
+type MyInts []int
+
+func _[P MyInts]() P {
+ return P{}
+}
diff --git a/src/internal/types/testdata/examples/typesets.go b/src/internal/types/testdata/examples/typesets.go
new file mode 100644
index 0000000..93eada7
--- /dev/null
+++ b/src/internal/types/testdata/examples/typesets.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of constraint literals with elided interfaces.
+// These examples are permitted if proposal issue #48424 is accepted.
+
+package p
+
+// Constraint type sets of the form T, ~T, or A|B may omit the interface.
+type (
+ _[T int] struct{}
+ _[T ~int] struct{}
+ _[T int | string] struct{}
+ _[T ~int | ~string] struct{}
+)
+
+func min[T int | string](x, y T) T {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func lookup[M ~map[K]V, K comparable, V any](m M, k K) V {
+ return m[k]
+}
+
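+// A small sketch of using lookup, analogous to the deref example below;
+// the type arguments (map[string]int, string, int) are inferred.
+func _() int {
+ m := map[string]int{"a": 1}
+ return lookup(m, "a")
+}
+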
+func deref[P ~*E, E any](p P) E {
+ return *p
+}
+
+func _() int {
+ p := new(int)
+ return deref(p)
+}
+
+func addrOfCopy[V any, P *V](v V) P {
+ return &v
+}
+
+func _() *int {
+ return addrOfCopy(0)
+}
+
+// A type parameter may not be embedded in an interface,
+// so it can also not be used as a constraint.
+func _[A any, B A /* ERROR "cannot use a type parameter as constraint" */]() {}
+func _[A any, B, C A /* ERROR "cannot use a type parameter as constraint" */]() {}
+
+// Error messages refer to the type constraint as it appears in the source.
+// (No implicit interface should be exposed.)
+func _[T string](x T) T {
+ return x /* ERROR "constrained by string" */ * x
+}
+
+func _[T int | string](x T) T {
+ return x /* ERROR "constrained by int | string" */ * x
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue20583.go b/src/internal/types/testdata/fixedbugs/issue20583.go
new file mode 100644
index 0000000..55cbd94
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue20583.go
@@ -0,0 +1,14 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue20583
+
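+// Constants whose exponents are too large are malformed. The constant
+// x below is still representable, but the repeated multiplication
+// overflows and is reported as not representable.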
+const (
+ _ = 6e886451608 /* ERROR "malformed constant" */ /2
+ _ = 6e886451608i /* ERROR "malformed constant" */ /2
+ _ = 0 * 1e+1000000000 // ERROR "malformed constant"
+
+ x = 1e100000000
+ _ = x*x*x*x*x*x* /* ERROR "not representable" */ x
+)
diff --git a/src/internal/types/testdata/fixedbugs/issue23203a.go b/src/internal/types/testdata/fixedbugs/issue23203a.go
new file mode 100644
index 0000000..48cb588
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue23203a.go
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "unsafe"
+
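+// The array length below uses a method expression that refers to another
+// method of T; this must type-check regardless of declaration order
+// (issue23203b.go declares m2 before m1).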
+type T struct{}
+
+func (T) m1() {}
+func (T) m2([unsafe.Sizeof(T.m1)]int) {}
+
+func main() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue23203b.go b/src/internal/types/testdata/fixedbugs/issue23203b.go
new file mode 100644
index 0000000..638ec6c
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue23203b.go
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "unsafe"
+
+type T struct{}
+
+func (T) m2([unsafe.Sizeof(T.m1)]int) {}
+func (T) m1() {}
+
+func main() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue25838.go b/src/internal/types/testdata/fixedbugs/issue25838.go
new file mode 100644
index 0000000..adbd138
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue25838.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// examples from the issue
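+// Each alias chain below resolves to a defined type (h, h1, T) whose
+// underlying type refers back to itself only through a slice or pointer,
+// so the declarations are valid and no cycle error is expected.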
+
+type (
+ e = f
+ f = g
+ g = []h
+ h i
+ i = j
+ j = e
+)
+
+type (
+ e1 = []h1
+ h1 e1
+)
+
+type (
+ P = *T
+ T P
+)
diff --git a/src/internal/types/testdata/fixedbugs/issue26390.go b/src/internal/types/testdata/fixedbugs/issue26390.go
new file mode 100644
index 0000000..9e0101f
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue26390.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// stand-alone test to ensure case is triggered
+
+package issue26390
+
+type A = T
+
+func (t *T) m() *A { return t }
+
+type T struct{}
diff --git a/src/internal/types/testdata/fixedbugs/issue28251.go b/src/internal/types/testdata/fixedbugs/issue28251.go
new file mode 100644
index 0000000..77fd369
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue28251.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for various forms of
+// method receiver declarations, per the spec clarification
+// https://golang.org/cl/142757.
+
+package issue28251
+
+// test case from issue28251
+type T struct{}
+
+type T0 = *T
+
+func (T0) m() {}
+
+func _() { (&T{}).m() }
+
+// various alternative forms
+type (
+ T1 = (((T)))
+)
+
+func ((*(T1))) m1() {}
+func _() { (T{}).m1 /* ERROR "cannot call pointer method m1 on T" */ () }
+func _() { (&T{}).m1() }
+
+type (
+ T2 = (((T3)))
+ T3 = T
+)
+
+func (T2) m2() {}
+func _() { (T{}).m2() }
+func _() { (&T{}).m2() }
+
+type (
+ T4 = ((*(T5)))
+ T5 = T
+)
+
+func (T4) m4() {}
+func _() { (T{}).m4 /* ERROR "cannot call pointer method m4 on T" */ () }
+func _() { (&T{}).m4() }
+
+type (
+ T6 = (((T7)))
+ T7 = (*(T8))
+ T8 = T
+)
+
+func (T6) m6() {}
+func _() { (T{}).m6 /* ERROR "cannot call pointer method m6 on T" */ () }
+func _() { (&T{}).m6() }
+
+type (
+ T9 = *T10
+ T10 = *T11
+ T11 = T
+)
+
+func (T9 /* ERROR "invalid receiver type **T" */ ) m9() {}
+func _() { (T{}).m9 /* ERROR "has no field or method m9" */ () }
+func _() { (&T{}).m9 /* ERROR "has no field or method m9" */ () }
diff --git a/src/internal/types/testdata/fixedbugs/issue3117.go b/src/internal/types/testdata/fixedbugs/issue3117.go
new file mode 100644
index 0000000..16c0afc
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue3117.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S struct {
+ a [1]int
+}
+
+func _(m map[int]S, key int) {
+ m /* ERROR "cannot assign to m[key].a[0] (neither addressable nor a map index expression)" */ [key].a[0] = 0
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue39634.go b/src/internal/types/testdata/fixedbugs/issue39634.go
new file mode 100644
index 0000000..591b00e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39634.go
@@ -0,0 +1,90 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Examples adjusted to match new [T any] syntax for type parameters.
+// Also, previously permitted empty type parameter lists and instantiations
+// are now syntax errors.
+
+package p
+
+// crash 1
+type nt1[_ any]interface{g /* ERROR "undefined" */ }
+type ph1[e nt1[e],g(d /* ERROR "undefined" */ )]s /* ERROR "undefined" */
+func(*ph1[e,e /* ERROR "redeclared" */ ])h(d /* ERROR "undefined" */ )
+
+// crash 2
+// Disabled: empty []'s are now syntax errors. This example leads to too many follow-on errors.
+// type Numeric2 interface{t2 /* ERROR "not a type" */ }
+// func t2[T Numeric2](s[]T){0 /* ERROR "not a type" */ []{s /* ERROR "cannot index" */ [0][0]}}
+
+// crash 3
+type t3 *interface{ t3.p /* ERROR "t3.p is not a type" */ }
+
+// crash 4
+type Numeric4 interface{t4 /* ERROR "not a type" */ }
+func t4[T Numeric4](s[]T){if( /* ERROR "non-boolean" */ 0){*s /* ERROR "cannot indirect" */ [0]}}
+
+// crash 7
+type foo7 interface { bar() }
+type x7[A any] struct{ foo7 }
+func main7() { var _ foo7 = x7[int]{} }
+
+// crash 8
+type foo8[A any] interface { ~A /* ERROR "cannot be a type parameter" */ }
+func bar8[A foo8[A]](a A) {}
+
+// crash 9
+type foo9[A any] interface { foo9 /* ERROR "invalid recursive type" */ [A] }
+func _() { var _ = new(foo9[int]) }
+
+// crash 12
+var u /* ERROR "cycle" */ , i [func /* ERROR "used as value" */ /* ERROR "used as value" */ (u, c /* ERROR "undefined" */ /* ERROR "undefined" */ ) {}(0, len /* ERROR "must be called" */ /* ERROR "must be called" */ )]c /* ERROR "undefined" */ /* ERROR "undefined" */
+
+// crash 15
+func y15() { var a /* ERROR "declared and not used" */ interface{ p() } = G15[string]{} }
+type G15[X any] s /* ERROR "undefined" */
+func (G15 /* ERRORx `generic type .* without instantiation` */ ) p()
+
+// crash 16
+type Foo16[T any] r16 /* ERROR "not a type" */
+func r16[T any]() Foo16[Foo16[T]] { panic(0) }
+
+// crash 17
+type Y17 interface{ c() }
+type Z17 interface {
+ c() Y17
+ Y17 /* ERROR "duplicate method" */
+}
+func F17[T Z17](T) {}
+
+// crash 18
+type o18[T any] []func(_ o18[[]_ /* ERROR "cannot use _" */ ])
+
+// crash 19
+type Z19 [][[]Z19{}[0][0]]c19 /* ERROR "undefined" */
+
+// crash 20
+type Z20 /* ERROR "invalid recursive type" */ interface{ Z20 }
+func F20[t Z20]() { F20(t /* ERROR "invalid composite literal type" */ /* ERROR "too many arguments in call to F20\n\thave (unknown type)\n\twant ()" */ {}) }
+
+// crash 21
+type Z21 /* ERROR "invalid recursive type" */ interface{ Z21 }
+func F21[T Z21]() { ( /* ERROR "not used" */ F21[Z21]) }
+
+// crash 24
+type T24[P any] P // ERROR "cannot use a type parameter as RHS in type declaration"
+func (r T24[P]) m() { T24 /* ERROR "without instantiation" */ .m() }
+
+// crash 25
+type T25[A any] int
+func (t T25[A]) m1() {}
+var x T25 /* ERROR "without instantiation" */ .m1
+
+// crash 26
+type T26 = interface{ F26[ /* ERROR "interface method must have no type parameters" */ Z any]() }
+func F26[Z any]() T26 { return F26[] /* ERROR "operand" */ }
+
+// crash 27
+func e27[T any]() interface{ x27 /* ERROR "not a type" */ } { panic(0) }
+func x27() { e27 /* ERROR "cannot infer T" */ () }
diff --git a/src/internal/types/testdata/fixedbugs/issue39664.go b/src/internal/types/testdata/fixedbugs/issue39664.go
new file mode 100644
index 0000000..a8148c6
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39664.go
@@ -0,0 +1,15 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[_ any] struct {}
+
+func (T /* ERROR "instantiation" */ ) m()
+
+func _() {
+ var x interface { m() }
+ x = T[int]{}
+ _ = x
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue39680.go b/src/internal/types/testdata/fixedbugs/issue39680.go
new file mode 100644
index 0000000..e56bc35
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39680.go
@@ -0,0 +1,31 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+
+/*
+import "fmt"
+
+// Minimal test case.
+func _[T interface{~T}](x T) T{
+ return x
+}
+
+// Test case from issue.
+type constr[T any] interface {
+ ~T
+}
+
+func Print[T constr[T]](s []T) {
+ for _, v := range s {
+ fmt.Print(v)
+ }
+}
+
+func f() {
+ Print([]string{"Hello, ", "playground\n"})
+}
+*/
diff --git a/src/internal/types/testdata/fixedbugs/issue39693.go b/src/internal/types/testdata/fixedbugs/issue39693.go
new file mode 100644
index 0000000..03f2789
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39693.go
@@ -0,0 +1,23 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Number1 interface {
+ // embedding non-interface types is permitted
+ int
+ float64
+}
+
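+// Number1's type set is the intersection of int and float64 and is
+// therefore empty, so + is not defined for operands of type T in Add1.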
+func Add1[T Number1](a, b T) T {
+ return a /* ERROR "not defined" */ + b
+}
+
+type Number2 interface {
+ int | float64
+}
+
+func Add2[T Number2](a, b T) T {
+ return a + b
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue39699.go b/src/internal/types/testdata/fixedbugs/issue39699.go
new file mode 100644
index 0000000..73ba0c4
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39699.go
@@ -0,0 +1,29 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T0 interface{
+}
+
+type T1 interface{
+ ~int
+}
+
+type T2 interface{
+ comparable
+}
+
+type T3 interface {
+ T0
+ T1
+ T2
+}
+
+func _() {
+ _ = T0(0)
+ _ = T1 /* ERROR "cannot use interface T1 in conversion" */ (1)
+ _ = T2 /* ERROR "cannot use interface T2 in conversion" */ (2)
+ _ = T3 /* ERROR "cannot use interface T3 in conversion" */ (3)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue39711.go b/src/internal/types/testdata/fixedbugs/issue39711.go
new file mode 100644
index 0000000..d85fa03
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39711.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Do not report a duplicate type error for this term list.
+// (Check types after interfaces have been completed.)
+type _ interface {
+ // TODO(rfindley) Once we have full type sets we can enable this again.
+ // For now we don't permit interfaces in term lists.
+ // type interface{ Error() string }, interface{ String() string }
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue39723.go b/src/internal/types/testdata/fixedbugs/issue39723.go
new file mode 100644
index 0000000..19e5e80
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39723.go
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// A constraint must be an interface; it cannot
+// be a type parameter, for instance.
+func _[A interface{ ~int }, B A /* ERROR "cannot use a type parameter as constraint" */ ]() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue39725.go b/src/internal/types/testdata/fixedbugs/issue39725.go
new file mode 100644
index 0000000..0145667
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39725.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[T1, T2 any](T1, T2, struct{a T1; b T2}) {}
+func _() {
+ f1(42, string("foo"), struct /* ERROR "does not match inferred type struct{a int; b string}" */ {a, b int}{})
+}
+
+// simplified test case from issue
+func f2[T any](_ []T, _ func(T)) {}
+func _() {
+ f2([]string{}, func /* ERROR "does not match inferred type func(string)" */ (f []byte) {})
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue39754.go b/src/internal/types/testdata/fixedbugs/issue39754.go
new file mode 100644
index 0000000..a1bd5ba
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39754.go
@@ -0,0 +1,21 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Optional[T any] struct {}
+
+func (_ Optional[T]) Val() (T, bool)
+
+type Box[T any] interface {
+ Val() (T, bool)
+}
+
+func f[V interface{}, A, B Box[V]]() {}
+
+func _() {
+ f[int, Optional[int], Optional[int]]()
+ _ = f[int, Optional[int], Optional /* ERROR "does not satisfy Box" */ [string]]
+ _ = f[int, Optional[int], Optional /* ERRORx "Optional.* does not satisfy Box.*" */ [string]]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue39755.go b/src/internal/types/testdata/fixedbugs/issue39755.go
new file mode 100644
index 0000000..257b73a
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39755.go
@@ -0,0 +1,23 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[T interface{~map[string]int}](x T) {
+ _ = x == nil
+}
+
+// simplified test case from issue
+
+type PathParamsConstraint interface {
+ ~map[string]string | ~[]struct{key, value string}
+}
+
+type PathParams[T PathParamsConstraint] struct {
+ t T
+}
+
+func (pp *PathParams[T]) IsNil() bool {
+ return pp.t == nil // this must succeed
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue39768.go b/src/internal/types/testdata/fixedbugs/issue39768.go
new file mode 100644
index 0000000..51a4177
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39768.go
@@ -0,0 +1,21 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type T[P any] P
+// type A = T // ERROR "cannot use generic type"
+// var x A[int]
+// var _ A
+//
+// type B = T[int]
+// var y B = x
+// var _ B /* ERROR "not a generic type" */ [int]
+
+// test case from issue
+
+type Vector[T any] []T
+type VectorAlias = Vector // ERROR "cannot use generic type"
+var v Vector[int]
diff --git a/src/internal/types/testdata/fixedbugs/issue39938.go b/src/internal/types/testdata/fixedbugs/issue39938.go
new file mode 100644
index 0000000..bd5bdca
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39938.go
@@ -0,0 +1,54 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// All but E2 and E5 provide an "indirection" and break infinite expansion of a type.
+type E0[P any] []P
+type E1[P any] *P
+type E2[P any] struct{ _ P }
+type E3[P any] struct{ _ *P }
+type E5[P any] struct{ _ [10]P }
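+// (E2 and E5 store P directly in a struct field or array element, so a
+// type embedding itself through them would have infinite size.)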
+
+type T0 struct {
+ _ E0[T0]
+}
+
+type T0_ struct {
+ E0[T0_]
+}
+
+type T1 struct {
+ _ E1[T1]
+}
+
+type T2 /* ERROR "invalid recursive type" */ struct {
+ _ E2[T2]
+}
+
+type T3 struct {
+ _ E3[T3]
+}
+
+type T4 /* ERROR "invalid recursive type" */ [10]E5[T4]
+
+type T5 struct {
+ _ E0[E2[T5]]
+}
+
+type T6 struct {
+ _ E0[E2[E0[E1[E2[[10]T6]]]]]
+}
+
+type T7 struct {
+ _ E0[[10]E2[E0[E2[E2[T7]]]]]
+}
+
+type T8 struct {
+ _ E0[[]E2[E0[E2[E2[T8]]]]]
+}
+
+type T9 /* ERROR "invalid recursive type" */ [10]E2[E5[E2[T9]]]
+
+type T10 [10]E2[E5[E2[func(T10)]]]
diff --git a/src/internal/types/testdata/fixedbugs/issue39948.go b/src/internal/types/testdata/fixedbugs/issue39948.go
new file mode 100644
index 0000000..e4430cf
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39948.go
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[P any] interface{
+ P // ERROR "term cannot be a type parameter"
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue39976.go b/src/internal/types/testdata/fixedbugs/issue39976.go
new file mode 100644
index 0000000..b622cd9
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39976.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type policy[K, V any] interface{}
+type LRU[K, V any] struct{}
+
+func NewCache[K, V any](p policy[K, V]) {}
+
+func _() {
+ var lru LRU[int, string]
+ NewCache[int, string](&lru)
+ NewCache /* ERROR "cannot infer K" */ (&lru)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue39982.go b/src/internal/types/testdata/fixedbugs/issue39982.go
new file mode 100644
index 0000000..9810b63
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue39982.go
@@ -0,0 +1,36 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ T[_ any] struct{}
+ S[_ any] struct {
+ data T[*T[int]]
+ }
+)
+
+func _() {
+ _ = S[int]{
+ data: T[*T[int]]{},
+ }
+}
+
+// full test case from issue
+
+type (
+ Element[TElem any] struct{}
+
+ entry[K comparable] struct{}
+
+ Cache[K comparable] struct {
+ data map[K]*Element[*entry[K]]
+ }
+)
+
+func _() {
+ _ = Cache[int]{
+ data: make(map[int](*Element[*entry[int]])),
+ }
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue40038.go b/src/internal/types/testdata/fixedbugs/issue40038.go
new file mode 100644
index 0000000..5f81fcb
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue40038.go
@@ -0,0 +1,15 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type A[T any] int
+
+func (A[T]) m(A[T])
+
+func f[P interface{m(P)}]() {}
+
+func _() {
+ _ = f[A[int]]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue40056.go b/src/internal/types/testdata/fixedbugs/issue40056.go
new file mode 100644
index 0000000..ce882e7
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue40056.go
@@ -0,0 +1,15 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ NewS /* ERROR "cannot infer T" */ ().M()
+}
+
+type S struct {}
+
+func NewS[T any]() *S { panic(0) }
+
+func (_ *S /* ERROR "S is not a generic type" */ [T]) M()
diff --git a/src/internal/types/testdata/fixedbugs/issue40057.go b/src/internal/types/testdata/fixedbugs/issue40057.go
new file mode 100644
index 0000000..2996d39
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue40057.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ var x interface{}
+ switch t := x.(type) {
+ case S /* ERROR "cannot use generic type" */ :
+ t.m()
+ }
+}
+
+type S[T any] struct {}
+
+func (_ S[T]) m()
diff --git a/src/internal/types/testdata/fixedbugs/issue40301.go b/src/internal/types/testdata/fixedbugs/issue40301.go
new file mode 100644
index 0000000..c78f9a1
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue40301.go
@@ -0,0 +1,12 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+func _[T any](x T) {
+ _ = unsafe.Alignof(x)
+ _ = unsafe.Sizeof(x)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue40350.go b/src/internal/types/testdata/fixedbugs/issue40350.go
new file mode 100644
index 0000000..b7ceb33
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue40350.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type number interface {
+ ~float64 | ~int | ~int32
+ float64 | ~int32
+}
+
+func f[T number]() {}
+
+func _() {
+ _ = f[int /* ERROR "int does not satisfy number (number mentions int, but int is not in the type set of number)" */]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue40684.go b/src/internal/types/testdata/fixedbugs/issue40684.go
new file mode 100644
index 0000000..4805184
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue40684.go
@@ -0,0 +1,15 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[_ any] int
+
+func f[_ any]() {}
+func g[_, _ any]() {}
+
+func _() {
+ _ = f[T /* ERROR "without instantiation" */ ]
+ _ = g[T /* ERROR "without instantiation" */ , T /* ERROR "without instantiation" */ ]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue40789.go b/src/internal/types/testdata/fixedbugs/issue40789.go
new file mode 100644
index 0000000..9eea4ad
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue40789.go
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+func main() {
+ m := map[string]int{
+ "a": 6,
+ "b": 7,
+ }
+ fmt.Println(copyMap[map[string]int, string, int](m))
+}
+
+type Map[K comparable, V any] interface {
+ map[K] V
+}
+
+func copyMap[M Map[K, V], K comparable, V any](m M) M {
+ m1 := make(M)
+ for k, v := range m {
+ m1[k] = v
+ }
+ return m1
+}
+
+// simpler test case from the same issue
+
+type A[X comparable] interface {
+ []X
+}
+
+func f[B A[X], X comparable]() B {
+ return nil
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue41124.go b/src/internal/types/testdata/fixedbugs/issue41124.go
new file mode 100644
index 0000000..0f828dc
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue41124.go
@@ -0,0 +1,91 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Test case from issue.
+
+type Nat /* ERROR "invalid recursive type" */ interface {
+ Zero|Succ
+}
+
+type Zero struct{}
+type Succ struct{
+ Nat // Nat contains type constraints but is invalid, so no error
+}
+
+// Struct tests.
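+// Interfaces that are or embed comparable, or that contain type
+// constraints, cannot be used as field types or embedded in structs.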
+
+type I1 interface {
+ comparable
+}
+
+type I2 interface {
+ ~int
+}
+
+type I3 interface {
+ I1
+ I2
+}
+
+type _ struct {
+ f I1 // ERRORx `interface is .* comparable`
+}
+
+type _ struct {
+ comparable // ERRORx `interface is .* comparable`
+}
+
+type _ struct{
+ I1 // ERRORx `interface is .* comparable`
+}
+
+type _ struct{
+ I2 // ERROR "interface contains type constraints"
+}
+
+type _ struct{
+ I3 // ERROR "interface contains type constraints"
+}
+
+// General composite types.
+
+type (
+ _ [10]I1 // ERRORx `interface is .* comparable`
+ _ [10]I2 // ERROR "interface contains type constraints"
+
+ _ []I1 // ERRORx `interface is .* comparable`
+ _ []I2 // ERROR "interface contains type constraints"
+
+ _ *I3 // ERROR "interface contains type constraints"
+ _ map[I1 /* ERRORx `interface is .* comparable` */ ]I2 // ERROR "interface contains type constraints"
+ _ chan I3 // ERROR "interface contains type constraints"
+ _ func(I1 /* ERRORx `interface is .* comparable` */ )
+ _ func() I2 // ERROR "interface contains type constraints"
+)
+
+// Other cases.
+
+var _ = [...]I3 /* ERROR "interface contains type constraints" */ {}
+
+func _(x interface{}) {
+ _ = x.(I3 /* ERROR "interface contains type constraints" */ )
+}
+
+type T1[_ any] struct{}
+type T3[_, _, _ any] struct{}
+var _ T1[I2 /* ERROR "interface contains type constraints" */ ]
+var _ T3[int, I2 /* ERROR "interface contains type constraints" */ , float32]
+
+func f1[_ any]() int { panic(0) }
+var _ = f1[I2 /* ERROR "interface contains type constraints" */ ]()
+func f3[_, _, _ any]() int { panic(0) }
+var _ = f3[int, I2 /* ERROR "interface contains type constraints" */ , float32]()
+
+func _(x interface{}) {
+ switch x.(type) {
+ case I2 /* ERROR "interface contains type constraints" */ :
+ }
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue41176.go b/src/internal/types/testdata/fixedbugs/issue41176.go
new file mode 100644
index 0000000..755e83a
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue41176.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S struct{}
+
+func (S) M() byte {
+ return 0
+}
+
+type I[T any] interface {
+ M() T
+}
+
+func f[T any](x I[T]) {}
+
+func _() {
+ f(S{})
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue42695.go b/src/internal/types/testdata/fixedbugs/issue42695.go
new file mode 100644
index 0000000..4551e9f
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue42695.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue42695
+
+const _ = 6e5518446744 // ERROR "malformed constant"
+const _ uint8 = 6e5518446744 // ERROR "malformed constant"
+
+var _ = 6e5518446744 // ERROR "malformed constant"
+var _ uint8 = 6e5518446744 // ERROR "malformed constant"
+
+func f(x int) int {
+ return x + 6e5518446744 // ERROR "malformed constant"
+}
+
+var _ = f(6e5518446744 /* ERROR "malformed constant" */ )
diff --git a/src/internal/types/testdata/fixedbugs/issue42758.go b/src/internal/types/testdata/fixedbugs/issue42758.go
new file mode 100644
index 0000000..4e1df34
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue42758.go
@@ -0,0 +1,33 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[T any](x interface{}){
+ switch x.(type) {
+ case T: // ok to use a type parameter
+ case int:
+ }
+
+ switch x.(type) {
+ case T:
+ case T /* ERROR "duplicate case" */ :
+ }
+}
+
+type constraint interface {
+ ~int
+}
+
+func _[T constraint](x interface{}){
+ switch x.(type) {
+ case T: // ok to use a type parameter even if type set contains int
+ case int:
+ }
+}
+
+func _(x constraint /* ERROR "contains type constraints" */ ) {
+ switch x.(type) { // no need to report another error
+ }
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue42881.go b/src/internal/types/testdata/fixedbugs/issue42881.go
new file mode 100644
index 0000000..b766b5e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue42881.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ T1 interface{ comparable }
+ T2 interface{ int }
+)
+
+var (
+ _ comparable // ERROR "cannot use type comparable outside a type constraint: interface is (or embeds) comparable"
+ _ T1 // ERROR "cannot use type T1 outside a type constraint: interface is (or embeds) comparable"
+ _ T2 // ERROR "cannot use type T2 outside a type constraint: interface contains type constraints"
+)
diff --git a/src/internal/types/testdata/fixedbugs/issue42987.go b/src/internal/types/testdata/fixedbugs/issue42987.go
new file mode 100644
index 0000000..21c14c1
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue42987.go
@@ -0,0 +1,8 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check that there is only one error (no follow-on errors).
+
+package p
+var _ = [ ... /* ERROR "invalid use of [...] array" */ ]byte("foo") \ No newline at end of file
diff --git a/src/internal/types/testdata/fixedbugs/issue43056.go b/src/internal/types/testdata/fixedbugs/issue43056.go
new file mode 100644
index 0000000..8ff4e7f
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue43056.go
@@ -0,0 +1,31 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// simplified example
+func f[T ~func(T)](a, b T) {}
+
+type F func(F)
+
+func _() {
+ var i F
+ var j func(F)
+
+ f(i, j)
+ f(j, i)
+}
+
+// example from issue
+func g[T interface{ Equal(T) bool }](a, b T) {}
+
+type I interface{ Equal(I) bool }
+
+func _() {
+ var i I
+ var j interface{ Equal(I) bool }
+
+ g(i, j)
+ g(j, i)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue43087.go b/src/internal/types/testdata/fixedbugs/issue43087.go
new file mode 100644
index 0000000..222fac8
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue43087.go
@@ -0,0 +1,43 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ a, b, b /* ERROR "b repeated on left side of :=" */ := 1, 2, 3
+ _ = a
+ _ = b
+}
+
+func _() {
+ a, _, _ := 1, 2, 3 // multiple _'s ok
+ _ = a
+}
+
+func _() {
+ var b int
+ a, b, b /* ERROR "b repeated on left side of :=" */ := 1, 2, 3
+ _ = a
+ _ = b
+}
+
+func _() {
+ var a []int
+ a /* ERRORx `non-name .* on left side of :=` */ [0], b := 1, 2
+ _ = a
+ _ = b
+}
+
+func _() {
+ var a int
+ a, a /* ERROR "a repeated on left side of :=" */ := 1, 2
+ _ = a
+}
+
+func _() {
+ var a, b int
+ a, b := /* ERROR "no new variables on left side of :=" */ 1, 2
+ _ = a
+ _ = b
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue43109.go b/src/internal/types/testdata/fixedbugs/issue43109.go
new file mode 100644
index 0000000..5d21a56
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue43109.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Ensure there is no "imported and not used" error
+// if a package wasn't imported in the first place.
+
+package p
+
+import . "/foo" // ERROR "could not import /foo"
diff --git a/src/internal/types/testdata/fixedbugs/issue43110.go b/src/internal/types/testdata/fixedbugs/issue43110.go
new file mode 100644
index 0000000..1e85022
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue43110.go
@@ -0,0 +1,43 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type P *struct{}
+
+func _() {
+ // want an error even if the switch is empty
+ var a struct{ _ func() }
+ switch a /* ERROR "cannot switch on a" */ {
+ }
+
+ switch a /* ERROR "cannot switch on a" */ {
+ case a: // no follow-on error here
+ }
+
+ // this is ok because f can be compared to nil
+ var f func()
+ switch f {
+ }
+
+ switch f {
+ case nil:
+ }
+
+ switch (func())(nil) {
+ case nil:
+ }
+
+ switch (func())(nil) {
+ case f /* ERRORx `invalid case f in switch on .* \(func can only be compared to nil\)` */ :
+ }
+
+ switch nil /* ERROR "use of untyped nil in switch expression" */ {
+ }
+
+ // this is ok
+ switch P(nil) {
+ case P(nil):
+ }
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue43124.go b/src/internal/types/testdata/fixedbugs/issue43124.go
new file mode 100644
index 0000000..ce26ae1
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue43124.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ = int(0 /* ERROR "invalid use of ... in conversion to int" */ ...)
+
+// test case from issue
+
+type M []string
+
+var (
+ x = []string{"a", "b"}
+ _ = M(x /* ERROR "invalid use of ... in conversion to M" */ ...)
+)
diff --git a/src/internal/types/testdata/fixedbugs/issue43125.go b/src/internal/types/testdata/fixedbugs/issue43125.go
new file mode 100644
index 0000000..d0d6feb
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue43125.go
@@ -0,0 +1,8 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ = new(- /* ERROR "not a type" */ 1)
+var _ = new(1 /* ERROR "not a type" */ + 1)
diff --git a/src/internal/types/testdata/fixedbugs/issue43190.go b/src/internal/types/testdata/fixedbugs/issue43190.go
new file mode 100644
index 0000000..b4d1fa4
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue43190.go
@@ -0,0 +1,31 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The errors below are produced by the parser, but we check
+// them here for consistency with the types2 tests.
+
+package p
+
+import ; // ERROR "missing import path"
+import "" // ERROR "invalid import path (empty string)"
+import
+var /* ERROR "missing import path" */ _ int
+import .; // ERROR "missing import path"
+import 'x' // ERROR "import path must be a string"
+var _ int
+import /* ERROR "imports must appear before other declarations" */ _ "math"
+
+// Don't repeat previous error for each immediately following import ...
+import ()
+import (.) // ERROR "missing import path"
+import (
+ "fmt"
+ .
+) // ERROR "missing import path"
+
+// ... but report the error again if we start a new import section after
+// other declarations
+var _ = fmt.Println
+import /* ERROR "imports must appear before other declarations" */ _ "math"
+import _ "math"
diff --git a/src/internal/types/testdata/fixedbugs/issue43527.go b/src/internal/types/testdata/fixedbugs/issue43527.go
new file mode 100644
index 0000000..473ab96
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue43527.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+const L = 10
+
+type (
+ _ [L]struct{}
+ _ [A /* ERROR "undefined array length A or missing type constraint" */ ]struct{}
+ _ [B /* ERROR "invalid array length B" */ ]struct{}
+ _[A any] struct{}
+
+ B int
+)
diff --git a/src/internal/types/testdata/fixedbugs/issue43671.go b/src/internal/types/testdata/fixedbugs/issue43671.go
new file mode 100644
index 0000000..be4c9ee
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue43671.go
@@ -0,0 +1,58 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | <-chan T }
+
+func _[T any](ch T) {
+ <-ch // ERRORx `cannot receive from ch .* \(no core type\)`
+}
+
+func _[T C0](ch T) {
+ <-ch // ERROR "cannot receive from non-channel ch"
+}
+
+func _[T C1](ch T) {
+ <-ch
+}
+
+func _[T C2](ch T) {
+ <-ch
+}
+
+func _[T C3](ch T) {
+ <-ch // ERRORx `cannot receive from ch .* \(no core type\)`
+}
+
+func _[T C4](ch T) {
+ <-ch // ERROR "cannot receive from send-only channel"
+}
+
+func _[T C5[X], X any](ch T, x X) {
+ x = <-ch
+}
+
+// test case from issue, slightly modified
+type RecvChan[T any] interface {
+ ~chan T | ~<-chan T
+}
+
+func _[T any, C RecvChan[T]](ch C) T {
+ return <-ch
+}
+
+func f[T any, C interface{ chan T }](ch C) T {
+ return <-ch
+}
+
+func _(ch chan int) {
+ var x int = f(ch) // test constraint type inference for this case
+ _ = x
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue44688.go b/src/internal/types/testdata/fixedbugs/issue44688.go
new file mode 100644
index 0000000..512bfcc
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue44688.go
@@ -0,0 +1,83 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package P
+
+type A1[T any] struct{}
+
+func (*A1[T]) m1(T) {}
+
+type A2[T any] interface {
+ m2(T)
+}
+
+type B1[T any] struct {
+ filler int
+ *A1[T]
+ A2[T]
+}
+
+type B2[T any] interface {
+ A2[T]
+}
+
+type C[T any] struct {
+ filler1 int
+ filler2 int
+ B1[T]
+}
+
+type D[T any] struct {
+ filler1 int
+ filler2 int
+ filler3 int
+ C[T]
+}
+
+func _() {
+ // calling embedded methods
+ var b1 B1[string]
+
+ b1.A1.m1("")
+ b1.m1("")
+
+ b1.A2.m2("")
+ b1.m2("")
+
+ var b2 B2[string]
+ b2.m2("")
+
+ // a deeper nesting
+ var d D[string]
+ d.m1("")
+ d.m2("")
+
+ // calling method expressions
+ m1x := B1[string].m1
+ m1x(b1, "")
+ m2x := B2[string].m2
+ m2x(b2, "")
+
+ // calling method values
+ m1v := b1.m1
+ m1v("")
+ m2v := b1.m2
+ m2v("")
+ b2v := b2.m2
+ b2v("")
+}
+
+// actual test case from issue
+
+type A[T any] struct{}
+
+func (*A[T]) f(T) {}
+
+type B[T any] struct{ A[T] }
+
+func _() {
+ var b B[string]
+ b.A.f("")
+ b.f("")
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue44799.go b/src/internal/types/testdata/fixedbugs/issue44799.go
new file mode 100644
index 0000000..9e528a7
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue44799.go
@@ -0,0 +1,19 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func Map[F, T any](s []F, f func(F) T) []T { return nil }
+
+func Reduce[Elem1, Elem2 any](s []Elem1, initializer Elem2, f func(Elem2, Elem1) Elem2) Elem2 { var x Elem2; return x }
+
+func main() {
+ var s []int
+ var f1 func(int) float64
+ var f2 func(float64, int) float64
+ _ = Map[int](s, f1)
+ _ = Map(s, f1)
+ _ = Reduce[int](s, 0, f2)
+ _ = Reduce(s, 0, f2)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue45114.go b/src/internal/types/testdata/fixedbugs/issue45114.go
new file mode 100644
index 0000000..e51b3f7
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue45114.go
@@ -0,0 +1,8 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var s uint
+var _ = string(1 /* ERRORx `shifted operand 1 .* must be integer` */ << s)
diff --git a/src/internal/types/testdata/fixedbugs/issue45548.go b/src/internal/types/testdata/fixedbugs/issue45548.go
new file mode 100644
index 0000000..01c9672
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue45548.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[F interface{*Q}, G interface{*R}, Q, R any](q Q, r R) {}
+
+func _() {
+ f[*float64, *int](1, 2)
+ f[*float64](1, 2)
+ f(1, 2)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue45550.go b/src/internal/types/testdata/fixedbugs/issue45550.go
new file mode 100644
index 0000000..2ea4ffe
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue45550.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Builder /* ERROR "invalid recursive type" */ [T interface{ struct{ Builder[T] } }] struct{}
+type myBuilder struct {
+ Builder[myBuilder]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue45635.go b/src/internal/types/testdata/fixedbugs/issue45635.go
new file mode 100644
index 0000000..b83d477
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue45635.go
@@ -0,0 +1,31 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+ some /* ERROR "undefined" */ [int, int]()
+}
+
+type N[T any] struct{}
+
+var _ N [] // ERROR "expected type argument list"
+
+type I interface {
+ ~[]int
+}
+
+func _[T I](i, j int) {
+ var m map[int]int
+ _ = m[i, j /* ERROR "more than one index" */ ]
+
+ var a [3]int
+ _ = a[i, j /* ERROR "more than one index" */ ]
+
+ var s []int
+ _ = s[i, j /* ERROR "more than one index" */ ]
+
+ var t T
+ _ = t[i, j /* ERROR "more than one index" */ ]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue45639.go b/src/internal/types/testdata/fixedbugs/issue45639.go
new file mode 100644
index 0000000..a224aed
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue45639.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package P
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// // It is not permitted to declare a local type whose underlying
+// // type is a type parameter not declared by that type declaration.
+// func _[T any]() {
+// type _ T // ERROR "cannot use function type parameter T as RHS in type declaration"
+// type _ [_ any] T // ERROR "cannot use function type parameter T as RHS in type declaration"
+// }
diff --git a/src/internal/types/testdata/fixedbugs/issue45920.go b/src/internal/types/testdata/fixedbugs/issue45920.go
new file mode 100644
index 0000000..716abb1
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue45920.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[T any, C chan T | <-chan T](ch C) {}
+
+func _(ch chan int) { f1(ch) }
+func _(ch <-chan int) { f1(ch) }
+func _(ch chan<- int) { f1 /* ERROR "chan<- int does not satisfy chan int | <-chan int" */ (ch) }
+
+func f2[T any, C chan T | chan<- T](ch C) {}
+
+func _(ch chan int) { f2(ch) }
+func _(ch <-chan int) { f2 /* ERROR "<-chan int does not satisfy chan int | chan<- int" */ (ch) }
+func _(ch chan<- int) { f2(ch) }
diff --git a/src/internal/types/testdata/fixedbugs/issue45985.go b/src/internal/types/testdata/fixedbugs/issue45985.go
new file mode 100644
index 0000000..c486150
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue45985.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue45985
+
+func app[S interface{ ~[]T }, T any](s S, e T) S {
+ return append(s, e)
+}
+
+func _() {
+ _ = app /* ERROR "S (type int) does not satisfy interface{~[]T}" */ [int] // TODO(gri) better error message
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue46090.go b/src/internal/types/testdata/fixedbugs/issue46090.go
new file mode 100644
index 0000000..59670da
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue46090.go
@@ -0,0 +1,11 @@
+// -lang=go1.17
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The predeclared type comparable is not visible before Go 1.18.
+
+package p
+
+type _ comparable // ERROR "predeclared comparable"
diff --git a/src/internal/types/testdata/fixedbugs/issue46275.go b/src/internal/types/testdata/fixedbugs/issue46275.go
new file mode 100644
index 0000000..0862d5b
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue46275.go
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue46275
+
+type N[T any] struct {
+ *N[T]
+ t T
+}
+
+func (n *N[T]) Elem() T {
+ return n.t
+}
+
+type I interface {
+ Elem() string
+}
+
+func _() {
+ var n1 *N[string]
+ var _ I = n1
+ type NS N[string]
+ var n2 *NS
+ var _ I = n2
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue46403.go b/src/internal/types/testdata/fixedbugs/issue46403.go
new file mode 100644
index 0000000..fc60340
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue46403.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue46403
+
+func _() {
+ // a should be used, despite the parser error below.
+ var a []int
+ var _ = a[] // ERROR "expected operand"
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue46404.go b/src/internal/types/testdata/fixedbugs/issue46404.go
new file mode 100644
index 0000000..e3c93f6
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue46404.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue46404
+
+// TODO(gri) re-enable this test with matching errors
+// between go/types and types2
+// Check that we don't type check t[_] as an instantiation.
+// type t [t /* type parameters must be named */ /* not a generic type */ [_]]_ // cannot use
diff --git a/src/internal/types/testdata/fixedbugs/issue46461.go b/src/internal/types/testdata/fixedbugs/issue46461.go
new file mode 100644
index 0000000..ae70048
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue46461.go
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// test case 1
+type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U] }] int
+
+type X int
+
+func (X) M() T[X] { return 0 }
+
+// test case 2
+type A /* ERROR "invalid recursive type" */ [T interface{ A[T] }] interface{}
+
+// test case 3
+type A2 /* ERROR "invalid recursive type" */ [U interface{ A2[U] }] interface{ M() A2[U] }
+
+type I interface{ A2[I]; M() A2[I] }
diff --git a/src/internal/types/testdata/fixedbugs/issue46583.go b/src/internal/types/testdata/fixedbugs/issue46583.go
new file mode 100644
index 0000000..1901bff
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue46583.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T1 struct{}
+func (t T1) m(int) {}
+var f1 func(T1)
+
+type T2 struct{}
+func (t T2) m(x int) {}
+var f2 func(T2)
+
+type T3 struct{}
+func (T3) m(int) {}
+var f3 func(T3)
+
+type T4 struct{}
+func (T4) m(x int) {}
+var f4 func(T4)
+
+func _() {
+ f1 = T1 /* ERROR "func(T1, int)" */ .m
+ f2 = T2 /* ERROR "func(t T2, x int)" */ .m
+ f3 = T3 /* ERROR "func(T3, int)" */ .m
+ f4 = T4 /* ERROR "func(_ T4, x int)" */ .m
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue47031.go b/src/internal/types/testdata/fixedbugs/issue47031.go
new file mode 100644
index 0000000..23a9c55
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue47031.go
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Mer interface { M() }
+
+func F[T Mer](p *T) {
+ p.M /* ERROR "p.M undefined" */ ()
+}
+
+type MyMer int
+
+func (MyMer) M() {}
+
+func _() {
+ F(new(MyMer))
+ F[Mer](nil)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue47115.go b/src/internal/types/testdata/fixedbugs/issue47115.go
new file mode 100644
index 0000000..2d2be34
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue47115.go
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | chan<- T }
+
+func _[T any](ch T) {
+ ch <- /* ERRORx `cannot send to ch .* no core type` */ 0
+}
+
+func _[T C0](ch T) {
+ ch <- /* ERROR "cannot send to non-channel" */ 0
+}
+
+func _[T C1](ch T) {
+ ch <- 0
+}
+
+func _[T C2](ch T) {
+ ch <-/* ERROR "cannot send to receive-only channel" */ 0
+}
+
+func _[T C3](ch T) {
+ ch <- /* ERRORx `cannot send to ch .* no core type` */ 0
+}
+
+func _[T C4](ch T) {
+ ch <- 0
+}
+
+func _[T C5[X], X any](ch T, x X) {
+ ch <- x
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue47127.go b/src/internal/types/testdata/fixedbugs/issue47127.go
new file mode 100644
index 0000000..b663938
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue47127.go
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Embedding of stand-alone type parameters is not permitted.
+
+package p
+
+type (
+ _[P any] interface{ *P | []P | chan P | map[string]P }
+ _[P any] interface{ P /* ERROR "term cannot be a type parameter" */ }
+ _[P any] interface{ ~P /* ERROR "type in term ~P cannot be a type parameter" */ }
+ _[P any] interface{ int | P /* ERROR "term cannot be a type parameter" */ }
+ _[P any] interface{ int | ~P /* ERROR "type in term ~P cannot be a type parameter" */ }
+)
+
+func _[P any]() {
+ type (
+ _[P any] interface{ *P | []P | chan P | map[string]P }
+ _[P any] interface{ P /* ERROR "term cannot be a type parameter" */ }
+ _[P any] interface{ ~P /* ERROR "type in term ~P cannot be a type parameter" */ }
+ _[P any] interface{ int | P /* ERROR "term cannot be a type parameter" */ }
+ _[P any] interface{ int | ~P /* ERROR "type in term ~P cannot be a type parameter" */ }
+
+ _ interface{ *P | []P | chan P | map[string]P }
+ _ interface{ P /* ERROR "term cannot be a type parameter" */ }
+ _ interface{ ~P /* ERROR "type in term ~P cannot be a type parameter" */ }
+ _ interface{ int | P /* ERROR "term cannot be a type parameter" */ }
+ _ interface{ int | ~P /* ERROR "type in term ~P cannot be a type parameter" */ }
+ )
+}
+
+func _[P any, Q interface{ *P | []P | chan P | map[string]P }]() {}
+func _[P any, Q interface{ P /* ERROR "term cannot be a type parameter" */ }]() {}
+func _[P any, Q interface{ ~P /* ERROR "type in term ~P cannot be a type parameter" */ }]() {}
+func _[P any, Q interface{ int | P /* ERROR "term cannot be a type parameter" */ }]() {}
+func _[P any, Q interface{ int | ~P /* ERROR "type in term ~P cannot be a type parameter" */ }]() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue47411.go b/src/internal/types/testdata/fixedbugs/issue47411.go
new file mode 100644
index 0000000..97b5942
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue47411.go
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[_ comparable]() {}
+func g[_ interface{interface{comparable; ~int|~string}}]() {}
+
+func _[P comparable,
+ Q interface{ comparable; ~int|~string },
+ R any, // not comparable
+ S interface{ comparable; ~func() }, // not comparable
+]() {
+ _ = f[int]
+ _ = f[P]
+ _ = f[Q]
+ _ = f[func /* ERROR "does not satisfy comparable" */ ()]
+ _ = f[R /* ERROR "R does not satisfy comparable" */ ]
+
+ _ = g[int]
+ _ = g[P /* ERROR "P does not satisfy interface{interface{comparable; ~int | ~string}" */ ]
+ _ = g[Q]
+ _ = g[func /* ERROR "func() does not satisfy interface{interface{comparable; ~int | ~string}}" */ ()]
+ _ = g[R /* ERROR "R does not satisfy interface{interface{comparable; ~int | ~string}" */ ]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue47747.go b/src/internal/types/testdata/fixedbugs/issue47747.go
new file mode 100644
index 0000000..34c78d3
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue47747.go
@@ -0,0 +1,71 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type T1[P any] P
+//
+// func (T1[_]) m() {}
+//
+// func _[P any](x *T1[P]) {
+// // x.m exists because x is of type *T1 where T1 is a defined type
+// // (even though under(T1) is a type parameter)
+// x.m()
+// }
+
+
+func _[P interface{ m() }](x P) {
+ x.m()
+ // (&x).m doesn't exist because &x is of type *P
+ // and pointers to type parameters don't have methods
+ (&x).m /* ERROR "type *P is pointer to type parameter, not type parameter" */ ()
+}
+
+
+type T2 interface{ m() }
+
+func _(x *T2) {
+	// x.m doesn't exist because x is of type *T2
+ // and pointers to interfaces don't have methods
+ x.m /* ERROR "type *T2 is pointer to interface, not interface" */()
+}
+
+// Test case 1 from issue
+
+type Fooer1[t any] interface {
+ Foo(Barer[t])
+}
+type Barer[t any] interface {
+ Bar(t)
+}
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type Foo1[t any] t
+// type Bar[t any] t
+//
+// func (l Foo1[t]) Foo(v Barer[t]) { v.Bar(t(l)) }
+// func (b *Bar[t]) Bar(l t) { *b = Bar[t](l) }
+//
+// func _[t any](f Fooer1[t]) t {
+// var b Bar[t]
+// f.Foo(&b)
+// return t(b)
+// }
+
+// Test case 2 from issue
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type Fooer2[t any] interface {
+// Foo()
+// }
+//
+// type Foo2[t any] t
+//
+// func (f *Foo2[t]) Foo() {}
+//
+// func _[t any](v t) {
+// var f = Foo2[t](v)
+// _ = Fooer2[t](&f)
+// }
diff --git a/src/internal/types/testdata/fixedbugs/issue47796.go b/src/internal/types/testdata/fixedbugs/issue47796.go
new file mode 100644
index 0000000..7f719ff
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue47796.go
@@ -0,0 +1,33 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// parameterized types with self-recursive constraints
+type (
+ T1 /* ERROR "invalid recursive type" */ [P T1[P]] interface{}
+ T2 /* ERROR "invalid recursive type" */ [P, Q T2[P, Q]] interface{}
+ T3[P T2[P, Q], Q interface{ ~string }] interface{}
+
+ T4a /* ERROR "invalid recursive type" */ [P T4a[P]] interface{ ~int }
+ T4b /* ERROR "invalid recursive type" */ [P T4b[int]] interface{ ~int }
+ T4c /* ERROR "invalid recursive type" */ [P T4c[string]] interface{ ~int }
+
+ // mutually recursive constraints
+ T5 /* ERROR "invalid recursive type" */ [P T6[P]] interface{ int }
+ T6[P T5[P]] interface{ int }
+)
+
+// verify that constraints are checked as expected
+var (
+ _ T1[int]
+ _ T2[int, string]
+ _ T3[int, string]
+)
+
+// test case from issue
+
+type Eq /* ERROR "invalid recursive type" */ [a Eq[a]] interface {
+ Equal(that a) bool
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue47818.go b/src/internal/types/testdata/fixedbugs/issue47818.go
new file mode 100644
index 0000000..21c8539
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue47818.go
@@ -0,0 +1,61 @@
+// -lang=go1.17
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parser accepts type parameters but the type checker
+// needs to report any operations that are not permitted
+// before Go 1.18.
+
+package p
+
+type T[P /* ERROR "type parameter requires go1.18 or later" */ any /* ERROR "predeclared any requires go1.18 or later" */] struct{}
+
+// for init (and main, but we're not in package main) we should only get one error
+func init[P /* ERROR "func init must have no type parameters" */ any /* ERROR "predeclared any requires go1.18 or later" */]() {
+}
+func main[P /* ERROR "type parameter requires go1.18 or later" */ any /* ERROR "predeclared any requires go1.18 or later" */]() {
+}
+
+func f[P /* ERROR "type parameter requires go1.18 or later" */ any /* ERROR "predeclared any requires go1.18 or later" */](x P) {
+ var _ T[ /* ERROR "type instantiation requires go1.18 or later" */ int]
+ var _ (T[ /* ERROR "type instantiation requires go1.18 or later" */ int])
+ _ = T[ /* ERROR "type instantiation requires go1.18 or later" */ int]{}
+ _ = T[ /* ERROR "type instantiation requires go1.18 or later" */ int](struct{}{})
+}
+
+func (T[ /* ERROR "type instantiation requires go1.18 or later" */ P]) g(x int) {
+ f[ /* ERROR "function instantiation requires go1.18 or later" */ int](0) // explicit instantiation
+ (f[ /* ERROR "function instantiation requires go1.18 or later" */ int])(0) // parentheses (different code path)
+ f( /* ERROR "implicit function instantiation requires go1.18 or later" */ x) // implicit instantiation
+}
+
+type C1 interface {
+ comparable // ERROR "predeclared comparable requires go1.18 or later"
+}
+
+type C2 interface {
+ comparable // ERROR "predeclared comparable requires go1.18 or later"
+ int // ERROR "embedding non-interface type int requires go1.18 or later"
+ ~ /* ERROR "embedding interface element ~int requires go1.18 or later" */ int
+ int /* ERROR "embedding interface element int | ~string requires go1.18 or later" */ | ~string
+}
+
+type _ interface {
+ // errors for these were reported with their declaration
+ C1
+ C2
+}
+
+type (
+ _ comparable // ERROR "predeclared comparable requires go1.18 or later"
+ // errors for these were reported with their declaration
+ _ C1
+ _ C2
+
+ _ = comparable // ERROR "predeclared comparable requires go1.18 or later"
+ // errors for these were reported with their declaration
+ _ = C1
+ _ = C2
+)
diff --git a/src/internal/types/testdata/fixedbugs/issue47887.go b/src/internal/types/testdata/fixedbugs/issue47887.go
new file mode 100644
index 0000000..4c4fc2f
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue47887.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Fooer[t any] interface {
+ foo(Barer[t])
+}
+type Barer[t any] interface {
+ bar(Bazer[t])
+}
+type Bazer[t any] interface {
+ Fooer[t]
+ baz(t)
+}
+
+type Int int
+
+func (n Int) baz(int) {}
+func (n Int) foo(b Barer[int]) { b.bar(n) }
+
+type F[t any] interface { f(G[t]) }
+type G[t any] interface { g(H[t]) }
+type H[t any] interface { F[t] }
+
+type T struct{}
+func (n T) f(b G[T]) { b.g(n) }
diff --git a/src/internal/types/testdata/fixedbugs/issue47968.go b/src/internal/types/testdata/fixedbugs/issue47968.go
new file mode 100644
index 0000000..c516eee
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue47968.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[P any] struct{}
+
+func (T[P]) m1()
+
+type A1 = T // ERROR "cannot use generic type"
+
+func (A1[P]) m2() {}
+
+type A2 = T[int]
+
+func (A2 /* ERROR "cannot define new methods on instantiated type T[int]" */) m3() {}
+func (_ /* ERROR "cannot define new methods on instantiated type T[int]" */ A2) m4() {}
+
+func (T[int]) m5() {} // int is the type parameter name, not an instantiation
+func (T[* /* ERROR "must be an identifier" */ int]) m6() {} // syntax error
diff --git a/src/internal/types/testdata/fixedbugs/issue48008.go b/src/internal/types/testdata/fixedbugs/issue48008.go
new file mode 100644
index 0000000..8d0c640
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48008.go
@@ -0,0 +1,60 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[P any] struct{}
+
+func _(x interface{}) {
+ switch x.(type) {
+ case nil:
+ case int:
+
+ case T[int]:
+ case []T[int]:
+ case [10]T[int]:
+ case struct{T[int]}:
+ case *T[int]:
+ case func(T[int]):
+ case interface{m(T[int])}:
+ case map[T[int]] string:
+ case chan T[int]:
+
+ case T /* ERROR "cannot use generic type T[P any] without instantiation" */ :
+ case []T /* ERROR "cannot use generic type" */ :
+ case [10]T /* ERROR "cannot use generic type" */ :
+ case struct{T /* ERROR "cannot use generic type" */ }:
+ case *T /* ERROR "cannot use generic type" */ :
+ case func(T /* ERROR "cannot use generic type" */ ):
+ case interface{m(T /* ERROR "cannot use generic type" */ )}:
+ case map[T /* ERROR "cannot use generic type" */ ] string:
+ case chan T /* ERROR "cannot use generic type" */ :
+
+ case T /* ERROR "cannot use generic type" */ , *T /* ERROR "cannot use generic type" */ :
+ }
+}
+
+// Make sure a parenthesized nil is ok.
+
+func _(x interface{}) {
+ switch x.(type) {
+ case ((nil)), int:
+ }
+}
+
+// Make sure we look for the predeclared nil.
+
+func _(x interface{}) {
+ type nil int
+ switch x.(type) {
+ case nil: // ok - this is the type nil
+ }
+}
+
+func _(x interface{}) {
+ var nil int
+ switch x.(type) {
+ case nil /* ERROR "not a type" */ : // not ok - this is the variable nil
+ }
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48018.go b/src/internal/types/testdata/fixedbugs/issue48018.go
new file mode 100644
index 0000000..3df908a
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48018.go
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type Box[A any] struct {
+ value A
+}
+
+func Nest[A /* ERROR "instantiation cycle" */ any](b Box[A], n int) interface{} {
+ if n == 0 {
+ return b
+ }
+ return Nest(Box[Box[A]]{b}, n-1)
+}
+
+func main() {
+ Nest(Box[int]{0}, 10)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48048.go b/src/internal/types/testdata/fixedbugs/issue48048.go
new file mode 100644
index 0000000..98a03ea
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48048.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[P any] struct{}
+
+func (T[_]) A() {}
+
+var _ = (T[int]).A
+var _ = (*T[int]).A
+
+var _ = (T /* ERROR "cannot use generic type" */).A
+var _ = (*T /* ERROR "cannot use generic type" */).A
diff --git a/src/internal/types/testdata/fixedbugs/issue48082.go b/src/internal/types/testdata/fixedbugs/issue48082.go
new file mode 100644
index 0000000..648c512
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48082.go
@@ -0,0 +1,7 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue48082
+
+import "init" /* ERROR "init must be a func" */ /* ERROR "could not import init" */
diff --git a/src/internal/types/testdata/fixedbugs/issue48083.go b/src/internal/types/testdata/fixedbugs/issue48083.go
new file mode 100644
index 0000000..15e9b70
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48083.go
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[P any] struct{}
+
+type _ interface{ int | T /* ERROR "cannot use generic type" */ } \ No newline at end of file
diff --git a/src/internal/types/testdata/fixedbugs/issue48136.go b/src/internal/types/testdata/fixedbugs/issue48136.go
new file mode 100644
index 0000000..b76322e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48136.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[P interface{ *P }]() {}
+func f2[P interface{ func(P) }]() {}
+func f3[P, Q interface{ func(Q) P }]() {}
+func f4[P interface{ *Q }, Q interface{ func(P) }]() {}
+func f5[P interface{ func(P) }]() {}
+func f6[P interface { *Tree[P] }, Q any ]() {}
+
+func _() {
+ f1 /* ERROR "cannot infer P" */ ()
+ f2 /* ERROR "cannot infer P" */ ()
+ f3 /* ERROR "cannot infer P" */ ()
+ f4 /* ERROR "cannot infer P" */ ()
+ f5 /* ERROR "cannot infer P" */ ()
+ f6 /* ERROR "cannot infer P" */ ()
+}
+
+type Tree[P any] struct {
+ left, right *Tree[P]
+ data P
+}
+
+// test case from issue
+
+func foo[Src interface { func() Src }]() Src {
+ return foo[Src]
+}
+
+func _() {
+ foo /* ERROR "cannot infer Src" */ ()
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48234.go b/src/internal/types/testdata/fixedbugs/issue48234.go
new file mode 100644
index 0000000..e069930
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48234.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ = interface{
+ m()
+ m /* ERROR "duplicate method" */ ()
+}(nil)
diff --git a/src/internal/types/testdata/fixedbugs/issue48312.go b/src/internal/types/testdata/fixedbugs/issue48312.go
new file mode 100644
index 0000000..708201b
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48312.go
@@ -0,0 +1,20 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T interface{ m() }
+type P *T
+
+func _(p *T) {
+ p.m /* ERROR "type *T is pointer to interface, not interface" */ ()
+}
+
+func _(p P) {
+ p.m /* ERROR "type P is pointer to interface, not interface" */ ()
+}
+
+func _[P T](p *P) {
+ p.m /* ERROR "type *P is pointer to type parameter, not type parameter" */ ()
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48472.go b/src/internal/types/testdata/fixedbugs/issue48472.go
new file mode 100644
index 0000000..169ab0d
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48472.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func g() {
+ var s string
+ var i int
+ _ = s /* ERROR "invalid operation: s + i (mismatched types string and int)" */ + i
+}
+
+func f(i int) int {
+ i /* ERROR `invalid operation: i += "1" (mismatched types int and untyped string)` */ += "1"
+ return i
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48529.go b/src/internal/types/testdata/fixedbugs/issue48529.go
new file mode 100644
index 0000000..bcc5e35
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48529.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U, int] }] int
+
+type X int
+
+func (X) M() T[X] { return 0 }
diff --git a/src/internal/types/testdata/fixedbugs/issue48582.go b/src/internal/types/testdata/fixedbugs/issue48582.go
new file mode 100644
index 0000000..8ffcd5a
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48582.go
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type N /* ERROR "invalid recursive type" */ interface {
+ int | N
+}
+
+type A /* ERROR "invalid recursive type" */ interface {
+ int | B
+}
+
+type B interface {
+ int | A
+}
+
+type S /* ERROR "invalid recursive type" */ struct {
+ I // ERROR "interface contains type constraints"
+}
+
+type I interface {
+ int | S
+}
+
+type P interface {
+ *P // ERROR "interface contains type constraints"
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48619.go b/src/internal/types/testdata/fixedbugs/issue48619.go
new file mode 100644
index 0000000..fc5dce0
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48619.go
@@ -0,0 +1,22 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P any](a, _ P) {
+ var x int
+ // TODO(gri) these error messages, while correct, could be better
+ f(a, x /* ERROR "type int of x does not match inferred type P for P" */)
+ f(x, a /* ERROR "type P of a does not match inferred type int for P" */)
+}
+
+func g[P any](a, b P) {
+ g(a, b)
+ g(&a, &b)
+ g([]P{}, []P{})
+
+ // work-around: provide type argument explicitly
+ g[*P](&a, &b)
+ g[[]P]([]P{}, []P{})
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48656.go b/src/internal/types/testdata/fixedbugs/issue48656.go
new file mode 100644
index 0000000..f77e08a
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48656.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P *Q, Q any](P, Q) {
+ _ = f[P]
+}
+
+func f2[P /* ERROR "instantiation cycle" */ *Q, Q any](P, Q) {
+ _ = f2[*P]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48695.go b/src/internal/types/testdata/fixedbugs/issue48695.go
new file mode 100644
index 0000000..9f4a768
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48695.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func g[P ~func(T) P, T any](P) {}
+
+func _() {
+ type F func(int) F
+ var f F
+ g(f)
+ _ = g[F]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48703.go b/src/internal/types/testdata/fixedbugs/issue48703.go
new file mode 100644
index 0000000..89c667b
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48703.go
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+// The actual example from the issue.
+type List[P any] struct{}
+
+func (_ List[P]) m() (_ List[List[P]]) { return }
+
+// Other types of recursion through methods.
+type R[P any] int
+
+func (*R[R /* ERROR "must be an identifier" */ [int]]) m0() {}
+func (R[P]) m1(R[R[P]]) {}
+func (R[P]) m2(R[*P]) {}
+func (R[P]) m3([unsafe.Sizeof(new(R[P]))]int) {}
+func (R[P]) m4([unsafe.Sizeof(new(R[R[P]]))]int) {}
+
+// Mutual recursion
+type M[P any] int
+
+func (R[P]) m5(M[M[P]]) {}
+func (M[P]) m(R[R[P]]) {}
diff --git a/src/internal/types/testdata/fixedbugs/issue48712.go b/src/internal/types/testdata/fixedbugs/issue48712.go
new file mode 100644
index 0000000..76ad16c
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48712.go
@@ -0,0 +1,41 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P comparable](x, y P) {
+ _ = x == x
+ _ = x == y
+ _ = y == x
+ _ = y == y
+
+ _ = x /* ERROR "type parameter P is not comparable with <" */ < y
+}
+
+func _[P comparable](x P, y any) {
+ _ = x == x
+ _ = x == y
+ _ = y == x
+ _ = y == y
+
+ _ = x /* ERROR "type parameter P is not comparable with <" */ < y
+}
+
+func _[P any](x, y P) {
+ _ = x /* ERROR "incomparable types in type set" */ == x
+ _ = x /* ERROR "incomparable types in type set" */ == y
+ _ = y /* ERROR "incomparable types in type set" */ == x
+ _ = y /* ERROR "incomparable types in type set" */ == y
+
+ _ = x /* ERROR "type parameter P is not comparable with <" */ < y
+}
+
+func _[P any](x P, y any) {
+ _ = x /* ERROR "incomparable types in type set" */ == x
+ _ = x /* ERROR "incomparable types in type set" */ == y
+ _ = y == x // ERROR "incomparable types in type set"
+ _ = y == y
+
+ _ = x /* ERROR "type parameter P is not comparable with <" */ < y
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48819.go b/src/internal/types/testdata/fixedbugs/issue48819.go
new file mode 100644
index 0000000..916faaf
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48819.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+type T /* ERROR "invalid recursive type: T refers to itself" */ struct {
+ T
+}
+
+func _(t T) {
+ _ = unsafe.Sizeof(t) // should not go into infinite recursion here
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48827.go b/src/internal/types/testdata/fixedbugs/issue48827.go
new file mode 100644
index 0000000..bd08835
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48827.go
@@ -0,0 +1,19 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type G[P any] int
+
+type (
+ _ G[int]
+ _ G[G /* ERRORx `cannot use.*without instantiation` */]
+ _ bool /* ERROR "invalid operation: bool[int] (bool is not a generic type)" */ [int]
+ _ bool /* ERROR "invalid operation: bool[G] (bool is not a generic type)" */[G]
+)
+
+// The example from the issue.
+func _() {
+ _ = &([10]bool /* ERRORx `invalid operation.*bool is not a generic type` */ [1 /* ERROR "expected type" */ ]{})
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48951.go b/src/internal/types/testdata/fixedbugs/issue48951.go
new file mode 100644
index 0000000..8d6f850
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48951.go
@@ -0,0 +1,21 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ A1[P any] [10]A1 /* ERROR "invalid recursive type" */ [P]
+ A2[P any] [10]A2 /* ERROR "invalid recursive type" */ [*P]
+ A3[P any] [10]*A3[P]
+
+ L1[P any] []L1[P]
+
+ S1[P any] struct{ f S1 /* ERROR "invalid recursive type" */ [P] }
+ S2[P any] struct{ f S2 /* ERROR "invalid recursive type" */ [*P] } // like example in issue
+ S3[P any] struct{ f *S3[P] }
+
+ I1[P any] interface{ I1 /* ERROR "invalid recursive type" */ [P] }
+ I2[P any] interface{ I2 /* ERROR "invalid recursive type" */ [*P] }
+ I3[P any] interface{ *I3 /* ERROR "interface contains type constraints" */ [P] }
+)
diff --git a/src/internal/types/testdata/fixedbugs/issue48962.go b/src/internal/types/testdata/fixedbugs/issue48962.go
new file mode 100644
index 0000000..4294cf0
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48962.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T0[P any] struct {
+ f P
+}
+
+type T1 /* ERROR "invalid recursive type" */ struct {
+ _ T0[T1]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue48974.go b/src/internal/types/testdata/fixedbugs/issue48974.go
new file mode 100644
index 0000000..08d8656
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue48974.go
@@ -0,0 +1,22 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Fooer interface {
+ Foo()
+}
+
+type Fooable[F /* ERROR "instantiation cycle" */ Fooer] struct {
+ ptr F
+}
+
+func (f *Fooable[F]) Adapter() *Fooable[*FooerImpl[F]] {
+ return &Fooable[*FooerImpl[F]]{&FooerImpl[F]{}}
+}
+
+type FooerImpl[F Fooer] struct {
+}
+
+func (fi *FooerImpl[F]) Foo() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue49003.go b/src/internal/types/testdata/fixedbugs/issue49003.go
new file mode 100644
index 0000000..bf2e6c4
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49003.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f(s string) int {
+ for range s {
+ }
+} // ERROR "missing return"
diff --git a/src/internal/types/testdata/fixedbugs/issue49005.go b/src/internal/types/testdata/fixedbugs/issue49005.go
new file mode 100644
index 0000000..d91c207
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49005.go
@@ -0,0 +1,31 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T1 interface{ M() }
+
+func F1() T1
+
+var _ = F1().(*X1 /* ERROR "undefined: X1" */)
+
+func _() {
+ switch F1().(type) {
+ case *X1 /* ERROR "undefined: X1" */ :
+ }
+}
+
+type T2 interface{ M() }
+
+func F2() T2
+
+var _ = F2 /* ERROR "impossible type assertion: F2().(*X2)\n\t*X2 does not implement T2 (missing method M)" */ ().(*X2)
+
+type X2 struct{}
+
+func _() {
+ switch F2().(type) {
+ case * /* ERROR "impossible type switch case: *X2\n\tF2() (value of type T2) cannot have dynamic type *X2 (missing method M)" */ X2:
+ }
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue49043.go b/src/internal/types/testdata/fixedbugs/issue49043.go
new file mode 100644
index 0000000..7594b32
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49043.go
@@ -0,0 +1,24 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// The example from the issue.
+type (
+ N[P any] M /* ERROR "invalid recursive type" */ [P]
+ M[P any] N[P]
+)
+
+// A slightly more complicated case.
+type (
+ A[P any] B /* ERROR "invalid recursive type" */ [P]
+ B[P any] C[P]
+ C[P any] A[P]
+)
+
+// Confusing but valid (note that `type T *T` is valid).
+type (
+ N1[P any] *M1[P]
+ M1[P any] *N1[P]
+)
diff --git a/src/internal/types/testdata/fixedbugs/issue49112.go b/src/internal/types/testdata/fixedbugs/issue49112.go
new file mode 100644
index 0000000..e87d1c0
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49112.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P int](P) {}
+
+func _() {
+ _ = f[int]
+ _ = f[[ /* ERROR "[]int does not satisfy int ([]int missing in int)" */ ]int]
+
+ f(0)
+ f /* ERROR "P (type []int) does not satisfy int" */ ([]int{}) // TODO(gri) better error message
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue49179.go b/src/internal/types/testdata/fixedbugs/issue49179.go
new file mode 100644
index 0000000..1f8da29
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49179.go
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[P int | string]() {}
+func f2[P ~int | string | float64]() {}
+func f3[P int](x P) {}
+
+type myInt int
+type myFloat float64
+
+func _() {
+ _ = f1[int]
+ _ = f1[myInt /* ERROR "possibly missing ~ for int in int | string" */]
+ _ = f2[myInt]
+ _ = f2[myFloat /* ERROR "possibly missing ~ for float64 in ~int | string | float64" */]
+ var x myInt
+ f3 /* ERROR "myInt does not satisfy int (possibly missing ~ for int in int)" */ (x)
+}
+
+// test case from the issue
+
+type SliceConstraint[T any] interface {
+ []T
+}
+
+func Map[S SliceConstraint[E], E any](s S, f func(E) E) S {
+ return s
+}
+
+type MySlice []int
+
+func f(s MySlice) {
+ Map[MySlice /* ERROR "MySlice does not satisfy SliceConstraint[int] (possibly missing ~ for []int in SliceConstraint[int])" */, int](s, nil)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue49242.go b/src/internal/types/testdata/fixedbugs/issue49242.go
new file mode 100644
index 0000000..0415bf6
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49242.go
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P int](x P) int {
+ return x // ERRORx `cannot use x .* as int value in return statement`
+}
+
+func _[P int]() int {
+ return P /* ERRORx `cannot use P\(1\) .* as int value in return statement` */ (1)
+}
+
+func _[P int](x int) P {
+ return x // ERRORx `cannot use x .* as P value in return statement`
+}
+
+func _[P, Q any](x P) Q {
+ return x // ERRORx `cannot use x .* as Q value in return statement`
+}
+
+// test case from issue
+func F[G interface{ uint }]() int {
+ f := func(uint) int { return 0 }
+ return f(G /* ERRORx `cannot use G\(1\) .* as uint value in argument to f` */ (1))
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue49247.go b/src/internal/types/testdata/fixedbugs/issue49247.go
new file mode 100644
index 0000000..0ad2e29
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49247.go
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type integer interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+func Add1024[T integer](s []T) {
+ for i, v := range s {
+ s[i] = v + 1024 // ERROR "cannot convert 1024 (untyped int constant) to type T"
+ }
+}
+
+func f[T interface{ int8 }]() {
+ println(T(1024 /* ERROR "cannot convert 1024 (untyped int value) to type T" */))
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue49276.go b/src/internal/types/testdata/fixedbugs/issue49276.go
new file mode 100644
index 0000000..bdfb42f
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49276.go
@@ -0,0 +1,46 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+type S /* ERROR "invalid recursive type S" */ struct {
+ _ [unsafe.Sizeof(s)]byte
+}
+
+var s S
+
+// Since f is a pointer, this case could be valid.
+// But it's pathological and not worth the expense.
+type T struct {
+ f *[unsafe.Sizeof(T /* ERROR "invalid recursive type" */ {})]int
+}
+
+// a mutually recursive case using unsafe.Sizeof
+type (
+ A1 struct {
+ _ [unsafe.Sizeof(B1{})]int
+ }
+
+ B1 struct {
+ _ [unsafe.Sizeof(A1 /* ERROR "invalid recursive type" */ {})]int
+ }
+)
+
+// a mutually recursive case using len
+type (
+ A2 struct {
+ f [len(B2{}.f)]int
+ }
+
+ B2 struct {
+ f [len(A2 /* ERROR "invalid recursive type" */ {}.f)]int
+ }
+)
+
+// test case from issue
+type a struct {
+ _ [42 - unsafe.Sizeof(a /* ERROR "invalid recursive type" */ {})]byte
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue49296.go b/src/internal/types/testdata/fixedbugs/issue49296.go
new file mode 100644
index 0000000..c8c5208
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49296.go
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[
+ T0 any,
+ T1 []int,
+ T2 ~float64 | ~complex128 | chan int,
+]() {
+ _ = T0(nil /* ERROR "cannot convert nil to type T0" */ )
+ _ = T1(1 /* ERRORx `cannot convert 1 .* to type T1` */ )
+ _ = T2(2 /* ERRORx `cannot convert 2 .* to type T2` */ )
+}
+
+// test case from issue
+func f[T interface{[]int}]() {
+ _ = T(1 /* ERROR "cannot convert" */ )
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue49439.go b/src/internal/types/testdata/fixedbugs/issue49439.go
new file mode 100644
index 0000000..3852f16
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49439.go
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+type T0 /* ERROR "invalid recursive type" */ [P T0[P]] struct{}
+
+type T1 /* ERROR "invalid recursive type" */ [P T2[P]] struct{}
+type T2[P T1[P]] struct{}
+
+type T3 /* ERROR "invalid recursive type" */ [P interface{ ~struct{ f T3[int] } }] struct{}
+
+// valid cycle in M
+type N[P M[P]] struct{}
+type M[Q any] struct { F *M[Q] }
+
+// "crazy" case
+type TC[P [unsafe.Sizeof(func() {
+ type T [P [unsafe.Sizeof(func(){})]byte] struct{}
+})]byte] struct{}
+
+// test case from issue
+type X /* ERROR "invalid recursive type" */ [T any, PT X[T]] interface{}
diff --git a/src/internal/types/testdata/fixedbugs/issue49482.go b/src/internal/types/testdata/fixedbugs/issue49482.go
new file mode 100644
index 0000000..7139bae
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49482.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// The following is OK, per the special handling for type literals discussed in issue #49482.
+type _[P *struct{}] struct{}
+type _[P *int,] int
+type _[P (*int),] int
+
+const P = 2 // declare P to avoid noisy 'undefined' errors below.
+
+// The following parse as invalid array types due to parsing ambiguities.
+type _ [P *int /* ERROR "int (type) is not an expression" */ ]int
+type _ [P /* ERROR "non-function P" */ (*int)]int
+
+// Adding a trailing comma or an enclosing interface resolves the ambiguity.
+type _[P *int,] int
+type _[P (*int),] int
+type _[P interface{*int}] int
+type _[P interface{(*int)}] int
+
+// The following parse correctly as valid generic types.
+type _[P *struct{} | int] struct{}
+type _[P *struct{} | ~int] struct{}
diff --git a/src/internal/types/testdata/fixedbugs/issue49541.go b/src/internal/types/testdata/fixedbugs/issue49541.go
new file mode 100644
index 0000000..da37311
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49541.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S[A, B any] struct {
+ f int
+}
+
+func (S[A, B]) m() {}
+
+// TODO(gri): with type-type inference enabled we should only report one error
+// below. See issue #50588.
+
+func _[A any](s S /* ERROR "got 1 arguments but 2 type parameters" */ [A]) {
+ // we should see no follow-on errors below
+ s.f = 1
+ s.m()
+}
+
+// another test case from the issue
+
+func _() {
+ X /* ERROR "cannot infer Q" */ (Interface[*F /* ERROR "got 1 arguments but 2 type parameters" */ [string]](Impl{}))
+}
+
+func X[Q Qer](fs Interface[Q]) {
+}
+
+type Impl struct{}
+
+func (Impl) M() {}
+
+type Interface[Q Qer] interface {
+ M()
+}
+
+type Qer interface {
+ Q()
+}
+
+type F[A, B any] struct{}
+
+func (f *F[A, B]) Q() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue49579.go b/src/internal/types/testdata/fixedbugs/issue49579.go
new file mode 100644
index 0000000..780859c
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49579.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type I[F any] interface {
+ Q(*F)
+}
+
+func G[F any]() I[any] {
+ return g /* ERRORx `cannot use g\[F\]{} .* as I\[any\] value in return statement: g\[F\] does not implement I\[any\] \(method Q has pointer receiver\)` */ [F]{}
+}
+
+type g[F any] struct{}
+
+func (*g[F]) Q(*any) {}
diff --git a/src/internal/types/testdata/fixedbugs/issue49592.go b/src/internal/types/testdata/fixedbugs/issue49592.go
new file mode 100644
index 0000000..846deaa
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49592.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ var x *interface{}
+ var y interface{}
+ _ = x == y
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue49602.go b/src/internal/types/testdata/fixedbugs/issue49602.go
new file mode 100644
index 0000000..09cc969
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49602.go
@@ -0,0 +1,19 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type M interface {
+ m()
+}
+
+type C interface {
+ comparable
+}
+
+type _ interface {
+ int | M // ERROR "cannot use p.M in union (p.M contains methods)"
+ int | comparable // ERROR "cannot use comparable in union"
+ int | C // ERROR "cannot use p.C in union (p.C embeds comparable)"
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue49705.go b/src/internal/types/testdata/fixedbugs/issue49705.go
new file mode 100644
index 0000000..5b5fba2
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49705.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Integer interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+func shl[I Integer](n int) I {
+ return 1 << n
+}
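// A minimal usage sketch for the shl test case above; not part of the upstream
// patch. The Integer constraint and shl are copied from issue49705.go; the main
// function and the int8/uint16 instantiations are illustrative additions showing
// that the untyped constant 1 is converted to the type argument I before shifting.
package main

import "fmt"

type Integer interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 |
		~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}

func shl[I Integer](n int) I {
	return 1 << n // 1 takes type I; n may be a variable shift count
}

func main() {
	fmt.Println(shl[int8](3))    // 8
	fmt.Println(shl[uint16](10)) // 1024
}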
diff --git a/src/internal/types/testdata/fixedbugs/issue49735.go b/src/internal/types/testdata/fixedbugs/issue49735.go
new file mode 100644
index 0000000..0fcc778
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49735.go
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P1 any, P2 ~byte](s1 P1, s2 P2) {
+ _ = append(nil /* ERROR "first argument to append must be a slice; have untyped nil" */ , 0)
+ _ = append(s1 /* ERRORx `s1 .* has no core type` */ , 0)
+ _ = append(s2 /* ERRORx `s2 .* has core type byte` */ , 0)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue49739.go b/src/internal/types/testdata/fixedbugs/issue49739.go
new file mode 100644
index 0000000..73825f4
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49739.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Verify that we get an empty type set (not just an error)
+// when using an invalid ~A.
+
+package p
+
+type A int
+type C interface {
+ ~ /* ERROR "invalid use of ~" */ A
+}
+
+func f[_ C]() {}
+func g[_ interface{ C }]() {}
+func h[_ C | int]() {}
+
+func _() {
+ _ = f[int /* ERROR "cannot satisfy C (empty type set)" */]
+ _ = g[int /* ERROR "cannot satisfy interface{C} (empty type set)" */]
+ _ = h[int]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue49864.go b/src/internal/types/testdata/fixedbugs/issue49864.go
new file mode 100644
index 0000000..8ccd77c
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue49864.go
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P ~int, Q any](p P) {
+ _ = Q(p /* ERROR "cannot convert" */ )
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50259.go b/src/internal/types/testdata/fixedbugs/issue50259.go
new file mode 100644
index 0000000..6df8c64
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50259.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var x T[B]
+
+type T[_ any] struct{}
+type A T[B]
+type B = T[A]
+
+// test case from issue
+
+var v Box[Step]
+type Box[T any] struct{}
+type Step = Box[StepBox]
+type StepBox Box[Step]
diff --git a/src/internal/types/testdata/fixedbugs/issue50276.go b/src/internal/types/testdata/fixedbugs/issue50276.go
new file mode 100644
index 0000000..97e477e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50276.go
@@ -0,0 +1,39 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// simplified test case
+
+type transform[T any] struct{}
+type pair[S any] struct {}
+
+var _ transform[step]
+
+type box transform[step]
+type step = pair[box]
+
+// test case from issue
+
+type Transform[T any] struct{ hold T }
+type Pair[S, T any] struct {
+ First S
+ Second T
+}
+
+var first Transform[Step]
+
+// This line doesn't use the Step alias, and it compiles fine if you uncomment it.
+var second Transform[Pair[Box, interface{}]]
+
+type Box *Transform[Step]
+
+// This line is the same as the `first` line, but it comes after the Box declaration and
+// does not break the compile.
+var third Transform[Step]
+
+type Step = Pair[Box, interface{}]
+
+// This line also does not break the compile
+var fourth Transform[Step]
diff --git a/src/internal/types/testdata/fixedbugs/issue50281.go b/src/internal/types/testdata/fixedbugs/issue50281.go
new file mode 100644
index 0000000..f333e81
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50281.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[S string | []byte](s S) {
+ var buf []byte
+ _ = append(buf, s...)
+}
+
+func _[S ~string | ~[]byte](s S) {
+ var buf []byte
+ _ = append(buf, s...)
+}
+
+// test case from issue
+
+type byteseq interface {
+ string | []byte
+}
+
+// This should make it possible to eliminate the two functions above.
+func AppendByteString[source byteseq](buf []byte, s source) []byte {
+ return append(buf, s[1:6]...)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50321.go b/src/internal/types/testdata/fixedbugs/issue50321.go
new file mode 100644
index 0000000..ab2a31b
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50321.go
@@ -0,0 +1,8 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func Ln[A A /* ERROR "cannot use a type parameter as constraint" */ ](p A) {
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50372.go b/src/internal/types/testdata/fixedbugs/issue50372.go
new file mode 100644
index 0000000..10d2a24
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50372.go
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _(s []int) {
+ var i, j, k, l int
+ _, _, _, _ = i, j, k, l
+
+ for range s {}
+ for i = range s {}
+ for i, j = range s {}
+ for i, j, k /* ERRORx "range clause permits at most two iteration variables|at most 2 expressions" */ = range s {}
+ for i, j, k, l /* ERRORx "range clause permits at most two iteration variables|at most 2 expressions" */ = range s {}
+}
+
+func _(s chan int) {
+ var i, j, k, l int
+ _, _, _, _ = i, j, k, l
+
+ for range s {}
+ for i = range s {}
+ for i, j /* ERRORx `range over .* permits only one iteration variable` */ = range s {}
+ for i, j, k /* ERRORx `range over .* permits only one iteration variable|at most 2 expressions` */ = range s {}
+ for i, j, k, l /* ERRORx `range over .* permits only one iteration variable|at most 2 expressions` */ = range s {}
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50417.go b/src/internal/types/testdata/fixedbugs/issue50417.go
new file mode 100644
index 0000000..c70898e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50417.go
@@ -0,0 +1,68 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Field accesses through type parameters are disabled
+// until we have a more thorough understanding of the
+// implications on the spec. See issue #51576.
+
+package p
+
+type Sf struct {
+ f int
+}
+
+func f0[P Sf](p P) {
+ _ = p.f // ERROR "p.f undefined"
+ p.f /* ERROR "p.f undefined" */ = 0
+}
+
+func f0t[P ~struct{f int}](p P) {
+ _ = p.f // ERROR "p.f undefined"
+ p.f /* ERROR "p.f undefined" */ = 0
+}
+
+var _ = f0[Sf]
+var _ = f0t[Sf]
+
+var _ = f0[Sm /* ERROR "does not satisfy" */ ]
+var _ = f0t[Sm /* ERROR "does not satisfy" */ ]
+
+func f1[P interface{ Sf; m() }](p P) {
+ _ = p.f // ERROR "p.f undefined"
+ p.f /* ERROR "p.f undefined" */ = 0
+ p.m()
+}
+
+var _ = f1[Sf /* ERROR "missing method m" */ ]
+var _ = f1[Sm /* ERROR "does not satisfy" */ ]
+
+type Sm struct {}
+
+func (Sm) m() {}
+
+type Sfm struct {
+ f int
+}
+
+func (Sfm) m() {}
+
+func f2[P interface{ Sfm; m() }](p P) {
+ _ = p.f // ERROR "p.f undefined"
+ p.f /* ERROR "p.f undefined" */ = 0
+ p.m()
+}
+
+var _ = f2[Sfm]
+
+// special case: core type is a named pointer type
+
+type PSfm *Sfm
+
+func f3[P interface{ PSfm }](p P) {
+ _ = p.f // ERROR "p.f undefined"
+ p.f /* ERROR "p.f undefined" */ = 0
+ p.m /* ERROR "type P has no field or method m" */ ()
+}
+
+var _ = f3[PSfm]
diff --git a/src/internal/types/testdata/fixedbugs/issue50426.go b/src/internal/types/testdata/fixedbugs/issue50426.go
new file mode 100644
index 0000000..17ec0ce
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50426.go
@@ -0,0 +1,44 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type A1 [2]uint64
+type A2 [2]uint64
+
+func (a A1) m() A1 { return a }
+func (a A2) m() A2 { return a }
+
+func f[B any, T interface {
+ A1 | A2
+ m() T
+}](v T) {
+}
+
+func _() {
+ var v A2
+ // Use function type inference to infer type A2 for T.
+ // Don't use constraint type inference before function
+ // type inference for typed arguments, otherwise it would
+ // infer type [2]uint64 for T which doesn't have method m
+ // (was the bug).
+ f[int](v)
+}
+
+// Keep using constraint type inference before function type
+// inference for untyped arguments so we infer type float64
+// for E below, and not int (which would not work).
+func g[S ~[]E, E any](S, E) {}
+
+func _() {
+ var s []float64
+ g[[]float64](s, 0)
+}
+
+// Keep using constraint type inference after function
+// type inference for untyped arguments so we infer
+// missing type arguments for which we only have the
+// untyped arguments as starting point.
+func h[E any, R []E](v E) R { return R{v} }
+func _() []int { return h(0) }
diff --git a/src/internal/types/testdata/fixedbugs/issue50427.go b/src/internal/types/testdata/fixedbugs/issue50427.go
new file mode 100644
index 0000000..d89d63e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50427.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// The parser no longer parses type parameters for methods.
+// In the past, type checking the code below led to a crash (#50427).
+
+type T interface{ m[ /* ERROR "must have no type parameters" */ P any]() }
+
+func _(t T) {
+ var _ interface{ m[ /* ERROR "must have no type parameters" */ P any](); n() } = t /* ERROR "does not implement" */
+}
+
+type S struct{}
+
+func (S) m[ /* ERROR "must have no type parameters" */ P any]() {}
+
+func _(s S) {
+ var _ interface{ m[ /* ERROR "must have no type parameters" */ P any](); n() } = s /* ERROR "does not implement" */
+
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50450.go b/src/internal/types/testdata/fixedbugs/issue50450.go
new file mode 100644
index 0000000..bae3111
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50450.go
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S struct{}
+
+func f[P S]() {}
+
+var _ = f[S]
diff --git a/src/internal/types/testdata/fixedbugs/issue50516.go b/src/internal/types/testdata/fixedbugs/issue50516.go
new file mode 100644
index 0000000..fcaefed
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50516.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P struct{ f int }](x P) {
+ _ = x.g // ERROR "type P has no field or method g"
+}
+
+func _[P struct{ f int } | struct{ g int }](x P) {
+ _ = x.g // ERROR "type P has no field or method g"
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50646.go b/src/internal/types/testdata/fixedbugs/issue50646.go
new file mode 100644
index 0000000..2c16cfc
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50646.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[_ comparable]() {}
+func f2[_ interface{ comparable }]() {}
+
+type T interface{ m() }
+
+func _[P comparable, Q ~int, R any]() {
+ _ = f1[int]
+ _ = f1[T]
+ _ = f1[any]
+ _ = f1[P]
+ _ = f1[Q]
+ _ = f1[R /* ERROR "R does not satisfy comparable" */]
+
+ _ = f2[int]
+ _ = f2[T]
+ _ = f2[any]
+ _ = f2[P]
+ _ = f2[Q]
+ _ = f2[R /* ERROR "R does not satisfy comparable" */]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50729.go b/src/internal/types/testdata/fixedbugs/issue50729.go
new file mode 100644
index 0000000..fe19fdf
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50729.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// version 1
+var x1 T1[B1]
+
+type T1[_ any] struct{}
+type A1 T1[B1]
+type B1 = T1[A1]
+
+// version 2
+type T2[_ any] struct{}
+type A2 T2[B2]
+type B2 = T2[A2]
+
+var x2 T2[B2]
diff --git a/src/internal/types/testdata/fixedbugs/issue50755.go b/src/internal/types/testdata/fixedbugs/issue50755.go
new file mode 100644
index 0000000..afc7b24
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50755.go
@@ -0,0 +1,47 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// The core type of M2 unifies with the type of m1
+// during function argument type inference.
+// M2's constraint is unnamed.
+func f1[K1 comparable, E1 any](m1 map[K1]E1) {}
+
+func f2[M2 map[string]int](m2 M2) {
+ f1(m2)
+}
+
+// The core type of M3 unifies with the type of m1
+// during function argument type inference.
+// M3's constraint is named.
+type Map3 map[string]int
+
+func f3[M3 Map3](m3 M3) {
+ f1(m3)
+}
+
+// The core type of M5 unifies with the core type of M4
+// during constraint type inference.
+func f4[M4 map[K4]int, K4 comparable](m4 M4) {}
+
+func f5[M5 map[K5]int, K5 comparable](m5 M5) {
+ f4(m5)
+}
+
+// test case from issue
+
+func Copy[MC ~map[KC]VC, KC comparable, VC any](dst, src MC) {
+ for k, v := range src {
+ dst[k] = v
+ }
+}
+
+func Merge[MM ~map[KM]VM, KM comparable, VM any](ms ...MM) MM {
+ result := MM{}
+ for _, m := range ms {
+ Copy(result, m)
+ }
+ return result
+}
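// A minimal usage sketch for the Copy/Merge helpers above; not part of the
// upstream patch. Copy and Merge are copied from issue50755.go; the defined
// type MyMap and the main function are illustrative additions showing that
// all type arguments (MM, KM, VM) are inferred from the call site.
package main

import "fmt"

type MyMap map[string]int

func Copy[MC ~map[KC]VC, KC comparable, VC any](dst, src MC) {
	for k, v := range src {
		dst[k] = v
	}
}

func Merge[MM ~map[KM]VM, KM comparable, VM any](ms ...MM) MM {
	result := MM{}
	for _, m := range ms {
		Copy(result, m)
	}
	return result
}

func main() {
	a := MyMap{"x": 1}
	b := MyMap{"y": 2}
	fmt.Println(Merge(a, b)) // map[x:1 y:2]
}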
diff --git a/src/internal/types/testdata/fixedbugs/issue50779.go b/src/internal/types/testdata/fixedbugs/issue50779.go
new file mode 100644
index 0000000..09ddf53
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50779.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type AC interface {
+ C
+}
+
+type ST []int
+
+type R[S any, P any] struct{}
+
+type SR = R[SS, ST]
+
+type SS interface {
+ NSR(any) *SR // ERROR "invalid use of type alias SR in recursive type"
+}
+
+type C interface {
+ NSR(any) *SR
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50782.go b/src/internal/types/testdata/fixedbugs/issue50782.go
new file mode 100644
index 0000000..97e8f6c
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50782.go
@@ -0,0 +1,47 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Field accesses through type parameters are disabled
+// until we have a more thorough understanding of the
+// implications on the spec. See issue #51576.
+
+package p
+
+// The first example from the issue.
+type Numeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// numericAbs matches numeric types with an Abs method.
+type numericAbs[T Numeric] interface {
+ ~struct{ Value T }
+ Abs() T
+}
+
+// AbsDifference computes the absolute value of the difference of
+// a and b, where the absolute value is determined by the Abs method.
+func absDifference[T numericAbs[T /* ERROR "T does not satisfy Numeric" */]](a, b T) T {
+ // Field accesses are not permitted for now. Keep an error so
+ // we can find and fix this code once the situation changes.
+ return a.Value // ERROR "a.Value undefined"
+ // TODO: The error below should probably be positioned on the '-'.
+ // d := a /* ERROR "invalid operation: operator - not defined" */ .Value - b.Value
+ // return d.Abs()
+}
+
+// The second example from the issue.
+type T[P int] struct{ f P }
+
+func _[P T[P /* ERROR "P does not satisfy int" */ ]]() {}
+
+// Additional tests
+func _[P T[T /* ERROR "T[P] does not satisfy int" */ [P /* ERROR "P does not satisfy int" */ ]]]() {}
+func _[P T[Q /* ERROR "Q does not satisfy int" */ ], Q T[P /* ERROR "P does not satisfy int" */ ]]() {}
+func _[P T[Q], Q int]() {}
+
+type C[P comparable] struct{ f P }
+func _[P C[C[P]]]() {}
+func _[P C[C /* ERROR "C[Q] does not satisfy comparable" */ [Q /* ERROR "Q does not satisfy comparable" */]], Q func()]() {}
+func _[P [10]C[P]]() {}
+func _[P struct{ f C[C[P]]}]() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue50816.go b/src/internal/types/testdata/fixedbugs/issue50816.go
new file mode 100644
index 0000000..b7c28cd
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50816.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkg
+
+type I interface {
+ Foo()
+}
+
+type T1 struct{}
+
+func (T1) foo() {}
+
+type T2 struct{}
+
+func (T2) foo() string { return "" }
+
+func _() {
+ var i I
+ _ = i /* ERROR "impossible type assertion: i.(T1)\n\tT1 does not implement I (missing method Foo)\n\t\thave foo()\n\t\twant Foo()" */ .(T1)
+ _ = i /* ERROR "impossible type assertion: i.(T2)\n\tT2 does not implement I (missing method Foo)\n\t\thave foo() string\n\t\twant Foo()" */ .(T2)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50833.go b/src/internal/types/testdata/fixedbugs/issue50833.go
new file mode 100644
index 0000000..e912e4d
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50833.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ S struct{ f int }
+ PS *S
+)
+
+func a() []*S { return []*S{{f: 1}} }
+func b() []PS { return []PS{{f: 1}} }
+
+func c[P *S]() []P { return []P{{f: 1}} }
+func d[P PS]() []P { return []P{{f: 1}} }
diff --git a/src/internal/types/testdata/fixedbugs/issue50912.go b/src/internal/types/testdata/fixedbugs/issue50912.go
new file mode 100644
index 0000000..a99fa7b
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50912.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func Real[P ~complex128](x P) {
+ _ = real(x /* ERROR "not supported" */ )
+}
+
+func Imag[P ~complex128](x P) {
+ _ = imag(x /* ERROR "not supported" */ )
+}
+
+func Complex[P ~float64](x P) {
+ _ = complex(x /* ERROR "not supported" */ , 0)
+ _ = complex(0 /* ERROR "not supported" */ , x)
+ _ = complex(x /* ERROR "not supported" */ , x)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50918.go b/src/internal/types/testdata/fixedbugs/issue50918.go
new file mode 100644
index 0000000..5744fa8
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50918.go
@@ -0,0 +1,21 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type thing1 struct {
+ things []string
+}
+
+type thing2 struct {
+ things []thing1
+}
+
+func _() {
+ var a1, b1 thing1
+ _ = a1 /* ERROR "struct containing []string cannot be compared" */ == b1
+
+ var a2, b2 thing2
+ _ = a2 /* ERROR "struct containing []thing1 cannot be compared" */ == b2
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50929.go b/src/internal/types/testdata/fixedbugs/issue50929.go
new file mode 100644
index 0000000..64c7cd6
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50929.go
@@ -0,0 +1,68 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
+import "fmt"
+
+type F[A, B any] int
+
+func G[A, B any](F[A, B]) {
+}
+
+func _() {
+ // TODO(gri) only report one error below (issue #50932)
+ var x F /* ERROR "got 1 arguments but 2 type parameters" */ [int]
+ G(x /* ERROR "does not match" */)
+}
+
+// test case from issue
+// (lots of errors but doesn't crash anymore)
+
+type RC[G any, RG any] interface {
+ ~[]RG
+}
+
+type RG[G any] struct{}
+
+type RSC[G any] []*RG[G]
+
+type M[Rc RC[G, RG], G any, RG any] struct {
+ Fn func(Rc)
+}
+
+type NFn[Rc RC[G, RG], G any, RG any] func(Rc)
+
+func NC[Rc RC[G, RG], G any, RG any](nFn NFn[Rc, G, RG]) {
+ var empty Rc
+ nFn(empty)
+}
+
+func NSG[G any](c RSC[G]) {
+ fmt.Println(c)
+}
+
+func MMD[Rc RC /* ERROR "got 1 arguments" */ [RG], RG any, G any]() M /* ERROR "got 2 arguments" */ [Rc, RG] {
+
+ var nFn NFn /* ERROR "got 2 arguments" */ [Rc, RG]
+
+ var empty Rc
+ switch any(empty).(type) {
+ case BC /* ERROR "undefined: BC" */ :
+
+ case RSC[G]:
+ nFn = NSG /* ERROR "cannot use NSG[G]" */ [G]
+ }
+
+ return M /* ERROR "got 2 arguments" */ [Rc, RG]{
+ Fn: func(rc Rc) {
+ NC(nFn /* ERROR "does not match" */ )
+ },
+ }
+
+ return M /* ERROR "got 2 arguments" */ [Rc, RG]{}
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue50965.go b/src/internal/types/testdata/fixedbugs/issue50965.go
new file mode 100644
index 0000000..79059e9
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue50965.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _(x int, c string) {
+ switch x {
+ case c /* ERROR "invalid case c in switch on x (mismatched types string and int)" */ :
+ }
+}
+
+func _(x, c []int) {
+ switch x {
+ case c /* ERROR "invalid case c in switch on x (slice can only be compared to nil)" */ :
+ }
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51025.go b/src/internal/types/testdata/fixedbugs/issue51025.go
new file mode 100644
index 0000000..caaabd5
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51025.go
@@ -0,0 +1,38 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ interface{ m() } = struct /* ERROR "m is a field, not a method" */ {
+ m func()
+}{}
+
+var _ interface{ m() } = & /* ERROR "m is a field, not a method" */ struct {
+ m func()
+}{}
+
+var _ interface{ M() } = struct /* ERROR "missing method M" */ {
+ m func()
+}{}
+
+var _ interface{ M() } = & /* ERROR "missing method M" */ struct {
+ m func()
+}{}
+
+// test case from issue
+type I interface{ m() }
+type T struct{ m func() }
+type M struct{}
+
+func (M) m() {}
+
+func _() {
+ var t T
+ var m M
+ var i I
+
+ i = m
+ i = t // ERROR "m is a field, not a method"
+ _ = i
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51048.go b/src/internal/types/testdata/fixedbugs/issue51048.go
new file mode 100644
index 0000000..5830837
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51048.go
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P int]() {
+ _ = f[P]
+}
+
+func f[T int]() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue51139.go b/src/internal/types/testdata/fixedbugs/issue51139.go
new file mode 100644
index 0000000..4c460d4
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51139.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[S []T, T any](S, T) {}
+
+func _() {
+ type L chan int
+ f([]L{}, make(chan int))
+ f([]L{}, make(L))
+ f([]chan int{}, make(chan int))
+ f /* ERROR "[]chan int does not satisfy []L ([]chan int missing in []p.L)" */ ([]chan int{}, make(L))
+}
+
+// test case from issue
+
+func Append[S ~[]T, T any](s S, x ...T) S { /* implementation of append */ return s }
+
+func _() {
+ type MyPtr *int
+ var x []MyPtr
+ _ = append(x, new(int))
+ _ = Append(x, new(int))
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51145.go b/src/internal/types/testdata/fixedbugs/issue51145.go
new file mode 100644
index 0000000..1f970d9
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51145.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "fmt"
+
+type (
+ _ [fmt /* ERROR "invalid array length fmt" */ ]int
+ _ [float64 /* ERROR "invalid array length float64" */ ]int
+ _ [f /* ERROR "invalid array length f" */ ]int
+ _ [nil /* ERROR "invalid array length nil" */ ]int
+)
+
+func f()
+
+var _ fmt.Stringer // use fmt
diff --git a/src/internal/types/testdata/fixedbugs/issue51158.go b/src/internal/types/testdata/fixedbugs/issue51158.go
new file mode 100644
index 0000000..3edc505
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51158.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Type checking the following code should not cause an infinite recursion.
+func f[M map[K]int, K comparable](m M) {
+ f(m)
+}
+
+// Equivalent code using mutual recursion.
+func f1[M map[K]int, K comparable](m M) {
+ f2(m)
+}
+func f2[M map[K]int, K comparable](m M) {
+ f1(m)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51229.go b/src/internal/types/testdata/fixedbugs/issue51229.go
new file mode 100644
index 0000000..22a9113
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51229.go
@@ -0,0 +1,164 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Constraint type inference should be independent of the
+// ordering of the type parameter declarations. Try all
+// permutations in the test case below.
+// Permutations produced by https://go.dev/play/p/PHcZNGJTEBZ.
+
+func f00[S1 ~[]E1, S2 ~[]E2, E1 ~byte, E2 ~byte](S1, S2) {}
+func f01[S2 ~[]E2, S1 ~[]E1, E1 ~byte, E2 ~byte](S1, S2) {}
+func f02[E1 ~byte, S1 ~[]E1, S2 ~[]E2, E2 ~byte](S1, S2) {}
+func f03[S1 ~[]E1, E1 ~byte, S2 ~[]E2, E2 ~byte](S1, S2) {}
+func f04[S2 ~[]E2, E1 ~byte, S1 ~[]E1, E2 ~byte](S1, S2) {}
+func f05[E1 ~byte, S2 ~[]E2, S1 ~[]E1, E2 ~byte](S1, S2) {}
+func f06[E2 ~byte, S2 ~[]E2, S1 ~[]E1, E1 ~byte](S1, S2) {}
+func f07[S2 ~[]E2, E2 ~byte, S1 ~[]E1, E1 ~byte](S1, S2) {}
+func f08[S1 ~[]E1, E2 ~byte, S2 ~[]E2, E1 ~byte](S1, S2) {}
+func f09[E2 ~byte, S1 ~[]E1, S2 ~[]E2, E1 ~byte](S1, S2) {}
+func f10[S2 ~[]E2, S1 ~[]E1, E2 ~byte, E1 ~byte](S1, S2) {}
+func f11[S1 ~[]E1, S2 ~[]E2, E2 ~byte, E1 ~byte](S1, S2) {}
+func f12[S1 ~[]E1, E1 ~byte, E2 ~byte, S2 ~[]E2](S1, S2) {}
+func f13[E1 ~byte, S1 ~[]E1, E2 ~byte, S2 ~[]E2](S1, S2) {}
+func f14[E2 ~byte, S1 ~[]E1, E1 ~byte, S2 ~[]E2](S1, S2) {}
+func f15[S1 ~[]E1, E2 ~byte, E1 ~byte, S2 ~[]E2](S1, S2) {}
+func f16[E1 ~byte, E2 ~byte, S1 ~[]E1, S2 ~[]E2](S1, S2) {}
+func f17[E2 ~byte, E1 ~byte, S1 ~[]E1, S2 ~[]E2](S1, S2) {}
+func f18[E2 ~byte, E1 ~byte, S2 ~[]E2, S1 ~[]E1](S1, S2) {}
+func f19[E1 ~byte, E2 ~byte, S2 ~[]E2, S1 ~[]E1](S1, S2) {}
+func f20[S2 ~[]E2, E2 ~byte, E1 ~byte, S1 ~[]E1](S1, S2) {}
+func f21[E2 ~byte, S2 ~[]E2, E1 ~byte, S1 ~[]E1](S1, S2) {}
+func f22[E1 ~byte, S2 ~[]E2, E2 ~byte, S1 ~[]E1](S1, S2) {}
+func f23[S2 ~[]E2, E1 ~byte, E2 ~byte, S1 ~[]E1](S1, S2) {}
+
+type myByte byte
+
+func _(a []byte, b []myByte) {
+ f00(a, b)
+ f01(a, b)
+ f02(a, b)
+ f03(a, b)
+ f04(a, b)
+ f05(a, b)
+ f06(a, b)
+ f07(a, b)
+ f08(a, b)
+ f09(a, b)
+ f10(a, b)
+ f11(a, b)
+ f12(a, b)
+ f13(a, b)
+ f14(a, b)
+ f15(a, b)
+ f16(a, b)
+ f17(a, b)
+ f18(a, b)
+ f19(a, b)
+ f20(a, b)
+ f21(a, b)
+ f22(a, b)
+ f23(a, b)
+}
+
+// Constraint type inference may have to iterate.
+// Again, the order of the type parameters shouldn't matter.
+
+func g0[S ~[]E, M ~map[string]S, E any](m M) {}
+func g1[M ~map[string]S, S ~[]E, E any](m M) {}
+func g2[E any, S ~[]E, M ~map[string]S](m M) {}
+func g3[S ~[]E, E any, M ~map[string]S](m M) {}
+func g4[M ~map[string]S, E any, S ~[]E](m M) {}
+func g5[E any, M ~map[string]S, S ~[]E](m M) {}
+
+func _(m map[string][]byte) {
+ g0(m)
+ g1(m)
+ g2(m)
+ g3(m)
+ g4(m)
+ g5(m)
+}
+
+// Worst-case scenario.
+// There are 10 unknown type parameters. In each iteration of
+// constraint type inference we infer one more, from right to left.
+// Each iteration looks repeatedly at all 11 type parameters,
+// requiring a total of 10*11 = 110 iterations with the current
+// implementation. Pathological case.
+
+func h[K any, J ~*K, I ~*J, H ~*I, G ~*H, F ~*G, E ~*F, D ~*E, C ~*D, B ~*C, A ~*B](x A) {}
+
+func _(x **********int) {
+ h(x)
+}
+
+// Examples with channel constraints and tilde.
+
+func ch1[P chan<- int]() (_ P) { return } // core(P) == chan<- int (single type, no tilde)
+func ch2[P ~chan int]() { return } // core(P) == ~chan<- int (tilde)
+func ch3[P chan E, E any](E) { return } // core(P) == chan<- E (single type, no tilde)
+func ch4[P chan E | ~chan<- E, E any](E) { return } // core(P) == ~chan<- E (tilde)
+func ch5[P chan int | chan<- int]() { return } // core(P) == chan<- int (not a single type)
+
+func _() {
+ // P can be inferred as there's a single specific type and no tilde.
+ var _ chan int = ch1 /* ERRORx `cannot use ch1.*value of type chan<- int` */ ()
+ var _ chan<- int = ch1()
+
+ // P cannot be inferred as there's a tilde.
+ ch2 /* ERROR "cannot infer P" */ ()
+ type myChan chan int
+ ch2[myChan]()
+
+ // P can be inferred as there's a single specific type and no tilde.
+ var e int
+ ch3(e)
+
+ // P cannot be inferred as there's more than one specific type and a tilde.
+ ch4 /* ERROR "cannot infer P" */ (e)
+ _ = ch4[chan int]
+
+ // P cannot be inferred as there's more than one specific type.
+ ch5 /* ERROR "cannot infer P" */ ()
+ ch5[chan<- int]()
+}
+
+// test case from issue
+
+func equal[M1 ~map[K1]V1, M2 ~map[K2]V2, K1, K2 ~uint32, V1, V2 ~string](m1 M1, m2 M2) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[K2(k)]; !ok || V2(v1) != v2 {
+ return false
+ }
+ }
+ return true
+}
+
+func equalFixed[K1, K2 ~uint32, V1, V2 ~string](m1 map[K1]V1, m2 map[K2]V2) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[K2(k)]; !ok || v1 != V1(v2) {
+ return false
+ }
+ }
+ return true
+}
+
+type (
+ someNumericID uint32
+ someStringID string
+)
+
+func _() {
+ foo := map[uint32]string{10: "bar"}
+ bar := map[someNumericID]someStringID{10: "bar"}
+ equal(foo, bar)
+}
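// A minimal usage sketch for the equal test case above; not part of the
// upstream patch. equal and the ID types are copied from issue51229.go; the
// main function is an illustrative addition showing that all type arguments
// are inferred and that the key/value conversions allow comparing maps with
// different defined key and value types.
package main

import "fmt"

func equal[M1 ~map[K1]V1, M2 ~map[K2]V2, K1, K2 ~uint32, V1, V2 ~string](m1 M1, m2 M2) bool {
	if len(m1) != len(m2) {
		return false
	}
	for k, v1 := range m1 {
		if v2, ok := m2[K2(k)]; !ok || V2(v1) != v2 {
			return false
		}
	}
	return true
}

type (
	someNumericID uint32
	someStringID  string
)

func main() {
	foo := map[uint32]string{10: "bar"}
	bar := map[someNumericID]someStringID{10: "bar"}
	fmt.Println(equal(foo, bar)) // true
}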
diff --git a/src/internal/types/testdata/fixedbugs/issue51232.go b/src/internal/types/testdata/fixedbugs/issue51232.go
new file mode 100644
index 0000000..27693a3
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51232.go
@@ -0,0 +1,30 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type RC[RG any] interface {
+ ~[]RG
+}
+
+type Fn[RCT RC[RG], RG any] func(RCT)
+
+type F[RCT RC[RG], RG any] interface {
+ Fn() Fn /* ERROR "got 1 arguments" */ [RCT]
+}
+
+type concreteF[RCT RC[RG], RG any] struct {
+ makeFn func() Fn /* ERROR "got 1 arguments" */ [RCT]
+}
+
+func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR "got 1 arguments" */ [RCT] {
+ return c.makeFn()
+}
+
+func NewConcrete[RCT RC[RG], RG any](Rc RCT) F /* ERROR "got 1 arguments" */ [RCT] {
+ // TODO(rfindley): eliminate the duplicate error below.
+ return & /* ERRORx `cannot use .* as F\[RCT\]` */ concreteF /* ERROR "got 1 arguments" */ [RCT]{
+ makeFn: nil,
+ }
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51233.go b/src/internal/types/testdata/fixedbugs/issue51233.go
new file mode 100644
index 0000000..e2f97fc
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51233.go
@@ -0,0 +1,27 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// As of issue #51527, type-type inference has been disabled.
+
+type RC[RG any] interface {
+ ~[]RG
+}
+
+type Fn[RCT RC[RG], RG any] func(RCT)
+
+type FFn[RCT RC[RG], RG any] func() Fn /* ERROR "got 1 arguments" */ [RCT]
+
+type F[RCT RC[RG], RG any] interface {
+ Fn() Fn /* ERROR "got 1 arguments" */ [RCT]
+}
+
+type concreteF[RCT RC[RG], RG any] struct {
+ makeFn FFn /* ERROR "got 1 arguments" */ [RCT]
+}
+
+func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR "got 1 arguments" */ [RCT] {
+ return c.makeFn()
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51257.go b/src/internal/types/testdata/fixedbugs/issue51257.go
new file mode 100644
index 0000000..828612b
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51257.go
@@ -0,0 +1,46 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[_ comparable]() {}
+
+type S1 struct{ x int }
+type S2 struct{ x any }
+type S3 struct{ x [10]interface{ m() } }
+
+func _[P1 comparable, P2 S2]() {
+ _ = f[S1]
+ _ = f[S2]
+ _ = f[S3]
+
+ type L1 struct { x P1 }
+ type L2 struct { x P2 }
+ _ = f[L1]
+ _ = f[L2 /* ERROR "L2 does not satisfy comparable" */ ]
+}
+
+
+// example from issue
+
+type Set[T comparable] map[T]struct{}
+
+func NewSetFromSlice[T comparable](items []T) *Set[T] {
+ s := Set[T]{}
+
+ for _, item := range items {
+ s[item] = struct{}{}
+ }
+
+ return &s
+}
+
+type T struct{ x any }
+
+func main() {
+ NewSetFromSlice([]T{
+ {"foo"},
+ {5},
+ })
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51335.go b/src/internal/types/testdata/fixedbugs/issue51335.go
new file mode 100644
index 0000000..04dc04e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51335.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S1 struct{}
+type S2 struct{}
+
+func _[P *S1|*S2]() {
+ _= []P{{ /* ERROR "invalid composite literal element type P (no core type)" */ }}
+}
+
+func _[P *S1|S1]() {
+ _= []P{{ /* ERROR "invalid composite literal element type P (no core type)" */ }}
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51339.go b/src/internal/types/testdata/fixedbugs/issue51339.go
new file mode 100644
index 0000000..65c2134
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51339.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
+type T[P any, B *P] struct{}
+
+func (T /* ERROR "cannot use generic type" */ ) m0() {}
+
+// TODO(rfindley): eliminate the duplicate errors here.
+func (/* ERROR "got 1 type parameter, but receiver base type declares 2" */ T /* ERROR "got 1 arguments but 2 type parameters" */ [_]) m1() {}
+func (T[_, _]) m2() {}
+// TODO(gri) this error is unfortunate (issue #51343)
+func (T /* ERROR "got 3 arguments but 2 type parameters" */ [_, _, _]) m3() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue51360.go b/src/internal/types/testdata/fixedbugs/issue51360.go
new file mode 100644
index 0000000..1b9c45a
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51360.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ len.Println /* ERROR "cannot select on len" */
+ len.Println /* ERROR "cannot select on len" */ ()
+ _ = len.Println /* ERROR "cannot select on len" */
+ _ = len /* ERROR "cannot index len" */ [0]
+ _ = *len /* ERROR "cannot indirect len" */
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51376.go b/src/internal/types/testdata/fixedbugs/issue51376.go
new file mode 100644
index 0000000..0f2ab8e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51376.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Map map[string]int
+
+func f[M ~map[K]V, K comparable, V any](M) {}
+func g[M map[K]V, K comparable, V any](M) {}
+
+func _[M1 ~map[K]V, M2 map[K]V, K comparable, V any]() {
+ var m1 M1
+ f(m1)
+ g /* ERROR "M1 does not satisfy map[K]V" */ (m1) // M1 has tilde
+
+ var m2 M2
+ f(m2)
+ g(m2) // M2 does not have tilde
+
+ var m3 Map
+ f(m3)
+ g /* ERROR "Map does not satisfy map[string]int" */ (m3) // M in g does not have tilde
+}
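// A minimal sketch of the tilde distinction tested above; not part of the
// upstream patch. f, g, and Map mirror issue51376.go; the conversion to the
// exact map type is an illustrative workaround when the constraint lacks a tilde.
package p

type Map map[string]int

func f[M ~map[K]V, K comparable, V any](M) {}
func g[M map[K]V, K comparable, V any](M) {}

func _() {
	var m Map
	f(m)                 // ok: Map's underlying type is map[string]int
	// g(m)              // would not compile: Map is not exactly map[K]V
	g(map[string]int(m)) // ok after converting to the exact map type
}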
diff --git a/src/internal/types/testdata/fixedbugs/issue51386.go b/src/internal/types/testdata/fixedbugs/issue51386.go
new file mode 100644
index 0000000..ef62239
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51386.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type myString string
+
+func _[P ~string | ~[]byte | ~[]rune]() {
+ _ = P("")
+ const s myString = ""
+ _ = P(s)
+}
+
+func _[P myString]() {
+ _ = P("")
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51437.go b/src/internal/types/testdata/fixedbugs/issue51437.go
new file mode 100644
index 0000000..3762615
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51437.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T struct{}
+
+func (T) m() []int { return nil }
+
+func f(x T) {
+ for _, x := range func() []int {
+ return x.m() // x declared in parameter list of f
+ }() {
+ _ = x // x declared by range clause
+ }
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51472.go b/src/internal/types/testdata/fixedbugs/issue51472.go
new file mode 100644
index 0000000..6dfff05
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51472.go
@@ -0,0 +1,54 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[T comparable](x T) {
+ _ = x == x
+}
+
+func _[T interface{interface{comparable}}](x T) {
+ _ = x == x
+}
+
+func _[T interface{comparable; interface{comparable}}](x T) {
+ _ = x == x
+}
+
+func _[T interface{comparable; ~int}](x T) {
+ _ = x == x
+}
+
+func _[T interface{comparable; ~[]byte}](x T) {
+ _ = x /* ERROR "empty type set" */ == x
+}
+
+// TODO(gri) The error message here should be better. See issue #51525.
+func _[T interface{comparable; ~int; ~string}](x T) {
+ _ = x /* ERROR "empty type set" */ == x
+}
+
+// TODO(gri) The error message here should be better. See issue #51525.
+func _[T interface{~int; ~string}](x T) {
+ _ = x /* ERROR "empty type set" */ == x
+}
+
+func _[T interface{comparable; interface{~int}; interface{int|float64}}](x T) {
+ _ = x == x
+}
+
+func _[T interface{interface{comparable; ~int}; interface{~float64; comparable; m()}}](x T) {
+ _ = x /* ERROR "empty type set" */ == x
+}
+
+// test case from issue
+
+func f[T interface{comparable; []byte|string}](x T) {
+ _ = x == x
+}
+
+func _(s []byte) {
+ f /* ERROR "T (type []byte) does not satisfy interface{comparable; []byte | string}" */ (s) // TODO(gri) better error message (T's type set only contains string!)
+ _ = f[[ /* ERROR "does not satisfy" */ ]byte]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51509.go b/src/internal/types/testdata/fixedbugs/issue51509.go
new file mode 100644
index 0000000..4737a82
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51509.go
@@ -0,0 +1,7 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T /* ERROR "invalid recursive type" */ T.x
diff --git a/src/internal/types/testdata/fixedbugs/issue51525.go b/src/internal/types/testdata/fixedbugs/issue51525.go
new file mode 100644
index 0000000..af569c8
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51525.go
@@ -0,0 +1,20 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[T interface {
+ int
+ string
+}](x T) {
+ _ = x /* ERROR "empty type set" */ == x
+ _ = x /* ERROR "empty type set" */ + x
+ <-x /* ERROR "empty type set" */
+ x <- /* ERROR "empty type set" */ 0
+ close(x /* ERROR "empty type set" */)
+}
+
+func _[T interface{ int | []byte }](x T) {
+ _ = x /* ERROR "incomparable types in type set" */ == x
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51533.go b/src/internal/types/testdata/fixedbugs/issue51533.go
new file mode 100644
index 0000000..166e5fe
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51533.go
@@ -0,0 +1,20 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _(x any) {
+ switch x {
+ case 0:
+ fallthrough // ERROR "fallthrough statement out of place"
+ _ = x
+ default:
+ }
+
+ switch x.(type) {
+ case int:
+ fallthrough // ERROR "cannot fallthrough in type switch"
+ default:
+ }
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51578.go b/src/internal/types/testdata/fixedbugs/issue51578.go
new file mode 100644
index 0000000..d2e8a28
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51578.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ = (*interface /* ERROR "interface contains type constraints" */ {int})(nil)
+
+// abbreviated test case from issue
+
+type TypeSet interface{ int | string }
+
+func _() {
+ f((*TypeSet /* ERROR "interface contains type constraints" */)(nil))
+}
+
+func f(any) {}
\ No newline at end of file
diff --git a/src/internal/types/testdata/fixedbugs/issue51593.go b/src/internal/types/testdata/fixedbugs/issue51593.go
new file mode 100644
index 0000000..62b0a56
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51593.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P interface{ m(R) }, R any]() {}
+
+type T = interface { m(int) }
+
+func _() {
+ _ = f[T]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51607.go b/src/internal/types/testdata/fixedbugs/issue51607.go
new file mode 100644
index 0000000..298adad
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51607.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Interface types must be ignored during overlap test.
+
+type (
+ T1 interface{int}
+ T2 interface{~int}
+ T3 interface{T1 | bool | string}
+ T4 interface{T2 | ~bool | ~string}
+)
+
+type (
+ // overlap errors for non-interface terms
+ // (like the interface terms, but explicitly inlined)
+ _ interface{int | int /* ERROR "overlapping terms int and int" */ }
+ _ interface{int | ~ /* ERROR "overlapping terms ~int and int" */ int}
+ _ interface{~int | int /* ERROR "overlapping terms int and ~int" */ }
+ _ interface{~int | ~ /* ERROR "overlapping terms ~int and ~int" */ int}
+
+ _ interface{T1 | bool | string | T1 | bool /* ERROR "overlapping terms bool and bool" */ | string /* ERROR "overlapping terms string and string" */ }
+ _ interface{T1 | bool | string | T2 | ~ /* ERROR "overlapping terms ~bool and bool" */ bool | ~ /* ERROR "overlapping terms ~string and string" */ string}
+
+ // no errors for interface terms
+ _ interface{T1 | T1}
+ _ interface{T1 | T2}
+ _ interface{T2 | T1}
+ _ interface{T2 | T2}
+
+ _ interface{T3 | T3 | int}
+ _ interface{T3 | T4 | bool }
+ _ interface{T4 | T3 | string }
+ _ interface{T4 | T4 | float64 }
+)
+
+func _[_ T1 | bool | string | T1 | bool /* ERROR "overlapping terms" */ ]() {}
+func _[_ T1 | bool | string | T2 | ~ /* ERROR "overlapping terms" */ bool ]() {}
+func _[_ T2 | ~bool | ~string | T1 | bool /* ERROR "overlapping terms" */ ]() {}
+func _[_ T2 | ~bool | ~string | T2 | ~ /* ERROR "overlapping terms" */ bool ]() {}
+
+func _[_ T3 | T3 | int]() {}
+func _[_ T3 | T4 | bool]() {}
+func _[_ T4 | T3 | string]() {}
+func _[_ T4 | T4 | float64]() {}
+
+// test cases from issue
+
+type _ interface {
+ interface {bool | int} | interface {bool | string}
+}
+
+type _ interface {
+ interface {bool | int} ; interface {bool | string}
+}
+
+type _ interface {
+ interface {bool; int} ; interface {bool; string}
+}
+
+type _ interface {
+ interface {bool; int} | interface {bool; string}
+}
\ No newline at end of file
diff --git a/src/internal/types/testdata/fixedbugs/issue51610.go b/src/internal/types/testdata/fixedbugs/issue51610.go
new file mode 100644
index 0000000..d0bc1ac
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51610.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P int | float64 | complex128]() {
+ _ = map[P]int{1: 1, 1.0 /* ERROR "duplicate key 1" */ : 2, 1 /* ERROR "duplicate key (1 + 0i)" */ + 0i: 3}
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51616.go b/src/internal/types/testdata/fixedbugs/issue51616.go
new file mode 100644
index 0000000..e0efc9e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51616.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ C[T any] interface{~int; M() T}
+
+ _ C[bool]
+ _ comparable
+ _ interface {~[]byte | ~string}
+
+ // Alias type declarations may refer to "constraint" types
+ // like ordinary type declarations.
+ _ = C[bool]
+ _ = comparable
+ _ = interface {~[]byte | ~string}
+)
diff --git a/src/internal/types/testdata/fixedbugs/issue51658.go b/src/internal/types/testdata/fixedbugs/issue51658.go
new file mode 100644
index 0000000..36e2fdd
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51658.go
@@ -0,0 +1,43 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test checks syntax errors which differ between
+// go/parser and the syntax package.
+// TODO: consolidate eventually
+
+package p
+
+type F { // ERRORx "expected type|type declaration"
+ float64
+} // ERRORx "expected declaration|non-declaration statement"
+
+func _[T F | int](x T) {
+ _ = x == 0 // don't crash when recording type of 0
+}
+
+// test case from issue
+
+type FloatType { // ERRORx "expected type|type declaration"
+ float32 | float64
+} // ERRORx "expected declaration|non-declaration statement"
+
+type IntegerType interface {
+ int8 | int16 | int32 | int64 | int |
+ uint8 | uint16 | uint32 | uint64 | uint
+}
+
+type ComplexType interface {
+ complex64 | complex128
+}
+
+type Number interface {
+ FloatType | IntegerType | ComplexType
+}
+
+func GetDefaultNumber[T Number](value, defaultValue T) T {
+ if value == 0 {
+ return defaultValue
+ }
+ return value
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue51877.go b/src/internal/types/testdata/fixedbugs/issue51877.go
new file mode 100644
index 0000000..4b0e5bc
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue51877.go
@@ -0,0 +1,18 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S struct {
+ f1 int
+ f2 bool
+}
+
+var (
+ _ = S{0} /* ERROR "too few values in struct literal" */
+ _ = struct{ f1, f2 int }{0} /* ERROR "too few values in struct literal" */
+
+ _ = S{0, true, "foo" /* ERROR "too many values in struct literal" */}
+ _ = struct{ f1, f2 int }{0, 1, 2 /* ERROR "too many values in struct literal" */}
+)
diff --git a/src/internal/types/testdata/fixedbugs/issue52031.go b/src/internal/types/testdata/fixedbugs/issue52031.go
new file mode 100644
index 0000000..5b75b75
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue52031.go
@@ -0,0 +1,33 @@
+// -lang=go1.12
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type resultFlags uint
+
+// Example from #52031.
+//
+// The following shifts should not produce errors on Go < 1.13, as their
+// untyped constant operands are representable by type uint.
+const (
+ _ resultFlags = (1 << iota) / 2
+
+ reportEqual
+ reportUnequal
+ reportByIgnore
+ reportByMethod
+ reportByFunc
+ reportByCycle
+)
+
+// Invalid cases.
+var x int = 1
+var _ = (8 << x /* ERRORx `signed shift count .* requires go1.13 or later` */)
+
+const _ = (1 << 1.2 /* ERROR "truncated to uint" */)
+
+var y float64
+var _ = (1 << y /* ERROR "must be integer" */)
diff --git a/src/internal/types/testdata/fixedbugs/issue52401.go b/src/internal/types/testdata/fixedbugs/issue52401.go
new file mode 100644
index 0000000..ccc32d3
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue52401.go
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ const x = 0
+ x /* ERROR "cannot assign to x" */ += 1
+ x /* ERROR "cannot assign to x" */ ++
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue52529.go b/src/internal/types/testdata/fixedbugs/issue52529.go
new file mode 100644
index 0000000..de7b296
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue52529.go
@@ -0,0 +1,15 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Foo[P any] struct {
+ _ *Bar[P]
+}
+
+type Bar[Q any] Foo[Q]
+
+func (v *Bar[R]) M() {
+ _ = (*Foo[R])(v)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue52698.go b/src/internal/types/testdata/fixedbugs/issue52698.go
new file mode 100644
index 0000000..20f24f0
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue52698.go
@@ -0,0 +1,62 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// correctness check: ensure that cycles through generic instantiations are detected
+type T[P any] struct {
+ _ P
+}
+
+type S /* ERROR "invalid recursive type" */ struct {
+ _ T[S]
+}
+
+// simplified test 1
+
+var _ A1[A1[string]]
+
+type A1[P any] struct {
+ _ B1[P]
+}
+
+type B1[P any] struct {
+ _ P
+}
+
+// simplified test 2
+var _ B2[A2]
+
+type A2 struct {
+ _ B2[string]
+}
+
+type B2[P any] struct {
+ _ C2[P]
+}
+
+type C2[P any] struct {
+ _ P
+}
+
+// test case from issue
+type T23 interface {
+ ~struct {
+ Field0 T13[T15]
+ }
+}
+
+type T1[P1 interface {
+}] struct {
+ Field2 P1
+}
+
+type T13[P2 interface {
+}] struct {
+ Field2 T1[P2]
+}
+
+type T15 struct {
+ Field0 T13[string]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue52915.go b/src/internal/types/testdata/fixedbugs/issue52915.go
new file mode 100644
index 0000000..70dc664
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue52915.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+type T[P any] struct {
+ T /* ERROR "invalid recursive type" */ [P]
+}
+
+func _[P any]() {
+ _ = unsafe.Sizeof(T[int]{})
+ _ = unsafe.Sizeof(struct{ T[int] }{})
+
+ _ = unsafe.Sizeof(T[P]{})
+ _ = unsafe.Sizeof(struct{ T[P] }{})
+}
+
+// TODO(gri) This is a follow-on error due to T[int] being invalid.
+// We should try to avoid it.
+const _ = unsafe /* ERROR "not constant" */ .Sizeof(T[int]{})
diff --git a/src/internal/types/testdata/fixedbugs/issue53358.go b/src/internal/types/testdata/fixedbugs/issue53358.go
new file mode 100644
index 0000000..67c095c
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue53358.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type A struct{}
+
+func (*A) m() int { return 0 }
+
+var _ = A.m /* ERROR "invalid method expression A.m (needs pointer receiver (*A).m)" */ ()
+var _ = (*A).m(nil)
+
+type B struct{ A }
+
+var _ = B.m // ERROR "invalid method expression B.m (needs pointer receiver (*B).m)"
+var _ = (*B).m
+
+var _ = struct{ A }.m // ERROR "invalid method expression struct{A}.m (needs pointer receiver (*struct{A}).m)"
diff --git a/src/internal/types/testdata/fixedbugs/issue53650.go b/src/internal/types/testdata/fixedbugs/issue53650.go
new file mode 100644
index 0000000..4bba59e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue53650.go
@@ -0,0 +1,59 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import (
+ "reflect"
+ "testing"
+)
+
+type T1 int
+type T2 int
+
+func f[P T1 | T2, _ []P]() {}
+
+var _ = f[T1]
+
+// test case from issue
+
+type BaseT interface {
+ Type1 | Type2
+}
+type BaseType int
+type Type1 BaseType
+type Type2 BaseType // float64
+
+type ValueT[T BaseT] struct {
+ A1 T
+}
+
+func NewType1() *ValueT[Type1] {
+ r := NewT[Type1]()
+ return r
+}
+func NewType2() *ValueT[Type2] {
+ r := NewT[Type2]()
+ return r
+}
+
+func NewT[TBase BaseT, TVal ValueT[TBase]]() *TVal {
+ ret := TVal{}
+ return &ret
+}
+func TestGoType(t *testing.T) {
+ r1 := NewType1()
+ r2 := NewType2()
+ t.Log(r1, r2)
+ t.Log(reflect.TypeOf(r1), reflect.TypeOf(r2))
+ fooT1(r1.A1)
+ fooT2(r2.A1)
+}
+
+func fooT1(t1 Type1) {
+
+}
+func fooT2(t2 Type2) {
+
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue53692.go b/src/internal/types/testdata/fixedbugs/issue53692.go
new file mode 100644
index 0000000..dc1a76c
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue53692.go
@@ -0,0 +1,15 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Cache[K comparable, V any] interface{}
+
+type LRU[K comparable, V any] struct{}
+
+func WithLocking2[K comparable, V any](Cache[K, V]) {}
+
+func _() {
+ WithLocking2 /* ERROR "cannot infer V" */ [string](LRU[string, int]{})
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue54280.go b/src/internal/types/testdata/fixedbugs/issue54280.go
new file mode 100644
index 0000000..9465894
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue54280.go
@@ -0,0 +1,7 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+const C = 912_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_912_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_912_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_912 // ERROR "constant overflow"
diff --git a/src/internal/types/testdata/fixedbugs/issue54405.go b/src/internal/types/testdata/fixedbugs/issue54405.go
new file mode 100644
index 0000000..688fee1
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue54405.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that we don't see spurious errors for ==
+// for values with invalid types due to prior errors.
+
+package p
+
+var x struct {
+ f *NotAType /* ERROR "undefined" */
+}
+var _ = x.f == nil // no error expected here
+
+var y *NotAType /* ERROR "undefined" */
+var _ = y == nil // no error expected here
diff --git a/src/internal/types/testdata/fixedbugs/issue54424.go b/src/internal/types/testdata/fixedbugs/issue54424.go
new file mode 100644
index 0000000..ebfb83d
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue54424.go
@@ -0,0 +1,12 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P ~*T, T any]() {
+ var p P
+ var tp *T
+ tp = p // this assignment is valid
+ _ = tp
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue54942.go b/src/internal/types/testdata/fixedbugs/issue54942.go
new file mode 100644
index 0000000..b9a5cce
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue54942.go
@@ -0,0 +1,38 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import (
+ "context"
+ "database/sql"
+)
+
+type I interface {
+ m(int, int, *int, int)
+}
+
+type T struct{}
+
+func (_ *T) m(a, b, c, d int) {}
+
+var _ I = new /* ERROR "have m(int, int, int, int)\n\t\twant m(int, int, *int, int)" */ (T)
+
+// (slightly modified) test case from issue
+
+type Result struct {
+ Value string
+}
+
+type Executor interface {
+ Execute(context.Context, sql.Stmt, int, []sql.NamedArg, int) (Result, error)
+}
+
+type myExecutor struct{}
+
+func (_ *myExecutor) Execute(ctx context.Context, stmt sql.Stmt, maxrows int, args []sql.NamedArg, urgency int) (*Result, error) {
+ return &Result{}, nil
+}
+
+var ex Executor = new /* ERROR "have Execute(context.Context, sql.Stmt, int, []sql.NamedArg, int) (*Result, error)\n\t\twant Execute(context.Context, sql.Stmt, int, []sql.NamedArg, int) (Result, error)" */ (myExecutor)
diff --git a/src/internal/types/testdata/fixedbugs/issue56351.go b/src/internal/types/testdata/fixedbugs/issue56351.go
new file mode 100644
index 0000000..eee142c
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue56351.go
@@ -0,0 +1,11 @@
+// -lang=go1.20
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _(s []int) {
+ clear /* ERROR "clear requires go1.21 or later" */ (s)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue56425.go b/src/internal/types/testdata/fixedbugs/issue56425.go
new file mode 100644
index 0000000..d85733f
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue56425.go
@@ -0,0 +1,8 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+const s float32 = 0
+var _ = 0 << s
diff --git a/src/internal/types/testdata/fixedbugs/issue56665.go b/src/internal/types/testdata/fixedbugs/issue56665.go
new file mode 100644
index 0000000..1f787d0
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue56665.go
@@ -0,0 +1,30 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Example from the issue:
+type A[T any] interface {
+ *T
+}
+
+type B[T any] interface {
+ B /* ERROR "invalid recursive type" */ [*T]
+}
+
+type C[T any, U B[U]] interface {
+ *T
+}
+
+// Simplified reproducer:
+type X[T any] interface {
+ X /* ERROR "invalid recursive type" */ [*T]
+}
+
+var _ X[int]
+
+// A related example that doesn't go through interfaces.
+type A2[P any] [10]A2 /* ERROR "invalid recursive type" */ [*P]
+
+var _ A2[int]
diff --git a/src/internal/types/testdata/fixedbugs/issue57155.go b/src/internal/types/testdata/fixedbugs/issue57155.go
new file mode 100644
index 0000000..ec9fb2b
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue57155.go
@@ -0,0 +1,14 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P *Q, Q any](p P, q Q) {
+ func() {
+ _ = f[P]
+ f(p, q)
+ f[P](p, q)
+ f[P, Q](p, q)
+ }()
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue57160.go b/src/internal/types/testdata/fixedbugs/issue57160.go
new file mode 100644
index 0000000..446d019
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue57160.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _(x *int) {
+ _ = 0 < x // ERROR "invalid operation"
+ _ = x < 0 // ERROR "invalid operation"
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue57192.go b/src/internal/types/testdata/fixedbugs/issue57192.go
new file mode 100644
index 0000000..6c7894a
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue57192.go
@@ -0,0 +1,22 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type I1[T any] interface {
+ m1(T)
+}
+type I2[T any] interface {
+ I1[T]
+ m2(T)
+}
+
+var V1 I1[int]
+var V2 I2[int]
+
+func g[T any](I1[T]) {}
+func _() {
+ g(V1)
+ g(V2)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue57352.go b/src/internal/types/testdata/fixedbugs/issue57352.go
new file mode 100644
index 0000000..2b31700
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue57352.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type A interface {
+ a()
+}
+
+type AB interface {
+ A
+ b()
+}
+
+type AAB struct {
+ A
+ AB
+}
+
+var _ AB = AAB /* ERROR "ambiguous selector AAB.a" */ {}
diff --git a/src/internal/types/testdata/fixedbugs/issue57486.go b/src/internal/types/testdata/fixedbugs/issue57486.go
new file mode 100644
index 0000000..933eeb4
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue57486.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type C1 interface {
+ comparable
+}
+
+type C2 interface {
+ comparable
+ [2]any | int
+}
+
+func G1[T C1](t T) { _ = t == t }
+func G2[T C2](t T) { _ = t == t }
+
+func F1[V [2]any](v V) {
+ _ = G1[V /* ERROR "V does not satisfy comparable" */]
+ _ = G1[[2]any]
+ _ = G1[int]
+}
+
+func F2[V [2]any](v V) {
+ _ = G2[V /* ERROR "V does not satisfy C2" */]
+ _ = G2[[ /* ERROR "[2]any does not satisfy C2 (C2 mentions [2]any, but [2]any is not in the type set of C2)" */ 2]any]
+ _ = G2[int]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue57500.go b/src/internal/types/testdata/fixedbugs/issue57500.go
new file mode 100644
index 0000000..4a90d47
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue57500.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type C interface {
+ comparable
+ [2]any | int
+}
+
+func f[T C]() {}
+
+func _() {
+ _ = f[[ /* ERROR "[2]any does not satisfy C (C mentions [2]any, but [2]any is not in the type set of C)" */ 2]any]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue57522.go b/src/internal/types/testdata/fixedbugs/issue57522.go
new file mode 100644
index 0000000..d83e5b2
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue57522.go
@@ -0,0 +1,24 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// A simplified version of the code in the original report.
+type S[T any] struct{}
+var V = S[any]{}
+func (fs *S[T]) M(V.M /* ERROR "V.M is not a type" */) {}
+
+// Other minimal reproducers.
+type S1[T any] V1.M /* ERROR "V1.M is not a type" */
+type V1 = S1[any]
+
+type S2[T any] struct{}
+type V2 = S2[any]
+func (fs *S2[T]) M(x V2.M /* ERROR "V2.M is not a type" */ ) {}
+
+// The following still panics, as the selector is reached from check.expr
+// rather than check.typexpr. TODO(rfindley): fix this.
+// type X[T any] int
+// func (X[T]) M(x [X[int].M]int) {}
+
diff --git a/src/internal/types/testdata/fixedbugs/issue58611.go b/src/internal/types/testdata/fixedbugs/issue58611.go
new file mode 100644
index 0000000..1ff30f7
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue58611.go
@@ -0,0 +1,27 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import (
+ "sort"
+ "strings"
+)
+
+func f[int any](x int) {
+ x = 0 /* ERRORx "cannot use 0.*(as int.*with int declared at|type parameter)" */
+}
+
+// test case from issue
+
+type Set[T comparable] map[T]struct{}
+
+func (s *Set[string]) String() string {
+ keys := make([]string, 0, len(*s))
+ for k := range *s {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys /* ERRORx "cannot use keys.*with string declared at.*|type parameter" */ )
+ return strings /* ERROR "cannot use strings.Join" */ .Join(keys /* ERRORx "cannot use keys.*with string declared at.*|type parameter" */ , ",")
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue58612.go b/src/internal/types/testdata/fixedbugs/issue58612.go
new file mode 100644
index 0000000..db6a62d
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue58612.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ var x = new(T)
+ f[x /* ERROR "not a type" */ /* ERROR "use of .(type) outside type switch" */ .(type)]()
+}
+
+type T struct{}
+
+func f[_ any]() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue58671.go b/src/internal/types/testdata/fixedbugs/issue58671.go
new file mode 100644
index 0000000..fa964aa
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue58671.go
@@ -0,0 +1,20 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func g[P any](...P) P { var x P; return x }
+
+func _() {
+ var (
+ _ int = g(1, 2)
+ _ rune = g(1, 'a')
+ _ float64 = g(1, 'a', 2.3)
+ _ float64 = g('a', 2.3)
+ _ complex128 = g(2.3, 'a', 1i)
+ )
+ g(true, 'a' /* ERROR "mismatched types untyped bool and untyped rune (cannot infer P)" */)
+ g(1, "foo" /* ERROR "mismatched types untyped int and untyped string (cannot infer P)" */)
+ g(1, 2.3, "bar" /* ERROR "mismatched types untyped float and untyped string (cannot infer P)" */)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue58742.go b/src/internal/types/testdata/fixedbugs/issue58742.go
new file mode 100644
index 0000000..b649a49
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue58742.go
@@ -0,0 +1,18 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() (int, UndefinedType /* ERROR "undefined: UndefinedType" */ , string) {
+ return 0 // ERROR "not enough return values\n\thave (number)\n\twant (int, unknown type, string)"
+}
+
+func _() (int, UndefinedType /* ERROR "undefined: UndefinedType" */ ) {
+ return 0, 1, 2 // ERROR "too many return values\n\thave (number, number, number)\n\twant (int, unknown type)"
+}
+
+// test case from issue
+func _() UndefinedType /* ERROR "undefined: UndefinedType" */ {
+ return // ERROR "not enough return values\n\thave ()\n\twant (unknown type)"
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue59190.go b/src/internal/types/testdata/fixedbugs/issue59190.go
new file mode 100644
index 0000000..fd08303
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59190.go
@@ -0,0 +1,36 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+type E [1 << 30]complex128
+var a [1 << 30]E
+var _ = unsafe.Sizeof(a /* ERROR "too large" */ )
+
+var s struct {
+ _ [1 << 30]E
+ x int
+}
+var _ = unsafe.Offsetof(s /* ERROR "too large" */ .x)
+
+// Test case from issue (modified so it also triggers on 32-bit platforms).
+
+type A [1]int
+type S struct {
+ x A
+ y [1 << 30]A
+ z [1 << 30]struct{}
+}
+type T [1 << 30][1 << 30]S
+
+func _() {
+ var a A
+ var s S
+ var t T
+ _ = unsafe.Sizeof(a)
+ _ = unsafe.Sizeof(s)
+ _ = unsafe.Sizeof(t /* ERROR "too large" */ )
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue59207.go b/src/internal/types/testdata/fixedbugs/issue59207.go
new file mode 100644
index 0000000..59b36e2
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59207.go
@@ -0,0 +1,12 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+type E [1 << 32]byte
+
+var a [1 << 32]E // size of a must not overflow to 0
+var _ = unsafe.Sizeof(a /* ERROR "too large" */ )
diff --git a/src/internal/types/testdata/fixedbugs/issue59209.go b/src/internal/types/testdata/fixedbugs/issue59209.go
new file mode 100644
index 0000000..870ae52
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59209.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ _ [1 /* ERROR "invalid array length" */ << 100]int
+ _ [1.0]int
+ _ [1.1 /* ERROR "must be integer" */ ]int
+)
diff --git a/src/internal/types/testdata/fixedbugs/issue59338a.go b/src/internal/types/testdata/fixedbugs/issue59338a.go
new file mode 100644
index 0000000..34864dc
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59338a.go
@@ -0,0 +1,21 @@
+// -lang=go1.20
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func g[P any](P) {}
+func h[P, Q any](P) Q { panic(0) }
+
+var _ func(int) = g /* ERROR "implicitly instantiated function in assignment requires go1.21 or later" */
+var _ func(int) string = h[ /* ERROR "partially instantiated function in assignment requires go1.21 or later" */ int]
+
+func f1(func(int)) {}
+func f2(int, func(int)) {}
+
+func _() {
+ f1(g /* ERROR "implicitly instantiated function as argument requires go1.21 or later" */)
+ f2(0, g /* ERROR "implicitly instantiated function as argument requires go1.21 or later" */)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue59338b.go b/src/internal/types/testdata/fixedbugs/issue59338b.go
new file mode 100644
index 0000000..1a5530a
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59338b.go
@@ -0,0 +1,19 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func g[P any](P) {}
+func h[P, Q any](P) Q { panic(0) }
+
+var _ func(int) = g
+var _ func(int) string = h[int]
+
+func f1(func(int)) {}
+func f2(int, func(int)) {}
+
+func _() {
+ f1(g)
+ f2(0, g)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue59371.go b/src/internal/types/testdata/fixedbugs/issue59371.go
new file mode 100644
index 0000000..d5b4db6
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59371.go
@@ -0,0 +1,17 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var m map[int]int
+
+func _() {
+ _, ok /* ERROR "undefined: ok" */ = m[0] // must not crash
+}
+
+func _() {
+ var ok = undef /* ERROR "undefined: undef" */
+ x, ok := m[0] // must not crash
+ _, _ = x, ok
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue59639.go b/src/internal/types/testdata/fixedbugs/issue59639.go
new file mode 100644
index 0000000..1117668
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59639.go
@@ -0,0 +1,11 @@
+// -lang=go1.17
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P /* ERROR "requires go1.18" */ interface{}](P) {}
+
+var v func(int) = f /* ERROR "requires go1.18" */
diff --git a/src/internal/types/testdata/fixedbugs/issue59740.go b/src/internal/types/testdata/fixedbugs/issue59740.go
new file mode 100644
index 0000000..31cd03b
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59740.go
@@ -0,0 +1,25 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type F[T any] func(func(F[T]))
+
+func f(F[int]) {}
+func g[T any](F[T]) {}
+
+func _() {
+ g(f /* ERROR "type func(F[int]) of f does not match F[T] (cannot infer T)" */) // type inference/unification must not panic
+}
+
+// original test case from issue
+
+type List[T any] func(T, func(T, List[T]) T) T
+
+func nil[T any](n T, _ List[T]) T { return n }
+func cons[T any](h T, t List[T]) List[T] { return func(n T, f func(T, List[T]) T) T { return f(h, t) } }
+
+func nums[T any](t T) List[T] {
+ return cons(t, cons(t, nil /* ERROR "type func(n T, _ List[T]) T of nil[T] does not match inferred type List[T] for List[T]" */ [T]))
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue59848.go b/src/internal/types/testdata/fixedbugs/issue59848.go
new file mode 100644
index 0000000..51da747
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59848.go
@@ -0,0 +1,10 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T struct{}
+type I interface{ M() }
+var _ I = T /* ERROR "missing method M" */ {} // must not crash
+func (T) m() {}
diff --git a/src/internal/types/testdata/fixedbugs/issue59890.go b/src/internal/types/testdata/fixedbugs/issue59890.go
new file mode 100644
index 0000000..ed7afd9
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59890.go
@@ -0,0 +1,17 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() { g /* ERROR "cannot infer T" */ () }
+
+func g[T any]() (_ /* ERROR "cannot use _ as value or type" */, int) { panic(0) }
+
+// test case from issue
+
+var _ = append(f /* ERROR "cannot infer T" */ ()())
+
+func f[T any]() (_ /* ERROR "cannot use _" */, _ /* ERROR "cannot use _" */, int) {
+ panic("not implemented")
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue59953.go b/src/internal/types/testdata/fixedbugs/issue59953.go
new file mode 100644
index 0000000..b10ced7
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59953.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() { f(g) }
+func f[P any](P) {}
+func g[Q int](Q) {}
diff --git a/src/internal/types/testdata/fixedbugs/issue59956.go b/src/internal/types/testdata/fixedbugs/issue59956.go
new file mode 100644
index 0000000..646b50e
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59956.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1(func(int))
+func f2(func(int), func(string))
+func f3(func(int), func(string), func(float32))
+
+func g1[P any](P) {}
+
+func _() {
+ f1(g1)
+ f2(g1, g1)
+ f3(g1, g1, g1)
+}
+
+// More complex examples
+
+func g2[P any](P, P) {}
+func h3[P any](func(P), func(P), func() P) {}
+func h4[P, Q any](func(P), func(P, Q), func() Q, func(P, Q)) {}
+
+func r1() int { return 0 }
+
+func _() {
+ h3(g1, g1, r1)
+ h4(g1, g2, r1, g2)
+}
+
+// Variadic cases
+
+func f(func(int))
+func g[P any](P) {}
+
+func d[P any](...func(P)) {}
+
+func _() {
+ d /* ERROR "cannot infer P" */ ()
+ d(f)
+ d(f, g)
+ d(f, g, g)
+ d /* ERROR "cannot infer P" */ (g, g, g)
+ d(g, g, f)
+ d(g, f, g, f)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue59958.go b/src/internal/types/testdata/fixedbugs/issue59958.go
new file mode 100644
index 0000000..4a4b4dc
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue59958.go
@@ -0,0 +1,22 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f(func(int) string) {}
+
+func g2[P, Q any](P) Q { var q Q; return q }
+func g3[P, Q, R any](P) R { var r R; return r }
+
+func _() {
+ f(g2)
+ f(g2[int])
+ f(g2[int, string])
+
+ f(g3[int, bool])
+ f(g3[int, bool, string])
+
+ var _ func(int) string = g2
+ var _ func(int) string = g2[int]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue60346.go b/src/internal/types/testdata/fixedbugs/issue60346.go
new file mode 100644
index 0000000..6dc057b
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60346.go
@@ -0,0 +1,17 @@
+// -lang=go1.20
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func F[P any, Q *P](p P) {}
+
+var _ = F[int]
+
+func G[R any](func(R)) {}
+
+func _() {
+ G(F[int])
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue60377.go b/src/internal/types/testdata/fixedbugs/issue60377.go
new file mode 100644
index 0000000..b754f89
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60377.go
@@ -0,0 +1,88 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// The type parameter P is not used in interface T1.
+// T1 is a defined parameterized interface type which
+// can be assigned to any other interface with the same
+// methods. We cannot infer a type argument in this case
+// because any type would do.
+
+type T1[P any] interface{ m() }
+
+func g[P any](T1[P]) {}
+
+func _() {
+ var x T1[int]
+ g /* ERROR "cannot infer P" */ (x)
+ g[int](x) // int is ok for P
+ g[string](x) // string is also ok for P!
+}
+
+// This is analogous to the above example,
+// but uses two interface types of the same structure.
+
+type T2[P any] interface{ m() }
+
+func _() {
+ var x T2[int]
+ g /* ERROR "cannot infer P" */ (x)
+ g[int](x) // int is ok for P
+ g[string](x) // string is also ok for P!
+}
+
+// Analogous to the T2 example but using an unparameterized interface T3.
+
+type T3 interface{ m() }
+
+func _() {
+ var x T3
+ g /* ERROR "cannot infer P" */ (x)
+ g[int](x) // int is ok for P
+ g[string](x) // string is also ok for P!
+}
+
+// The type parameter P is not used in struct S.
+// S is a defined parameterized (non-interface) type which can only
+// be assigned to another type S with the same type argument.
+// Therefore we can infer a type argument in this case.
+
+type S[P any] struct{}
+
+func g4[P any](S[P]) {}
+
+func _() {
+ var x S[int]
+ g4(x) // we can infer int for P
+ g4[int](x) // int is the correct type argument
+ g4[string](x /* ERROR "cannot use x (variable of type S[int]) as S[string] value in argument to g4[string]" */)
+}
+
+// This is similar to the first example but here T1 is a component
+// of a func type. In this case types must match exactly: P must
+// match int.
+
+func g5[P any](func(T1[P])) {}
+
+func _() {
+ var f func(T1[int])
+ g5(f)
+ g5[int](f)
+ g5[string](f /* ERROR "cannot use f (variable of type func(T1[int])) as func(T1[string]) value in argument to g5[string]" */)
+}
+
+// This example would fail if we were to infer the type argument int for P
+// exactly because any type argument would be ok for the first argument.
+// Choosing the wrong type would cause the second argument to not match.
+
+type T[P any] interface{}
+
+func g6[P any](T[P], P) {}
+
+func _() {
+ var x T[int]
+ g6(x, 1.2)
+ g6(x, "")
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue60460.go b/src/internal/types/testdata/fixedbugs/issue60460.go
new file mode 100644
index 0000000..a9cb3d9
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60460.go
@@ -0,0 +1,88 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Simplified (representative) test case.
+
+func _() {
+ f(R1{})
+}
+
+func f[T any](R[T]) {}
+
+type R[T any] interface {
+ m(R[T])
+}
+
+type R1 struct{}
+
+func (R1) m(R[int]) {}
+
+// Test case from issue.
+
+func _() {
+ r := newTestRules()
+ NewSet(r)
+ r2 := newTestRules2()
+ NewSet(r2)
+}
+
+type Set[T any] struct {
+ rules Rules[T]
+}
+
+func NewSet[T any](rules Rules[T]) Set[T] {
+ return Set[T]{
+ rules: rules,
+ }
+}
+
+func (s Set[T]) Copy() Set[T] {
+ return NewSet(s.rules)
+}
+
+type Rules[T any] interface {
+ Hash(T) int
+ Equivalent(T, T) bool
+ SameRules(Rules[T]) bool
+}
+
+type testRules struct{}
+
+func newTestRules() Rules[int] {
+ return testRules{}
+}
+
+func (r testRules) Hash(val int) int {
+ return val % 16
+}
+
+func (r testRules) Equivalent(val1 int, val2 int) bool {
+ return val1 == val2
+}
+
+func (r testRules) SameRules(other Rules[int]) bool {
+ _, ok := other.(testRules)
+ return ok
+}
+
+type testRules2 struct{}
+
+func newTestRules2() Rules[string] {
+ return testRules2{}
+}
+
+func (r testRules2) Hash(val string) int {
+ return 16
+}
+
+func (r testRules2) Equivalent(val1 string, val2 string) bool {
+ return val1 == val2
+}
+
+func (r testRules2) SameRules(other Rules[string]) bool {
+ _, ok := other.(testRules2)
+ return ok
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue60500.go b/src/internal/types/testdata/fixedbugs/issue60500.go
new file mode 100644
index 0000000..be8ccaf
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60500.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ log("This is a test %v" /* ERROR "cannot use \"This is a test %v\" (untyped string constant) as bool value in argument to log" */, "foo")
+}
+
+func log(enabled bool, format string, args ...any)
diff --git a/src/internal/types/testdata/fixedbugs/issue60542.go b/src/internal/types/testdata/fixedbugs/issue60542.go
new file mode 100644
index 0000000..b536ddb
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60542.go
@@ -0,0 +1,12 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func Clip[S ~[]E, E any](s S) S {
+ return s
+}
+
+var versions func()
+var _ = Clip /* ERROR "S (type func()) does not satisfy ~[]E" */ (versions)
diff --git a/src/internal/types/testdata/fixedbugs/issue60556.go b/src/internal/types/testdata/fixedbugs/issue60556.go
new file mode 100644
index 0000000..77e5034
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60556.go
@@ -0,0 +1,19 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type I[T any] interface {
+ m(I[T])
+}
+
+type S[T any] struct{}
+
+func (S[T]) m(I[T]) {}
+
+func f[T I[E], E any](T) {}
+
+func _() {
+ f(S[int]{})
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue60562.go b/src/internal/types/testdata/fixedbugs/issue60562.go
new file mode 100644
index 0000000..c08bbf3
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60562.go
@@ -0,0 +1,61 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S[T any] struct{}
+
+func (S[T]) m(T) {}
+
+func f0[T any](chan S[T]) {}
+
+func _() {
+ var x chan interface{ m(int) }
+ f0(x /* ERROR "type chan interface{m(int)} of x does not match chan S[T] (cannot infer T)" */)
+}
+
+// variants of the theme
+
+func f1[T any]([]S[T]) {}
+
+func _() {
+ var x []interface{ m(int) }
+ f1(x /* ERROR "type []interface{m(int)} of x does not match []S[T] (cannot infer T)" */)
+}
+
+type I[T any] interface {
+ m(T)
+}
+
+func f2[T any](func(I[T])) {}
+
+func _() {
+ var x func(interface{ m(int) })
+ f2(x /* ERROR "type func(interface{m(int)}) of x does not match func(I[T]) (cannot infer T)" */)
+}
+
+func f3[T any](func(I[T])) {}
+
+func _() {
+ var x func(I[int])
+ f3(x) // but this is correct: I[T] and I[int] can be made identical with T == int
+}
+
+func f4[T any]([10]I[T]) {}
+
+func _() {
+ var x [10]interface{ I[int] }
+ f4(x /* ERROR "type [10]interface{I[int]} of x does not match [10]I[T] (cannot infer T)" */)
+}
+
+func f5[T any](I[T]) {}
+
+func _() {
+ var x interface {
+ m(int)
+ n()
+ }
+ f5(x)
+ f5[int](x) // ok
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue60688.go b/src/internal/types/testdata/fixedbugs/issue60688.go
new file mode 100644
index 0000000..38d90ee
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60688.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type String string
+
+func g[P any](P, string) {}
+
+// String and string are not identical and thus must not unify
+// (they are element types of the func type and therefore must
+// be identical to match).
+// The result is an error from type inference, rather than an
+// error from an assignment mismatch.
+var f func(int, String) = g // ERROR "type func(int, String) of variable in assignment does not match inferred type func(int, string) for func(P, string)"
diff --git a/src/internal/types/testdata/fixedbugs/issue60906.go b/src/internal/types/testdata/fixedbugs/issue60906.go
new file mode 100644
index 0000000..2744e89
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60906.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ var x int
+ var f func() []int
+ _ = f /* ERROR "cannot index f" */ [x]
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue60933.go b/src/internal/types/testdata/fixedbugs/issue60933.go
new file mode 100644
index 0000000..9b10237
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60933.go
@@ -0,0 +1,67 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import (
+ "io"
+ "os"
+)
+
+func g[T any](...T) {}
+
+// Interface and non-interface types do not match.
+func _() {
+ var file *os.File
+ g(file, io /* ERROR "type io.Writer of io.Discard does not match inferred type *os.File for T" */ .Discard)
+ g(file, os.Stdout)
+}
+
+func _() {
+ var a *os.File
+ var b any
+ g(a, a)
+ g(a, b /* ERROR "type any of b does not match inferred type *os.File for T" */)
+}
+
+var writer interface {
+ Write(p []byte) (n int, err error)
+}
+
+func _() {
+ var file *os.File
+ g(file, writer /* ERROR "type interface{Write(p []byte) (n int, err error)} of writer does not match inferred type *os.File for T" */)
+ g(writer, file /* ERROR "type *os.File of file does not match inferred type interface{Write(p []byte) (n int, err error)} for T" */)
+}
+
+// Different named interface types do not match.
+func _() {
+ g(io.ReadWriter(nil), io.ReadWriter(nil))
+ g(io.ReadWriter(nil), io /* ERROR "does not match" */ .Writer(nil))
+ g(io.Writer(nil), io /* ERROR "does not match" */ .ReadWriter(nil))
+}
+
+// Named and unnamed interface types match if they have the same methods.
+func _() {
+ g(io.Writer(nil), writer)
+ g(io.ReadWriter(nil), writer /* ERROR "does not match" */ )
+}
+
+// There must be no order dependency for named and unnamed interfaces.
+func f[T interface{ m(T) }](a, b T) {}
+
+type F interface {
+ m(F)
+}
+
+func _() {
+ var i F
+ var j interface {
+ m(F)
+ }
+
+ // order doesn't matter
+ f(i, j)
+ f(j, i)
+}
\ No newline at end of file
diff --git a/src/internal/types/testdata/fixedbugs/issue60946.go b/src/internal/types/testdata/fixedbugs/issue60946.go
new file mode 100644
index 0000000..a66254b
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue60946.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Tn interface{ m() }
+type T1 struct{}
+type T2 struct{}
+
+func (*T1) m() {}
+func (*T2) m() {}
+
+func g[P any](...P) {}
+
+func _() {
+ var t interface{ m() }
+ var tn Tn
+ var t1 *T1
+ var t2 *T2
+
+ // these are ok (interface types only)
+ g(t, t)
+ g(t, tn)
+ g(tn, t)
+ g(tn, tn)
+
+ // these are not ok (interface and non-interface types)
+ g(t, t1 /* ERROR "does not match" */)
+ g(t1, t /* ERROR "does not match" */)
+ g(tn, t1 /* ERROR "does not match" */)
+ g(t1, tn /* ERROR "does not match" */)
+
+ g(t, t1 /* ERROR "does not match" */, t2)
+ g(t1, t2 /* ERROR "does not match" */, t)
+ g(tn, t1 /* ERROR "does not match" */, t2)
+ g(t1, t2 /* ERROR "does not match" */, tn)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue61486.go b/src/internal/types/testdata/fixedbugs/issue61486.go
new file mode 100644
index 0000000..b12a800
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue61486.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _(s uint) {
+ _ = min(1 << s)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue61879.go b/src/internal/types/testdata/fixedbugs/issue61879.go
new file mode 100644
index 0000000..542bc2d
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue61879.go
@@ -0,0 +1,57 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "fmt"
+
+type Interface[T any] interface {
+ m(Interface[T])
+}
+
+func f[S []Interface[T], T any](S) {}
+
+func _() {
+ var s []Interface[int]
+ f(s) // panic here
+}
+
+// Larger example from issue
+
+type InterfaceA[T comparable] interface {
+ setData(string) InterfaceA[T]
+}
+
+type ImplA[T comparable] struct {
+ data string
+ args []any
+}
+
+func NewInterfaceA[T comparable](args ...any) InterfaceA[T] {
+ return &ImplA[T]{
+ data: fmt.Sprintf("%v", args...),
+ args: args,
+ }
+}
+
+func (k *ImplA[T]) setData(data string) InterfaceA[T] {
+ k.data = data
+ return k
+}
+
+func Foo[M ~map[InterfaceA[T]]V, T comparable, V any](m M) {
+ // DO SOMETHING HERE
+ return
+}
+
+func Bar() {
+ keys := make([]InterfaceA[int], 0, 10)
+ m := make(map[InterfaceA[int]]int)
+ for i := 0; i < 10; i++ {
+ keys = append(keys, NewInterfaceA[int](i))
+ m[keys[i]] = i
+ }
+
+ Foo(m) // panic here
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue61903.go b/src/internal/types/testdata/fixedbugs/issue61903.go
new file mode 100644
index 0000000..8a6fcd9
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue61903.go
@@ -0,0 +1,20 @@
+// -lang=go1.20
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[P any] interface{}
+
+func f1[P any](T[P]) {}
+func f2[P any](T[P], P) {}
+
+func _() {
+ var t T[int]
+ f1(t)
+
+ var s string
+ f2(t, s /* ERROR "type string of s does not match inferred type int for P" */)
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue62157.go b/src/internal/types/testdata/fixedbugs/issue62157.go
new file mode 100644
index 0000000..c44f921
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue62157.go
@@ -0,0 +1,128 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[T any](...T) T { var x T; return x }
+
+// Test case 1
+
+func _() {
+ var a chan string
+ var b <-chan string
+ f(a, b)
+ f(b, a)
+}
+
+// Test case 2
+
+type F[T any] func(T) bool
+
+func g[T any](T) F[<-chan T] { return nil }
+
+func f1[T any](T, F[T]) {}
+func f2[T any](F[T], T) {}
+
+func _() {
+ var ch chan string
+ f1(ch, g(""))
+ f2(g(""), ch)
+}
+
+// Test case 3: named and directional types combined
+
+func _() {
+ type namedA chan int
+ type namedB chan<- int
+
+ var a chan int
+ var A namedA
+ var b chan<- int
+ var B namedB
+
+ // Defined types win over channel types irrespective of channel direction.
+ f(A, b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */)
+ f(b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */, A)
+
+ f(a, b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */, A)
+ f(a, A, b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */)
+ f(b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */, A, a)
+ f(b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */, a, A)
+ f(A, a, b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */)
+ f(A, b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */, a)
+
+ // Unnamed directed channels win over bidirectional channels.
+ b = f(a, b)
+ b = f(b, a)
+
+ // Defined directed channels win over defined bidirectional channels.
+ A = f(A, a)
+ A = f(a, A)
+ B = f(B, b)
+ B = f(b, B)
+
+ f(a, b, B)
+ f(a, B, b)
+ f(b, B, a)
+ f(b, a, B)
+ f(B, a, b)
+ f(B, b, a)
+
+ // Differently named channel types conflict irrespective of channel direction.
+ f(A, B /* ERROR "type namedB of B does not match inferred type namedA for T" */)
+ f(B, A /* ERROR "type namedA of A does not match inferred type namedB for T" */)
+
+ // Ensure that all combinations of directional and
+ // bidirectional channels with a named directional
+ // channel lead to the correct (named) directional
+ // channel.
+ B = f(a, b)
+ B = f(a, B)
+ B = f(b, a)
+ B = f(B, a)
+
+ B = f(a, b, B)
+ B = f(a, B, b)
+ B = f(b, B, a)
+ B = f(b, a, B)
+ B = f(B, a, b)
+ B = f(B, b, a)
+
+ // verify type error
+ A = f /* ERROR "cannot use f(B, b, a) (value of type namedB) as namedA value in assignment" */ (B, b, a)
+}
+
+// Test case 4: some more combinations
+
+func _() {
+ type A chan int
+ type B chan int
+ type C = chan int
+ type D = chan<- int
+
+ var a A
+ var b B
+ var c C
+ var d D
+
+ f(a, b /* ERROR "type B of b does not match inferred type A for T" */, c)
+ f(c, a, b /* ERROR "type B of b does not match inferred type A for T" */)
+ f(a, b /* ERROR "type B of b does not match inferred type A for T" */, d)
+ f(d, a, b /* ERROR "type B of b does not match inferred type A for T" */)
+}
+
+// Simplified test case from issue
+
+type Matcher[T any] func(T) bool
+
+func Produces[T any](T) Matcher[<-chan T] { return nil }
+
+func Assert1[T any](Matcher[T], T) {}
+func Assert2[T any](T, Matcher[T]) {}
+
+func _() {
+ var ch chan string
+ Assert1(Produces(""), ch)
+ Assert2(ch, Produces(""))
+}
diff --git a/src/internal/types/testdata/fixedbugs/issue6977.go b/src/internal/types/testdata/fixedbugs/issue6977.go
new file mode 100644
index 0000000..c455d3a
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue6977.go
@@ -0,0 +1,82 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "io"
+
+// Alan's initial report.
+
+type I interface { f(); String() string }
+type J interface { g(); String() string }
+
+type IJ1 = interface { I; J }
+type IJ2 = interface { f(); g(); String() string }
+
+var _ = (*IJ1)(nil) == (*IJ2)(nil) // static assert that IJ1 and IJ2 are identical types
+
+// The canonical example.
+
+type ReadWriteCloser interface { io.ReadCloser; io.WriteCloser }
+
+// Some more cases.
+
+type M interface { m() }
+type M32 interface { m() int32 }
+type M64 interface { m() int64 }
+
+type U1 interface { m() }
+type U2 interface { m(); M }
+type U3 interface { M; m() }
+type U4 interface { M; M; M }
+type U5 interface { U1; U2; U3; U4 }
+
+type U6 interface { m(); m /* ERROR "duplicate method" */ () }
+type U7 interface { M32 /* ERROR "duplicate method" */ ; m() }
+type U8 interface { m(); M32 /* ERROR "duplicate method" */ }
+type U9 interface { M32; M64 /* ERROR "duplicate method" */ }
+
+// Verify that repeated embedding of the same interface(s)
+// eliminates duplicate methods early (rather than at the
+// end) to prevent exponential memory and time use.
+// Without early elimination, computing T29 may take dozens
+// of minutes.
+type (
+ T0 interface { m() }
+ T1 interface { T0; T0 }
+ T2 interface { T1; T1 }
+ T3 interface { T2; T2 }
+ T4 interface { T3; T3 }
+ T5 interface { T4; T4 }
+ T6 interface { T5; T5 }
+ T7 interface { T6; T6 }
+ T8 interface { T7; T7 }
+ T9 interface { T8; T8 }
+
+ T10 interface { T9; T9 }
+ T11 interface { T10; T10 }
+ T12 interface { T11; T11 }
+ T13 interface { T12; T12 }
+ T14 interface { T13; T13 }
+ T15 interface { T14; T14 }
+ T16 interface { T15; T15 }
+ T17 interface { T16; T16 }
+ T18 interface { T17; T17 }
+ T19 interface { T18; T18 }
+
+ T20 interface { T19; T19 }
+ T21 interface { T20; T20 }
+ T22 interface { T21; T21 }
+ T23 interface { T22; T22 }
+ T24 interface { T23; T23 }
+ T25 interface { T24; T24 }
+ T26 interface { T25; T25 }
+ T27 interface { T26; T26 }
+ T28 interface { T27; T27 }
+ T29 interface { T28; T28 }
+)
+
+// Verify that m is present.
+var x T29
+var _ = x.m
diff --git a/src/internal/types/testdata/spec/assignability.go b/src/internal/types/testdata/spec/assignability.go
new file mode 100644
index 0000000..6670870
--- /dev/null
+++ b/src/internal/types/testdata/spec/assignability.go
@@ -0,0 +1,264 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package assignability
+
+// See the end of this package for the declarations
+// of the types and variables used in these tests.
+
+// "x's type is identical to T"
+func _[TP any](X TP) {
+ b = b
+ a = a
+ l = l
+ s = s
+ p = p
+ f = f
+ i = i
+ m = m
+ c = c
+ d = d
+
+ B = B
+ A = A
+ L = L
+ S = S
+ P = P
+ F = F
+ I = I
+ M = M
+ C = C
+ D = D
+ X = X
+}
+
+// "x's type V and T have identical underlying types
+// and at least one of V or T is not a named type."
+// (here a named type is a type with a name)
+func _[TP1, TP2 Interface](X1 TP1, X2 TP2) {
+ b = B // ERRORx `cannot use B .* as int value`
+ a = A
+ l = L
+ s = S
+ p = P
+ f = F
+ i = I
+ m = M
+ c = C
+ d = D
+
+ B = b // ERRORx `cannot use b .* as Basic value`
+ A = a
+ L = l
+ S = s
+ P = p
+ F = f
+ I = i
+ M = m
+ C = c
+ D = d
+ X1 = i // ERRORx `cannot use i .* as TP1 value`
+ X1 = X2 // ERRORx `cannot use X2 .* as TP1 value`
+}
+
+// "T is an interface type and x implements T and T is not a type parameter"
+func _[TP Interface](X TP) {
+ i = d // ERROR "missing method m"
+ i = D
+ i = X
+ X = i // ERRORx `cannot use i .* as TP value`
+}
+
+// "x is a bidirectional channel value, T is a channel type, x's type V and T have identical element types, and at least one of V or T is not a named type"
+// (here a named type is a type with a name)
+type (
+ _SendChan = chan<- int
+ _RecvChan = <-chan int
+
+ SendChan _SendChan
+ RecvChan _RecvChan
+)
+
+func _[
+ _CC ~_Chan,
+ _SC ~_SendChan,
+ _RC ~_RecvChan,
+
+ CC Chan,
+ SC SendChan,
+ RC RecvChan,
+]() {
+ var (
+ _ _SendChan = c
+ _ _RecvChan = c
+ _ _Chan = c
+
+ _ _SendChan = C
+ _ _RecvChan = C
+ _ _Chan = C
+
+ _ SendChan = c
+ _ RecvChan = c
+ _ Chan = c
+
+ _ SendChan = C // ERRORx `cannot use C .* as SendChan value`
+ _ RecvChan = C // ERRORx `cannot use C .* as RecvChan value`
+ _ Chan = C
+ _ Chan = make /* ERRORx `cannot use make\(chan Basic\) .* as Chan value` */ (chan Basic)
+ )
+
+ var (
+ _ _CC = C // ERRORx `cannot use C .* as _CC value`
+ _ _SC = C // ERRORx `cannot use C .* as _SC value`
+ _ _RC = C // ERRORx `cannot use C .* as _RC value`
+
+ _ CC = _CC /* ERRORx `cannot use _CC\(nil\) .* as CC value` */ (nil)
+ _ SC = _CC /* ERRORx `cannot use _CC\(nil\) .* as SC value` */ (nil)
+ _ RC = _CC /* ERRORx `cannot use _CC\(nil\) .* as RC value` */ (nil)
+
+ _ CC = C // ERRORx `cannot use C .* as CC value`
+ _ SC = C // ERRORx `cannot use C .* as SC value`
+ _ RC = C // ERRORx `cannot use C .* as RC value`
+ )
+}
+
+// "x's type V is not a named type and T is a type parameter, and x is assignable to each specific type in T's type set."
+func _[
+ TP0 any,
+ TP1 ~_Chan,
+ TP2 ~chan int | ~chan byte,
+]() {
+ var (
+ _ TP0 = c // ERRORx `cannot use c .* as TP0 value`
+ _ TP0 = C // ERRORx `cannot use C .* as TP0 value`
+ _ TP1 = c
+ _ TP1 = C // ERRORx `cannot use C .* as TP1 value`
+ _ TP2 = c // ERRORx `.* cannot assign chan int to chan byte`
+ )
+}
+
+// "x's type V is a type parameter and T is not a named type, and values x' of each specific type in V's type set are assignable to T."
+func _[
+ TP0 Interface,
+ TP1 ~_Chan,
+ TP2 ~chan int | ~chan byte,
+](X0 TP0, X1 TP1, X2 TP2) {
+ i = X0
+ I = X0
+ c = X1
+ C = X1 // ERRORx `cannot use X1 .* as Chan value`
+ c = X2 // ERRORx `.* cannot assign chan byte \(in TP2\) to chan int`
+}
+
+// "x is the predeclared identifier nil and T is a pointer, function, slice, map, channel, or interface type"
+func _[TP Interface](X TP) {
+ b = nil // ERROR "cannot use nil"
+ a = nil // ERROR "cannot use nil"
+ l = nil
+ s = nil // ERROR "cannot use nil"
+ p = nil
+ f = nil
+ i = nil
+ m = nil
+ c = nil
+ d = nil // ERROR "cannot use nil"
+
+ B = nil // ERROR "cannot use nil"
+ A = nil // ERROR "cannot use nil"
+ L = nil
+ S = nil // ERROR "cannot use nil"
+ P = nil
+ F = nil
+ I = nil
+ M = nil
+ C = nil
+ D = nil // ERROR "cannot use nil"
+ X = nil // ERROR "cannot use nil"
+}
+
+// "x is an untyped constant representable by a value of type T"
+func _[
+ Int8 ~int8,
+ Int16 ~int16,
+ Int32 ~int32,
+ Int64 ~int64,
+ Int8_16 ~int8 | ~int16,
+](
+ i8 Int8,
+ i16 Int16,
+ i32 Int32,
+ i64 Int64,
+ i8_16 Int8_16,
+) {
+ b = 42
+ b = 42.0
+ // etc.
+
+ i8 = -1 << 7
+ i8 = 1<<7 - 1
+ i16 = -1 << 15
+ i16 = 1<<15 - 1
+ i32 = -1 << 31
+ i32 = 1<<31 - 1
+ i64 = -1 << 63
+ i64 = 1<<63 - 1
+
+ i8_16 = -1 << 7
+ i8_16 = 1<<7 - 1
+ i8_16 = - /* ERRORx `cannot use .* as Int8_16` */ 1 << 15
+ i8_16 = 1 /* ERRORx `cannot use .* as Int8_16` */ <<15 - 1
+}
+
+// proto-types for tests
+
+type (
+ _Basic = int
+ _Array = [10]int
+ _Slice = []int
+ _Struct = struct{ f int }
+ _Pointer = *int
+ _Func = func(x int) string
+ _Interface = interface{ m() int }
+ _Map = map[string]int
+ _Chan = chan int
+
+ Basic _Basic
+ Array _Array
+ Slice _Slice
+ Struct _Struct
+ Pointer _Pointer
+ Func _Func
+ Interface _Interface
+ Map _Map
+ Chan _Chan
+ Defined _Struct
+)
+
+func (Defined) m() int
+
+// proto-variables for tests
+
+var (
+ b _Basic
+ a _Array
+ l _Slice
+ s _Struct
+ p _Pointer
+ f _Func
+ i _Interface
+ m _Map
+ c _Chan
+ d _Struct
+
+ B Basic
+ A Array
+ L Slice
+ S Struct
+ P Pointer
+ F Func
+ I Interface
+ M Map
+ C Chan
+ D Defined
+)
diff --git a/src/internal/types/testdata/spec/comparable.go b/src/internal/types/testdata/spec/comparable.go
new file mode 100644
index 0000000..211fa11
--- /dev/null
+++ b/src/internal/types/testdata/spec/comparable.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[_ comparable]() {}
+func f2[_ interface{ comparable }]() {}
+
+type T interface{ m() }
+
+func _[P comparable, Q ~int, R any]() {
+ _ = f1[int]
+ _ = f1[T /* T does satisfy comparable */]
+ _ = f1[any /* any does satisfy comparable */]
+ _ = f1[P]
+ _ = f1[Q]
+ _ = f1[R /* ERROR "R does not satisfy comparable" */]
+
+ _ = f2[int]
+ _ = f2[T /* T does satisfy comparable */]
+ _ = f2[any /* any does satisfy comparable */]
+ _ = f2[P]
+ _ = f2[Q]
+ _ = f2[R /* ERROR "R does not satisfy comparable" */]
+}
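
Since Go 1.20, ordinary interface types such as T and any satisfy comparable (so the instantiations above compile), while a type parameter constrained only by any, like R, still does not. A hedged sketch of the usual shape this takes; the helper name is illustrative:

	package sketch

	// indexOf requires only that T satisfies comparable. Since Go 1.20 it can
	// also be instantiated with interface types such as any; comparing
	// incomparable dynamic values then panics at run time instead of failing
	// to compile.
	func indexOf[T comparable](xs []T, want T) int {
		for i, x := range xs {
			if x == want {
				return i
			}
		}
		return -1
	}

	var _ = indexOf[any] // legal since Go 1.20
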
diff --git a/src/internal/types/testdata/spec/comparable1.19.go b/src/internal/types/testdata/spec/comparable1.19.go
new file mode 100644
index 0000000..7a4b2a0
--- /dev/null
+++ b/src/internal/types/testdata/spec/comparable1.19.go
@@ -0,0 +1,28 @@
+// -lang=go1.19
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[_ comparable]() {}
+func f2[_ interface{ comparable }]() {}
+
+type T interface{ m() }
+
+func _[P comparable, Q ~int, R any]() {
+ _ = f1[int]
+ _ = f1[T /* ERROR "T to satisfy comparable requires go1.20 or later" */]
+ _ = f1[any /* ERROR "any to satisfy comparable requires go1.20 or later" */]
+ _ = f1[P]
+ _ = f1[Q]
+ _ = f1[R /* ERROR "R does not satisfy comparable" */]
+
+ _ = f2[int]
+ _ = f2[T /* ERROR "T to satisfy comparable requires go1.20 or later" */]
+ _ = f2[any /* ERROR "any to satisfy comparable requires go1.20 or later" */]
+ _ = f2[P]
+ _ = f2[Q]
+ _ = f2[R /* ERROR "R does not satisfy comparable" */]
+}
diff --git a/src/internal/types/testdata/spec/comparisons.go b/src/internal/types/testdata/spec/comparisons.go
new file mode 100644
index 0000000..492890e
--- /dev/null
+++ b/src/internal/types/testdata/spec/comparisons.go
@@ -0,0 +1,120 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package comparisons
+
+type (
+ B int // basic type representative
+ A [10]func()
+ L []byte
+ S struct{ f []byte }
+ P *S
+ F func()
+ I interface{}
+ M map[string]int
+ C chan int
+)
+
+var (
+ b B
+ a A
+ l L
+ s S
+ p P
+ f F
+ i I
+ m M
+ c C
+)
+
+func _() {
+ _ = nil == nil // ERROR "operator == not defined on untyped nil"
+ _ = b == b
+ _ = a /* ERROR "[10]func() cannot be compared" */ == a
+ _ = l /* ERROR "slice can only be compared to nil" */ == l
+ _ = s /* ERROR "struct containing []byte cannot be compared" */ == s
+ _ = p == p
+ _ = f /* ERROR "func can only be compared to nil" */ == f
+ _ = i == i
+ _ = m /* ERROR "map can only be compared to nil" */ == m
+ _ = c == c
+
+ _ = b == nil /* ERROR "mismatched types" */
+ _ = a == nil /* ERROR "mismatched types" */
+ _ = l == nil
+ _ = s == nil /* ERROR "mismatched types" */
+ _ = p == nil
+ _ = f == nil
+ _ = i == nil
+ _ = m == nil
+ _ = c == nil
+
+ _ = nil /* ERROR "operator < not defined on untyped nil" */ < nil
+ _ = b < b
+ _ = a /* ERROR "operator < not defined on array" */ < a
+ _ = l /* ERROR "operator < not defined on slice" */ < l
+ _ = s /* ERROR "operator < not defined on struct" */ < s
+ _ = p /* ERROR "operator < not defined on pointer" */ < p
+ _ = f /* ERROR "operator < not defined on func" */ < f
+ _ = i /* ERROR "operator < not defined on interface" */ < i
+ _ = m /* ERROR "operator < not defined on map" */ < m
+ _ = c /* ERROR "operator < not defined on chan" */ < c
+}
+
+func _[
+ B int,
+ A [10]func(),
+ L []byte,
+ S struct{ f []byte },
+ P *S,
+ F func(),
+ I interface{},
+ J comparable,
+ M map[string]int,
+ C chan int,
+](
+ b B,
+ a A,
+ l L,
+ s S,
+ p P,
+ f F,
+ i I,
+ j J,
+ m M,
+ c C,
+) {
+ _ = b == b
+ _ = a /* ERROR "incomparable types in type set" */ == a
+ _ = l /* ERROR "incomparable types in type set" */ == l
+ _ = s /* ERROR "incomparable types in type set" */ == s
+ _ = p == p
+ _ = f /* ERROR "incomparable types in type set" */ == f
+ _ = i /* ERROR "incomparable types in type set" */ == i
+ _ = j == j
+ _ = m /* ERROR "incomparable types in type set" */ == m
+ _ = c == c
+
+ _ = b == nil /* ERROR "mismatched types" */
+ _ = a == nil /* ERROR "mismatched types" */
+ _ = l == nil
+ _ = s == nil /* ERROR "mismatched types" */
+ _ = p == nil
+ _ = f == nil
+ _ = i == nil /* ERROR "mismatched types" */
+ _ = j == nil /* ERROR "mismatched types" */
+ _ = m == nil
+ _ = c == nil
+
+ _ = b < b
+ _ = a /* ERROR "type parameter A is not comparable with <" */ < a
+ _ = l /* ERROR "type parameter L is not comparable with <" */ < l
+ _ = s /* ERROR "type parameter S is not comparable with <" */ < s
+ _ = p /* ERROR "type parameter P is not comparable with <" */ < p
+ _ = f /* ERROR "type parameter F is not comparable with <" */ < f
+ _ = i /* ERROR "type parameter I is not comparable with <" */ < i
+ _ = j /* ERROR "type parameter J is not comparable with <" */ < j
+ _ = m /* ERROR "type parameter M is not comparable with <" */ < m
+ _ = c /* ERROR "type parameter C is not comparable with <" */ < c
+}
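
The generic half of this file checks that == on a type-parameter operand requires every type in its type set to be strictly comparable (hence the errors for A, L, S, F, I, and M above), and that < likewise requires every type in the set to be ordered. A minimal compiling sketch under such constraints:

	package sketch

	// Both functions compile because every type in the respective type sets
	// supports the operator: comparable guarantees ==, and ~int | ~string
	// guarantees <.
	func equal[T comparable](a, b T) bool    { return a == b }
	func less[T ~int | ~string](a, b T) bool { return a < b }
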
diff --git a/src/internal/types/testdata/spec/conversions.go b/src/internal/types/testdata/spec/conversions.go
new file mode 100644
index 0000000..081439e
--- /dev/null
+++ b/src/internal/types/testdata/spec/conversions.go
@@ -0,0 +1,208 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package conversions
+
+import "unsafe"
+
+// constant conversions
+
+func _[T ~byte]() T { return 255 }
+func _[T ~byte]() T { return 256 /* ERRORx `cannot use 256 .* as T value` */ }
+
+func _[T ~byte]() {
+ const _ = T /* ERRORx `T\(0\) .* is not constant` */ (0)
+ var _ T = 255
+ var _ T = 256 // ERRORx `cannot use 256 .* as T value`
+}
+
+func _[T ~string]() T { return T('a') }
+func _[T ~int | ~string]() T { return T('a') }
+func _[T ~byte | ~int | ~string]() T { return T(256 /* ERRORx `cannot convert 256 .* to type T` */) }
+
+// implicit conversions never convert to string
+func _[T ~string]() {
+ var _ string = 0 // ERRORx `cannot use .* as string value`
+ var _ T = 0 // ERRORx `cannot use .* as T value`
+}
+
+// failing const conversions of constants to type parameters report a cause
+func _[
+ T1 any,
+ T2 interface{ m() },
+ T3 ~int | ~float64 | ~bool,
+ T4 ~int | ~string,
+]() {
+ _ = T1(0 /* ERRORx `cannot convert 0 .* to type T1: T1 does not contain specific types` */)
+ _ = T2(1 /* ERRORx `cannot convert 1 .* to type T2: T2 does not contain specific types` */)
+ _ = T3(2 /* ERRORx `cannot convert 2 .* to type T3: cannot convert 2 .* to type bool \(in T3\)` */)
+ _ = T4(3.14 /* ERRORx `cannot convert 3.14 .* to type T4: cannot convert 3.14 .* to type int \(in T4\)` */)
+}
+
+// "x is assignable to T"
+// - tested via assignability tests
+
+// "x's type and T have identical underlying types if tags are ignored"
+
+func _[X ~int, T ~int](x X) T { return T(x) }
+func _[X struct {
+ f int "foo"
+}, T struct {
+ f int "bar"
+}](x X) T {
+ return T(x)
+}
+
+type Foo struct {
+ f int "foo"
+}
+type Bar struct {
+ f int "bar"
+}
+type Far struct{ f float64 }
+
+func _[X Foo, T Bar](x X) T { return T(x) }
+func _[X Foo | Bar, T Bar](x X) T { return T(x) }
+func _[X Foo, T Foo | Bar](x X) T { return T(x) }
+func _[X Foo, T Far](x X) T {
+ return T(x /* ERROR "cannot convert x (variable of type X constrained by Foo) to type T: cannot convert Foo (in X) to type Far (in T)" */)
+}
+
+// "x's type and T are unnamed pointer types and their pointer base types
+// have identical underlying types if tags are ignored"
+
+func _[X ~*Foo, T ~*Bar](x X) T { return T(x) }
+func _[X ~*Foo | ~*Bar, T ~*Bar](x X) T { return T(x) }
+func _[X ~*Foo, T ~*Foo | ~*Bar](x X) T { return T(x) }
+func _[X ~*Foo, T ~*Far](x X) T {
+ return T(x /* ERROR "cannot convert x (variable of type X constrained by ~*Foo) to type T: cannot convert *Foo (in X) to type *Far (in T)" */)
+}
+
+// Verify that the defined types in constraints are considered for the rule above.
+
+type (
+ B int
+ C int
+ X0 *B
+ T0 *C
+)
+
+func _(x X0) T0 { return T0(x /* ERROR "cannot convert" */) } // non-generic reference
+func _[X X0, T T0](x X) T { return T(x /* ERROR "cannot convert" */) }
+func _[T T0](x X0) T { return T(x /* ERROR "cannot convert" */) }
+func _[X X0](x X) T0 { return T0(x /* ERROR "cannot convert" */) }
+
+// "x's type and T are both integer or floating point types"
+
+func _[X Integer, T Integer](x X) T { return T(x) }
+func _[X Unsigned, T Integer](x X) T { return T(x) }
+func _[X Float, T Integer](x X) T { return T(x) }
+
+func _[X Integer, T Unsigned](x X) T { return T(x) }
+func _[X Unsigned, T Unsigned](x X) T { return T(x) }
+func _[X Float, T Unsigned](x X) T { return T(x) }
+
+func _[X Integer, T Float](x X) T { return T(x) }
+func _[X Unsigned, T Float](x X) T { return T(x) }
+func _[X Float, T Float](x X) T { return T(x) }
+
+func _[X, T Integer | Unsigned | Float](x X) T { return T(x) }
+func _[X, T Integer | ~string](x X) T {
+ return T(x /* ERROR "cannot convert x (variable of type X constrained by Integer | ~string) to type T: cannot convert string (in X) to type int (in T)" */)
+}
+
+// "x's type and T are both complex types"
+
+func _[X, T Complex](x X) T { return T(x) }
+func _[X, T Float | Complex](x X) T {
+ return T(x /* ERROR "cannot convert x (variable of type X constrained by Float | Complex) to type T: cannot convert float32 (in X) to type complex64 (in T)" */)
+}
+
+// "x is an integer or a slice of bytes or runes and T is a string type"
+
+type myInt int
+type myString string
+
+func _[T ~string](x int) T { return T(x) }
+func _[T ~string](x myInt) T { return T(x) }
+func _[X Integer](x X) string { return string(x) }
+func _[X Integer](x X) myString { return myString(x) }
+func _[X Integer](x X) *string {
+ return (*string)(x /* ERROR "cannot convert x (variable of type X constrained by Integer) to type *string: cannot convert int (in X) to type *string" */)
+}
+
+func _[T ~string](x []byte) T { return T(x) }
+func _[T ~string](x []rune) T { return T(x) }
+func _[X ~[]byte, T ~string](x X) T { return T(x) }
+func _[X ~[]rune, T ~string](x X) T { return T(x) }
+func _[X Integer | ~[]byte | ~[]rune, T ~string](x X) T { return T(x) }
+func _[X Integer | ~[]byte | ~[]rune, T ~*string](x X) T {
+ return T(x /* ERROR "cannot convert x (variable of type X constrained by Integer | ~[]byte | ~[]rune) to type T: cannot convert int (in X) to type *string (in T)" */)
+}
+
+// "x is a string and T is a slice of bytes or runes"
+
+func _[T ~[]byte](x string) T { return T(x) }
+func _[T ~[]rune](x string) T { return T(x) }
+func _[T ~[]rune](x *string) T {
+ return T(x /* ERROR "cannot convert x (variable of type *string) to type T: cannot convert *string to type []rune (in T)" */)
+}
+
+func _[X ~string, T ~[]byte](x X) T { return T(x) }
+func _[X ~string, T ~[]rune](x X) T { return T(x) }
+func _[X ~string, T ~[]byte | ~[]rune](x X) T { return T(x) }
+func _[X ~*string, T ~[]byte | ~[]rune](x X) T {
+ return T(x /* ERROR "cannot convert x (variable of type X constrained by ~*string) to type T: cannot convert *string (in X) to type []byte (in T)" */)
+}
+
+// package unsafe:
+// "any pointer or value of underlying type uintptr can be converted into a unsafe.Pointer"
+
+type myUintptr uintptr
+
+func _[X ~uintptr](x X) unsafe.Pointer { return unsafe.Pointer(x) }
+func _[T unsafe.Pointer](x myUintptr) T { return T(x) }
+func _[T unsafe.Pointer](x int64) T {
+ return T(x /* ERROR "cannot convert x (variable of type int64) to type T: cannot convert int64 to type unsafe.Pointer (in T)" */)
+}
+
+// "and vice versa"
+
+func _[T ~uintptr](x unsafe.Pointer) T { return T(x) }
+func _[X unsafe.Pointer](x X) uintptr { return uintptr(x) }
+func _[X unsafe.Pointer](x X) myUintptr { return myUintptr(x) }
+func _[X unsafe.Pointer](x X) int64 {
+ return int64(x /* ERROR "cannot convert x (variable of type X constrained by unsafe.Pointer) to type int64: cannot convert unsafe.Pointer (in X) to type int64" */)
+}
+
+// "x is a slice, T is an array or pointer-to-array type,
+// and the slice and array types have identical element types."
+
+func _[X ~[]E, T ~[10]E, E any](x X) T { return T(x) }
+func _[X ~[]E, T ~*[10]E, E any](x X) T { return T(x) }
+
+// ----------------------------------------------------------------------------
+// The following declarations can be replaced by the exported types of the
+// constraints package once all builders support importing interfaces with
+// type constraints.
+
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+type Integer interface {
+ Signed | Unsigned
+}
+
+type Float interface {
+ ~float32 | ~float64
+}
+
+type Complex interface {
+ ~complex64 | ~complex128
+}
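
Each group above checks that T(x) with type-parameter operands is valid exactly when the conversion is valid for every pair of types drawn from the two type sets. A small sketch of one accepted case (the byte-slice-to-string rule); the wrapper and type names are illustrative:

	package sketch

	// Every type in X's set is a byte slice and every type in T's set is a
	// string type, so each pairwise conversion is permitted.
	func bytesToString[X ~[]byte, T ~string](x X) T { return T(x) }

	type raw []byte
	type name string

	var _ name = bytesToString[raw, name](raw("gopher"))
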
diff --git a/src/internal/unsafeheader/unsafeheader.go b/src/internal/unsafeheader/unsafeheader.go
new file mode 100644
index 0000000..6d092c6
--- /dev/null
+++ b/src/internal/unsafeheader/unsafeheader.go
@@ -0,0 +1,37 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package unsafeheader contains header declarations for the Go runtime's slice
+// and string implementations.
+//
+// This package allows packages that cannot import "reflect" to use types that
+// are tested to be equivalent to reflect.SliceHeader and reflect.StringHeader.
+package unsafeheader
+
+import (
+ "unsafe"
+)
+
+// Slice is the runtime representation of a slice.
+// It cannot be used safely or portably and its representation may
+// change in a later release.
+//
+// Unlike reflect.SliceHeader, its Data field is sufficient to guarantee the
+// data it references will not be garbage collected.
+type Slice struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+}
+
+// String is the runtime representation of a string.
+// It cannot be used safely or portably and its representation may
+// change in a later release.
+//
+// Unlike reflect.StringHeader, its Data field is sufficient to guarantee the
+// data it references will not be garbage collected.
+type String struct {
+ Data unsafe.Pointer
+ Len int
+}
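
For orientation only, a sketch of reading a string header through this package; internal/... paths are importable only from within the standard library itself, so this cannot be used from ordinary modules:

	package sketch

	import (
		"internal/unsafeheader" // standard-library-internal import
		"unsafe"
	)

	// stringLen reads a string's length through its runtime header rather
	// than via len, purely to illustrate the layout of unsafeheader.String.
	func stringLen(s string) int {
		hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
		return hdr.Len
	}
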
diff --git a/src/internal/unsafeheader/unsafeheader_test.go b/src/internal/unsafeheader/unsafeheader_test.go
new file mode 100644
index 0000000..f3d1a9b
--- /dev/null
+++ b/src/internal/unsafeheader/unsafeheader_test.go
@@ -0,0 +1,100 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unsafeheader_test
+
+import (
+ "bytes"
+ "internal/unsafeheader"
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// TestTypeMatchesReflectType ensures that the names and layout of the
+// unsafeheader types match the corresponding Header types in the reflect
+// package.
+func TestTypeMatchesReflectType(t *testing.T) {
+ t.Run("Slice", func(t *testing.T) {
+ testHeaderMatchesReflect(t, unsafeheader.Slice{}, reflect.SliceHeader{})
+ })
+
+ t.Run("String", func(t *testing.T) {
+ testHeaderMatchesReflect(t, unsafeheader.String{}, reflect.StringHeader{})
+ })
+}
+
+func testHeaderMatchesReflect(t *testing.T, header, reflectHeader any) {
+ h := reflect.TypeOf(header)
+ rh := reflect.TypeOf(reflectHeader)
+
+ for i := 0; i < h.NumField(); i++ {
+ f := h.Field(i)
+ rf, ok := rh.FieldByName(f.Name)
+ if !ok {
+ t.Errorf("Field %d of %v is named %s, but no such field exists in %v", i, h, f.Name, rh)
+ continue
+ }
+ if !typeCompatible(f.Type, rf.Type) {
+ t.Errorf("%v.%s has type %v, but %v.%s has type %v", h, f.Name, f.Type, rh, rf.Name, rf.Type)
+ }
+ if f.Offset != rf.Offset {
+ t.Errorf("%v.%s has offset %d, but %v.%s has offset %d", h, f.Name, f.Offset, rh, rf.Name, rf.Offset)
+ }
+ }
+
+ if h.NumField() != rh.NumField() {
+ t.Errorf("%v has %d fields, but %v has %d", h, h.NumField(), rh, rh.NumField())
+ }
+ if h.Align() != rh.Align() {
+ t.Errorf("%v has alignment %d, but %v has alignment %d", h, h.Align(), rh, rh.Align())
+ }
+}
+
+var (
+ unsafePointerType = reflect.TypeOf(unsafe.Pointer(nil))
+ uintptrType = reflect.TypeOf(uintptr(0))
+)
+
+func typeCompatible(t, rt reflect.Type) bool {
+ return t == rt || (t == unsafePointerType && rt == uintptrType)
+}
+
+// TestWriteThroughHeader ensures that the headers in the unsafeheader package
+// can successfully mutate variables of the corresponding built-in types.
+//
+// This test is expected to fail under -race (which implicitly enables
+// -d=checkptr) if the runtime views the header types as incompatible with the
+// underlying built-in types.
+func TestWriteThroughHeader(t *testing.T) {
+ t.Run("Slice", func(t *testing.T) {
+ s := []byte("Hello, checkptr!")[:5]
+
+ var alias []byte
+ hdr := (*unsafeheader.Slice)(unsafe.Pointer(&alias))
+ hdr.Data = unsafe.Pointer(&s[0])
+ hdr.Cap = cap(s)
+ hdr.Len = len(s)
+
+ if !bytes.Equal(alias, s) {
+ t.Errorf("alias of %T(%q) constructed via Slice = %T(%q)", s, s, alias, alias)
+ }
+ if cap(alias) != cap(s) {
+ t.Errorf("alias of %T with cap %d has cap %d", s, cap(s), cap(alias))
+ }
+ })
+
+ t.Run("String", func(t *testing.T) {
+ s := "Hello, checkptr!"
+
+ var alias string
+ hdr := (*unsafeheader.String)(unsafe.Pointer(&alias))
+ hdr.Data = (*unsafeheader.String)(unsafe.Pointer(&s)).Data
+ hdr.Len = len(s)
+
+ if alias != s {
+ t.Errorf("alias of %q constructed via String = %q", s, alias)
+ }
+ })
+}
diff --git a/src/internal/xcoff/ar.go b/src/internal/xcoff/ar.go
new file mode 100644
index 0000000..9cbd50d
--- /dev/null
+++ b/src/internal/xcoff/ar.go
@@ -0,0 +1,226 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xcoff
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ SAIAMAG = 0x8
+ AIAFMAG = "`\n"
+ AIAMAG = "<aiaff>\n"
+ AIAMAGBIG = "<bigaf>\n"
+
+	// Sizes of the fixed parts of the file and member headers.
+ FL_HSZ_BIG = 0x80
+ AR_HSZ_BIG = 0x70
+)
+
+type bigarFileHeader struct {
+ Flmagic [SAIAMAG]byte // Archive magic string
+ Flmemoff [20]byte // Member table offset
+	Flgstoff   [20]byte      // 32-bit global symtab offset
+	Flgst64off [20]byte      // 64-bit global symtab offset
+ Flfstmoff [20]byte // First member offset
+ Fllstmoff [20]byte // Last member offset
+ Flfreeoff [20]byte // First member on free list offset
+}
+
+type bigarMemberHeader struct {
+ Arsize [20]byte // File member size
+ Arnxtmem [20]byte // Next member pointer
+ Arprvmem [20]byte // Previous member pointer
+ Ardate [12]byte // File member date
+ Aruid [12]byte // File member uid
+ Argid [12]byte // File member gid
+ Armode [12]byte // File member mode (octal)
+ Arnamlen [4]byte // File member name length
+	// _ar_nam is removed because it's easier to get the name without it.
+}
+
+// Archive represents an open AIX big archive.
+type Archive struct {
+ ArchiveHeader
+ Members []*Member
+
+ closer io.Closer
+}
+
+// ArchiveHeader holds information about a big archive file header
+type ArchiveHeader struct {
+ magic string
+}
+
+// Member represents a member of an AIX big archive.
+type Member struct {
+ MemberHeader
+ sr *io.SectionReader
+}
+
+// MemberHeader holds information about a big archive member
+type MemberHeader struct {
+ Name string
+ Size uint64
+}
+
+// OpenArchive opens the named archive using os.Open and prepares it for use
+// as an AIX big archive.
+func OpenArchive(name string) (*Archive, error) {
+ f, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ arch, err := NewArchive(f)
+ if err != nil {
+ f.Close()
+ return nil, err
+ }
+ arch.closer = f
+ return arch, nil
+}
+
+// Close closes the Archive.
+// If the Archive was created using NewArchive directly instead of OpenArchive,
+// Close has no effect.
+func (a *Archive) Close() error {
+ var err error
+ if a.closer != nil {
+ err = a.closer.Close()
+ a.closer = nil
+ }
+ return err
+}
+
+// NewArchive creates a new Archive for accessing an AIX big archive in an underlying reader.
+func NewArchive(r io.ReaderAt) (*Archive, error) {
+ parseDecimalBytes := func(b []byte) (int64, error) {
+ return strconv.ParseInt(strings.TrimSpace(string(b)), 10, 64)
+ }
+ sr := io.NewSectionReader(r, 0, 1<<63-1)
+
+ // Read File Header
+ var magic [SAIAMAG]byte
+ if _, err := sr.ReadAt(magic[:], 0); err != nil {
+ return nil, err
+ }
+
+ arch := new(Archive)
+ switch string(magic[:]) {
+ case AIAMAGBIG:
+ arch.magic = string(magic[:])
+ case AIAMAG:
+ return nil, fmt.Errorf("small AIX archive not supported")
+ default:
+ return nil, fmt.Errorf("unrecognised archive magic: 0x%x", magic)
+ }
+
+ var fhdr bigarFileHeader
+ if _, err := sr.Seek(0, io.SeekStart); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(sr, binary.BigEndian, &fhdr); err != nil {
+ return nil, err
+ }
+
+ off, err := parseDecimalBytes(fhdr.Flfstmoff[:])
+ if err != nil {
+ return nil, fmt.Errorf("error parsing offset of first member in archive header(%q); %v", fhdr, err)
+ }
+
+ if off == 0 {
+ // Occurs if the archive is empty.
+ return arch, nil
+ }
+
+ lastoff, err := parseDecimalBytes(fhdr.Fllstmoff[:])
+ if err != nil {
+		return nil, fmt.Errorf("error parsing offset of last member in archive header(%q); %v", fhdr, err)
+ }
+
+ // Read members
+ for {
+ // Read Member Header
+ // The member header is normally 2 bytes larger. But it's easier
+ // to read the name if the header is read without _ar_nam.
+ // However, AIAFMAG must be read afterward.
+ if _, err := sr.Seek(off, io.SeekStart); err != nil {
+ return nil, err
+ }
+
+ var mhdr bigarMemberHeader
+ if err := binary.Read(sr, binary.BigEndian, &mhdr); err != nil {
+ return nil, err
+ }
+
+ member := new(Member)
+ arch.Members = append(arch.Members, member)
+
+ size, err := parseDecimalBytes(mhdr.Arsize[:])
+ if err != nil {
+ return nil, fmt.Errorf("error parsing size in member header(%q); %v", mhdr, err)
+ }
+ member.Size = uint64(size)
+
+ // Read name
+ namlen, err := parseDecimalBytes(mhdr.Arnamlen[:])
+ if err != nil {
+ return nil, fmt.Errorf("error parsing name length in member header(%q); %v", mhdr, err)
+ }
+ name := make([]byte, namlen)
+ if err := binary.Read(sr, binary.BigEndian, name); err != nil {
+ return nil, err
+ }
+ member.Name = string(name)
+
+ fileoff := off + AR_HSZ_BIG + namlen
+ if fileoff&1 != 0 {
+ fileoff++
+ if _, err := sr.Seek(1, io.SeekCurrent); err != nil {
+ return nil, err
+ }
+ }
+
+ // Read AIAFMAG string
+ var fmag [2]byte
+ if err := binary.Read(sr, binary.BigEndian, &fmag); err != nil {
+ return nil, err
+ }
+ if string(fmag[:]) != AIAFMAG {
+ return nil, fmt.Errorf("AIAFMAG not found after member header")
+ }
+
+ fileoff += 2 // Add the two bytes of AIAFMAG
+ member.sr = io.NewSectionReader(sr, fileoff, size)
+
+ if off == lastoff {
+ break
+ }
+ off, err = parseDecimalBytes(mhdr.Arnxtmem[:])
+ if err != nil {
+			return nil, fmt.Errorf("error parsing offset of next member in member header(%q); %v", mhdr, err)
+ }
+
+ }
+
+ return arch, nil
+}
+
+// GetFile returns the XCOFF file defined by the member name.
+// FIXME: This doesn't work if an archive has two members with the same
+// name, which can occur if an archive contains both 32-bit and 64-bit files.
+func (arch *Archive) GetFile(name string) (*File, error) {
+ for _, mem := range arch.Members {
+ if mem.Name == name {
+ return NewFile(mem.sr)
+ }
+ }
+ return nil, fmt.Errorf("unknown member %s in archive", name)
+}
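
A hedged sketch of how this archive API fits together; the archive path is hypothetical, and internal/xcoff is importable only from within the standard library:

	package main

	import (
		"fmt"
		"internal/xcoff" // standard-library-internal import
		"log"
	)

	func main() {
		arch, err := xcoff.OpenArchive("lib/libgo.a") // hypothetical big archive
		if err != nil {
			log.Fatal(err)
		}
		defer arch.Close()

		for _, m := range arch.Members {
			fmt.Printf("%-20s %8d bytes\n", m.Name, m.Size)
			f, err := arch.GetFile(m.Name) // parse the member as an XCOFF object
			if err != nil {
				log.Fatal(err)
			}
			fmt.Printf("    target machine: %#o\n", f.TargetMachine)
		}
	}
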
diff --git a/src/internal/xcoff/ar_test.go b/src/internal/xcoff/ar_test.go
new file mode 100644
index 0000000..83333d6
--- /dev/null
+++ b/src/internal/xcoff/ar_test.go
@@ -0,0 +1,79 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xcoff
+
+import (
+ "reflect"
+ "testing"
+)
+
+type archiveTest struct {
+ file string
+ hdr ArchiveHeader
+ members []*MemberHeader
+ membersFileHeader []FileHeader
+}
+
+var archTest = []archiveTest{
+ {
+ "testdata/bigar-ppc64",
+ ArchiveHeader{AIAMAGBIG},
+ []*MemberHeader{
+ {"printbye.o", 836},
+ {"printhello.o", 860},
+ },
+ []FileHeader{
+ {U64_TOCMAGIC},
+ {U64_TOCMAGIC},
+ },
+ },
+ {
+ "testdata/bigar-empty",
+ ArchiveHeader{AIAMAGBIG},
+ []*MemberHeader{},
+ []FileHeader{},
+ },
+}
+
+func TestOpenArchive(t *testing.T) {
+ for i := range archTest {
+ tt := &archTest[i]
+ arch, err := OpenArchive(tt.file)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if !reflect.DeepEqual(arch.ArchiveHeader, tt.hdr) {
+ t.Errorf("open archive %s:\n\thave %#v\n\twant %#v\n", tt.file, arch.ArchiveHeader, tt.hdr)
+ continue
+ }
+
+ for i, mem := range arch.Members {
+ if i >= len(tt.members) {
+ break
+ }
+ have := &mem.MemberHeader
+ want := tt.members[i]
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("open %s, member %d:\n\thave %#v\n\twant %#v\n", tt.file, i, have, want)
+ }
+
+ f, err := arch.GetFile(mem.Name)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if !reflect.DeepEqual(f.FileHeader, tt.membersFileHeader[i]) {
+ t.Errorf("open %s, member file header %d:\n\thave %#v\n\twant %#v\n", tt.file, i, f.FileHeader, tt.membersFileHeader[i])
+ }
+ }
+ tn := len(tt.members)
+ an := len(arch.Members)
+ if tn != an {
+ t.Errorf("open %s: len(Members) = %d, want %d", tt.file, an, tn)
+ }
+
+ }
+}
diff --git a/src/internal/xcoff/file.go b/src/internal/xcoff/file.go
new file mode 100644
index 0000000..9135822
--- /dev/null
+++ b/src/internal/xcoff/file.go
@@ -0,0 +1,697 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xcoff implements access to XCOFF (Extended Common Object File Format) files.
+package xcoff
+
+import (
+ "debug/dwarf"
+ "encoding/binary"
+ "fmt"
+ "internal/saferio"
+ "io"
+ "os"
+ "strings"
+)
+
+// SectionHeader holds information about an XCOFF section header.
+type SectionHeader struct {
+ Name string
+ VirtualAddress uint64
+ Size uint64
+ Type uint32
+ Relptr uint64
+ Nreloc uint32
+}
+
+type Section struct {
+ SectionHeader
+ Relocs []Reloc
+ io.ReaderAt
+ sr *io.SectionReader
+}
+
+// AuxiliaryCSect holds information about an XCOFF symbol in an AUX_CSECT entry.
+type AuxiliaryCSect struct {
+ Length int64
+ StorageMappingClass int
+ SymbolType int
+}
+
+// AuxiliaryFcn holds information about an XCOFF symbol in an AUX_FCN entry.
+type AuxiliaryFcn struct {
+ Size int64
+}
+
+type Symbol struct {
+ Name string
+ Value uint64
+ SectionNumber int
+ StorageClass int
+ AuxFcn AuxiliaryFcn
+ AuxCSect AuxiliaryCSect
+}
+
+type Reloc struct {
+ VirtualAddress uint64
+ Symbol *Symbol
+ Signed bool
+ InstructionFixed bool
+ Length uint8
+ Type uint8
+}
+
+// ImportedSymbol holds information about an imported XCOFF symbol.
+type ImportedSymbol struct {
+ Name string
+ Library string
+}
+
+// FileHeader holds information about an XCOFF file header.
+type FileHeader struct {
+ TargetMachine uint16
+}
+
+// A File represents an open XCOFF file.
+type File struct {
+ FileHeader
+ Sections []*Section
+ Symbols []*Symbol
+ StringTable []byte
+ LibraryPaths []string
+
+ closer io.Closer
+}
+
+// Open opens the named file using os.Open and prepares it for use as an XCOFF binary.
+func Open(name string) (*File, error) {
+ f, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ ff, err := NewFile(f)
+ if err != nil {
+ f.Close()
+ return nil, err
+ }
+ ff.closer = f
+ return ff, nil
+}
+
+// Close closes the File.
+// If the File was created using NewFile directly instead of Open,
+// Close has no effect.
+func (f *File) Close() error {
+ var err error
+ if f.closer != nil {
+ err = f.closer.Close()
+ f.closer = nil
+ }
+ return err
+}
+
+// Section returns the first section with the given name, or nil if no such
+// section exists.
+// XCOFF limits section names to 8 bytes. Longer names like .gosymtab may be
+// truncated, but this method will still find them.
+func (f *File) Section(name string) *Section {
+ for _, s := range f.Sections {
+ if s.Name == name || (len(name) > 8 && s.Name == name[:8]) {
+ return s
+ }
+ }
+ return nil
+}
+
+// SectionByType returns the first section in f with the
+// given type, or nil if there is no such section.
+func (f *File) SectionByType(typ uint32) *Section {
+ for _, s := range f.Sections {
+ if s.Type == typ {
+ return s
+ }
+ }
+ return nil
+}
+
+// cstring converts ASCII byte sequence b to string.
+// It stops once it finds 0 or reaches end of b.
+func cstring(b []byte) string {
+ var i int
+ for i = 0; i < len(b) && b[i] != 0; i++ {
+ }
+ return string(b[:i])
+}
+
+// getString extracts a string from an XCOFF string table.
+func getString(st []byte, offset uint32) (string, bool) {
+ if offset < 4 || int(offset) >= len(st) {
+ return "", false
+ }
+ return cstring(st[offset:]), true
+}
+
+// NewFile creates a new File for accessing an XCOFF binary in an underlying reader.
+func NewFile(r io.ReaderAt) (*File, error) {
+ sr := io.NewSectionReader(r, 0, 1<<63-1)
+ // Read XCOFF target machine
+ var magic uint16
+ if err := binary.Read(sr, binary.BigEndian, &magic); err != nil {
+ return nil, err
+ }
+ if magic != U802TOCMAGIC && magic != U64_TOCMAGIC {
+ return nil, fmt.Errorf("unrecognised XCOFF magic: 0x%x", magic)
+ }
+
+ f := new(File)
+ f.TargetMachine = magic
+
+ // Read XCOFF file header
+ if _, err := sr.Seek(0, io.SeekStart); err != nil {
+ return nil, err
+ }
+ var nscns uint16
+ var symptr uint64
+ var nsyms uint32
+ var opthdr uint16
+ var hdrsz int
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ fhdr := new(FileHeader32)
+ if err := binary.Read(sr, binary.BigEndian, fhdr); err != nil {
+ return nil, err
+ }
+ nscns = fhdr.Fnscns
+ symptr = uint64(fhdr.Fsymptr)
+ nsyms = fhdr.Fnsyms
+ opthdr = fhdr.Fopthdr
+ hdrsz = FILHSZ_32
+ case U64_TOCMAGIC:
+ fhdr := new(FileHeader64)
+ if err := binary.Read(sr, binary.BigEndian, fhdr); err != nil {
+ return nil, err
+ }
+ nscns = fhdr.Fnscns
+ symptr = fhdr.Fsymptr
+ nsyms = fhdr.Fnsyms
+ opthdr = fhdr.Fopthdr
+ hdrsz = FILHSZ_64
+ }
+
+ if symptr == 0 || nsyms <= 0 {
+ return nil, fmt.Errorf("no symbol table")
+ }
+
+ // Read string table (located right after symbol table).
+ offset := symptr + uint64(nsyms)*SYMESZ
+ if _, err := sr.Seek(int64(offset), io.SeekStart); err != nil {
+ return nil, err
+ }
+ // The first 4 bytes contain the length (in bytes).
+ var l uint32
+ if err := binary.Read(sr, binary.BigEndian, &l); err != nil {
+ return nil, err
+ }
+ if l > 4 {
+ st, err := saferio.ReadDataAt(sr, uint64(l), int64(offset))
+ if err != nil {
+ return nil, err
+ }
+ f.StringTable = st
+ }
+
+ // Read section headers
+ if _, err := sr.Seek(int64(hdrsz)+int64(opthdr), io.SeekStart); err != nil {
+ return nil, err
+ }
+ c := saferio.SliceCap((**Section)(nil), uint64(nscns))
+ if c < 0 {
+ return nil, fmt.Errorf("too many XCOFF sections (%d)", nscns)
+ }
+ f.Sections = make([]*Section, 0, c)
+ for i := 0; i < int(nscns); i++ {
+ var scnptr uint64
+ s := new(Section)
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ shdr := new(SectionHeader32)
+ if err := binary.Read(sr, binary.BigEndian, shdr); err != nil {
+ return nil, err
+ }
+ s.Name = cstring(shdr.Sname[:])
+ s.VirtualAddress = uint64(shdr.Svaddr)
+ s.Size = uint64(shdr.Ssize)
+ scnptr = uint64(shdr.Sscnptr)
+ s.Type = shdr.Sflags
+ s.Relptr = uint64(shdr.Srelptr)
+ s.Nreloc = uint32(shdr.Snreloc)
+ case U64_TOCMAGIC:
+ shdr := new(SectionHeader64)
+ if err := binary.Read(sr, binary.BigEndian, shdr); err != nil {
+ return nil, err
+ }
+ s.Name = cstring(shdr.Sname[:])
+ s.VirtualAddress = shdr.Svaddr
+ s.Size = shdr.Ssize
+ scnptr = shdr.Sscnptr
+ s.Type = shdr.Sflags
+ s.Relptr = shdr.Srelptr
+ s.Nreloc = shdr.Snreloc
+ }
+ r2 := r
+ if scnptr == 0 { // .bss must have all 0s
+ r2 = zeroReaderAt{}
+ }
+ s.sr = io.NewSectionReader(r2, int64(scnptr), int64(s.Size))
+ s.ReaderAt = s.sr
+ f.Sections = append(f.Sections, s)
+ }
+
+ // Symbol map needed by relocation
+ var idxToSym = make(map[int]*Symbol)
+
+ // Read symbol table
+ if _, err := sr.Seek(int64(symptr), io.SeekStart); err != nil {
+ return nil, err
+ }
+ f.Symbols = make([]*Symbol, 0)
+ for i := 0; i < int(nsyms); i++ {
+ var numaux int
+ var ok, needAuxFcn bool
+ sym := new(Symbol)
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ se := new(SymEnt32)
+ if err := binary.Read(sr, binary.BigEndian, se); err != nil {
+ return nil, err
+ }
+ numaux = int(se.Nnumaux)
+ sym.SectionNumber = int(se.Nscnum)
+ sym.StorageClass = int(se.Nsclass)
+ sym.Value = uint64(se.Nvalue)
+ needAuxFcn = se.Ntype&SYM_TYPE_FUNC != 0 && numaux > 1
+ zeroes := binary.BigEndian.Uint32(se.Nname[:4])
+ if zeroes != 0 {
+ sym.Name = cstring(se.Nname[:])
+ } else {
+ offset := binary.BigEndian.Uint32(se.Nname[4:])
+ sym.Name, ok = getString(f.StringTable, offset)
+ if !ok {
+ goto skip
+ }
+ }
+ case U64_TOCMAGIC:
+ se := new(SymEnt64)
+ if err := binary.Read(sr, binary.BigEndian, se); err != nil {
+ return nil, err
+ }
+ numaux = int(se.Nnumaux)
+ sym.SectionNumber = int(se.Nscnum)
+ sym.StorageClass = int(se.Nsclass)
+ sym.Value = se.Nvalue
+ needAuxFcn = se.Ntype&SYM_TYPE_FUNC != 0 && numaux > 1
+ sym.Name, ok = getString(f.StringTable, se.Noffset)
+ if !ok {
+ goto skip
+ }
+ }
+ if sym.StorageClass != C_EXT && sym.StorageClass != C_WEAKEXT && sym.StorageClass != C_HIDEXT {
+ goto skip
+ }
+ // Must have at least one csect auxiliary entry.
+ if numaux < 1 || i+numaux >= int(nsyms) {
+ goto skip
+ }
+
+ if sym.SectionNumber > int(nscns) {
+ goto skip
+ }
+ if sym.SectionNumber == 0 {
+ sym.Value = 0
+ } else {
+ sym.Value -= f.Sections[sym.SectionNumber-1].VirtualAddress
+ }
+
+ idxToSym[i] = sym
+
+		// If this symbol is a function, its size must be retrieved from
+		// its AUX_FCN entry.
+		// A function symbol may lack an AUX_FCN entry entirely; in that
+		// case needAuxFcn is false and its size is left at 0.
+ if needAuxFcn {
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ aux := new(AuxFcn32)
+ if err := binary.Read(sr, binary.BigEndian, aux); err != nil {
+ return nil, err
+ }
+ sym.AuxFcn.Size = int64(aux.Xfsize)
+ case U64_TOCMAGIC:
+ aux := new(AuxFcn64)
+ if err := binary.Read(sr, binary.BigEndian, aux); err != nil {
+ return nil, err
+ }
+ sym.AuxFcn.Size = int64(aux.Xfsize)
+ }
+ }
+
+ // Read csect auxiliary entry (by convention, it is the last).
+ if !needAuxFcn {
+ if _, err := sr.Seek(int64(numaux-1)*SYMESZ, io.SeekCurrent); err != nil {
+ return nil, err
+ }
+ }
+ i += numaux
+ numaux = 0
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ aux := new(AuxCSect32)
+ if err := binary.Read(sr, binary.BigEndian, aux); err != nil {
+ return nil, err
+ }
+ sym.AuxCSect.SymbolType = int(aux.Xsmtyp & 0x7)
+ sym.AuxCSect.StorageMappingClass = int(aux.Xsmclas)
+ sym.AuxCSect.Length = int64(aux.Xscnlen)
+ case U64_TOCMAGIC:
+ aux := new(AuxCSect64)
+ if err := binary.Read(sr, binary.BigEndian, aux); err != nil {
+ return nil, err
+ }
+ sym.AuxCSect.SymbolType = int(aux.Xsmtyp & 0x7)
+ sym.AuxCSect.StorageMappingClass = int(aux.Xsmclas)
+ sym.AuxCSect.Length = int64(aux.Xscnlenhi)<<32 | int64(aux.Xscnlenlo)
+ }
+ f.Symbols = append(f.Symbols, sym)
+ skip:
+ i += numaux // Skip auxiliary entries
+ if _, err := sr.Seek(int64(numaux)*SYMESZ, io.SeekCurrent); err != nil {
+ return nil, err
+ }
+ }
+
+ // Read relocations
+ // Only for .data or .text section
+ for sectNum, sect := range f.Sections {
+ if sect.Type != STYP_TEXT && sect.Type != STYP_DATA {
+ continue
+ }
+ if sect.Relptr == 0 {
+ continue
+ }
+ c := saferio.SliceCap((*Reloc)(nil), uint64(sect.Nreloc))
+ if c < 0 {
+ return nil, fmt.Errorf("too many relocs (%d) for section %d", sect.Nreloc, sectNum)
+ }
+ sect.Relocs = make([]Reloc, 0, c)
+ if _, err := sr.Seek(int64(sect.Relptr), io.SeekStart); err != nil {
+ return nil, err
+ }
+ for i := uint32(0); i < sect.Nreloc; i++ {
+ var reloc Reloc
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ rel := new(Reloc32)
+ if err := binary.Read(sr, binary.BigEndian, rel); err != nil {
+ return nil, err
+ }
+ reloc.VirtualAddress = uint64(rel.Rvaddr)
+ reloc.Symbol = idxToSym[int(rel.Rsymndx)]
+ reloc.Type = rel.Rtype
+ reloc.Length = rel.Rsize&0x3F + 1
+
+ if rel.Rsize&0x80 != 0 {
+ reloc.Signed = true
+ }
+ if rel.Rsize&0x40 != 0 {
+ reloc.InstructionFixed = true
+ }
+
+ case U64_TOCMAGIC:
+ rel := new(Reloc64)
+ if err := binary.Read(sr, binary.BigEndian, rel); err != nil {
+ return nil, err
+ }
+ reloc.VirtualAddress = rel.Rvaddr
+ reloc.Symbol = idxToSym[int(rel.Rsymndx)]
+ reloc.Type = rel.Rtype
+ reloc.Length = rel.Rsize&0x3F + 1
+ if rel.Rsize&0x80 != 0 {
+ reloc.Signed = true
+ }
+ if rel.Rsize&0x40 != 0 {
+ reloc.InstructionFixed = true
+ }
+ }
+
+ sect.Relocs = append(sect.Relocs, reloc)
+ }
+ }
+
+ return f, nil
+}
+
+// zeroReaderAt is a ReaderAt that reads only zero bytes.
+type zeroReaderAt struct{}
+
+// ReadAt writes len(p) 0s into p.
+func (w zeroReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
+ for i := range p {
+ p[i] = 0
+ }
+ return len(p), nil
+}
+
+// Data reads and returns the contents of the XCOFF section s.
+func (s *Section) Data() ([]byte, error) {
+ dat := make([]byte, s.sr.Size())
+ n, err := s.sr.ReadAt(dat, 0)
+ if n == len(dat) {
+ err = nil
+ }
+ return dat[:n], err
+}
+
+// CSect reads and returns the contents of a csect.
+func (f *File) CSect(name string) []byte {
+ for _, sym := range f.Symbols {
+ if sym.Name == name && sym.AuxCSect.SymbolType == XTY_SD {
+ if i := sym.SectionNumber - 1; 0 <= i && i < len(f.Sections) {
+ s := f.Sections[i]
+ if sym.Value+uint64(sym.AuxCSect.Length) <= s.Size {
+ dat := make([]byte, sym.AuxCSect.Length)
+ _, err := s.sr.ReadAt(dat, int64(sym.Value))
+ if err != nil {
+ return nil
+ }
+ return dat
+ }
+ }
+ break
+ }
+ }
+ return nil
+}
+
+func (f *File) DWARF() (*dwarf.Data, error) {
+ // There are many other DWARF sections, but these
+ // are the ones the debug/dwarf package uses.
+ // Don't bother loading others.
+ var subtypes = [...]uint32{SSUBTYP_DWABREV, SSUBTYP_DWINFO, SSUBTYP_DWLINE, SSUBTYP_DWRNGES, SSUBTYP_DWSTR}
+ var dat [len(subtypes)][]byte
+ for i, subtype := range subtypes {
+ s := f.SectionByType(STYP_DWARF | subtype)
+ if s != nil {
+ b, err := s.Data()
+ if err != nil && uint64(len(b)) < s.Size {
+ return nil, err
+ }
+ dat[i] = b
+ }
+ }
+
+ abbrev, info, line, ranges, str := dat[0], dat[1], dat[2], dat[3], dat[4]
+ return dwarf.New(abbrev, nil, nil, info, line, nil, ranges, str)
+}
+
+// readImportIDs returns the import file IDs stored inside the .loader section.
+// The library name pattern is either path/base/member or base/member.
+func (f *File) readImportIDs(s *Section) ([]string, error) {
+ // Read loader header
+ if _, err := s.sr.Seek(0, io.SeekStart); err != nil {
+ return nil, err
+ }
+ var istlen uint32
+ var nimpid uint32
+ var impoff uint64
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ lhdr := new(LoaderHeader32)
+ if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil {
+ return nil, err
+ }
+ istlen = lhdr.Listlen
+ nimpid = lhdr.Lnimpid
+ impoff = uint64(lhdr.Limpoff)
+ case U64_TOCMAGIC:
+ lhdr := new(LoaderHeader64)
+ if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil {
+ return nil, err
+ }
+ istlen = lhdr.Listlen
+ nimpid = lhdr.Lnimpid
+ impoff = lhdr.Limpoff
+ }
+
+ // Read loader import file ID table
+ if _, err := s.sr.Seek(int64(impoff), io.SeekStart); err != nil {
+ return nil, err
+ }
+ table := make([]byte, istlen)
+ if _, err := io.ReadFull(s.sr, table); err != nil {
+ return nil, err
+ }
+
+ offset := 0
+ // First import file ID is the default LIBPATH value
+ libpath := cstring(table[offset:])
+ f.LibraryPaths = strings.Split(libpath, ":")
+ offset += len(libpath) + 3 // 3 null bytes
+ all := make([]string, 0)
+ for i := 1; i < int(nimpid); i++ {
+ impidpath := cstring(table[offset:])
+ offset += len(impidpath) + 1
+ impidbase := cstring(table[offset:])
+ offset += len(impidbase) + 1
+ impidmem := cstring(table[offset:])
+ offset += len(impidmem) + 1
+ var path string
+ if len(impidpath) > 0 {
+ path = impidpath + "/" + impidbase + "/" + impidmem
+ } else {
+ path = impidbase + "/" + impidmem
+ }
+ all = append(all, path)
+ }
+
+ return all, nil
+}
+
+// ImportedSymbols returns the names of all symbols
+// referred to by the binary f that are expected to be
+// satisfied by other libraries at dynamic load time.
+// It does not return weak symbols.
+func (f *File) ImportedSymbols() ([]ImportedSymbol, error) {
+ s := f.SectionByType(STYP_LOADER)
+ if s == nil {
+ return nil, nil
+ }
+ // Read loader header
+ if _, err := s.sr.Seek(0, io.SeekStart); err != nil {
+ return nil, err
+ }
+ var stlen uint32
+ var stoff uint64
+ var nsyms uint32
+ var symoff uint64
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ lhdr := new(LoaderHeader32)
+ if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil {
+ return nil, err
+ }
+ stlen = lhdr.Lstlen
+ stoff = uint64(lhdr.Lstoff)
+ nsyms = lhdr.Lnsyms
+ symoff = LDHDRSZ_32
+ case U64_TOCMAGIC:
+ lhdr := new(LoaderHeader64)
+ if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil {
+ return nil, err
+ }
+ stlen = lhdr.Lstlen
+ stoff = lhdr.Lstoff
+ nsyms = lhdr.Lnsyms
+ symoff = lhdr.Lsymoff
+ }
+
+ // Read loader section string table
+ if _, err := s.sr.Seek(int64(stoff), io.SeekStart); err != nil {
+ return nil, err
+ }
+ st := make([]byte, stlen)
+ if _, err := io.ReadFull(s.sr, st); err != nil {
+ return nil, err
+ }
+
+ // Read imported libraries
+ libs, err := f.readImportIDs(s)
+ if err != nil {
+ return nil, err
+ }
+
+ // Read loader symbol table
+ if _, err := s.sr.Seek(int64(symoff), io.SeekStart); err != nil {
+ return nil, err
+ }
+ all := make([]ImportedSymbol, 0)
+ for i := 0; i < int(nsyms); i++ {
+ var name string
+ var ifile uint32
+ var ok bool
+ switch f.TargetMachine {
+ case U802TOCMAGIC:
+ ldsym := new(LoaderSymbol32)
+ if err := binary.Read(s.sr, binary.BigEndian, ldsym); err != nil {
+ return nil, err
+ }
+ if ldsym.Lsmtype&0x40 == 0 {
+ continue // Imported symbols only
+ }
+ zeroes := binary.BigEndian.Uint32(ldsym.Lname[:4])
+ if zeroes != 0 {
+ name = cstring(ldsym.Lname[:])
+ } else {
+ offset := binary.BigEndian.Uint32(ldsym.Lname[4:])
+ name, ok = getString(st, offset)
+ if !ok {
+ continue
+ }
+ }
+ ifile = ldsym.Lifile
+ case U64_TOCMAGIC:
+ ldsym := new(LoaderSymbol64)
+ if err := binary.Read(s.sr, binary.BigEndian, ldsym); err != nil {
+ return nil, err
+ }
+ if ldsym.Lsmtype&0x40 == 0 {
+ continue // Imported symbols only
+ }
+ name, ok = getString(st, ldsym.Loffset)
+ if !ok {
+ continue
+ }
+ ifile = ldsym.Lifile
+ }
+ var sym ImportedSymbol
+ sym.Name = name
+ if ifile >= 1 && int(ifile) <= len(libs) {
+ sym.Library = libs[ifile-1]
+ }
+ all = append(all, sym)
+ }
+
+ return all, nil
+}
+
+// ImportedLibraries returns the names of all libraries
+// referred to by the binary f that are expected to be
+// linked with the binary at dynamic link time.
+func (f *File) ImportedLibraries() ([]string, error) {
+ s := f.SectionByType(STYP_LOADER)
+ if s == nil {
+ return nil, nil
+ }
+ all, err := f.readImportIDs(s)
+ return all, err
+}
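
A hedged sketch tying the File API together; the binary path is hypothetical, and internal/xcoff is importable only from within the standard library:

	package main

	import (
		"fmt"
		"internal/xcoff" // standard-library-internal import
		"log"
	)

	func main() {
		f, err := xcoff.Open("./a.out") // hypothetical XCOFF executable
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		for _, s := range f.Sections {
			fmt.Printf("%-10s vaddr=%#x size=%d relocs=%d\n",
				s.Name, s.VirtualAddress, s.Size, s.Nreloc)
		}

		libs, err := f.ImportedLibraries()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("needed libraries:", libs)
	}
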
diff --git a/src/internal/xcoff/file_test.go b/src/internal/xcoff/file_test.go
new file mode 100644
index 0000000..a6722e9
--- /dev/null
+++ b/src/internal/xcoff/file_test.go
@@ -0,0 +1,102 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xcoff
+
+import (
+ "reflect"
+ "testing"
+)
+
+type fileTest struct {
+ file string
+ hdr FileHeader
+ sections []*SectionHeader
+ needed []string
+}
+
+var fileTests = []fileTest{
+ {
+ "testdata/gcc-ppc32-aix-dwarf2-exec",
+ FileHeader{U802TOCMAGIC},
+ []*SectionHeader{
+ {".text", 0x10000290, 0x00000bbd, STYP_TEXT, 0x7ae6, 0x36},
+ {".data", 0x20000e4d, 0x00000437, STYP_DATA, 0x7d02, 0x2b},
+ {".bss", 0x20001284, 0x0000021c, STYP_BSS, 0, 0},
+ {".loader", 0x00000000, 0x000004b3, STYP_LOADER, 0, 0},
+ {".dwline", 0x00000000, 0x000000df, STYP_DWARF | SSUBTYP_DWLINE, 0x7eb0, 0x7},
+ {".dwinfo", 0x00000000, 0x00000314, STYP_DWARF | SSUBTYP_DWINFO, 0x7ef6, 0xa},
+ {".dwabrev", 0x00000000, 0x000000d6, STYP_DWARF | SSUBTYP_DWABREV, 0, 0},
+ {".dwarnge", 0x00000000, 0x00000020, STYP_DWARF | SSUBTYP_DWARNGE, 0x7f5a, 0x2},
+ {".dwloc", 0x00000000, 0x00000074, STYP_DWARF | SSUBTYP_DWLOC, 0, 0},
+ {".debug", 0x00000000, 0x00005e4f, STYP_DEBUG, 0, 0},
+ },
+ []string{"libc.a/shr.o"},
+ },
+ {
+ "testdata/gcc-ppc64-aix-dwarf2-exec",
+ FileHeader{U64_TOCMAGIC},
+ []*SectionHeader{
+ {".text", 0x10000480, 0x00000afd, STYP_TEXT, 0x8322, 0x34},
+ {".data", 0x20000f7d, 0x000002f3, STYP_DATA, 0x85fa, 0x25},
+ {".bss", 0x20001270, 0x00000428, STYP_BSS, 0, 0},
+ {".loader", 0x00000000, 0x00000535, STYP_LOADER, 0, 0},
+ {".dwline", 0x00000000, 0x000000b4, STYP_DWARF | SSUBTYP_DWLINE, 0x8800, 0x4},
+ {".dwinfo", 0x00000000, 0x0000036a, STYP_DWARF | SSUBTYP_DWINFO, 0x8838, 0x7},
+ {".dwabrev", 0x00000000, 0x000000b5, STYP_DWARF | SSUBTYP_DWABREV, 0, 0},
+ {".dwarnge", 0x00000000, 0x00000040, STYP_DWARF | SSUBTYP_DWARNGE, 0x889a, 0x2},
+ {".dwloc", 0x00000000, 0x00000062, STYP_DWARF | SSUBTYP_DWLOC, 0, 0},
+ {".debug", 0x00000000, 0x00006605, STYP_DEBUG, 0, 0},
+ },
+ []string{"libc.a/shr_64.o"},
+ },
+}
+
+func TestOpen(t *testing.T) {
+ for i := range fileTests {
+ tt := &fileTests[i]
+
+ f, err := Open(tt.file)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if !reflect.DeepEqual(f.FileHeader, tt.hdr) {
+ t.Errorf("open %s:\n\thave %#v\n\twant %#v\n", tt.file, f.FileHeader, tt.hdr)
+ continue
+ }
+
+ for i, sh := range f.Sections {
+ if i >= len(tt.sections) {
+ break
+ }
+ have := &sh.SectionHeader
+ want := tt.sections[i]
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("open %s, section %d:\n\thave %#v\n\twant %#v\n", tt.file, i, have, want)
+ }
+ }
+ tn := len(tt.sections)
+ fn := len(f.Sections)
+ if tn != fn {
+ t.Errorf("open %s: len(Sections) = %d, want %d", tt.file, fn, tn)
+ }
+ tl := tt.needed
+ fl, err := f.ImportedLibraries()
+ if err != nil {
+ t.Error(err)
+ }
+ if !reflect.DeepEqual(tl, fl) {
+ t.Errorf("open %s: loader import = %v, want %v", tt.file, tl, fl)
+ }
+ }
+}
+
+func TestOpenFailure(t *testing.T) {
+ filename := "file.go" // not an XCOFF object file
+ _, err := Open(filename) // don't crash
+ if err == nil {
+ t.Errorf("open %s: succeeded unexpectedly", filename)
+ }
+}
diff --git a/src/internal/xcoff/testdata/bigar-empty b/src/internal/xcoff/testdata/bigar-empty
new file mode 100644
index 0000000..851ccc5
--- /dev/null
+++ b/src/internal/xcoff/testdata/bigar-empty
@@ -0,0 +1,2 @@
+<bigaf>
+0 0 0 0 0 0 \ No newline at end of file
diff --git a/src/internal/xcoff/testdata/bigar-ppc64 b/src/internal/xcoff/testdata/bigar-ppc64
new file mode 100644
index 0000000..a8d4979
--- /dev/null
+++ b/src/internal/xcoff/testdata/bigar-ppc64
Binary files differ
diff --git a/src/internal/xcoff/testdata/gcc-ppc32-aix-dwarf2-exec b/src/internal/xcoff/testdata/gcc-ppc32-aix-dwarf2-exec
new file mode 100644
index 0000000..810e21a
--- /dev/null
+++ b/src/internal/xcoff/testdata/gcc-ppc32-aix-dwarf2-exec
Binary files differ
diff --git a/src/internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec b/src/internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec
new file mode 100644
index 0000000..707d01e
--- /dev/null
+++ b/src/internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec
Binary files differ
diff --git a/src/internal/xcoff/testdata/hello.c b/src/internal/xcoff/testdata/hello.c
new file mode 100644
index 0000000..34d9ee7
--- /dev/null
+++ b/src/internal/xcoff/testdata/hello.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+void
+main(int argc, char *argv[])
+{
+ printf("hello, world\n");
+}
diff --git a/src/internal/xcoff/testdata/printbye.c b/src/internal/xcoff/testdata/printbye.c
new file mode 100644
index 0000000..9045079
--- /dev/null
+++ b/src/internal/xcoff/testdata/printbye.c
@@ -0,0 +1,5 @@
+#include <stdio.h>
+
+void printbye(){
+ printf("Goodbye\n");
+}
diff --git a/src/internal/xcoff/testdata/printhello.c b/src/internal/xcoff/testdata/printhello.c
new file mode 100644
index 0000000..182aa09
--- /dev/null
+++ b/src/internal/xcoff/testdata/printhello.c
@@ -0,0 +1,5 @@
+#include <stdio.h>
+
+void printhello(){
+ printf("Helloworld\n");
+}
diff --git a/src/internal/xcoff/xcoff.go b/src/internal/xcoff/xcoff.go
new file mode 100644
index 0000000..db81542
--- /dev/null
+++ b/src/internal/xcoff/xcoff.go
@@ -0,0 +1,367 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xcoff
+
+// File Header.
+type FileHeader32 struct {
+ Fmagic uint16 // Target machine
+ Fnscns uint16 // Number of sections
+ Ftimedat uint32 // Time and date of file creation
+ Fsymptr uint32 // Byte offset to symbol table start
+ Fnsyms uint32 // Number of entries in symbol table
+ Fopthdr uint16 // Number of bytes in optional header
+ Fflags uint16 // Flags
+}
+
+type FileHeader64 struct {
+ Fmagic uint16 // Target machine
+ Fnscns uint16 // Number of sections
+ Ftimedat uint32 // Time and date of file creation
+ Fsymptr uint64 // Byte offset to symbol table start
+ Fopthdr uint16 // Number of bytes in optional header
+ Fflags uint16 // Flags
+ Fnsyms uint32 // Number of entries in symbol table
+}
+
+const (
+ FILHSZ_32 = 20
+ FILHSZ_64 = 24
+)
+const (
+ U802TOCMAGIC = 0737 // AIX 32-bit XCOFF
+ U64_TOCMAGIC = 0767 // AIX 64-bit XCOFF
+)
+
+// Flags that describe the type of the object file.
+const (
+ F_RELFLG = 0x0001
+ F_EXEC = 0x0002
+ F_LNNO = 0x0004
+ F_FDPR_PROF = 0x0010
+ F_FDPR_OPTI = 0x0020
+ F_DSA = 0x0040
+ F_VARPG = 0x0100
+ F_DYNLOAD = 0x1000
+ F_SHROBJ = 0x2000
+ F_LOADONLY = 0x4000
+)
+
+// Section Header.
+type SectionHeader32 struct {
+ Sname [8]byte // Section name
+ Spaddr uint32 // Physical address
+ Svaddr uint32 // Virtual address
+ Ssize uint32 // Section size
+ Sscnptr uint32 // Offset in file to raw data for section
+ Srelptr uint32 // Offset in file to relocation entries for section
+ Slnnoptr uint32 // Offset in file to line number entries for section
+ Snreloc uint16 // Number of relocation entries
+ Snlnno uint16 // Number of line number entries
+ Sflags uint32 // Flags to define the section type
+}
+
+type SectionHeader64 struct {
+ Sname [8]byte // Section name
+ Spaddr uint64 // Physical address
+ Svaddr uint64 // Virtual address
+ Ssize uint64 // Section size
+ Sscnptr uint64 // Offset in file to raw data for section
+ Srelptr uint64 // Offset in file to relocation entries for section
+ Slnnoptr uint64 // Offset in file to line number entries for section
+ Snreloc uint32 // Number of relocation entries
+ Snlnno uint32 // Number of line number entries
+ Sflags uint32 // Flags to define the section type
+ Spad uint32 // Needs to be 72 bytes long
+}
+
+// Flags defining the section type.
+const (
+ STYP_DWARF = 0x0010
+ STYP_TEXT = 0x0020
+ STYP_DATA = 0x0040
+ STYP_BSS = 0x0080
+ STYP_EXCEPT = 0x0100
+ STYP_INFO = 0x0200
+ STYP_TDATA = 0x0400
+ STYP_TBSS = 0x0800
+ STYP_LOADER = 0x1000
+ STYP_DEBUG = 0x2000
+ STYP_TYPCHK = 0x4000
+ STYP_OVRFLO = 0x8000
+)
+const (
+ SSUBTYP_DWINFO = 0x10000 // DWARF info section
+ SSUBTYP_DWLINE = 0x20000 // DWARF line-number section
+ SSUBTYP_DWPBNMS = 0x30000 // DWARF public names section
+ SSUBTYP_DWPBTYP = 0x40000 // DWARF public types section
+ SSUBTYP_DWARNGE = 0x50000 // DWARF aranges section
+ SSUBTYP_DWABREV = 0x60000 // DWARF abbreviation section
+ SSUBTYP_DWSTR = 0x70000 // DWARF strings section
+ SSUBTYP_DWRNGES = 0x80000 // DWARF ranges section
+ SSUBTYP_DWLOC = 0x90000 // DWARF location lists section
+ SSUBTYP_DWFRAME = 0xA0000 // DWARF frames section
+ SSUBTYP_DWMAC = 0xB0000 // DWARF macros section
+)
+
+// Symbol Table Entry.
+type SymEnt32 struct {
+ Nname [8]byte // Symbol name
+ Nvalue uint32 // Symbol value
+ Nscnum uint16 // Section number of symbol
+ Ntype uint16 // Basic and derived type specification
+ Nsclass uint8 // Storage class of symbol
+ Nnumaux uint8 // Number of auxiliary entries
+}
+
+type SymEnt64 struct {
+ Nvalue uint64 // Symbol value
+ Noffset uint32 // Offset of the name in string table or .debug section
+ Nscnum uint16 // Section number of symbol
+ Ntype uint16 // Basic and derived type specification
+ Nsclass uint8 // Storage class of symbol
+ Nnumaux uint8 // Number of auxiliary entries
+}
+
+const SYMESZ = 18
+
+const (
+ // Nscnum
+ N_DEBUG = -2
+ N_ABS = -1
+ N_UNDEF = 0
+
+	// Ntype
+ SYM_V_INTERNAL = 0x1000
+ SYM_V_HIDDEN = 0x2000
+ SYM_V_PROTECTED = 0x3000
+ SYM_V_EXPORTED = 0x4000
+ SYM_TYPE_FUNC = 0x0020 // is function
+)
+
+// Storage Class.
+const (
+ C_NULL = 0 // Symbol table entry marked for deletion
+ C_EXT = 2 // External symbol
+ C_STAT = 3 // Static symbol
+ C_BLOCK = 100 // Beginning or end of inner block
+ C_FCN = 101 // Beginning or end of function
+ C_FILE = 103 // Source file name and compiler information
+ C_HIDEXT = 107 // Unnamed external symbol
+ C_BINCL = 108 // Beginning of include file
+ C_EINCL = 109 // End of include file
+ C_WEAKEXT = 111 // Weak external symbol
+ C_DWARF = 112 // DWARF symbol
+ C_GSYM = 128 // Global variable
+ C_LSYM = 129 // Automatic variable allocated on stack
+ C_PSYM = 130 // Argument to subroutine allocated on stack
+ C_RSYM = 131 // Register variable
+ C_RPSYM = 132 // Argument to function or procedure stored in register
+ C_STSYM = 133 // Statically allocated symbol
+ C_BCOMM = 135 // Beginning of common block
+ C_ECOML = 136 // Local member of common block
+ C_ECOMM = 137 // End of common block
+ C_DECL = 140 // Declaration of object
+ C_ENTRY = 141 // Alternate entry
+ C_FUN = 142 // Function or procedure
+ C_BSTAT = 143 // Beginning of static block
+ C_ESTAT = 144 // End of static block
+ C_GTLS = 145 // Global thread-local variable
+ C_STTLS = 146 // Static thread-local variable
+)
+
+// File Auxiliary Entry
+type AuxFile64 struct {
+ Xfname [8]byte // Name or offset inside string table
+ Xftype uint8 // Source file string type
+ Xauxtype uint8 // Type of auxiliary entry
+}
+
+// Function Auxiliary Entry
+type AuxFcn32 struct {
+ Xexptr uint32 // File offset to exception table entry
+ Xfsize uint32 // Size of function in bytes
+ Xlnnoptr uint32 // File pointer to line number
+ Xendndx uint32 // Symbol table index of next entry
+ Xpad uint16 // Unused
+}
+type AuxFcn64 struct {
+ Xlnnoptr uint64 // File pointer to line number
+ Xfsize uint32 // Size of function in bytes
+ Xendndx uint32 // Symbol table index of next entry
+ Xpad uint8 // Unused
+ Xauxtype uint8 // Type of auxiliary entry
+}
+
+type AuxSect64 struct {
+ Xscnlen uint64 // section length
+ Xnreloc uint64 // Num RLDs
+ pad uint8
+ Xauxtype uint8 // Type of auxiliary entry
+}
+
+// csect Auxiliary Entry.
+type AuxCSect32 struct {
+ Xscnlen uint32 // Length or symbol table index
+ Xparmhash uint32 // Offset of parameter type-check string
+ Xsnhash uint16 // .typchk section number
+ Xsmtyp uint8 // Symbol alignment and type
+ Xsmclas uint8 // Storage-mapping class
+ Xstab uint32 // Reserved
+ Xsnstab uint16 // Reserved
+}
+
+type AuxCSect64 struct {
+ Xscnlenlo uint32 // Lower 4 bytes of length or symbol table index
+ Xparmhash uint32 // Offset of parameter type-check string
+ Xsnhash uint16 // .typchk section number
+ Xsmtyp uint8 // Symbol alignment and type
+ Xsmclas uint8 // Storage-mapping class
+ Xscnlenhi uint32 // Upper 4 bytes of length or symbol table index
+ Xpad uint8 // Unused
+ Xauxtype uint8 // Type of auxiliary entry
+}
+
+// Auxiliary type
+const (
+ _AUX_EXCEPT = 255
+ _AUX_FCN = 254
+ _AUX_SYM = 253
+ _AUX_FILE = 252
+ _AUX_CSECT = 251
+ _AUX_SECT = 250
+)
+
+// Symbol type field.
+const (
+ XTY_ER = 0 // External reference
+ XTY_SD = 1 // Section definition
+ XTY_LD = 2 // Label definition
+ XTY_CM = 3 // Common csect definition
+)
+
+// Defines for File auxiliary definitions: x_ftype field of x_file
+const (
+ XFT_FN = 0 // Source File Name
+ XFT_CT = 1 // Compile Time Stamp
+ XFT_CV = 2 // Compiler Version Number
+ XFT_CD = 128 // Compiler Defined Information
+)
+
+// Storage-mapping class.
+const (
+ XMC_PR = 0 // Program code
+ XMC_RO = 1 // Read-only constant
+ XMC_DB = 2 // Debug dictionary table
+ XMC_TC = 3 // TOC entry
+ XMC_UA = 4 // Unclassified
+ XMC_RW = 5 // Read/Write data
+ XMC_GL = 6 // Global linkage
+ XMC_XO = 7 // Extended operation
+ XMC_SV = 8 // 32-bit supervisor call descriptor
+ XMC_BS = 9 // BSS class
+ XMC_DS = 10 // Function descriptor
+ XMC_UC = 11 // Unnamed FORTRAN common
+ XMC_TC0 = 15 // TOC anchor
+ XMC_TD = 16 // Scalar data entry in the TOC
+ XMC_SV64 = 17 // 64-bit supervisor call descriptor
+ XMC_SV3264 = 18 // Supervisor call descriptor for both 32-bit and 64-bit
+ XMC_TL = 20 // Read/Write thread-local data
+ XMC_UL = 21 // Read/Write thread-local data (.tbss)
+ XMC_TE = 22 // TOC entry
+)
+
+// Loader Header.
+type LoaderHeader32 struct {
+ Lversion uint32 // Loader section version number
+ Lnsyms uint32 // Number of symbol table entries
+ Lnreloc uint32 // Number of relocation table entries
+ Listlen uint32 // Length of import file ID string table
+ Lnimpid uint32 // Number of import file IDs
+ Limpoff uint32 // Offset to start of import file IDs
+ Lstlen uint32 // Length of string table
+ Lstoff uint32 // Offset to start of string table
+}
+
+type LoaderHeader64 struct {
+ Lversion uint32 // Loader section version number
+ Lnsyms uint32 // Number of symbol table entries
+ Lnreloc uint32 // Number of relocation table entries
+ Listlen uint32 // Length of import file ID string table
+ Lnimpid uint32 // Number of import file IDs
+ Lstlen uint32 // Length of string table
+ Limpoff uint64 // Offset to start of import file IDs
+ Lstoff uint64 // Offset to start of string table
+ Lsymoff uint64 // Offset to start of symbol table
+ Lrldoff uint64 // Offset to start of relocation entries
+}
+
+const (
+ LDHDRSZ_32 = 32
+ LDHDRSZ_64 = 56
+)
+
+// Loader Symbol.
+type LoaderSymbol32 struct {
+ Lname [8]byte // Symbol name or byte offset into string table
+ Lvalue uint32 // Address field
+ Lscnum uint16 // Section number containing symbol
+ Lsmtype uint8 // Symbol type, export, import flags
+ Lsmclas uint8 // Symbol storage class
+ Lifile uint32 // Import file ID; ordinal of import file IDs
+ Lparm uint32 // Parameter type-check field
+}
+
+type LoaderSymbol64 struct {
+ Lvalue uint64 // Address field
+ Loffset uint32 // Byte offset into string table of symbol name
+ Lscnum uint16 // Section number containing symbol
+ Lsmtype uint8 // Symbol type, export, import flags
+ Lsmclas uint8 // Symbol storage class
+ Lifile uint32 // Import file ID; ordinal of import file IDs
+ Lparm uint32 // Parameter type-check field
+}
+
+type Reloc32 struct {
+ Rvaddr uint32 // (virtual) address of reference
+ Rsymndx uint32 // Index into symbol table
+ Rsize uint8 // Sign and reloc bit len
+ Rtype uint8 // Toc relocation type
+}
+
+type Reloc64 struct {
+ Rvaddr uint64 // (virtual) address of reference
+ Rsymndx uint32 // Index into symbol table
+ Rsize uint8 // Sign and reloc bit len
+ Rtype uint8 // Toc relocation type
+}
+
+const (
+ R_POS = 0x00 // A(sym) Positive Relocation
+ R_NEG = 0x01 // -A(sym) Negative Relocation
+ R_REL = 0x02 // A(sym-*) Relative to self
+ R_TOC = 0x03 // A(sym-TOC) Relative to TOC
+ R_TRL = 0x12 // A(sym-TOC) TOC Relative indirect load.
+
+ R_TRLA = 0x13 // A(sym-TOC) TOC Rel load address. modifiable inst
+ R_GL = 0x05 // A(external TOC of sym) Global Linkage
+ R_TCL = 0x06 // A(local TOC of sym) Local object TOC address
+ R_RL = 0x0C // A(sym) Pos indirect load. modifiable instruction
+ R_RLA = 0x0D // A(sym) Pos Load Address. modifiable instruction
+ R_REF = 0x0F // AL0(sym) Non relocating ref. No garbage collect
+ R_BA = 0x08 // A(sym) Branch absolute. Cannot modify instruction
+ R_RBA = 0x18 // A(sym) Branch absolute. modifiable instruction
+ R_BR = 0x0A // A(sym-*) Branch rel to self. non modifiable
+ R_RBR = 0x1A // A(sym-*) Branch rel to self. modifiable instr
+
+ R_TLS = 0x20 // General-dynamic reference to TLS symbol
+ R_TLS_IE = 0x21 // Initial-exec reference to TLS symbol
+ R_TLS_LD = 0x22 // Local-dynamic reference to TLS symbol
+ R_TLS_LE = 0x23 // Local-exec reference to TLS symbol
+ R_TLSM = 0x24 // Module reference to TLS symbol
+ R_TLSML = 0x25 // Module reference to local (own) module
+
+ R_TOCU = 0x30 // Relative to TOC - high order bits
+ R_TOCL = 0x31 // Relative to TOC - low order bits
+)
diff --git a/src/internal/zstd/bits.go b/src/internal/zstd/bits.go
new file mode 100644
index 0000000..c9a2f70
--- /dev/null
+++ b/src/internal/zstd/bits.go
@@ -0,0 +1,130 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "math/bits"
+)
+
+// block is the data for a single compressed block.
+// The data starts immediately after the 3 byte block header,
+// and is Block_Size bytes long.
+type block []byte
+
+// bitReader reads a bit stream going forward.
+type bitReader struct {
+ r *Reader // for error reporting
+ data block // the bits to read
+ off uint32 // current offset into data
+ bits uint32 // bits ready to be returned
+ cnt uint32 // number of valid bits in the bits field
+}
+
+// makeBitReader makes a bit reader starting at off.
+func (r *Reader) makeBitReader(data block, off int) bitReader {
+ return bitReader{
+ r: r,
+ data: data,
+ off: uint32(off),
+ }
+}
+
+// moreBits is called to read more bits.
+// This ensures that at least 16 bits are available.
+func (br *bitReader) moreBits() error {
+ for br.cnt < 16 {
+ if br.off >= uint32(len(br.data)) {
+ return br.r.makeEOFError(int(br.off))
+ }
+ c := br.data[br.off]
+ br.off++
+ br.bits |= uint32(c) << br.cnt
+ br.cnt += 8
+ }
+ return nil
+}
+
+// val is called to fetch a value of b bits.
+func (br *bitReader) val(b uint8) uint32 {
+ r := br.bits & ((1 << b) - 1)
+ br.bits >>= b
+ br.cnt -= uint32(b)
+ return r
+}
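+
+// Illustrative usage sketch (editorial note, not part of the upstream logic):
+// moreBits loads whole bytes least-significant first, so val returns bits in
+// stream order, starting with the low bits of the current byte.
+//
+//	br := r.makeBitReader(data, off)
+//	if err := br.moreBits(); err != nil {
+//		return err
+//	}
+//	low4 := br.val(4) // low 4 bits of data[off]; br.cnt drops by 4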
+
+// backup steps back to the last byte we used.
+func (br *bitReader) backup() {
+ for br.cnt >= 8 {
+ br.off--
+ br.cnt -= 8
+ }
+}
+
+// makeError returns an error at the current offset wrapping a string.
+func (br *bitReader) makeError(msg string) error {
+ return br.r.makeError(int(br.off), msg)
+}
+
+// reverseBitReader reads a bit stream in reverse.
+type reverseBitReader struct {
+ r *Reader // for error reporting
+ data block // the bits to read
+ off uint32 // current offset into data
+ start uint32 // start in data; we read backward to start
+ bits uint32 // bits ready to be returned
+ cnt uint32 // number of valid bits in bits field
+}
+
+// makeReverseBitReader makes a reverseBitReader reading backward
+// from off to start. The bitstream starts with a 1 bit in the last
+// byte, at off.
+func (r *Reader) makeReverseBitReader(data block, off, start int) (reverseBitReader, error) {
+ streamStart := data[off]
+ if streamStart == 0 {
+ return reverseBitReader{}, r.makeError(off, "zero byte at reverse bit stream start")
+ }
+ rbr := reverseBitReader{
+ r: r,
+ data: data,
+ off: uint32(off),
+ start: uint32(start),
+ bits: uint32(streamStart),
+ cnt: uint32(7 - bits.LeadingZeros8(streamStart)),
+ }
+ return rbr, nil
+}
+
+// val is called to fetch a value of b bits.
+func (rbr *reverseBitReader) val(b uint8) (uint32, error) {
+ if !rbr.fetch(b) {
+ return 0, rbr.r.makeEOFError(int(rbr.off))
+ }
+
+ rbr.cnt -= uint32(b)
+ v := (rbr.bits >> rbr.cnt) & ((1 << b) - 1)
+ return v, nil
+}
+
+// fetch is called to ensure that at least b bits are available.
+// It reports false if this can't be done,
+// in which case only rbr.cnt bits are available.
+func (rbr *reverseBitReader) fetch(b uint8) bool {
+ for rbr.cnt < uint32(b) {
+ if rbr.off <= rbr.start {
+ return false
+ }
+ rbr.off--
+ c := rbr.data[rbr.off]
+ rbr.bits <<= 8
+ rbr.bits |= uint32(c)
+ rbr.cnt += 8
+ }
+ return true
+}
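+
+// Illustrative note (editorial, not upstream): if the final byte of the stream
+// is 0x45 (0b0100_0101), bits.LeadingZeros8 returns 1, so cnt starts at 6 and
+// the mandatory leading 1 marker bit is excluded; val then consumes the six
+// remaining bits of that byte before fetch moves backward toward start.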
+
+// makeError returns an error at the current offset wrapping a string.
+func (rbr *reverseBitReader) makeError(msg string) error {
+ return rbr.r.makeError(int(rbr.off), msg)
+}
diff --git a/src/internal/zstd/block.go b/src/internal/zstd/block.go
new file mode 100644
index 0000000..bd3040c
--- /dev/null
+++ b/src/internal/zstd/block.go
@@ -0,0 +1,436 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "io"
+)
+
+// debug can be set in the source to print debug info using println.
+const debug = false
+
+// compressedBlock decompresses a compressed block, storing the decompressed
+// data in r.buffer. The blockSize argument is the compressed size.
+// RFC 3.1.1.3.
+func (r *Reader) compressedBlock(blockSize int) error {
+ if len(r.compressedBuf) >= blockSize {
+ r.compressedBuf = r.compressedBuf[:blockSize]
+ } else {
+ // We know that blockSize <= 128K,
+ // so this won't allocate an enormous amount.
+ need := blockSize - len(r.compressedBuf)
+ r.compressedBuf = append(r.compressedBuf, make([]byte, need)...)
+ }
+
+ if _, err := io.ReadFull(r.r, r.compressedBuf); err != nil {
+ return r.wrapNonEOFError(0, err)
+ }
+
+ data := block(r.compressedBuf)
+ off := 0
+ r.buffer = r.buffer[:0]
+
+ litoff, litbuf, err := r.readLiterals(data, off, r.literals[:0])
+ if err != nil {
+ return err
+ }
+ r.literals = litbuf
+
+ off = litoff
+
+ seqCount, off, err := r.initSeqs(data, off)
+ if err != nil {
+ return err
+ }
+
+ if seqCount == 0 {
+ // No sequences, just literals.
+ if off < len(data) {
+ return r.makeError(off, "extraneous data after no sequences")
+ }
+ if len(litbuf) == 0 {
+ return r.makeError(off, "no sequences and no literals")
+ }
+ r.buffer = append(r.buffer, litbuf...)
+ return nil
+ }
+
+ return r.execSeqs(data, off, litbuf, seqCount)
+}
+
+// seqCode is the kind of sequence code we have to handle.
+type seqCode int
+
+const (
+ seqLiteral seqCode = iota
+ seqOffset
+ seqMatch
+)
+
+// seqCodeInfoData is the information needed to set up seqTables and
+// seqTableBits for a particular kind of sequence code.
+type seqCodeInfoData struct {
+ predefTable []fseBaselineEntry // predefined FSE
+ predefTableBits int // number of bits in predefTable
+ maxSym int // max symbol value in FSE
+ maxBits int // max bits for FSE
+
+ // toBaseline converts from an FSE table to an FSE baseline table.
+ toBaseline func(*Reader, int, []fseEntry, []fseBaselineEntry) error
+}
+
+// seqCodeInfo is the seqCodeInfoData for each kind of sequence code.
+var seqCodeInfo = [3]seqCodeInfoData{
+ seqLiteral: {
+ predefTable: predefinedLiteralTable[:],
+ predefTableBits: 6,
+ maxSym: 35,
+ maxBits: 9,
+ toBaseline: (*Reader).makeLiteralBaselineFSE,
+ },
+ seqOffset: {
+ predefTable: predefinedOffsetTable[:],
+ predefTableBits: 5,
+ maxSym: 31,
+ maxBits: 8,
+ toBaseline: (*Reader).makeOffsetBaselineFSE,
+ },
+ seqMatch: {
+ predefTable: predefinedMatchTable[:],
+ predefTableBits: 6,
+ maxSym: 52,
+ maxBits: 9,
+ toBaseline: (*Reader).makeMatchBaselineFSE,
+ },
+}
+
+// initSeqs reads the Sequences_Section_Header and sets up the FSE
+// tables used to read the sequence codes. It returns the number of
+// sequences and the new offset. RFC 3.1.1.3.2.1.
+func (r *Reader) initSeqs(data block, off int) (int, int, error) {
+ if off >= len(data) {
+ return 0, 0, r.makeEOFError(off)
+ }
+
+ seqHdr := data[off]
+ off++
+ if seqHdr == 0 {
+ return 0, off, nil
+ }
+
+ var seqCount int
+ if seqHdr < 128 {
+ seqCount = int(seqHdr)
+ } else if seqHdr < 255 {
+ if off >= len(data) {
+ return 0, 0, r.makeEOFError(off)
+ }
+ seqCount = ((int(seqHdr) - 128) << 8) + int(data[off])
+ off++
+ } else {
+ if off+1 >= len(data) {
+ return 0, 0, r.makeEOFError(off)
+ }
+ seqCount = int(data[off]) + (int(data[off+1]) << 8) + 0x7f00
+ off += 2
+ }
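+
+	// Worked example (illustrative only): a first byte of 0x85 followed by
+	// 0x10 gives ((0x85-128)<<8)+0x10 = 1296 sequences; a first byte of 255
+	// followed by 0x01 0x02 gives 1+(2<<8)+0x7f00 = 33025 sequences.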
+
+ // Read the Symbol_Compression_Modes byte.
+
+ if off >= len(data) {
+ return 0, 0, r.makeEOFError(off)
+ }
+ symMode := data[off]
+ if symMode&3 != 0 {
+ return 0, 0, r.makeError(off, "invalid symbol compression mode")
+ }
+ off++
+
+ // Set up the FSE tables used to decode the sequence codes.
+
+ var err error
+ off, err = r.setSeqTable(data, off, seqLiteral, (symMode>>6)&3)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ off, err = r.setSeqTable(data, off, seqOffset, (symMode>>4)&3)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ off, err = r.setSeqTable(data, off, seqMatch, (symMode>>2)&3)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ return seqCount, off, nil
+}
+
+// setSeqTable uses the Compression_Mode in mode to set up r.seqTables and
+// r.seqTableBits for kind. We store these in the Reader because one of
+// the modes simply reuses the value from the last block in the frame.
+func (r *Reader) setSeqTable(data block, off int, kind seqCode, mode byte) (int, error) {
+ info := &seqCodeInfo[kind]
+ switch mode {
+ case 0:
+ // Predefined_Mode
+ r.seqTables[kind] = info.predefTable
+ r.seqTableBits[kind] = uint8(info.predefTableBits)
+ return off, nil
+
+ case 1:
+ // RLE_Mode
+ if off >= len(data) {
+ return 0, r.makeEOFError(off)
+ }
+ rle := data[off]
+ off++
+
+ // Build a simple baseline table that always returns rle.
+
+ entry := []fseEntry{
+ {
+ sym: rle,
+ bits: 0,
+ base: 0,
+ },
+ }
+ if cap(r.seqTableBuffers[kind]) == 0 {
+ r.seqTableBuffers[kind] = make([]fseBaselineEntry, 1<<info.maxBits)
+ }
+ r.seqTableBuffers[kind] = r.seqTableBuffers[kind][:1]
+ if err := info.toBaseline(r, off, entry, r.seqTableBuffers[kind]); err != nil {
+ return 0, err
+ }
+
+ r.seqTables[kind] = r.seqTableBuffers[kind]
+ r.seqTableBits[kind] = 0
+ return off, nil
+
+ case 2:
+ // FSE_Compressed_Mode
+ if cap(r.fseScratch) < 1<<info.maxBits {
+ r.fseScratch = make([]fseEntry, 1<<info.maxBits)
+ }
+ r.fseScratch = r.fseScratch[:1<<info.maxBits]
+
+ tableBits, roff, err := r.readFSE(data, off, info.maxSym, info.maxBits, r.fseScratch)
+ if err != nil {
+ return 0, err
+ }
+ r.fseScratch = r.fseScratch[:1<<tableBits]
+
+ if cap(r.seqTableBuffers[kind]) == 0 {
+ r.seqTableBuffers[kind] = make([]fseBaselineEntry, 1<<info.maxBits)
+ }
+ r.seqTableBuffers[kind] = r.seqTableBuffers[kind][:1<<tableBits]
+
+ if err := info.toBaseline(r, roff, r.fseScratch, r.seqTableBuffers[kind]); err != nil {
+ return 0, err
+ }
+
+ r.seqTables[kind] = r.seqTableBuffers[kind]
+ r.seqTableBits[kind] = uint8(tableBits)
+ return roff, nil
+
+ case 3:
+ // Repeat_Mode
+ if len(r.seqTables[kind]) == 0 {
+ return 0, r.makeError(off, "missing repeat sequence FSE table")
+ }
+ return off, nil
+ }
+ panic("unreachable")
+}
+
+// execSeqs reads and executes the sequences. RFC 3.1.1.3.2.1.2.
+func (r *Reader) execSeqs(data block, off int, litbuf []byte, seqCount int) error {
+ // Set up the initial states for the sequence code readers.
+
+ rbr, err := r.makeReverseBitReader(data, len(data)-1, off)
+ if err != nil {
+ return err
+ }
+
+ literalState, err := rbr.val(r.seqTableBits[seqLiteral])
+ if err != nil {
+ return err
+ }
+
+ offsetState, err := rbr.val(r.seqTableBits[seqOffset])
+ if err != nil {
+ return err
+ }
+
+ matchState, err := rbr.val(r.seqTableBits[seqMatch])
+ if err != nil {
+ return err
+ }
+
+ // Read and perform all the sequences. RFC 3.1.1.4.
+
+ seq := 0
+ for seq < seqCount {
+ if len(r.buffer)+len(litbuf) > 128<<10 {
+ return rbr.makeError("uncompressed size too big")
+ }
+
+ ptoffset := &r.seqTables[seqOffset][offsetState]
+ ptmatch := &r.seqTables[seqMatch][matchState]
+ ptliteral := &r.seqTables[seqLiteral][literalState]
+
+ add, err := rbr.val(ptoffset.basebits)
+ if err != nil {
+ return err
+ }
+ offset := ptoffset.baseline + add
+
+ add, err = rbr.val(ptmatch.basebits)
+ if err != nil {
+ return err
+ }
+ match := ptmatch.baseline + add
+
+ add, err = rbr.val(ptliteral.basebits)
+ if err != nil {
+ return err
+ }
+ literal := ptliteral.baseline + add
+
+ // Handle repeat offsets. RFC 3.1.1.5.
+ // See the comment in makeOffsetBaselineFSE.
+ if ptoffset.basebits > 1 {
+ r.repeatedOffset3 = r.repeatedOffset2
+ r.repeatedOffset2 = r.repeatedOffset1
+ r.repeatedOffset1 = offset
+ } else {
+ if literal == 0 {
+ offset++
+ }
+ switch offset {
+ case 1:
+ offset = r.repeatedOffset1
+ case 2:
+ offset = r.repeatedOffset2
+ r.repeatedOffset2 = r.repeatedOffset1
+ r.repeatedOffset1 = offset
+ case 3:
+ offset = r.repeatedOffset3
+ r.repeatedOffset3 = r.repeatedOffset2
+ r.repeatedOffset2 = r.repeatedOffset1
+ r.repeatedOffset1 = offset
+ case 4:
+ offset = r.repeatedOffset1 - 1
+ r.repeatedOffset3 = r.repeatedOffset2
+ r.repeatedOffset2 = r.repeatedOffset1
+ r.repeatedOffset1 = offset
+ }
+ }
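+
+		// For illustration: when literal == 0, a decoded offset value of 1
+		// is bumped to 2 above and so selects repeatedOffset2, and the
+		// special value 4 means repeatedOffset1 - 1, per RFC 3.1.1.5.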
+
+ seq++
+ if seq < seqCount {
+ // Update the states.
+ add, err = rbr.val(ptliteral.bits)
+ if err != nil {
+ return err
+ }
+ literalState = uint32(ptliteral.base) + add
+
+ add, err = rbr.val(ptmatch.bits)
+ if err != nil {
+ return err
+ }
+ matchState = uint32(ptmatch.base) + add
+
+ add, err = rbr.val(ptoffset.bits)
+ if err != nil {
+ return err
+ }
+ offsetState = uint32(ptoffset.base) + add
+ }
+
+ // The next sequence is now in literal, offset, match.
+
+ if debug {
+ println("literal", literal, "offset", offset, "match", match)
+ }
+
+ // Copy literal bytes from litbuf.
+ if literal > uint32(len(litbuf)) {
+ return rbr.makeError("literal byte overflow")
+ }
+ if literal > 0 {
+ r.buffer = append(r.buffer, litbuf[:literal]...)
+ litbuf = litbuf[literal:]
+ }
+
+ if match > 0 {
+ if err := r.copyFromWindow(&rbr, offset, match); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(litbuf) > 0 {
+ r.buffer = append(r.buffer, litbuf...)
+ }
+
+ if rbr.cnt != 0 {
+ return r.makeError(off, "extraneous data after sequences")
+ }
+
+ return nil
+}
+
+// copyFromWindow copies match bytes into r.buffer from offset bytes back in
+// the decoded output, falling back to the window when needed.
+func (r *Reader) copyFromWindow(rbr *reverseBitReader, offset, match uint32) error {
+ if offset == 0 {
+ return rbr.makeError("invalid zero offset")
+ }
+
+ lenBlock := uint32(len(r.buffer))
+ if lenBlock < offset {
+ lenWindow := uint32(len(r.window))
+ windowOffset := offset - lenBlock
+ if windowOffset > lenWindow {
+ return rbr.makeError("offset past window")
+ }
+ from := lenWindow - windowOffset
+ if from+match <= lenWindow {
+ r.buffer = append(r.buffer, r.window[from:from+match]...)
+ return nil
+ }
+ r.buffer = append(r.buffer, r.window[from:]...)
+ copied := lenWindow - from
+ offset -= copied
+ match -= copied
+
+ if offset == 0 && match > 0 {
+ return rbr.makeError("invalid offset")
+ }
+ }
+
+ from := lenBlock - offset
+ if offset >= match {
+ r.buffer = append(r.buffer, r.buffer[from:from+match]...)
+ return nil
+ }
+
+ // We are being asked to copy data that we are adding to the
+ // buffer in the same copy.
+ for match > 0 {
+ var copy uint32
+ if offset >= match {
+ copy = match
+ } else {
+ copy = offset
+ }
+ r.buffer = append(r.buffer, r.buffer[from:from+copy]...)
+ match -= copy
+ from += copy
+ }
+ return nil
+}
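+
+// For illustration (editorial note): with offset 1 and match 3 the loop above
+// appends the last byte of r.buffer three times, one byte per iteration, which
+// is how zstd expresses run-length-style overlapping copies; when offset is at
+// least match, the single append in the fast path above is enough.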
diff --git a/src/internal/zstd/fse.go b/src/internal/zstd/fse.go
new file mode 100644
index 0000000..ea661d4
--- /dev/null
+++ b/src/internal/zstd/fse.go
@@ -0,0 +1,437 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "math/bits"
+)
+
+// fseEntry is one entry in an FSE table.
+type fseEntry struct {
+ sym uint8 // value that this entry records
+ bits uint8 // number of bits to read to determine next state
+ base uint16 // add those bits to this state to get the next state
+}
+
+// readFSE reads an FSE table from data starting at off.
+// maxSym is the maximum symbol value.
+// maxBits is the maximum number of bits permitted for symbols in the table.
+// The FSE is written into table, which must be at least 1<<maxBits in size.
+// This returns the number of bits in the FSE table and the new offset.
+// RFC 4.1.1.
+func (r *Reader) readFSE(data block, off, maxSym, maxBits int, table []fseEntry) (tableBits, roff int, err error) {
+ br := r.makeBitReader(data, off)
+ if err := br.moreBits(); err != nil {
+ return 0, 0, err
+ }
+
+ accuracyLog := int(br.val(4)) + 5
+ if accuracyLog > maxBits {
+ return 0, 0, br.makeError("FSE accuracy log too large")
+ }
+
+ // The number of remaining probabilities, plus 1.
+ // This determines the number of bits to be read for the next value.
+ remaining := (1 << accuracyLog) + 1
+
+ // The current difference between small and large values,
+ // which depends on the number of remaining values.
+ // Small values use 1 less bit.
+ threshold := 1 << accuracyLog
+
+ // The number of bits needed to compute threshold.
+ bitsNeeded := accuracyLog + 1
+
+ // The next character value.
+ sym := 0
+
+ // Whether the last count was 0.
+ prev0 := false
+
+ var norm [256]int16
+
+ for remaining > 1 && sym <= maxSym {
+ if err := br.moreBits(); err != nil {
+ return 0, 0, err
+ }
+
+ if prev0 {
+ // Previous count was 0, so there is a 2-bit
+ // repeat flag. If the 2-bit flag is 0b11,
+ // it adds 3 and then there is another repeat flag.
+ zsym := sym
+ for (br.bits & 0xfff) == 0xfff {
+ zsym += 3 * 6
+ br.bits >>= 12
+ br.cnt -= 12
+ if err := br.moreBits(); err != nil {
+ return 0, 0, err
+ }
+ }
+ for (br.bits & 3) == 3 {
+ zsym += 3
+ br.bits >>= 2
+ br.cnt -= 2
+ if err := br.moreBits(); err != nil {
+ return 0, 0, err
+ }
+ }
+
+			// We have at least 14 bits here,
+			// so there is no need to call moreBits.
+
+ zsym += int(br.val(2))
+
+ if zsym > maxSym {
+ return 0, 0, br.makeError("FSE symbol index overflow")
+ }
+
+ for ; sym < zsym; sym++ {
+ norm[uint8(sym)] = 0
+ }
+
+ prev0 = false
+ continue
+ }
+
+ max := (2*threshold - 1) - remaining
+ var count int
+ if int(br.bits&uint32(threshold-1)) < max {
+ // A small value.
+ count = int(br.bits & uint32((threshold - 1)))
+ br.bits >>= bitsNeeded - 1
+ br.cnt -= uint32(bitsNeeded - 1)
+ } else {
+ // A large value.
+ count = int(br.bits & uint32((2*threshold - 1)))
+ if count >= threshold {
+ count -= max
+ }
+ br.bits >>= bitsNeeded
+ br.cnt -= uint32(bitsNeeded)
+ }
+
+ count--
+ if count >= 0 {
+ remaining -= count
+ } else {
+ remaining--
+ }
+ if sym >= 256 {
+ return 0, 0, br.makeError("FSE sym overflow")
+ }
+ norm[uint8(sym)] = int16(count)
+ sym++
+
+ prev0 = count == 0
+
+ for remaining < threshold {
+ bitsNeeded--
+ threshold >>= 1
+ }
+ }
+
+ if remaining != 1 {
+ return 0, 0, br.makeError("too many symbols in FSE table")
+ }
+
+ for ; sym <= maxSym; sym++ {
+ norm[uint8(sym)] = 0
+ }
+
+ br.backup()
+
+ if err := r.buildFSE(off, norm[:maxSym+1], table, accuracyLog); err != nil {
+ return 0, 0, err
+ }
+
+ return accuracyLog, int(br.off), nil
+}
+
+// buildFSE builds an FSE decoding table from a list of probabilities.
+// The probabilities are in norm. The number of bits in the table is
+// tableBits, and the result is written into table, which must have at
+// least 1<<tableBits entries.
+func (r *Reader) buildFSE(off int, norm []int16, table []fseEntry, tableBits int) error {
+ tableSize := 1 << tableBits
+ highThreshold := tableSize - 1
+
+ var next [256]uint16
+
+ for i, n := range norm {
+ if n >= 0 {
+ next[uint8(i)] = uint16(n)
+ } else {
+ table[highThreshold].sym = uint8(i)
+ highThreshold--
+ next[uint8(i)] = 1
+ }
+ }
+
+ pos := 0
+ step := (tableSize >> 1) + (tableSize >> 3) + 3
+ mask := tableSize - 1
+ for i, n := range norm {
+ for j := 0; j < int(n); j++ {
+ table[pos].sym = uint8(i)
+ pos = (pos + step) & mask
+ for pos > highThreshold {
+ pos = (pos + step) & mask
+ }
+ }
+ }
+ if pos != 0 {
+ return r.makeError(off, "FSE count error")
+ }
+
+ for i := 0; i < tableSize; i++ {
+ sym := table[i].sym
+ nextState := next[sym]
+ next[sym]++
+
+ if nextState == 0 {
+ return r.makeError(off, "FSE state error")
+ }
+
+ highBit := 15 - bits.LeadingZeros16(nextState)
+
+ bits := tableBits - highBit
+ table[i].bits = uint8(bits)
+ table[i].base = (nextState << bits) - uint16(tableSize)
+ }
+
+ return nil
+}
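+
+// Editorial note on the spreading step above: for tableBits = 6 the step is
+// (64>>1)+(64>>3)+3 = 43, which is odd and therefore coprime to the
+// power-of-two table size, so repeatedly adding it modulo 64 visits every
+// slot exactly once and spreads each symbol's occurrences across the table.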
+
+// fseBaselineEntry is an entry in an FSE baseline table.
+// We use these for literal length, match length, and offset values.
+// Those require mapping the symbol to a baseline value,
+// and then reading zero or more bits and adding the value to the baseline.
+// Rather than looking these up in separate tables,
+// we convert the FSE table to an FSE baseline table.
+type fseBaselineEntry struct {
+ baseline uint32 // baseline for value that this entry represents
+ basebits uint8 // number of bits to read to add to baseline
+ bits uint8 // number of bits to read to determine next state
+ base uint16 // add the bits to this base to get the next state
+}
+
+// Given a literal length code, we need to read a number of bits and
+// add that to a baseline. For codes 0 to 15 the baseline is the
+// code itself and the number of bits is zero. RFC 3.1.1.3.2.1.1.
+
+const literalLengthOffset = 16
+
+var literalLengthBase = []uint32{
+ 16 | (1 << 24),
+ 18 | (1 << 24),
+ 20 | (1 << 24),
+ 22 | (1 << 24),
+ 24 | (2 << 24),
+ 28 | (2 << 24),
+ 32 | (3 << 24),
+ 40 | (3 << 24),
+ 48 | (4 << 24),
+ 64 | (6 << 24),
+ 128 | (7 << 24),
+ 256 | (8 << 24),
+ 512 | (9 << 24),
+ 1024 | (10 << 24),
+ 2048 | (11 << 24),
+ 4096 | (12 << 24),
+ 8192 | (13 << 24),
+ 16384 | (14 << 24),
+ 32768 | (15 << 24),
+ 65536 | (16 << 24),
+}
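+
+// For illustration (editorial note): each entry above packs the baseline in the
+// low 24 bits and the number of extra bits in the high byte. Literal length
+// code 20, for example, uses index 4, entry 24|(2<<24): read 2 extra bits and
+// add them to 24, covering regenerated lengths 24 through 27.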
+
+// makeLiteralBaselineFSE converts the literal length fseTable to baselineTable.
+func (r *Reader) makeLiteralBaselineFSE(off int, fseTable []fseEntry, baselineTable []fseBaselineEntry) error {
+ for i, e := range fseTable {
+ be := fseBaselineEntry{
+ bits: e.bits,
+ base: e.base,
+ }
+ if e.sym < literalLengthOffset {
+ be.baseline = uint32(e.sym)
+ be.basebits = 0
+ } else {
+ if e.sym > 35 {
+ return r.makeError(off, "FSE baseline symbol overflow")
+ }
+ idx := e.sym - literalLengthOffset
+ basebits := literalLengthBase[idx]
+ be.baseline = basebits & 0xffffff
+ be.basebits = uint8(basebits >> 24)
+ }
+ baselineTable[i] = be
+ }
+ return nil
+}
+
+// makeOffsetBaselineFSE converts the offset length fseTable to baselineTable.
+func (r *Reader) makeOffsetBaselineFSE(off int, fseTable []fseEntry, baselineTable []fseBaselineEntry) error {
+ for i, e := range fseTable {
+ be := fseBaselineEntry{
+ bits: e.bits,
+ base: e.base,
+ }
+ if e.sym > 31 {
+ return r.makeError(off, "FSE offset symbol overflow")
+ }
+
+ // The simple way to write this is
+ // be.baseline = 1 << e.sym
+ // be.basebits = e.sym
+ // That would give us an offset value that corresponds to
+ // the one described in the RFC. However, for offsets > 3
+ // we have to subtract 3. And for offset values 1, 2, 3
+ // we use a repeated offset.
+ //
+ // The baseline is always a power of 2, and is never 0,
+ // so for those low values we will see one entry that is
+ // baseline 1, basebits 0, and one entry that is baseline 2,
+ // basebits 1. All other entries will have baseline >= 4
+ // basebits >= 2.
+ //
+ // So we can check for RFC offset <= 3 by checking for
+ // basebits <= 1. That means that we can subtract 3 here
+ // and not worry about doing it in the hot loop.
+
+ be.baseline = 1 << e.sym
+ if e.sym >= 2 {
+ be.baseline -= 3
+ }
+ be.basebits = e.sym
+ baselineTable[i] = be
+ }
+ return nil
+}
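+
+// For illustration: offset code 5 yields baseline (1<<5)-3 = 29 with 5 extra
+// bits, which matches the {29, 5, ...} entry in predefinedOffsetTable below.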
+
+// Given a match length code, we need to read a number of bits and add
+// that to a baseline. For codes 0 to 31 the baseline is code+3 and
+// the number of bits is zero. RFC 3.1.1.3.2.1.1.
+
+const matchLengthOffset = 32
+
+var matchLengthBase = []uint32{
+ 35 | (1 << 24),
+ 37 | (1 << 24),
+ 39 | (1 << 24),
+ 41 | (1 << 24),
+ 43 | (2 << 24),
+ 47 | (2 << 24),
+ 51 | (3 << 24),
+ 59 | (3 << 24),
+ 67 | (4 << 24),
+ 83 | (4 << 24),
+ 99 | (5 << 24),
+ 131 | (7 << 24),
+ 259 | (8 << 24),
+ 515 | (9 << 24),
+ 1027 | (10 << 24),
+ 2051 | (11 << 24),
+ 4099 | (12 << 24),
+ 8195 | (13 << 24),
+ 16387 | (14 << 24),
+ 32771 | (15 << 24),
+ 65539 | (16 << 24),
+}
+
+// makeMatchBaselineFSE converts the match length fseTable to baselineTable.
+func (r *Reader) makeMatchBaselineFSE(off int, fseTable []fseEntry, baselineTable []fseBaselineEntry) error {
+ for i, e := range fseTable {
+ be := fseBaselineEntry{
+ bits: e.bits,
+ base: e.base,
+ }
+ if e.sym < matchLengthOffset {
+ be.baseline = uint32(e.sym) + 3
+ be.basebits = 0
+ } else {
+ if e.sym > 52 {
+ return r.makeError(off, "FSE baseline symbol overflow")
+ }
+ idx := e.sym - matchLengthOffset
+ basebits := matchLengthBase[idx]
+ be.baseline = basebits & 0xffffff
+ be.basebits = uint8(basebits >> 24)
+ }
+ baselineTable[i] = be
+ }
+ return nil
+}
+
+// predefinedLiteralTable is the predefined table to use for literal lengths.
+// Generated from table in RFC 3.1.1.3.2.2.1.
+// Checked by TestPredefinedTables.
+var predefinedLiteralTable = [...]fseBaselineEntry{
+ {0, 0, 4, 0}, {0, 0, 4, 16}, {1, 0, 5, 32},
+ {3, 0, 5, 0}, {4, 0, 5, 0}, {6, 0, 5, 0},
+ {7, 0, 5, 0}, {9, 0, 5, 0}, {10, 0, 5, 0},
+ {12, 0, 5, 0}, {14, 0, 6, 0}, {16, 1, 5, 0},
+ {20, 1, 5, 0}, {22, 1, 5, 0}, {28, 2, 5, 0},
+ {32, 3, 5, 0}, {48, 4, 5, 0}, {64, 6, 5, 32},
+ {128, 7, 5, 0}, {256, 8, 6, 0}, {1024, 10, 6, 0},
+ {4096, 12, 6, 0}, {0, 0, 4, 32}, {1, 0, 4, 0},
+ {2, 0, 5, 0}, {4, 0, 5, 32}, {5, 0, 5, 0},
+ {7, 0, 5, 32}, {8, 0, 5, 0}, {10, 0, 5, 32},
+ {11, 0, 5, 0}, {13, 0, 6, 0}, {16, 1, 5, 32},
+ {18, 1, 5, 0}, {22, 1, 5, 32}, {24, 2, 5, 0},
+ {32, 3, 5, 32}, {40, 3, 5, 0}, {64, 6, 4, 0},
+ {64, 6, 4, 16}, {128, 7, 5, 32}, {512, 9, 6, 0},
+ {2048, 11, 6, 0}, {0, 0, 4, 48}, {1, 0, 4, 16},
+ {2, 0, 5, 32}, {3, 0, 5, 32}, {5, 0, 5, 32},
+ {6, 0, 5, 32}, {8, 0, 5, 32}, {9, 0, 5, 32},
+ {11, 0, 5, 32}, {12, 0, 5, 32}, {15, 0, 6, 0},
+ {18, 1, 5, 32}, {20, 1, 5, 32}, {24, 2, 5, 32},
+ {28, 2, 5, 32}, {40, 3, 5, 32}, {48, 4, 5, 32},
+ {65536, 16, 6, 0}, {32768, 15, 6, 0}, {16384, 14, 6, 0},
+ {8192, 13, 6, 0},
+}
+
+// predefinedOffsetTable is the predefined table to use for offsets.
+// Generated from table in RFC 3.1.1.3.2.2.3.
+// Checked by TestPredefinedTables.
+var predefinedOffsetTable = [...]fseBaselineEntry{
+ {1, 0, 5, 0}, {61, 6, 4, 0}, {509, 9, 5, 0},
+ {32765, 15, 5, 0}, {2097149, 21, 5, 0}, {5, 3, 5, 0},
+ {125, 7, 4, 0}, {4093, 12, 5, 0}, {262141, 18, 5, 0},
+ {8388605, 23, 5, 0}, {29, 5, 5, 0}, {253, 8, 4, 0},
+ {16381, 14, 5, 0}, {1048573, 20, 5, 0}, {1, 2, 5, 0},
+ {125, 7, 4, 16}, {2045, 11, 5, 0}, {131069, 17, 5, 0},
+ {4194301, 22, 5, 0}, {13, 4, 5, 0}, {253, 8, 4, 16},
+ {8189, 13, 5, 0}, {524285, 19, 5, 0}, {2, 1, 5, 0},
+ {61, 6, 4, 16}, {1021, 10, 5, 0}, {65533, 16, 5, 0},
+ {268435453, 28, 5, 0}, {134217725, 27, 5, 0}, {67108861, 26, 5, 0},
+ {33554429, 25, 5, 0}, {16777213, 24, 5, 0},
+}
+
+// predefinedMatchTable is the predefined table to use for match lengths.
+// Generated from table in RFC 3.1.1.3.2.2.2.
+// Checked by TestPredefinedTables.
+var predefinedMatchTable = [...]fseBaselineEntry{
+ {3, 0, 6, 0}, {4, 0, 4, 0}, {5, 0, 5, 32},
+ {6, 0, 5, 0}, {8, 0, 5, 0}, {9, 0, 5, 0},
+ {11, 0, 5, 0}, {13, 0, 6, 0}, {16, 0, 6, 0},
+ {19, 0, 6, 0}, {22, 0, 6, 0}, {25, 0, 6, 0},
+ {28, 0, 6, 0}, {31, 0, 6, 0}, {34, 0, 6, 0},
+ {37, 1, 6, 0}, {41, 1, 6, 0}, {47, 2, 6, 0},
+ {59, 3, 6, 0}, {83, 4, 6, 0}, {131, 7, 6, 0},
+ {515, 9, 6, 0}, {4, 0, 4, 16}, {5, 0, 4, 0},
+ {6, 0, 5, 32}, {7, 0, 5, 0}, {9, 0, 5, 32},
+ {10, 0, 5, 0}, {12, 0, 6, 0}, {15, 0, 6, 0},
+ {18, 0, 6, 0}, {21, 0, 6, 0}, {24, 0, 6, 0},
+ {27, 0, 6, 0}, {30, 0, 6, 0}, {33, 0, 6, 0},
+ {35, 1, 6, 0}, {39, 1, 6, 0}, {43, 2, 6, 0},
+ {51, 3, 6, 0}, {67, 4, 6, 0}, {99, 5, 6, 0},
+ {259, 8, 6, 0}, {4, 0, 4, 32}, {4, 0, 4, 48},
+ {5, 0, 4, 16}, {7, 0, 5, 32}, {8, 0, 5, 32},
+ {10, 0, 5, 32}, {11, 0, 5, 32}, {14, 0, 6, 0},
+ {17, 0, 6, 0}, {20, 0, 6, 0}, {23, 0, 6, 0},
+ {26, 0, 6, 0}, {29, 0, 6, 0}, {32, 0, 6, 0},
+ {65539, 16, 6, 0}, {32771, 15, 6, 0}, {16387, 14, 6, 0},
+ {8195, 13, 6, 0}, {4099, 12, 6, 0}, {2051, 11, 6, 0},
+ {1027, 10, 6, 0},
+}
diff --git a/src/internal/zstd/fse_test.go b/src/internal/zstd/fse_test.go
new file mode 100644
index 0000000..6f106b6
--- /dev/null
+++ b/src/internal/zstd/fse_test.go
@@ -0,0 +1,89 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "slices"
+ "testing"
+)
+
+// literalPredefinedDistribution is the predefined distribution table
+// for literal lengths. RFC 3.1.1.3.2.2.1.
+var literalPredefinedDistribution = []int16{
+ 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+ -1, -1, -1, -1,
+}
+
+// offsetPredefinedDistribution is the predefined distribution table
+// for offsets. RFC 3.1.1.3.2.2.3.
+var offsetPredefinedDistribution = []int16{
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1,
+}
+
+// matchPredefinedDistribution is the predefined distribution table
+// for match lengths. RFC 3.1.1.3.2.2.2.
+var matchPredefinedDistribution = []int16{
+ 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
+ -1, -1, -1, -1, -1,
+}
+
+// TestPredefinedTables verifies that we can generate the predefined
+// literal/offset/match tables from the input data in RFC 8878.
+// This serves as a test of the predefined tables, and also of buildFSE
+// and the functions that make baseline FSE tables.
+func TestPredefinedTables(t *testing.T) {
+ tests := []struct {
+ name string
+ distribution []int16
+ tableBits int
+ toBaseline func(*Reader, int, []fseEntry, []fseBaselineEntry) error
+ predef []fseBaselineEntry
+ }{
+ {
+ name: "literal",
+ distribution: literalPredefinedDistribution,
+ tableBits: 6,
+ toBaseline: (*Reader).makeLiteralBaselineFSE,
+ predef: predefinedLiteralTable[:],
+ },
+ {
+ name: "offset",
+ distribution: offsetPredefinedDistribution,
+ tableBits: 5,
+ toBaseline: (*Reader).makeOffsetBaselineFSE,
+ predef: predefinedOffsetTable[:],
+ },
+ {
+ name: "match",
+ distribution: matchPredefinedDistribution,
+ tableBits: 6,
+ toBaseline: (*Reader).makeMatchBaselineFSE,
+ predef: predefinedMatchTable[:],
+ },
+ }
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ var r Reader
+ table := make([]fseEntry, 1<<test.tableBits)
+ if err := r.buildFSE(0, test.distribution, table, test.tableBits); err != nil {
+ t.Fatal(err)
+ }
+
+ baselineTable := make([]fseBaselineEntry, len(table))
+ if err := test.toBaseline(&r, 0, table, baselineTable); err != nil {
+ t.Fatal(err)
+ }
+
+ if !slices.Equal(baselineTable, test.predef) {
+ t.Errorf("got %v, want %v", baselineTable, test.predef)
+ }
+ })
+ }
+}
diff --git a/src/internal/zstd/fuzz_test.go b/src/internal/zstd/fuzz_test.go
new file mode 100644
index 0000000..bb6f0a9
--- /dev/null
+++ b/src/internal/zstd/fuzz_test.go
@@ -0,0 +1,140 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "os/exec"
+ "testing"
+)
+
+// badStrings holds inputs that FuzzReader failed on earlier.
+var badStrings = []string{
+ "(\xb5/\xfdd00,\x05\x00\xc4\x0400000000000000000000000000000000000000000000000000000000000000000000000000000 \xa07100000000000000000000000000000000000000000000000000000000000000000000000000aM\x8a2y0B\b",
+ "(\xb5/\xfd00$\x05\x0020 00X70000a70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "(\xb5/\xfd00$\x05\x0020 00B00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "(\xb5/\xfd00}\x00\x0020\x00\x9000000000000",
+ "(\xb5/\xfd00}\x00\x00&0\x02\x830!000000000",
+ "(\xb5/\xfd\x1002000$\x05\x0010\xcc0\xa8100000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "(\xb5/\xfd\x1002000$\x05\x0000\xcc0\xa8100d\x0000001000000000000000000000000000000000000000000000000000000000000000000000000\x000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "(\xb5/\xfd001\x00\x0000000000000000000",
+}
+
+// This is a simple fuzzer to see if the decompressor panics.
+func FuzzReader(f *testing.F) {
+ for _, test := range tests {
+ f.Add([]byte(test.compressed))
+ }
+ for _, s := range badStrings {
+ f.Add([]byte(s))
+ }
+ f.Fuzz(func(t *testing.T, b []byte) {
+ r := NewReader(bytes.NewReader(b))
+ io.Copy(io.Discard, r)
+ })
+}
+
+// Fuzz test to verify that what we decompress is what we compress.
+// This isn't a great fuzz test because the fuzzer can't efficiently
+// explore the space of decompressor behavior, since it can't see
+// what the compressor is doing. But it's better than nothing.
+func FuzzDecompressor(f *testing.F) {
+ if _, err := os.Stat("/usr/bin/zstd"); err != nil {
+ f.Skip("skipping because /usr/bin/zstd does not exist")
+ }
+
+ for _, test := range tests {
+ f.Add([]byte(test.uncompressed))
+ }
+
+ // Add some larger data, as that has more interesting compression.
+ f.Add(bytes.Repeat([]byte("abcdefghijklmnop"), 256))
+ var buf bytes.Buffer
+ for i := 0; i < 256; i++ {
+ buf.WriteByte(byte(i))
+ }
+ f.Add(bytes.Repeat(buf.Bytes(), 64))
+ f.Add(bigData(f))
+
+ f.Fuzz(func(t *testing.T, b []byte) {
+ cmd := exec.Command("/usr/bin/zstd", "-z")
+ cmd.Stdin = bytes.NewReader(b)
+ var compressed bytes.Buffer
+ cmd.Stdout = &compressed
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ t.Errorf("running zstd failed: %v", err)
+ }
+
+ r := NewReader(bytes.NewReader(compressed.Bytes()))
+ got, err := io.ReadAll(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(got, b) {
+ showDiffs(t, got, b)
+ }
+ })
+}
+
+// Fuzz test to check that if we can decompress some data,
+// so can zstd, and that we get the same result.
+func FuzzReverse(f *testing.F) {
+ if _, err := os.Stat("/usr/bin/zstd"); err != nil {
+ f.Skip("skipping because /usr/bin/zstd does not exist")
+ }
+
+ for _, test := range tests {
+ f.Add([]byte(test.compressed))
+ }
+
+ // Set a hook to reject some cases where we don't match zstd.
+ fuzzing = true
+ defer func() { fuzzing = false }()
+
+ f.Fuzz(func(t *testing.T, b []byte) {
+ r := NewReader(bytes.NewReader(b))
+ goExp, goErr := io.ReadAll(r)
+
+ cmd := exec.Command("/usr/bin/zstd", "-d")
+ cmd.Stdin = bytes.NewReader(b)
+ var uncompressed bytes.Buffer
+ cmd.Stdout = &uncompressed
+ cmd.Stderr = os.Stderr
+ zstdErr := cmd.Run()
+ zstdExp := uncompressed.Bytes()
+
+ if goErr == nil && zstdErr == nil {
+ if !bytes.Equal(zstdExp, goExp) {
+ showDiffs(t, zstdExp, goExp)
+ }
+ } else {
+ // Ideally we should check that this package and
+ // the zstd program both fail or both succeed,
+ // and that if they both fail one byte sequence
+ // is an exact prefix of the other.
+ // Actually trying this proved to be frustrating,
+ // as the zstd program appears to accept invalid
+ // byte sequences using rules that are difficult
+ // to determine.
+ // So we just check the prefix.
+
+ c := len(goExp)
+ if c > len(zstdExp) {
+ c = len(zstdExp)
+ }
+ goExp = goExp[:c]
+ zstdExp = zstdExp[:c]
+ if !bytes.Equal(goExp, zstdExp) {
+ t.Error("byte mismatch after error")
+ t.Logf("Go error: %v\n", goErr)
+ t.Logf("zstd error: %v\n", zstdErr)
+ showDiffs(t, zstdExp, goExp)
+ }
+ }
+ })
+}
diff --git a/src/internal/zstd/huff.go b/src/internal/zstd/huff.go
new file mode 100644
index 0000000..452e24b
--- /dev/null
+++ b/src/internal/zstd/huff.go
@@ -0,0 +1,204 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "io"
+ "math/bits"
+)
+
+// maxHuffmanBits is the largest possible Huffman table bits.
+const maxHuffmanBits = 11
+
+// readHuff reads a Huffman table from data starting at off into table.
+// Each entry in a Huffman table is a pair of bytes.
+// The high byte is the encoded value. The low byte is the number
+// of bits used to encode that value. We index into the table
+// with a value of tableBits bits. A value that requires fewer bits
+// appears in the table multiple times.
+// This returns the number of bits in the Huffman table and the new offset.
+// RFC 4.2.1.
+func (r *Reader) readHuff(data block, off int, table []uint16) (tableBits, roff int, err error) {
+ if off >= len(data) {
+ return 0, 0, r.makeEOFError(off)
+ }
+
+ hdr := data[off]
+ off++
+
+ var weights [256]uint8
+ var count int
+ if hdr < 128 {
+ // The table is compressed using an FSE. RFC 4.2.1.2.
+ if len(r.fseScratch) < 1<<6 {
+ r.fseScratch = make([]fseEntry, 1<<6)
+ }
+ fseBits, noff, err := r.readFSE(data, off, 255, 6, r.fseScratch)
+ if err != nil {
+ return 0, 0, err
+ }
+ fseTable := r.fseScratch
+
+ if off+int(hdr) > len(data) {
+ return 0, 0, r.makeEOFError(off)
+ }
+
+ rbr, err := r.makeReverseBitReader(data, off+int(hdr)-1, noff)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ state1, err := rbr.val(uint8(fseBits))
+ if err != nil {
+ return 0, 0, err
+ }
+
+ state2, err := rbr.val(uint8(fseBits))
+ if err != nil {
+ return 0, 0, err
+ }
+
+ // There are two independent FSE streams, tracked by
+ // state1 and state2. We decode them alternately.
+
+ for {
+ pt := &fseTable[state1]
+ if !rbr.fetch(pt.bits) {
+ if count >= 254 {
+ return 0, 0, rbr.makeError("Huffman count overflow")
+ }
+ weights[count] = pt.sym
+ weights[count+1] = fseTable[state2].sym
+ count += 2
+ break
+ }
+
+ v, err := rbr.val(pt.bits)
+ if err != nil {
+ return 0, 0, err
+ }
+ state1 = uint32(pt.base) + v
+
+ if count >= 255 {
+ return 0, 0, rbr.makeError("Huffman count overflow")
+ }
+
+ weights[count] = pt.sym
+ count++
+
+ pt = &fseTable[state2]
+
+ if !rbr.fetch(pt.bits) {
+ if count >= 254 {
+ return 0, 0, rbr.makeError("Huffman count overflow")
+ }
+ weights[count] = pt.sym
+ weights[count+1] = fseTable[state1].sym
+ count += 2
+ break
+ }
+
+ v, err = rbr.val(pt.bits)
+ if err != nil {
+ return 0, 0, err
+ }
+ state2 = uint32(pt.base) + v
+
+ if count >= 255 {
+ return 0, 0, rbr.makeError("Huffman count overflow")
+ }
+
+ weights[count] = pt.sym
+ count++
+ }
+
+ off += int(hdr)
+ } else {
+ // The table is not compressed. Each weight is 4 bits.
+
+ count = int(hdr) - 127
+ if off+((count+1)/2) >= len(data) {
+ return 0, 0, io.ErrUnexpectedEOF
+ }
+ for i := 0; i < count; i += 2 {
+ b := data[off]
+ off++
+ weights[i] = b >> 4
+ weights[i+1] = b & 0xf
+ }
+ }
+
+ // RFC 4.2.1.3.
+
+ var weightMark [13]uint32
+ weightMask := uint32(0)
+ for _, w := range weights[:count] {
+ if w > 12 {
+ return 0, 0, r.makeError(off, "Huffman weight overflow")
+ }
+ weightMark[w]++
+ if w > 0 {
+ weightMask += 1 << (w - 1)
+ }
+ }
+ if weightMask == 0 {
+ return 0, 0, r.makeError(off, "bad Huffman weights")
+ }
+
+ tableBits = 32 - bits.LeadingZeros32(weightMask)
+ if tableBits > maxHuffmanBits {
+ return 0, 0, r.makeError(off, "bad Huffman weights")
+ }
+
+ if len(table) < 1<<tableBits {
+ return 0, 0, r.makeError(off, "Huffman table too small")
+ }
+
+ // Work out the last weight value, which is omitted because
+ // the weights must sum to a power of two.
+ left := (uint32(1) << tableBits) - weightMask
+ if left == 0 {
+ return 0, 0, r.makeError(off, "bad Huffman weights")
+ }
+ highBit := 31 - bits.LeadingZeros32(left)
+ if uint32(1)<<highBit != left {
+ return 0, 0, r.makeError(off, "bad Huffman weights")
+ }
+ if count >= 256 {
+ return 0, 0, r.makeError(off, "Huffman weight overflow")
+ }
+ weights[count] = uint8(highBit + 1)
+ count++
+ weightMark[highBit+1]++
+
+ if weightMark[1] < 2 || weightMark[1]&1 != 0 {
+ return 0, 0, r.makeError(off, "bad Huffman weights")
+ }
+
+ // Change weightMark from a count of weights to the index of
+ // the first symbol for that weight. We shift the indexes to
+	// also store how many we have seen so far.
+ next := uint32(0)
+ for i := 0; i < tableBits; i++ {
+ cur := next
+ next += weightMark[i+1] << i
+ weightMark[i+1] = cur
+ }
+
+ for i, w := range weights[:count] {
+ if w == 0 {
+ continue
+ }
+ length := uint32(1) << (w - 1)
+ tval := uint16(i)<<8 | (uint16(tableBits) + 1 - uint16(w))
+ start := weightMark[w]
+ for j := uint32(0); j < length; j++ {
+ table[start+j] = tval
+ }
+ weightMark[w] += length
+ }
+
+ return tableBits, off, nil
+}
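+
+// Editorial illustration of the weight handling above: a symbol with weight w
+// is encoded in tableBits+1-w bits and therefore fills 1<<(w-1) consecutive
+// table slots, so with tableBits = 4 a weight-4 symbol occupies 8 of the 16
+// slots and decodes with a single bit.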
diff --git a/src/internal/zstd/literals.go b/src/internal/zstd/literals.go
new file mode 100644
index 0000000..b46d668
--- /dev/null
+++ b/src/internal/zstd/literals.go
@@ -0,0 +1,330 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "encoding/binary"
+)
+
+// readLiterals reads and decompresses the literals from data at off.
+// The literals are appended to outbuf, which is returned.
+// Also returns the new input offset. RFC 3.1.1.3.1.
+func (r *Reader) readLiterals(data block, off int, outbuf []byte) (int, []byte, error) {
+ if off >= len(data) {
+ return 0, nil, r.makeEOFError(off)
+ }
+
+ // Literals section header. RFC 3.1.1.3.1.1.
+ hdr := data[off]
+ off++
+
+ if (hdr&3) == 0 || (hdr&3) == 1 {
+ return r.readRawRLELiterals(data, off, hdr, outbuf)
+ } else {
+ return r.readHuffLiterals(data, off, hdr, outbuf)
+ }
+}
+
+// readRawRLELiterals reads and decompresses a Raw_Literals_Block or
+// a RLE_Literals_Block. RFC 3.1.1.3.1.1.
+func (r *Reader) readRawRLELiterals(data block, off int, hdr byte, outbuf []byte) (int, []byte, error) {
+ raw := (hdr & 3) == 0
+
+ var regeneratedSize int
+ switch (hdr >> 2) & 3 {
+ case 0, 2:
+ regeneratedSize = int(hdr >> 3)
+ case 1:
+ if off >= len(data) {
+ return 0, nil, r.makeEOFError(off)
+ }
+ regeneratedSize = int(hdr>>4) + (int(data[off]) << 4)
+ off++
+ case 3:
+ if off+1 >= len(data) {
+ return 0, nil, r.makeEOFError(off)
+ }
+ regeneratedSize = int(hdr>>4) + (int(data[off]) << 4) + (int(data[off+1]) << 12)
+ off += 2
+ }
+
+ // We are going to use the entire literal block in the output.
+ // The maximum size of one decompressed block is 128K,
+ // so we can't have more literals than that.
+ if regeneratedSize > 128<<10 {
+ return 0, nil, r.makeError(off, "literal size too large")
+ }
+
+ if raw {
+ // RFC 3.1.1.3.1.2.
+ if off+regeneratedSize > len(data) {
+ return 0, nil, r.makeError(off, "raw literal size too large")
+ }
+ outbuf = append(outbuf, data[off:off+regeneratedSize]...)
+ off += regeneratedSize
+ } else {
+ // RFC 3.1.1.3.1.3.
+ if off >= len(data) {
+ return 0, nil, r.makeError(off, "RLE literal missing")
+ }
+ rle := data[off]
+ off++
+ for i := 0; i < regeneratedSize; i++ {
+ outbuf = append(outbuf, rle)
+ }
+ }
+
+ return off, outbuf, nil
+}
+
+// readHuffLiterals reads and decompresses a Compressed_Literals_Block or
+// a Treeless_Literals_Block. RFC 3.1.1.3.1.4.
+func (r *Reader) readHuffLiterals(data block, off int, hdr byte, outbuf []byte) (int, []byte, error) {
+ var (
+ regeneratedSize int
+ compressedSize int
+ streams int
+ )
+ switch (hdr >> 2) & 3 {
+ case 0, 1:
+ if off+1 >= len(data) {
+ return 0, nil, r.makeEOFError(off)
+ }
+ regeneratedSize = (int(hdr) >> 4) | ((int(data[off]) & 0x3f) << 4)
+ compressedSize = (int(data[off]) >> 6) | (int(data[off+1]) << 2)
+ off += 2
+ if ((hdr >> 2) & 3) == 0 {
+ streams = 1
+ } else {
+ streams = 4
+ }
+ case 2:
+ if off+2 >= len(data) {
+ return 0, nil, r.makeEOFError(off)
+ }
+ regeneratedSize = (int(hdr) >> 4) | (int(data[off]) << 4) | ((int(data[off+1]) & 3) << 12)
+ compressedSize = (int(data[off+1]) >> 2) | (int(data[off+2]) << 6)
+ off += 3
+ streams = 4
+ case 3:
+ if off+3 >= len(data) {
+ return 0, nil, r.makeEOFError(off)
+ }
+ regeneratedSize = (int(hdr) >> 4) | (int(data[off]) << 4) | ((int(data[off+1]) & 0x3f) << 12)
+ compressedSize = (int(data[off+1]) >> 6) | (int(data[off+2]) << 2) | (int(data[off+3]) << 10)
+ off += 4
+ streams = 4
+ }
+
+ // We are going to use the entire literal block in the output.
+ // The maximum size of one decompressed block is 128K,
+ // so we can't have more literals than that.
+ if regeneratedSize > 128<<10 {
+ return 0, nil, r.makeError(off, "literal size too large")
+ }
+
+ roff := off + compressedSize
+ if roff > len(data) || roff < 0 {
+ return 0, nil, r.makeEOFError(off)
+ }
+
+ totalStreamsSize := compressedSize
+ if (hdr & 3) == 2 {
+ // Compressed_Literals_Block.
+ // Read new huffman tree.
+
+ if len(r.huffmanTable) < 1<<maxHuffmanBits {
+ r.huffmanTable = make([]uint16, 1<<maxHuffmanBits)
+ }
+
+ huffmanTableBits, hoff, err := r.readHuff(data, off, r.huffmanTable)
+ if err != nil {
+ return 0, nil, err
+ }
+ r.huffmanTableBits = huffmanTableBits
+
+ if totalStreamsSize < hoff-off {
+ return 0, nil, r.makeError(off, "Huffman table too big")
+ }
+ totalStreamsSize -= hoff - off
+ off = hoff
+ } else {
+ // Treeless_Literals_Block
+ // Reuse previous Huffman tree.
+ if r.huffmanTableBits == 0 {
+ return 0, nil, r.makeError(off, "missing literals Huffman tree")
+ }
+ }
+
+ // Decompress compressedSize bytes of data at off using the
+ // Huffman tree.
+
+ var err error
+ if streams == 1 {
+ outbuf, err = r.readLiteralsOneStream(data, off, totalStreamsSize, regeneratedSize, outbuf)
+ } else {
+ outbuf, err = r.readLiteralsFourStreams(data, off, totalStreamsSize, regeneratedSize, outbuf)
+ }
+
+ if err != nil {
+ return 0, nil, err
+ }
+
+ return roff, outbuf, nil
+}
+
+// readLiteralsOneStream reads a single stream of compressed literals.
+func (r *Reader) readLiteralsOneStream(data block, off, compressedSize, regeneratedSize int, outbuf []byte) ([]byte, error) {
+ // We let the reverse bit reader read earlier bytes,
+ // because the Huffman table ignores bits that it doesn't need.
+ rbr, err := r.makeReverseBitReader(data, off+compressedSize-1, off-2)
+ if err != nil {
+ return nil, err
+ }
+
+ huffTable := r.huffmanTable
+ huffBits := uint32(r.huffmanTableBits)
+ huffMask := (uint32(1) << huffBits) - 1
+
+ for i := 0; i < regeneratedSize; i++ {
+ if !rbr.fetch(uint8(huffBits)) {
+ return nil, rbr.makeError("literals Huffman stream out of bits")
+ }
+
+ var t uint16
+ idx := (rbr.bits >> (rbr.cnt - huffBits)) & huffMask
+ t = huffTable[idx]
+ outbuf = append(outbuf, byte(t>>8))
+ rbr.cnt -= uint32(t & 0xff)
+ }
+
+ return outbuf, nil
+}
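+
+// For illustration: each huffTable entry packs the decoded byte in its high 8
+// bits and the number of bits consumed in its low 8 bits, so the loop above
+// peeks huffBits bits, emits byte(t>>8), and returns the unused bits to the
+// stream by subtracting only t&0xff from the reverse reader's count.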
+
+// readLiteralsFourStreams reads four interleaved streams of
+// compressed literals.
+func (r *Reader) readLiteralsFourStreams(data block, off, totalStreamsSize, regeneratedSize int, outbuf []byte) ([]byte, error) {
+ // Read the jump table to find out where the streams are.
+ // RFC 3.1.1.3.1.6.
+ if off+5 >= len(data) {
+ return nil, r.makeEOFError(off)
+ }
+ if totalStreamsSize < 6 {
+ return nil, r.makeError(off, "total streams size too small for jump table")
+ }
+
+ streamSize1 := binary.LittleEndian.Uint16(data[off:])
+ streamSize2 := binary.LittleEndian.Uint16(data[off+2:])
+ streamSize3 := binary.LittleEndian.Uint16(data[off+4:])
+ off += 6
+
+ tot := uint64(streamSize1) + uint64(streamSize2) + uint64(streamSize3)
+ if tot > uint64(totalStreamsSize)-6 {
+ return nil, r.makeEOFError(off)
+ }
+ streamSize4 := uint32(totalStreamsSize) - 6 - uint32(tot)
+
+ off--
+ off1 := off + int(streamSize1)
+ start1 := off + 1
+
+ off2 := off1 + int(streamSize2)
+ start2 := off1 + 1
+
+ off3 := off2 + int(streamSize3)
+ start3 := off2 + 1
+
+ off4 := off3 + int(streamSize4)
+ start4 := off3 + 1
+
+ // We let the reverse bit readers read earlier bytes,
+ // because the Huffman tables ignore bits that they don't need.
+
+ rbr1, err := r.makeReverseBitReader(data, off1, start1-2)
+ if err != nil {
+ return nil, err
+ }
+
+ rbr2, err := r.makeReverseBitReader(data, off2, start2-2)
+ if err != nil {
+ return nil, err
+ }
+
+ rbr3, err := r.makeReverseBitReader(data, off3, start3-2)
+ if err != nil {
+ return nil, err
+ }
+
+ rbr4, err := r.makeReverseBitReader(data, off4, start4-2)
+ if err != nil {
+ return nil, err
+ }
+
+ regeneratedStreamSize := (regeneratedSize + 3) / 4
+
+ out1 := len(outbuf)
+ out2 := out1 + regeneratedStreamSize
+ out3 := out2 + regeneratedStreamSize
+ out4 := out3 + regeneratedStreamSize
+
+ regeneratedStreamSize4 := regeneratedSize - regeneratedStreamSize*3
+
+ outbuf = append(outbuf, make([]byte, regeneratedSize)...)
+
+ huffTable := r.huffmanTable
+ huffBits := uint32(r.huffmanTableBits)
+ huffMask := (uint32(1) << huffBits) - 1
+
+ for i := 0; i < regeneratedStreamSize; i++ {
+ use4 := i < regeneratedStreamSize4
+
+ fetchHuff := func(rbr *reverseBitReader) (uint16, error) {
+ if !rbr.fetch(uint8(huffBits)) {
+ return 0, rbr.makeError("literals Huffman stream out of bits")
+ }
+ idx := (rbr.bits >> (rbr.cnt - huffBits)) & huffMask
+ return huffTable[idx], nil
+ }
+
+ t1, err := fetchHuff(&rbr1)
+ if err != nil {
+ return nil, err
+ }
+
+ t2, err := fetchHuff(&rbr2)
+ if err != nil {
+ return nil, err
+ }
+
+ t3, err := fetchHuff(&rbr3)
+ if err != nil {
+ return nil, err
+ }
+
+ if use4 {
+ t4, err := fetchHuff(&rbr4)
+ if err != nil {
+ return nil, err
+ }
+ outbuf[out4] = byte(t4 >> 8)
+ out4++
+ rbr4.cnt -= uint32(t4 & 0xff)
+ }
+
+ outbuf[out1] = byte(t1 >> 8)
+ out1++
+ rbr1.cnt -= uint32(t1 & 0xff)
+
+ outbuf[out2] = byte(t2 >> 8)
+ out2++
+ rbr2.cnt -= uint32(t2 & 0xff)
+
+ outbuf[out3] = byte(t3 >> 8)
+ out3++
+ rbr3.cnt -= uint32(t3 & 0xff)
+ }
+
+ return outbuf, nil
+}
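+
+// Editorial note on the four-stream layout above: the 6-byte jump table holds
+// the little-endian sizes of streams 1 through 3, stream 4 gets whatever is
+// left of totalStreamsSize, and each of the first three streams regenerates
+// (regeneratedSize+3)/4 literals while the fourth produces the remainder.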
diff --git a/src/internal/zstd/xxhash.go b/src/internal/zstd/xxhash.go
new file mode 100644
index 0000000..4d579ee
--- /dev/null
+++ b/src/internal/zstd/xxhash.go
@@ -0,0 +1,148 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "math/bits"
+)
+
+const (
+ xxhPrime64c1 = 0x9e3779b185ebca87
+ xxhPrime64c2 = 0xc2b2ae3d27d4eb4f
+ xxhPrime64c3 = 0x165667b19e3779f9
+ xxhPrime64c4 = 0x85ebca77c2b2ae63
+ xxhPrime64c5 = 0x27d4eb2f165667c5
+)
+
+// xxhash64 is the state of a xxHash-64 checksum.
+type xxhash64 struct {
+ len uint64 // total length hashed
+ v [4]uint64 // accumulators
+ buf [32]byte // buffer
+ cnt int // number of bytes in buffer
+}
+
+// reset discards the current state and prepares to compute a new hash.
+// We assume a seed of 0 since that is what zstd uses.
+func (xh *xxhash64) reset() {
+ xh.len = 0
+
+ // Separate addition for awkward constant overflow.
+ xh.v[0] = xxhPrime64c1
+ xh.v[0] += xxhPrime64c2
+
+ xh.v[1] = xxhPrime64c2
+ xh.v[2] = 0
+
+ // Separate negation for awkward constant overflow.
+ xh.v[3] = xxhPrime64c1
+ xh.v[3] = -xh.v[3]
+
+ for i := range xh.buf {
+ xh.buf[i] = 0
+ }
+ xh.cnt = 0
+}
+
+// update adds a buffer to the hash.
+func (xh *xxhash64) update(b []byte) {
+ xh.len += uint64(len(b))
+
+ if xh.cnt+len(b) < len(xh.buf) {
+ copy(xh.buf[xh.cnt:], b)
+ xh.cnt += len(b)
+ return
+ }
+
+ if xh.cnt > 0 {
+ n := copy(xh.buf[xh.cnt:], b)
+ b = b[n:]
+ xh.v[0] = xh.round(xh.v[0], binary.LittleEndian.Uint64(xh.buf[:]))
+ xh.v[1] = xh.round(xh.v[1], binary.LittleEndian.Uint64(xh.buf[8:]))
+ xh.v[2] = xh.round(xh.v[2], binary.LittleEndian.Uint64(xh.buf[16:]))
+ xh.v[3] = xh.round(xh.v[3], binary.LittleEndian.Uint64(xh.buf[24:]))
+ xh.cnt = 0
+ }
+
+ for len(b) >= 32 {
+ xh.v[0] = xh.round(xh.v[0], binary.LittleEndian.Uint64(b))
+ xh.v[1] = xh.round(xh.v[1], binary.LittleEndian.Uint64(b[8:]))
+ xh.v[2] = xh.round(xh.v[2], binary.LittleEndian.Uint64(b[16:]))
+ xh.v[3] = xh.round(xh.v[3], binary.LittleEndian.Uint64(b[24:]))
+ b = b[32:]
+ }
+
+ if len(b) > 0 {
+ copy(xh.buf[:], b)
+ xh.cnt = len(b)
+ }
+}
+
+// digest returns the final hash value.
+func (xh *xxhash64) digest() uint64 {
+ var h64 uint64
+ if xh.len < 32 {
+ h64 = xh.v[2] + xxhPrime64c5
+ } else {
+ h64 = bits.RotateLeft64(xh.v[0], 1) +
+ bits.RotateLeft64(xh.v[1], 7) +
+ bits.RotateLeft64(xh.v[2], 12) +
+ bits.RotateLeft64(xh.v[3], 18)
+ h64 = xh.mergeRound(h64, xh.v[0])
+ h64 = xh.mergeRound(h64, xh.v[1])
+ h64 = xh.mergeRound(h64, xh.v[2])
+ h64 = xh.mergeRound(h64, xh.v[3])
+ }
+
+ h64 += xh.len
+
+ len := xh.len
+ len &= 31
+ buf := xh.buf[:]
+ for len >= 8 {
+ k1 := xh.round(0, binary.LittleEndian.Uint64(buf))
+ buf = buf[8:]
+ h64 ^= k1
+ h64 = bits.RotateLeft64(h64, 27)*xxhPrime64c1 + xxhPrime64c4
+ len -= 8
+ }
+ if len >= 4 {
+ h64 ^= uint64(binary.LittleEndian.Uint32(buf)) * xxhPrime64c1
+ buf = buf[4:]
+ h64 = bits.RotateLeft64(h64, 23)*xxhPrime64c2 + xxhPrime64c3
+ len -= 4
+ }
+ for len > 0 {
+ h64 ^= uint64(buf[0]) * xxhPrime64c5
+ buf = buf[1:]
+ h64 = bits.RotateLeft64(h64, 11) * xxhPrime64c1
+ len--
+ }
+
+ h64 ^= h64 >> 33
+ h64 *= xxhPrime64c2
+ h64 ^= h64 >> 29
+ h64 *= xxhPrime64c3
+ h64 ^= h64 >> 32
+
+ return h64
+}
+
+// round updates a value.
+func (xh *xxhash64) round(v, n uint64) uint64 {
+ v += n * xxhPrime64c2
+ v = bits.RotateLeft64(v, 31)
+ v *= xxhPrime64c1
+ return v
+}
+
+// mergeRound updates a value in the final round.
+func (xh *xxhash64) mergeRound(v, n uint64) uint64 {
+ n = xh.round(0, n)
+ v ^= n
+ v = v*xxhPrime64c1 + xxhPrime64c4
+ return v
+}
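+
+// Usage sketch (editorial note; the expected value comes from the tests added
+// alongside this file):
+//
+//	var xh xxhash64
+//	xh.reset()
+//	xh.update([]byte("hello, world"))
+//	sum := xh.digest() // 0xb33a384e6d1b1242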
diff --git a/src/internal/zstd/xxhash_test.go b/src/internal/zstd/xxhash_test.go
new file mode 100644
index 0000000..646cee8
--- /dev/null
+++ b/src/internal/zstd/xxhash_test.go
@@ -0,0 +1,105 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "bytes"
+ "os"
+ "os/exec"
+ "strconv"
+ "testing"
+)
+
+var xxHashTests = []struct {
+ data string
+ hash uint64
+}{
+ {
+ "hello, world",
+ 0xb33a384e6d1b1242,
+ },
+ {
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$",
+ 0x1032d841e824f998,
+ },
+}
+
+func TestXXHash(t *testing.T) {
+ var xh xxhash64
+ for i, test := range xxHashTests {
+ xh.reset()
+ xh.update([]byte(test.data))
+ if got := xh.digest(); got != test.hash {
+ t.Errorf("#%d: got %#x want %#x", i, got, test.hash)
+ }
+ }
+}
+
+func TestLargeXXHash(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping expensive test in short mode")
+ }
+
+ data := bigData(t)
+ var xh xxhash64
+ xh.reset()
+ i := 0
+ for i < len(data) {
+ // Write varying amounts to test buffering.
+ c := i%4094 + 1
+ if i+c > len(data) {
+ c = len(data) - i
+ }
+ xh.update(data[i : i+c])
+ i += c
+ }
+
+ got := xh.digest()
+ want := uint64(0xf0dd39fd7e063f82)
+ if got != want {
+ t.Errorf("got %#x want %#x", got, want)
+ }
+}
+
+func FuzzXXHash(f *testing.F) {
+ if _, err := os.Stat("/usr/bin/xxhsum"); err != nil {
+ f.Skip("skipping because /usr/bin/xxhsum does not exist")
+ }
+
+ for _, test := range xxHashTests {
+ f.Add([]byte(test.data))
+ }
+ f.Add(bytes.Repeat([]byte("abcdefghijklmnop"), 256))
+ var buf bytes.Buffer
+ for i := 0; i < 256; i++ {
+ buf.WriteByte(byte(i))
+ }
+ f.Add(bytes.Repeat(buf.Bytes(), 64))
+ f.Add(bigData(f))
+
+ f.Fuzz(func(t *testing.T, b []byte) {
+ cmd := exec.Command("/usr/bin/xxhsum", "-H64")
+ cmd.Stdin = bytes.NewReader(b)
+ var xxhsumOutput bytes.Buffer
+ cmd.Stdout = &xxhsumOutput
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("running xxhsum failed: %v", err)
+ }
+ xxhsumHashBytes := bytes.Fields(bytes.TrimSpace(xxhsumOutput.Bytes()))[0]
+ xxhsumHash, err := strconv.ParseUint(string(xxhsumHashBytes), 16, 64)
+ if err != nil {
+ t.Fatalf("could not parse hash %q: %v", xxhsumHashBytes, err)
+ }
+
+ var xh xxhash64
+ xh.reset()
+ xh.update(b)
+ goHash := xh.digest()
+
+ if goHash != xxhsumHash {
+ t.Errorf("Go hash %#x != xxhsum hash %#x", goHash, xxhsumHash)
+ }
+ })
+}
diff --git a/src/internal/zstd/zstd.go b/src/internal/zstd/zstd.go
new file mode 100644
index 0000000..a860789
--- /dev/null
+++ b/src/internal/zstd/zstd.go
@@ -0,0 +1,508 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package zstd provides a decompressor for zstd streams,
+// described in RFC 8878. It does not support dictionaries.
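+//
+// A minimal usage sketch (src is assumed to be an io.Reader supplying
+// the compressed stream):
+//
+//	r := zstd.NewReader(src)
+//	uncompressed, err := io.ReadAll(r)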
+package zstd
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// fuzzing is a fuzzer hook set to true when fuzzing.
+// This is used to reject cases where we don't match zstd.
+var fuzzing = false
+
+// Reader implements [io.Reader] to read a zstd compressed stream.
+type Reader struct {
+ // The underlying Reader.
+ r io.Reader
+
+ // Whether we have read the frame header.
+ // This is of interest when buffer is empty.
+ // If true we expect to see a new block.
+ sawFrameHeader bool
+
+ // Whether the current frame expects a checksum.
+ hasChecksum bool
+
+ // Whether we have read at least one frame.
+ readOneFrame bool
+
+ // True if the frame size is not known.
+ frameSizeUnknown bool
+
+ // The number of uncompressed bytes remaining in the current frame.
+ // If frameSizeUnknown is true, this is not valid.
+ remainingFrameSize uint64
+
+ // The number of bytes read from r up to the start of the current
+ // block, for error reporting.
+ blockOffset int64
+
+ // Buffered decompressed data.
+ buffer []byte
+ // Current read offset in buffer.
+ off int
+
+ // The current repeated offsets.
+ repeatedOffset1 uint32
+ repeatedOffset2 uint32
+ repeatedOffset3 uint32
+
+ // The current Huffman tree used for compressing literals.
+ huffmanTable []uint16
+ huffmanTableBits int
+
+ // The window for back references.
+ windowSize int // maximum required window size
+ window []byte // window data
+
+ // A buffer available to hold a compressed block.
+ compressedBuf []byte
+
+ // A buffer for literals.
+ literals []byte
+
+ // Sequence decode FSE tables.
+ seqTables [3][]fseBaselineEntry
+ seqTableBits [3]uint8
+
+ // Buffers for sequence decode FSE tables.
+ seqTableBuffers [3][]fseBaselineEntry
+
+ // Scratch space used for small reads, to avoid allocation.
+ scratch [16]byte
+
+ // A scratch table for reading an FSE. Only temporarily valid.
+ fseScratch []fseEntry
+
+ // For checksum computation.
+ checksum xxhash64
+}
+
+// NewReader creates a new Reader that decompresses data from the given reader.
+func NewReader(input io.Reader) *Reader {
+ r := new(Reader)
+ r.Reset(input)
+ return r
+}
+
+// Reset discards the current state and starts reading a new stream from r.
+// This permits reusing a Reader rather than allocating a new one.
+func (r *Reader) Reset(input io.Reader) {
+ r.r = input
+
+ // Several fields are preserved to avoid allocation.
+ // Others are always set before they are used.
+ r.sawFrameHeader = false
+ r.hasChecksum = false
+ r.readOneFrame = false
+ r.frameSizeUnknown = false
+ r.remainingFrameSize = 0
+ r.blockOffset = 0
+ // buffer
+ r.off = 0
+ // repeatedOffset1
+ // repeatedOffset2
+ // repeatedOffset3
+ // huffmanTable
+ // huffmanTableBits
+ // windowSize
+ // window
+ // compressedBuf
+ // literals
+ // seqTables
+ // seqTableBits
+ // seqTableBuffers
+ // scratch
+ // fseScratch
+}
+
+// Read implements [io.Reader].
+func (r *Reader) Read(p []byte) (int, error) {
+ if err := r.refillIfNeeded(); err != nil {
+ return 0, err
+ }
+ n := copy(p, r.buffer[r.off:])
+ r.off += n
+ return n, nil
+}
+
+// ReadByte implements [io.ByteReader].
+func (r *Reader) ReadByte() (byte, error) {
+ if err := r.refillIfNeeded(); err != nil {
+ return 0, err
+ }
+ ret := r.buffer[r.off]
+ r.off++
+ return ret, nil
+}
+
+// refillIfNeeded reads the next block if necessary.
+func (r *Reader) refillIfNeeded() error {
+ for r.off >= len(r.buffer) {
+ if err := r.refill(); err != nil {
+ return err
+ }
+ r.off = 0
+ }
+ return nil
+}
+
+// refill reads and decompresses the next block.
+func (r *Reader) refill() error {
+ if !r.sawFrameHeader {
+ if err := r.readFrameHeader(); err != nil {
+ return err
+ }
+ }
+ return r.readBlock()
+}
+
+// readFrameHeader reads the frame header and prepares to read a block.
+func (r *Reader) readFrameHeader() error {
+retry:
+ relativeOffset := 0
+
+ // Read magic number. RFC 3.1.1.
+ if _, err := io.ReadFull(r.r, r.scratch[:4]); err != nil {
+ // We require that the stream contain at least one frame.
+ if err == io.EOF && !r.readOneFrame {
+ err = io.ErrUnexpectedEOF
+ }
+ return r.wrapError(relativeOffset, err)
+ }
+
+ if magic := binary.LittleEndian.Uint32(r.scratch[:4]); magic != 0xfd2fb528 {
+ if magic >= 0x184d2a50 && magic <= 0x184d2a5f {
+ // This is a skippable frame.
+ r.blockOffset += int64(relativeOffset) + 4
+ if err := r.skipFrame(); err != nil {
+ return err
+ }
+ goto retry
+ }
+
+ return r.makeError(relativeOffset, "invalid magic number")
+ }
+
+ relativeOffset += 4
+
+ // Read Frame_Header_Descriptor. RFC 3.1.1.1.1.
+ if _, err := io.ReadFull(r.r, r.scratch[:1]); err != nil {
+ return r.wrapNonEOFError(relativeOffset, err)
+ }
+ descriptor := r.scratch[0]
+
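+ // Descriptor bit layout: bits 6-7 select the Frame_Content_Size
+ // field width, bit 5 is Single_Segment, bit 3 is reserved (must be
+ // zero), bit 2 is Content_Checksum, bits 0-1 are the Dictionary_ID
+ // field size.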
+ singleSegment := descriptor&(1<<5) != 0
+
+ fcsFieldSize := 1 << (descriptor >> 6)
+ if fcsFieldSize == 1 && !singleSegment {
+ fcsFieldSize = 0
+ }
+
+ var windowDescriptorSize int
+ if singleSegment {
+ windowDescriptorSize = 0
+ } else {
+ windowDescriptorSize = 1
+ }
+
+ if descriptor&(1<<3) != 0 {
+ return r.makeError(relativeOffset, "reserved bit set in frame header descriptor")
+ }
+
+ r.hasChecksum = descriptor&(1<<2) != 0
+ if r.hasChecksum {
+ r.checksum.reset()
+ }
+
+ if descriptor&3 != 0 {
+ return r.makeError(relativeOffset, "dictionaries are not supported")
+ }
+
+ relativeOffset++
+
+ headerSize := windowDescriptorSize + fcsFieldSize
+
+ if _, err := io.ReadFull(r.r, r.scratch[:headerSize]); err != nil {
+ return r.wrapNonEOFError(relativeOffset, err)
+ }
+
+ // Figure out the maximum amount of data we need to retain
+ // for backreferences.
+
+ if singleSegment {
+ // No window required, as all the data is in a single buffer.
+ r.windowSize = 0
+ } else {
+ // Window descriptor. RFC 3.1.1.1.2.
+ windowDescriptor := r.scratch[0]
+ exponent := uint64(windowDescriptor >> 3)
+ mantissa := uint64(windowDescriptor & 7)
+ windowLog := exponent + 10
+ windowBase := uint64(1) << windowLog
+ windowAdd := (windowBase / 8) * mantissa
+ windowSize := windowBase + windowAdd
+
+ // Default zstd sets limits on the window size.
+ if fuzzing && (windowLog > 31 || windowSize > 1<<27) {
+ return r.makeError(relativeOffset, "windowSize too large")
+ }
+
+ // RFC 8878 permits us to set an 8M max on window size.
+ if windowSize > 8<<20 {
+ windowSize = 8 << 20
+ }
+
+ r.windowSize = int(windowSize)
+ }
+
+ // Frame_Content_Size. RFC 3.1.1.4.
+ r.frameSizeUnknown = false
+ r.remainingFrameSize = 0
+ fb := r.scratch[windowDescriptorSize:]
+ switch fcsFieldSize {
+ case 0:
+ r.frameSizeUnknown = true
+ case 1:
+ r.remainingFrameSize = uint64(fb[0])
+ case 2:
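+ // A 2-byte field is stored with an offset of 256.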
+ r.remainingFrameSize = 256 + uint64(binary.LittleEndian.Uint16(fb))
+ case 4:
+ r.remainingFrameSize = uint64(binary.LittleEndian.Uint32(fb))
+ case 8:
+ r.remainingFrameSize = binary.LittleEndian.Uint64(fb)
+ default:
+ panic("unreachable")
+ }
+
+ relativeOffset += headerSize
+
+ r.sawFrameHeader = true
+ r.readOneFrame = true
+ r.blockOffset += int64(relativeOffset)
+
+ // Prepare to read blocks from the frame.
+ r.repeatedOffset1 = 1
+ r.repeatedOffset2 = 4
+ r.repeatedOffset3 = 8
+ r.huffmanTableBits = 0
+ r.window = r.window[:0]
+ r.seqTables[0] = nil
+ r.seqTables[1] = nil
+ r.seqTables[2] = nil
+
+ return nil
+}
+
+// skipFrame skips a skippable frame. RFC 3.1.2.
+func (r *Reader) skipFrame() error {
+ relativeOffset := 0
+
+ if _, err := io.ReadFull(r.r, r.scratch[:4]); err != nil {
+ return r.wrapNonEOFError(relativeOffset, err)
+ }
+
+ relativeOffset += 4
+
+ size := binary.LittleEndian.Uint32(r.scratch[:4])
+
+ if seeker, ok := r.r.(io.Seeker); ok {
+ if _, err := seeker.Seek(int64(size), io.SeekCurrent); err != nil {
+ return err
+ }
+ r.blockOffset += int64(relativeOffset) + int64(size)
+ return nil
+ }
+
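+ // The reader cannot seek, so read and discard the frame in 1M chunks.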
+ var skip []byte
+ const chunk = 1 << 20 // 1M
+ for size >= chunk {
+ if len(skip) == 0 {
+ skip = make([]byte, chunk)
+ }
+ if _, err := io.ReadFull(r.r, skip); err != nil {
+ return r.wrapNonEOFError(relativeOffset, err)
+ }
+ relativeOffset += chunk
+ size -= chunk
+ }
+ if size > 0 {
+ if len(skip) == 0 {
+ skip = make([]byte, size)
+ }
+ if _, err := io.ReadFull(r.r, skip); err != nil {
+ return r.wrapNonEOFError(relativeOffset, err)
+ }
+ relativeOffset += int(size)
+ }
+
+ r.blockOffset += int64(relativeOffset)
+
+ return nil
+}
+
+// readBlock reads the next block from a frame.
+func (r *Reader) readBlock() error {
+ relativeOffset := 0
+
+ // Read Block_Header. RFC 3.1.1.2.
+ if _, err := io.ReadFull(r.r, r.scratch[:3]); err != nil {
+ return r.wrapNonEOFError(relativeOffset, err)
+ }
+
+ relativeOffset += 3
+
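+ // The 3-byte block header packs Last_Block in bit 0,
+ // Block_Type in bits 1-2, and Block_Size in the upper 21 bits.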
+ header := uint32(r.scratch[0]) | (uint32(r.scratch[1]) << 8) | (uint32(r.scratch[2]) << 16)
+
+ lastBlock := header&1 != 0
+ blockType := (header >> 1) & 3
+ blockSize := int(header >> 3)
+
+ // The maximum block size is the smaller of the window size and 128K.
+ // We don't record the window size for a single segment frame,
+ // so just use 128K. RFC 3.1.1.2.3, 3.1.1.2.4.
+ if blockSize > 128<<10 || (r.windowSize > 0 && blockSize > r.windowSize) {
+ return r.makeError(relativeOffset, "block size too large")
+ }
+
+ // Handle different block types. RFC 3.1.1.2.2.
+ switch blockType {
+ case 0:
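+ // Raw_Block: blockSize bytes stored with no compression.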
+ r.setBufferSize(blockSize)
+ if _, err := io.ReadFull(r.r, r.buffer); err != nil {
+ return r.wrapNonEOFError(relativeOffset, err)
+ }
+ relativeOffset += blockSize
+ r.blockOffset += int64(relativeOffset)
+ case 1:
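+ // RLE_Block: a single stored byte repeated blockSize times.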
+ r.setBufferSize(blockSize)
+ if _, err := io.ReadFull(r.r, r.scratch[:1]); err != nil {
+ return r.wrapNonEOFError(relativeOffset, err)
+ }
+ relativeOffset++
+ v := r.scratch[0]
+ for i := range r.buffer {
+ r.buffer[i] = v
+ }
+ r.blockOffset += int64(relativeOffset)
+ case 2:
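+ // Compressed_Block: blockSize bytes of zstd-compressed data.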
+ r.blockOffset += int64(relativeOffset)
+ if err := r.compressedBlock(blockSize); err != nil {
+ return err
+ }
+ r.blockOffset += int64(blockSize)
+ case 3:
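+ // Reserved block type.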
+ return r.makeError(relativeOffset, "invalid block type")
+ }
+
+ if !r.frameSizeUnknown {
+ if uint64(len(r.buffer)) > r.remainingFrameSize {
+ return r.makeError(relativeOffset, "too many uncompressed bytes in frame")
+ }
+ r.remainingFrameSize -= uint64(len(r.buffer))
+ }
+
+ if r.hasChecksum {
+ r.checksum.update(r.buffer)
+ }
+
+ if !lastBlock {
+ r.saveWindow(r.buffer)
+ } else {
+ if !r.frameSizeUnknown && r.remainingFrameSize != 0 {
+ return r.makeError(relativeOffset, "not enough uncompressed bytes for frame")
+ }
+ // Check for checksum at end of frame. RFC 3.1.1.
+ if r.hasChecksum {
+ if _, err := io.ReadFull(r.r, r.scratch[:4]); err != nil {
+ return r.wrapNonEOFError(0, err)
+ }
+
+ inputChecksum := binary.LittleEndian.Uint32(r.scratch[:4])
+ dataChecksum := uint32(r.checksum.digest())
+ if inputChecksum != dataChecksum {
+ return r.wrapError(0, fmt.Errorf("invalid checksum: got %#x want %#x", dataChecksum, inputChecksum))
+ }
+
+ r.blockOffset += 4
+ }
+ r.sawFrameHeader = false
+ }
+
+ return nil
+}
+
+// setBufferSize sets the decompressed buffer size.
+// When this is called the buffer is empty.
+func (r *Reader) setBufferSize(size int) {
+ if cap(r.buffer) < size {
+ need := size - cap(r.buffer)
+ r.buffer = append(r.buffer[:cap(r.buffer)], make([]byte, need)...)
+ }
+ r.buffer = r.buffer[:size]
+}
+
+// saveWindow saves bytes in the backreference window.
+// TODO: use a circular buffer for less data movement.
+func (r *Reader) saveWindow(buf []byte) {
+ if r.windowSize == 0 {
+ return
+ }
+
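+ // If the new data alone fills the window, it replaces the saved window.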
+ if len(buf) >= r.windowSize {
+ from := len(buf) - r.windowSize
+ r.window = append(r.window[:0], buf[from:]...)
+ return
+ }
+
+ keep := r.windowSize - len(buf) // must be positive
+ if keep < len(r.window) {
+ remove := len(r.window) - keep
+ copy(r.window[:], r.window[remove:])
+ }
+
+ r.window = append(r.window, buf...)
+}
+
+// zstdError is an error while decompressing.
+type zstdError struct {
+ offset int64
+ err error
+}
+
+func (ze *zstdError) Error() string {
+ return fmt.Sprintf("zstd decompression error at %d: %v", ze.offset, ze.err)
+}
+
+func (ze *zstdError) Unwrap() error {
+ return ze.err
+}
+
+func (r *Reader) makeEOFError(off int) error {
+ return r.wrapError(off, io.ErrUnexpectedEOF)
+}
+
+func (r *Reader) wrapNonEOFError(off int, err error) error {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return r.wrapError(off, err)
+}
+
+func (r *Reader) makeError(off int, msg string) error {
+ return r.wrapError(off, errors.New(msg))
+}
+
+func (r *Reader) wrapError(off int, err error) error {
+ if err == io.EOF {
+ return err
+ }
+ return &zstdError{r.blockOffset + int64(off), err}
+}
diff --git a/src/internal/zstd/zstd_test.go b/src/internal/zstd/zstd_test.go
new file mode 100644
index 0000000..bc75e0f
--- /dev/null
+++ b/src/internal/zstd/zstd_test.go
@@ -0,0 +1,249 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "bytes"
+ "fmt"
+ "internal/race"
+ "internal/testenv"
+ "io"
+ "os"
+ "os/exec"
+ "strings"
+ "sync"
+ "testing"
+)
+
+// tests holds some simple test cases, including some found by fuzzing.
+var tests = []struct {
+ name, uncompressed, compressed string
+}{
+ {
+ "hello",
+ "hello, world\n",
+ "\x28\xb5\x2f\xfd\x24\x0d\x69\x00\x00\x68\x65\x6c\x6c\x6f\x2c\x20\x77\x6f\x72\x6c\x64\x0a\x4c\x1f\xf9\xf1",
+ },
+ {
+ // a small compressed .debug_ranges section.
+ "ranges",
+ "\xcc\x11\x00\x00\x00\x00\x00\x00\xd5\x13\x00\x00\x00\x00\x00\x00" +
+ "\x1c\x14\x00\x00\x00\x00\x00\x00\x72\x14\x00\x00\x00\x00\x00\x00" +
+ "\x9d\x14\x00\x00\x00\x00\x00\x00\xd5\x14\x00\x00\x00\x00\x00\x00" +
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +
+ "\xfb\x12\x00\x00\x00\x00\x00\x00\x09\x13\x00\x00\x00\x00\x00\x00" +
+ "\x0c\x13\x00\x00\x00\x00\x00\x00\xcb\x13\x00\x00\x00\x00\x00\x00" +
+ "\x29\x14\x00\x00\x00\x00\x00\x00\x4e\x14\x00\x00\x00\x00\x00\x00" +
+ "\x9d\x14\x00\x00\x00\x00\x00\x00\xd5\x14\x00\x00\x00\x00\x00\x00" +
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +
+ "\xfb\x12\x00\x00\x00\x00\x00\x00\x09\x13\x00\x00\x00\x00\x00\x00" +
+ "\x67\x13\x00\x00\x00\x00\x00\x00\xcb\x13\x00\x00\x00\x00\x00\x00" +
+ "\x9d\x14\x00\x00\x00\x00\x00\x00\xd5\x14\x00\x00\x00\x00\x00\x00" +
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +
+ "\x5f\x0b\x00\x00\x00\x00\x00\x00\x6c\x0b\x00\x00\x00\x00\x00\x00" +
+ "\x7d\x0b\x00\x00\x00\x00\x00\x00\x7e\x0c\x00\x00\x00\x00\x00\x00" +
+ "\x38\x0f\x00\x00\x00\x00\x00\x00\x5c\x0f\x00\x00\x00\x00\x00\x00" +
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +
+ "\x83\x0c\x00\x00\x00\x00\x00\x00\xfa\x0c\x00\x00\x00\x00\x00\x00" +
+ "\xfd\x0d\x00\x00\x00\x00\x00\x00\xef\x0e\x00\x00\x00\x00\x00\x00" +
+ "\x14\x0f\x00\x00\x00\x00\x00\x00\x38\x0f\x00\x00\x00\x00\x00\x00" +
+ "\x9f\x0f\x00\x00\x00\x00\x00\x00\xac\x0f\x00\x00\x00\x00\x00\x00" +
+ "\xdb\x0f\x00\x00\x00\x00\x00\x00\xff\x0f\x00\x00\x00\x00\x00\x00" +
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +
+ "\xfd\x0d\x00\x00\x00\x00\x00\x00\xd8\x0e\x00\x00\x00\x00\x00\x00" +
+ "\x9f\x0f\x00\x00\x00\x00\x00\x00\xac\x0f\x00\x00\x00\x00\x00\x00" +
+ "\xdb\x0f\x00\x00\x00\x00\x00\x00\xff\x0f\x00\x00\x00\x00\x00\x00" +
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +
+ "\xfa\x0c\x00\x00\x00\x00\x00\x00\xea\x0d\x00\x00\x00\x00\x00\x00" +
+ "\xef\x0e\x00\x00\x00\x00\x00\x00\x14\x0f\x00\x00\x00\x00\x00\x00" +
+ "\x5c\x0f\x00\x00\x00\x00\x00\x00\x9f\x0f\x00\x00\x00\x00\x00\x00" +
+ "\xac\x0f\x00\x00\x00\x00\x00\x00\xdb\x0f\x00\x00\x00\x00\x00\x00" +
+ "\xff\x0f\x00\x00\x00\x00\x00\x00\x2c\x10\x00\x00\x00\x00\x00\x00" +
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +
+ "\x60\x11\x00\x00\x00\x00\x00\x00\xd1\x16\x00\x00\x00\x00\x00\x00" +
+ "\x40\x0b\x00\x00\x00\x00\x00\x00\x2c\x10\x00\x00\x00\x00\x00\x00" +
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +
+ "\x7a\x00\x00\x00\x00\x00\x00\x00\xb6\x00\x00\x00\x00\x00\x00\x00" +
+ "\x9f\x01\x00\x00\x00\x00\x00\x00\xa7\x01\x00\x00\x00\x00\x00\x00" +
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +
+ "\x7a\x00\x00\x00\x00\x00\x00\x00\xa9\x00\x00\x00\x00\x00\x00\x00" +
+ "\x9f\x01\x00\x00\x00\x00\x00\x00\xa7\x01\x00\x00\x00\x00\x00\x00" +
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+
+ "\x28\xb5\x2f\xfd\x64\xa0\x01\x2d\x05\x00\xc4\x04\xcc\x11\x00\xd5" +
+ "\x13\x00\x1c\x14\x00\x72\x9d\xd5\xfb\x12\x00\x09\x0c\x13\xcb\x13" +
+ "\x29\x4e\x67\x5f\x0b\x6c\x0b\x7d\x0b\x7e\x0c\x38\x0f\x5c\x0f\x83" +
+ "\x0c\xfa\x0c\xfd\x0d\xef\x0e\x14\x38\x9f\x0f\xac\x0f\xdb\x0f\xff" +
+ "\x0f\xd8\x9f\xac\xdb\xff\xea\x5c\x2c\x10\x60\xd1\x16\x40\x0b\x7a" +
+ "\x00\xb6\x00\x9f\x01\xa7\x01\xa9\x36\x20\xa0\x83\x14\x34\x63\x4a" +
+ "\x21\x70\x8c\x07\x46\x03\x4e\x10\x62\x3c\x06\x4e\xc8\x8c\xb0\x32" +
+ "\x2a\x59\xad\xb2\xf1\x02\x82\x7c\x33\xcb\x92\x6f\x32\x4f\x9b\xb0" +
+ "\xa2\x30\xf0\xc0\x06\x1e\x98\x99\x2c\x06\x1e\xd8\xc0\x03\x56\xd8" +
+ "\xc0\x03\x0f\x6c\xe0\x01\xf1\xf0\xee\x9a\xc6\xc8\x97\x99\xd1\x6c" +
+ "\xb4\x21\x45\x3b\x10\xe4\x7b\x99\x4d\x8a\x36\x64\x5c\x77\x08\x02" +
+ "\xcb\xe0\xce",
+ },
+ {
+ "fuzz1",
+ "0\x00\x00\x00\x00\x000\x00\x00\x00\x00\x001\x00\x00\x00\x00\x000000",
+ "(\xb5/\xfd\x04X\x8d\x00\x00P0\x000\x001\x000000\x03T\x02\x00\x01\x01m\xf9\xb7G",
+ },
+}
+
+func TestSamples(t *testing.T) {
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ r := NewReader(strings.NewReader(test.compressed))
+ got, err := io.ReadAll(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ gotstr := string(got)
+ if gotstr != test.uncompressed {
+ t.Errorf("got %q want %q", gotstr, test.uncompressed)
+ }
+ })
+ }
+}
+
+var (
+ bigDataOnce sync.Once
+ bigDataBytes []byte
+ bigDataErr error
+)
+
+// bigData returns the contents of our large test file.
+func bigData(t testing.TB) []byte {
+ bigDataOnce.Do(func() {
+ bigDataBytes, bigDataErr = os.ReadFile("../../testdata/Isaac.Newton-Opticks.txt")
+ })
+ if bigDataErr != nil {
+ t.Fatal(bigDataErr)
+ }
+ return bigDataBytes
+}
+
+var (
+ zstdBigOnce sync.Once
+ zstdBigBytes []byte
+ zstdBigSkip bool
+ zstdBigErr error
+)
+
+// zstdBigData returns the compressed contents of our large test file.
+// This will only run on Unix systems with zstd installed.
+// That's OK as the package is GOOS-independent.
+func zstdBigData(t testing.TB) []byte {
+ input := bigData(t)
+
+ zstdBigOnce.Do(func() {
+ if _, err := os.Stat("/usr/bin/zstd"); err != nil {
+ zstdBigSkip = true
+ return
+ }
+
+ cmd := exec.Command("/usr/bin/zstd", "-z")
+ cmd.Stdin = bytes.NewReader(input)
+ var compressed bytes.Buffer
+ cmd.Stdout = &compressed
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ zstdBigErr = fmt.Errorf("running zstd failed: %v", err)
+ return
+ }
+
+ zstdBigBytes = compressed.Bytes()
+ })
+ if zstdBigSkip {
+ t.Skip("skipping because /usr/bin/zstd does not exist")
+ }
+ if zstdBigErr != nil {
+ t.Fatal(zstdBigErr)
+ }
+ return zstdBigBytes
+}
+
+// Test decompressing a large file. We don't have a compressor,
+// so this test only runs on systems with zstd installed.
+func TestLarge(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping expensive test in short mode")
+ }
+
+ data := bigData(t)
+ compressed := zstdBigData(t)
+
+ t.Logf("/usr/bin/zstd compressed %d bytes to %d", len(data), len(compressed))
+
+ r := NewReader(bytes.NewReader(compressed))
+ got, err := io.ReadAll(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(got, data) {
+ showDiffs(t, got, data)
+ }
+}
+
+// showDiffs reports the first few differences in two []byte.
+func showDiffs(t *testing.T, got, want []byte) {
+ t.Error("data mismatch")
+ if len(got) != len(want) {
+ t.Errorf("got data length %d, want %d", len(got), len(want))
+ }
+ diffs := 0
+ for i, b := range got {
+ if i >= len(want) {
+ break
+ }
+ if b != want[i] {
+ diffs++
+ if diffs > 20 {
+ break
+ }
+ t.Logf("%d: %#x != %#x", i, b, want[i])
+ }
+ }
+}
+
+func TestAlloc(t *testing.T) {
+ testenv.SkipIfOptimizationOff(t)
+ if race.Enabled {
+ t.Skip("skipping allocation test under race detector")
+ }
+
+ compressed := zstdBigData(t)
+ input := bytes.NewReader(compressed)
+ r := NewReader(input)
+ c := testing.AllocsPerRun(10, func() {
+ input.Reset(compressed)
+ r.Reset(input)
+ io.Copy(io.Discard, r)
+ })
+ if c != 0 {
+ t.Errorf("got %v allocs, want 0", c)
+ }
+}
+
+func BenchmarkLarge(b *testing.B) {
+ b.StopTimer()
+ b.ReportAllocs()
+
+ compressed := zstdBigData(b)
+
+ b.SetBytes(int64(len(compressed)))
+
+ input := bytes.NewReader(compressed)
+ r := NewReader(input)
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ input.Reset(compressed)
+ r.Reset(input)
+ io.Copy(io.Discard, r)
+ }
+}